repo_name | path | copies | size | content | license
---|---|---|---|---|---|
huawei-cloud/compass-core
|
compass/tests/config_management/installers/test_os_installer.py
|
4
|
1069
|
import unittest2
from compass.config_management.installers import os_installer
class DummyInstaller(os_installer.Installer):
NAME = 'dummy'
def __init__(self):
pass
class Dummy2Installer(os_installer.Installer):
NAME = 'dummy'
def __init__(self):
pass
class TestInstallerFunctions(unittest2.TestCase):
def setUp(self):
os_installer.INSTALLERS = {}
def tearDown(self):
os_installer.INSTALLERS = {}
def test_found_installer(self):
os_installer.register(DummyInstaller)
installer = os_installer.get_installer_by_name(DummyInstaller.NAME)
self.assertIsInstance(installer, DummyInstaller)
def test_notfound_unregistered_installer(self):
self.assertRaises(KeyError, os_installer.get_installer_by_name,
DummyInstaller.NAME)
def test_multi_registered_installer(self):
os_installer.register(DummyInstaller)
self.assertRaises(KeyError, os_installer.register, Dummy2Installer)
if __name__ == '__main__':
unittest2.main()
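The tests above pin down the registry contract of os_installer without showing it: a module-level INSTALLERS mapping keyed by NAME, a register() that rejects duplicate names, and a get_installer_by_name() that raises KeyError for unknown names. A minimal sketch consistent with these assertions (an illustration, not the actual compass source):

INSTALLERS = {}

def register(installer_cls):
    # A second installer with the same NAME is rejected
    # (see test_multi_registered_installer).
    if installer_cls.NAME in INSTALLERS:
        raise KeyError('installer %s already registered' % installer_cls.NAME)
    INSTALLERS[installer_cls.NAME] = installer_cls

def get_installer_by_name(name):
    # Unregistered names raise KeyError
    # (see test_notfound_unregistered_installer).
    return INSTALLERS[name]()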
|
apache-2.0
|
akosyakov/intellij-community
|
python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/main.py
|
250
|
11605
|
"""
Main program for 2to3.
"""
from __future__ import with_statement
import sys
import os
import difflib
import logging
import shutil
import optparse
from . import refactor
def diff_texts(a, b, filename):
"""Return a unified diff of two strings."""
a = a.splitlines()
b = b.splitlines()
return difflib.unified_diff(a, b, filename, filename,
"(original)", "(refactored)",
lineterm="")
class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
"""
A refactoring tool that can avoid overwriting its input files.
Prints output to stdout.
Output files can optionally be written to a different directory and/or
have an extra file suffix appended to their name for use in situations
where you do not want to replace the input files.
"""
def __init__(self, fixers, options, explicit, nobackups, show_diffs,
input_base_dir='', output_dir='', append_suffix=''):
"""
Args:
fixers: A list of fixers to import.
options: A dict with RefactoringTool configuration.
explicit: A list of fixers to run even if they are marked as explicit (not run by default).
nobackups: If true no backup '.bak' files will be created for those
files that are being refactored.
show_diffs: Should diffs of the refactoring be printed to stdout?
input_base_dir: The base directory for all input files. This class
will strip this path prefix off of filenames before substituting
it with output_dir. Only meaningful if output_dir is supplied.
All files processed by refactor() must start with this path.
output_dir: If supplied, all converted files will be written into
this directory tree instead of input_base_dir.
append_suffix: If supplied, all files output by this tool will have
this appended to their filename. Useful for changing .py to
.py3 for example by passing append_suffix='3'.
"""
self.nobackups = nobackups
self.show_diffs = show_diffs
if input_base_dir and not input_base_dir.endswith(os.sep):
input_base_dir += os.sep
self._input_base_dir = input_base_dir
self._output_dir = output_dir
self._append_suffix = append_suffix
super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
def log_error(self, msg, *args, **kwargs):
self.errors.append((msg, args, kwargs))
self.logger.error(msg, *args, **kwargs)
def write_file(self, new_text, filename, old_text, encoding):
orig_filename = filename
if self._output_dir:
if filename.startswith(self._input_base_dir):
filename = os.path.join(self._output_dir,
filename[len(self._input_base_dir):])
else:
raise ValueError('filename %s does not start with the '
'input_base_dir %s' % (
filename, self._input_base_dir))
if self._append_suffix:
filename += self._append_suffix
if orig_filename != filename:
output_dir = os.path.dirname(filename)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
self.log_message('Writing converted %s to %s.', orig_filename,
filename)
if not self.nobackups:
# Make backup
backup = filename + ".bak"
if os.path.lexists(backup):
try:
os.remove(backup)
except os.error, err:
self.log_message("Can't remove backup %s", backup)
try:
os.rename(filename, backup)
except os.error, err:
self.log_message("Can't rename %s to %s", filename, backup)
# Actually write the new file
write = super(StdoutRefactoringTool, self).write_file
write(new_text, filename, old_text, encoding)
if not self.nobackups:
shutil.copymode(backup, filename)
if orig_filename != filename:
# Preserve the file mode in the new output directory.
shutil.copymode(orig_filename, filename)
def print_output(self, old, new, filename, equal):
if equal:
self.log_message("No changes to %s", filename)
else:
self.log_message("Refactored %s", filename)
if self.show_diffs:
diff_lines = diff_texts(old, new, filename)
try:
if self.output_lock is not None:
with self.output_lock:
for line in diff_lines:
print line
sys.stdout.flush()
else:
for line in diff_lines:
print line
except UnicodeEncodeError:
warn("couldn't encode %s's diff for your terminal" %
(filename,))
return
def warn(msg):
print >> sys.stderr, "WARNING: %s" % (msg,)
def main(fixer_pkg, args=None):
"""Main program.
Args:
fixer_pkg: the name of a package where the fixers are located.
args: optional; a list of command line arguments. If omitted,
sys.argv[1:] is used.
Returns a suggested exit status (0, 1, 2).
"""
# Set up option parser
parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
parser.add_option("-d", "--doctests_only", action="store_true",
help="Fix up doctests only")
parser.add_option("-f", "--fix", action="append", default=[],
help="Each FIX specifies a transformation; default: all")
parser.add_option("-j", "--processes", action="store", default=1,
type="int", help="Run 2to3 concurrently")
parser.add_option("-x", "--nofix", action="append", default=[],
help="Prevent a transformation from being run")
parser.add_option("-l", "--list-fixes", action="store_true",
help="List available transformations")
parser.add_option("-p", "--print-function", action="store_true",
help="Modify the grammar so that print() is a function")
parser.add_option("-v", "--verbose", action="store_true",
help="More verbose logging")
parser.add_option("--no-diffs", action="store_true",
help="Don't show diffs of the refactoring")
parser.add_option("-w", "--write", action="store_true",
help="Write back modified files")
parser.add_option("-n", "--nobackups", action="store_true", default=False,
help="Don't write backups for modified files")
parser.add_option("-o", "--output-dir", action="store", type="str",
default="", help="Put output files in this directory "
"instead of overwriting the input files. Requires -n.")
parser.add_option("-W", "--write-unchanged-files", action="store_true",
help="Also write files even if no changes were required"
" (useful with --output-dir); implies -w.")
parser.add_option("--add-suffix", action="store", type="str", default="",
help="Append this string to all output filenames."
" Requires -n if non-empty. "
"ex: --add-suffix='3' will generate .py3 files.")
# Parse command line arguments
refactor_stdin = False
flags = {}
options, args = parser.parse_args(args)
if options.write_unchanged_files:
flags["write_unchanged_files"] = True
if not options.write:
warn("--write-unchanged-files/-W implies -w.")
options.write = True
# If we allowed these, the original files would be renamed to backup names
# but not replaced.
if options.output_dir and not options.nobackups:
parser.error("Can't use --output-dir/-o without -n.")
if options.add_suffix and not options.nobackups:
parser.error("Can't use --add-suffix without -n.")
if not options.write and options.no_diffs:
warn("not writing files and not printing diffs; that's not very useful")
if not options.write and options.nobackups:
parser.error("Can't use -n without -w")
if options.list_fixes:
print "Available transformations for the -f/--fix option:"
for fixname in refactor.get_all_fix_names(fixer_pkg):
print fixname
if not args:
return 0
if not args:
print >> sys.stderr, "At least one file or directory argument required."
print >> sys.stderr, "Use --help to show usage."
return 2
if "-" in args:
refactor_stdin = True
if options.write:
print >> sys.stderr, "Can't write to stdin."
return 2
if options.print_function:
flags["print_function"] = True
# Set up logging handler
level = logging.DEBUG if options.verbose else logging.INFO
logging.basicConfig(format='%(name)s: %(message)s', level=level)
logger = logging.getLogger('lib2to3.main')
# Initialize the refactoring tool
avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
explicit = set()
if options.fix:
all_present = False
for fix in options.fix:
if fix == "all":
all_present = True
else:
explicit.add(fixer_pkg + ".fix_" + fix)
requested = avail_fixes.union(explicit) if all_present else explicit
else:
requested = avail_fixes.union(explicit)
fixer_names = requested.difference(unwanted_fixes)
input_base_dir = os.path.commonprefix(args)
if (input_base_dir and not input_base_dir.endswith(os.sep)
and not os.path.isdir(input_base_dir)):
# One or more similar names were passed, their directory is the base.
# os.path.commonprefix() is ignorant of path elements, this corrects
# for that weird API.
input_base_dir = os.path.dirname(input_base_dir)
if options.output_dir:
input_base_dir = input_base_dir.rstrip(os.sep)
logger.info('Output in %r will mirror the input directory %r layout.',
options.output_dir, input_base_dir)
rt = StdoutRefactoringTool(
sorted(fixer_names), flags, sorted(explicit),
options.nobackups, not options.no_diffs,
input_base_dir=input_base_dir,
output_dir=options.output_dir,
append_suffix=options.add_suffix)
# Refactor all files and directories passed as arguments
if not rt.errors:
if refactor_stdin:
rt.refactor_stdin()
else:
try:
rt.refactor(args, options.write, options.doctests_only,
options.processes)
except refactor.MultiprocessingUnsupported:
assert options.processes > 1
print >> sys.stderr, "Sorry, -j isn't " \
"supported on this platform."
return 1
rt.summarize()
# Return error status (0 if rt.errors is zero)
return int(bool(rt.errors))
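main() is designed to be reused: the standard 2to3 launcher script is essentially a thin wrapper that supplies the fixer package and exits with the suggested status:

#!/usr/bin/env python
import sys
from lib2to3.main import main

# Run the standard fixers from lib2to3.fixes over the command-line arguments.
sys.exit(main("lib2to3.fixes"))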
|
apache-2.0
|
ckuethe/gr-nmea
|
docs/doxygen/swig_doc.py
|
220
|
8657
|
#
# Copyright 2010,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Creates the swig_doc.i SWIG interface file.
Execute using: python swig_doc.py xml_path outputfilename
The file instructs SWIG to transfer the doxygen comments into the
python docstrings.
"""
import sys
try:
from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base
except ImportError:
from gnuradio.doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base
def py_name(name):
bits = name.split('_')
return '_'.join(bits[1:])
def make_name(name):
bits = name.split('_')
return bits[0] + '_make_' + '_'.join(bits[1:])
class Block(object):
"""
Checks whether doxyxml-produced objects correspond to a gnuradio block.
"""
@classmethod
def includes(cls, item):
if not isinstance(item, DoxyClass):
return False
# Check for a parsing error.
if item.error():
return False
return item.has_member(make_name(item.name()), DoxyFriend)
def utoascii(text):
"""
Convert unicode text into ascii and escape quotes.
"""
if text is None:
return ''
out = text.encode('ascii', 'replace')
out = out.replace('"', '\\"')
return out
def combine_descriptions(obj):
"""
Combines the brief and detailed descriptions of an object together.
"""
description = []
bd = obj.brief_description.strip()
dd = obj.detailed_description.strip()
if bd:
description.append(bd)
if dd:
description.append(dd)
return utoascii('\n\n'.join(description)).strip()
entry_templ = '%feature("docstring") {name} "{docstring}"'
def make_entry(obj, name=None, templ="{description}", description=None):
"""
Create a docstring entry for a swig interface file.
obj - a doxyxml object from which documentation will be extracted.
name - the name of the C object (defaults to obj.name())
templ - an optional template for the docstring containing only one
variable named 'description'.
description - if this optional variable is set then its value is
used as the description instead of extracting it from obj.
"""
if name is None:
name=obj.name()
if "operator " in name:
return ''
if description is None:
description = combine_descriptions(obj)
docstring = templ.format(description=description)
if not docstring:
return ''
return entry_templ.format(
name=name,
docstring=docstring,
)
def make_func_entry(func, name=None, description=None, params=None):
"""
Create a function docstring entry for a swig interface file.
func - a doxyxml object from which documentation will be extracted.
name - the name of the C object (defaults to func.name())
description - if this optional variable is set then its value is
used as the description instead of extracting it from func.
params - a parameter list that overrides using func.params.
"""
if params is None:
params = func.params
params = [prm.declname for prm in params]
if params:
sig = "Params: (%s)" % ", ".join(params)
else:
sig = "Params: (NONE)"
templ = "{description}\n\n" + sig
return make_entry(func, name=name, templ=utoascii(templ),
description=description)
def make_class_entry(klass, description=None):
"""
Create a class docstring for a swig interface file.
"""
output = []
output.append(make_entry(klass, description=description))
for func in klass.in_category(DoxyFunction):
name = klass.name() + '::' + func.name()
output.append(make_func_entry(func, name=name))
return "\n\n".join(output)
def make_block_entry(di, block):
"""
Create class and function docstrings of a gnuradio block for a
swig interface file.
"""
descriptions = []
# Get the documentation associated with the class.
class_desc = combine_descriptions(block)
if class_desc:
descriptions.append(class_desc)
# Get the documentation associated with the make function
make_func = di.get_member(make_name(block.name()), DoxyFunction)
make_func_desc = combine_descriptions(make_func)
if make_func_desc:
descriptions.append(make_func_desc)
# Get the documentation associated with the file
try:
block_file = di.get_member(block.name() + ".h", DoxyFile)
file_desc = combine_descriptions(block_file)
if file_desc:
descriptions.append(file_desc)
except base.Base.NoSuchMember:
# Don't worry if we can't find a matching file.
pass
# And join them all together to make a super duper description.
super_description = "\n\n".join(descriptions)
# Associate the combined description with the class and
# the make function.
output = []
output.append(make_class_entry(block, description=super_description))
creator = block.get_member(block.name(), DoxyFunction)
output.append(make_func_entry(make_func, description=super_description,
params=creator.params))
return "\n\n".join(output)
def make_swig_interface_file(di, swigdocfilename, custom_output=None):
output = ["""
/*
* This file was automatically generated using swig_doc.py.
*
* Any changes to it will be lost next time it is regenerated.
*/
"""]
if custom_output is not None:
output.append(custom_output)
# Create docstrings for the blocks.
blocks = di.in_category(Block)
make_funcs = set([])
for block in blocks:
try:
make_func = di.get_member(make_name(block.name()), DoxyFunction)
make_funcs.add(make_func.name())
output.append(make_block_entry(di, block))
except block.ParsingError:
print('Parsing error for block %s' % block.name())
# Create docstrings for functions
# Don't include the make functions since they have already been dealt with.
funcs = [f for f in di.in_category(DoxyFunction) if f.name() not in make_funcs]
for f in funcs:
try:
output.append(make_func_entry(f))
except f.ParsingError:
print('Parsing error for function %s' % f.name())
# Create docstrings for classes
block_names = [block.name() for block in blocks]
klasses = [k for k in di.in_category(DoxyClass) if k.name() not in block_names]
for k in klasses:
try:
output.append(make_class_entry(k))
except k.ParsingError:
print('Parsing error for class %s' % k.name())
# Docstrings are not created for anything that is not a function or a class.
# If this excludes anything important please add it here.
output = "\n\n".join(output)
swig_doc = file(swigdocfilename, 'w')
swig_doc.write(output)
swig_doc.close()
if __name__ == "__main__":
# Parse command line options and set up doxyxml.
err_msg = "Execute using: python swig_doc.py xml_path outputfilename"
if len(sys.argv) != 3:
raise StandardError(err_msg)
xml_path = sys.argv[1]
swigdocfilename = sys.argv[2]
di = DoxyIndex(xml_path)
# gnuradio.gr.msg_queue.insert_tail and delete_head create errors unless docstrings are defined!
# This is presumably a bug in SWIG.
#msg_q = di.get_member(u'gr_msg_queue', DoxyClass)
#insert_tail = msg_q.get_member(u'insert_tail', DoxyFunction)
#delete_head = msg_q.get_member(u'delete_head', DoxyFunction)
output = []
#output.append(make_func_entry(insert_tail, name='gr_py_msg_queue__insert_tail'))
#output.append(make_func_entry(delete_head, name='gr_py_msg_queue__delete_head'))
custom_output = "\n\n".join(output)
# Generate the docstrings interface file.
make_swig_interface_file(di, swigdocfilename, custom_output=custom_output)
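Each entry the script writes is entry_templ filled in with a C name and the combined description, producing a SWIG %feature directive. A quick illustration with hypothetical values (the real name and docstring come from the doxyxml objects):

# Hypothetical name/docstring, for illustration only.
example = entry_templ.format(
    name='gr_foo',
    docstring='Brief description.\n\nParams: (param1, param2)',
)
# example is now:
# %feature("docstring") gr_foo "Brief description.
#
# Params: (param1, param2)"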
|
gpl-3.0
|
RalfJung/lilass
|
qt_frontend.py
|
1
|
7267
|
# DSL - easy Display Setup for Laptops
# Copyright (C) 2012-2015 Ralf Jung <post@ralfj.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys, os
from screen import RelativeScreenPosition, ScreenSetup
try:
# Be fine with PyQt5 not being installed
from PyQt5 import QtCore, QtWidgets, uic
class PositionSelection(QtWidgets.QDialog):
def __init__(self, situation):
# set up main window
super(PositionSelection, self).__init__()
self._situation = situation
uifile = os.path.join(os.path.dirname(__file__), 'qt_dialogue.ui')
uic.loadUi(uifile, self)
# fill relative position box
for pos in RelativeScreenPosition:
self.relPos.addItem(pos.text, pos)
# keep resolutions in sync when in mirror mode
def syncIfMirror(source, target):
def _slot(idx):
if self.isMirror:
target.setCurrentIndex(idx)
source.currentIndexChanged.connect(_slot)
syncIfMirror(self.intRes, self.extRes)
syncIfMirror(self.extRes, self.intRes)
# if situation has a previousSetup, use its values as initial state
if situation.previousSetup:
p = situation.previousSetup
self.intEnabled.setChecked(p.intResolution is not None)
self.extEnabled.setChecked(p.extResolution is not None)
if p.relPosition:
self.relPos.setCurrentIndex(p.relPosition.value - 1)
if p.extIsPrimary:
self.extPrimary.setChecked(True)
else:
self.intPrimary.setChecked(True)
# Pre-select the previous resolution
self._intDefaultRes = p.intResolution
self._extDefaultRes = p.extResolution
self._mirrorDefaultRes = p.intResolution if p.relPosition == RelativeScreenPosition.MIRROR else None # in case of a mirror, they would be the same anyway
else:
self._intDefaultRes = situation.internalConnector.getPreferredResolution()
self._extDefaultRes = situation.externalConnector.getPreferredResolution()
self._mirrorDefaultRes = None
# connect the update function
self.intEnabled.toggled.connect(self.updateEnabledControls)
self.extEnabled.toggled.connect(self.updateEnabledControls)
self.relPos.currentIndexChanged.connect(self.updateEnabledControls)
# make sure we are in a correct state
self.updateEnabledControls()
def getRelativeScreenPosition(self):
idx = self.relPos.currentIndex()
return self.relPos.itemData(idx)
def fillResolutionBox(self, box, resolutions, select = None):
# if the count did not change, update in-place (this avoids flicker)
if box.count() == len(resolutions):
for idx, res in enumerate(resolutions):
box.setItemText(idx, str(res))
box.setItemData(idx, res)
if res == select:
box.setCurrentIndex(idx)
else:
# first clear it
while box.count() > 0:
box.removeItem(0)
# then fill it
for res in resolutions:
box.addItem(str(res), res)
if res == select:
box.setCurrentIndex(box.count() - 1) # select the most recently added one
def updateEnabledControls(self):
intEnabled = self.intEnabled.isChecked()
extEnabled = self.extEnabled.isChecked()
bothEnabled = intEnabled and extEnabled
self.isMirror = bothEnabled and self.getRelativeScreenPosition() == RelativeScreenPosition.MIRROR # only if both are enabled, we can really mirror
# configure screen controls
self.intRes.setEnabled(intEnabled)
self.intPrimary.setEnabled(intEnabled and not self.isMirror)
self.extRes.setEnabled(extEnabled)
self.extPrimary.setEnabled(extEnabled and not self.isMirror)
if not intEnabled and extEnabled:
self.extPrimary.setChecked(True)
elif not extEnabled and intEnabled:
self.intPrimary.setChecked(True)
# which resolutions do we offer?
if self.isMirror:
commonRes = self._situation.commonResolutions()
self.fillResolutionBox(self.intRes, commonRes, select = self._mirrorDefaultRes)
self.fillResolutionBox(self.extRes, commonRes, select = self._mirrorDefaultRes)
self.intRes.setCurrentIndex(self.extRes.currentIndex())
else:
self.fillResolutionBox(self.intRes, self._situation.internalConnector.getResolutionList(), select = self._intDefaultRes)
self.fillResolutionBox(self.extRes, self._situation.externalConnector.getResolutionList(), select = self._extDefaultRes)
# configure position control
self.posGroup.setEnabled(bothEnabled)
self.posLabel1.setEnabled(bothEnabled)
self.posLabel2.setEnabled(bothEnabled)
self.relPos.setEnabled(bothEnabled)
# avoid having no screen
self.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).setEnabled(intEnabled or extEnabled)
def run(self):
self.exec_()
if not self.result(): return None
intRes = self.intRes.itemData(self.intRes.currentIndex()) if self.intEnabled.isChecked() else None
extRes = self.extRes.itemData(self.extRes.currentIndex()) if self.extEnabled.isChecked() else None
return ScreenSetup(intRes, extRes, self.getRelativeScreenPosition(), self.extPrimary.isChecked())
except ImportError:
pass
# Qt frontend
class QtFrontend:
def __init__(self):
from PyQt5 import QtWidgets
self.app = QtWidgets.QApplication(sys.argv)
print("Qt loaded")
def error(self, message):
from PyQt5 import QtWidgets
QtWidgets.QMessageBox.critical(None, 'Fatal error', message)
def setup(self, situation):
return PositionSelection(situation).run()
@staticmethod
def isAvailable():
try:
import PyQt5
return True
except ImportError:
return False
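A typical call sequence for this frontend, assuming a situation object as used above (connectors with resolution lists, and optionally a previousSetup):

if QtFrontend.isAvailable():
    frontend = QtFrontend()            # creates the QApplication
    setup = frontend.setup(situation)  # shows the dialogue
    if setup is None:
        pass  # dialogue was cancelled; run() returned None
    # otherwise setup is a ScreenSetup describing the chosen configuration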
|
gpl-2.0
|
RulersOfAsgard/ALAMO-worker
|
alamo_worker/plugins/tests/test_evaluate.py
|
1
|
14081
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from unittest.case import TestCase
from unittest import mock
from alamo_common.test.utils import override_settings
from alamo_worker.plugins.druid import DruidResult
from alamo_worker.plugins.evaluate import ResultEvaluator
from alamo_worker.plugins.http_check import HttpResult
from alamo_worker.plugins.graphite import GraphiteResult
from alamo_worker.plugins.prometheus import PrometheusResult
from alamo_worker.plugins.mixins import (
RESULT_OK,
RESULT_FAILED,
RESULT_UNKNOWN,
)
class TestResultEvaluator(TestCase):
@staticmethod
def get_payload(rule):
return {
'id': 2206299,
'triggers': [
{
'rule': rule,
'url': 'http://some.url',
}],
}
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluation_for_unknown_status(self, stats):
rule = 'check_name.values < 200'
plugin_results = [
# unknown status
GraphiteResult('check_name', {}, env='test')
]
evaluator = ResultEvaluator()
payload = self.get_payload(rule)
payload['unknown_as_failure'] = True
result = evaluator.evaluate(payload, plugin_results)
expected = {
'status': RESULT_FAILED,
'message': ('Metric not found in `check_name` source, '
'check your query'),
'hysteresis': RESULT_OK
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_passing_rule(self, stats):
rule = 'check_name.status_code != 200'
plugin_results = [
HttpResult('check_name', 200)
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_failing_rule(self, stats):
rule = 'check_name.status_code != 200'
plugin_results = [
HttpResult('check_name', 404)
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
msg = 'Check failed for rule `{}` which evaluates to `True`'.format(
rule)
expected = {
'status': RESULT_FAILED,
'message': msg,
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_multiple_successful_results(self, stats):
rule = 'check1.status_code != 200 AND check2.status_code != 200'
plugin_results = [
HttpResult('check1', 200, status=RESULT_UNKNOWN),
HttpResult('check2', 200, status=RESULT_OK),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_UNKNOWN,
'message': '',
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@override_settings(FAIL_ON_UNKNOWN=False)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_multiple_failed_results(self, stats):
rule = 'check1.status_code != 200 AND check2.status_code != 200'
plugin_results = [
HttpResult('check1', 404, status=RESULT_UNKNOWN),
HttpResult('check2', 404, status=RESULT_OK),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
msg = 'Check failed for rule `check1.status_code != 200` '\
'which evaluates to `True`, Check failed for rule '\
'`check2.status_code != 200` which evaluates to `True`'
expected = {
'status': RESULT_FAILED,
'message': msg,
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@override_settings(FAIL_ON_UNKNOWN=True)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_multiple_failed_results_with_strict_fail(self, stats):
rule = 'check1.status_code != 200 AND check2.status_code != 200'
plugin_results = [
HttpResult('check1', 404, status=RESULT_UNKNOWN),
HttpResult('check2', 404, status=RESULT_OK),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_UNKNOWN,
'hysteresis': 0,
'message': '',
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_multiple_different_results(self, stats):
rule = 'check1.status_code != 200 AND check2.status_code != 200'
plugin_results = [
HttpResult('check1', 404, status=RESULT_OK),
HttpResult('check2', 200, status=RESULT_OK),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
@mock.patch.object(GraphiteResult, 'build_meta')
def test_evaluate_multiple_sources_with_diff_statuses_and_one_is_unknown(
self, *args
):
plugin_results = [
GraphiteResult('check_first', {'a': [5]}, ),
# this result is unknown
GraphiteResult('check_second', {}),
]
payload = {
'id': 999,
'triggers': [
{'rule': 'check_first.values < 1'},
{'rule': 'check_second.values < 200'},
]
}
result = ResultEvaluator().evaluate(payload, plugin_results)
expected = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK,
}
self.assertEqual(result['triggers'][0]['result'], expected)
self.assertEqual(result['triggers'][1]['result'], {
'status': RESULT_UNKNOWN,
'message': ('Metric not found in `check_second` source, '
'check your query'),
'hysteresis': RESULT_OK,
})
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
@mock.patch.object(GraphiteResult, 'build_meta')
def test_evaluation_build_proper_target_for_trigger(self, *args):
plugin_results = [
GraphiteResult('foo', {'foo': [1, 2, 3]}, metric='stats.test.foo'),
GraphiteResult('bar', {'bar': [1, 2, 3]}, metric='stats.test.bar'),
GraphiteResult('baz', {'baz': [1, 2, 3]}, metric='stats.test.baz'),
]
trigger = {'rule': 'foo.values > 10 OR bar.values > 10'}
payload = {
'id': 999,
'triggers': [trigger]
}
ResultEvaluator().evaluate(payload, plugin_results)
expected = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK
}
self.assertEqual(trigger['result'], expected)
self.assertEqual(trigger['target'], 'stats.test.bar,stats.test.foo')
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_invalid_rule(self, stats):
rule = 'cat dog'
plugin_results = [
HttpResult('check', 200),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_UNKNOWN,
'message': '',
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
stats.increment.incr.assert_called_once_with(
'manager.warnings.invalid_rule')
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_failed_plugin_result(self, stats):
rule = 'check.status_code != 200'
content = 'some content'
plugin_results = [
HttpResult('check', 200, status=RESULT_FAILED,
message='some message', content=content),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_FAILED,
'message': 'some message',
'hysteresis': RESULT_OK,
'http_response_body': content
}
expected_meta = {
'links': {
'trigger_url': {'href': 'http://some.url', 'type': 'link'}
},
}
self.assertEqual(result['triggers'][0]['result'], expected)
self.assertEqual(result['triggers'][0]['meta'], expected_meta)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_unknown_plugin_result(self, stats):
rule = 'druid.histogram_90 > 70000'
plugin_results = [
DruidResult('druid', [], status=RESULT_UNKNOWN,
message='Unknown Message'),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_UNKNOWN,
'message': 'Unknown Message',
'hysteresis': RESULT_OK
}
self.assertEqual(
result['triggers'][0]['result'], expected
)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
@mock.patch.object(GraphiteResult, 'build_meta')
def test_evaluate_successful_graphite_result(self, mock_build_meta, stats):
rule = 'check.values > 6'
plugin_results = [
GraphiteResult('check', {'bunny.a': [1, 2, 3], 'bunny.b': [4, 5]}),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected_result = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK
}
expected_meta = {
'links': {
'trigger_url': {'href': 'http://some.url', 'type': 'link'}
}
}
self.assertEqual(result['triggers'][0]['result'], expected_result)
self.assertEqual(result['triggers'][0]['meta'], expected_meta)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
@mock.patch.object(GraphiteResult, 'build_meta')
def test_evaluate_failed_graphite_result(self, mock_build_meta, stats):
rule = 'check.values > 2'
plugin_results = [
GraphiteResult(
'check',
OrderedDict([('bunny.a', [1, 2, 3]), ('bunny.b', [4, 5])])
),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected_result = {
'status': RESULT_FAILED,
'message': 'Check failed for rule `check.values > 2` which '
"evaluates to `{'bunny.a': [3], 'bunny.b': [4, 5]}`",
'hysteresis': RESULT_OK
}
expected_meta = {
'failed_metrics': {'bunny.a': [3], 'bunny.b': [4, 5]},
'links': {
'trigger_url': {'href': 'http://some.url', 'type': 'link'}
}
}
self.assertEqual(result['triggers'][0]['result'], expected_result)
self.assertEqual(result['triggers'][0]['meta'], expected_meta)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_successful_prometheus_result(self, stats):
rule = 'check.values > 2'
plugin_results = [
PrometheusResult(
'check',
{(('__name__', 'metric1'),): [0, 1]},
RESULT_OK, '', 2
),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_failed_prometheus_result(self, stats):
rule = 'check.values > 2'
plugin_results = [
PrometheusResult(
'check',
{(('__name__', 'metric1'),): [1, 2, 3]},
RESULT_OK, '', 2
),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected_result = {
'status': RESULT_FAILED,
'message': (
'Check failed for rule `check.values > 2` '
'which evaluates to `{((\'__name__\', \'metric1\'),): [3]}`'
),
'hysteresis': RESULT_OK
}
expected_meta = {
'failed_metrics': {"(('__name__', 'metric1'),)": [3]},
'links': {
'trigger_url': {'href': 'http://some.url', 'type': 'link'}
}
}
self.assertEqual(result['triggers'][0]['result'], expected_result)
self.assertEqual(result['triggers'][0]['meta'], expected_meta)
|
apache-2.0
|
UT-Austin-FIS/django-coverage
|
django_coverage/settings.py
|
6
|
4126
|
"""
Copyright 2009 55 Minutes (http://www.55minutes.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Changed by Mikhail Korobov.
"""
from django.conf import settings
# Specify the coverage test runner
COVERAGE_TEST_RUNNER = getattr(settings, 'COVERAGE_TEST_RUNNER',
'django_coverage.coverage_runner.CoverageRunner')
# Specify whether coverage data file is created or not.
COVERAGE_USE_CACHE = getattr(settings, 'COVERAGE_USE_CACHE', False)
# Specify regular expressions of code blocks the coverage analyzer should
# ignore as statements (e.g. ``raise NotImplementedError``).
# These statements are not figured in as part of the coverage statistics.
# This setting is optional.
COVERAGE_CODE_EXCLUDES = getattr(settings, 'COVERAGE_CODE_EXCLUDES',[
'def __unicode__\(self\):',
'def get_absolute_url\(self\):',
'from .* import .*', 'import .*',
])
# Specify a list of regular expressions of paths to exclude from
# coverage analysis.
# Note these paths are ignored by the module introspection tool and take
# precedence over any package/module settings such as:
# TODO: THE SETTING FOR MODULES
# Use this to exclude subdirectories like ``r'.svn'``, for example.
# This setting is optional.
COVERAGE_PATH_EXCLUDES = getattr(settings, 'COVERAGE_PATH_EXCLUDES',
[r'.svn'])
# Specify a list of additional module paths to include
# in the coverage analysis. By default, only modules within installed
# apps are reported. If you have utility modules outside of the app
# structure, you can include them here.
# Note the entries in this list are *NOT* regular expressions, so you have to
# be explicit: use 'myproject.utils', not 'utils$'.
# This setting is optional.
COVERAGE_ADDITIONAL_MODULES = getattr(settings, 'COVERAGE_ADDITIONAL_MODULES', [])
# Specify a list of regular expressions of module paths to exclude
# from the coverage analysis. Examples are ``'tests$'`` and ``'urls$'``.
# This setting is optional.
COVERAGE_MODULE_EXCLUDES = getattr(settings, 'COVERAGE_MODULE_EXCLUDES',
['tests$', 'settings$', 'urls$', 'locale$',
'common.views.test', '__init__', 'django',
'migrations'])
# Specify the directory where you would like the coverage report to create
# the HTML files.
# You'll need to make sure this directory exists and is writable by the
# user account running the test.
# You should probably set this one explicitly in your own settings file.
#COVERAGE_REPORT_HTML_OUTPUT_DIR = '/my_home/test_html'
COVERAGE_REPORT_HTML_OUTPUT_DIR = getattr(settings,
'COVERAGE_REPORT_HTML_OUTPUT_DIR',
None)
# True => html reports by 55minutes
# False => html reports by coverage.py
COVERAGE_CUSTOM_REPORTS = getattr(settings, 'COVERAGE_CUSTOM_REPORTS', True)
# True => Always output coverage reports to STDOUT.
# False => Don't output coverage reports to STDOUT.
# (not set) => output coverage reports to STDOUT only if COVERAGE_REPORT_HTML_OUTPUT_DIR is not set or None.
#
# This makes it possible to both generate HTML reports and see coverage
# information on STDOUT.
COVERAGE_USE_STDOUT = getattr(settings, 'COVERAGE_USE_STDOUT', COVERAGE_REPORT_HTML_OUTPUT_DIR is None)
# The name of the folder within utils/coverage_report/badges/ that
# contains the badges we want to use.
COVERAGE_BADGE_TYPE = getattr(settings, 'COVERAGE_BADGE_TYPE', 'drone.io')
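Since every option above is read with getattr(settings, ...), a project overrides them from its own Django settings module. A plausible configuration (values are illustrative):

# In the project's settings.py
TEST_RUNNER = 'django_coverage.coverage_runner.CoverageRunner'
COVERAGE_REPORT_HTML_OUTPUT_DIR = '/my_home/test_html'  # must exist and be writable
COVERAGE_MODULE_EXCLUDES = ['tests$', 'settings$', 'urls$', 'migrations']
COVERAGE_USE_STDOUT = True  # also print a report when HTML output is enabled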
|
apache-2.0
|
mollstam/UnrealPy
|
UnrealPyEmbed/Source/Python/Lib/python27/plat-mac/lib-scriptpackages/CodeWarrior/__init__.py
|
73
|
5499
|
"""
Package generated from /Volumes/Sap/Applications (Mac OS 9)/Metrowerks CodeWarrior 7.0/Metrowerks CodeWarrior/CodeWarrior IDE 4.2.5
"""
from warnings import warnpy3k
warnpy3k("In 3.x, the CodeWarrior package is removed.", stacklevel=2)
import aetools
Error = aetools.Error
import CodeWarrior_suite
import Standard_Suite
import Metrowerks_Shell_Suite
import Required
_code_to_module = {
'CWIE' : CodeWarrior_suite,
'CoRe' : Standard_Suite,
'MMPR' : Metrowerks_Shell_Suite,
'reqd' : Required,
}
_code_to_fullname = {
'CWIE' : ('CodeWarrior.CodeWarrior_suite', 'CodeWarrior_suite'),
'CoRe' : ('CodeWarrior.Standard_Suite', 'Standard_Suite'),
'MMPR' : ('CodeWarrior.Metrowerks_Shell_Suite', 'Metrowerks_Shell_Suite'),
'reqd' : ('CodeWarrior.Required', 'Required'),
}
from CodeWarrior_suite import *
from Standard_Suite import *
from Metrowerks_Shell_Suite import *
from Required import *
def getbaseclasses(v):
if not getattr(v, '_propdict', None):
v._propdict = {}
v._elemdict = {}
for superclassname in getattr(v, '_superclassnames', []):
superclass = eval(superclassname)
getbaseclasses(superclass)
v._propdict.update(getattr(superclass, '_propdict', {}))
v._elemdict.update(getattr(superclass, '_elemdict', {}))
v._propdict.update(getattr(v, '_privpropdict', {}))
v._elemdict.update(getattr(v, '_privelemdict', {}))
import StdSuites
#
# Set property and element dictionaries now that all classes have been defined
#
getbaseclasses(character)
getbaseclasses(selection_2d_object)
getbaseclasses(application)
getbaseclasses(document)
getbaseclasses(text)
getbaseclasses(window)
getbaseclasses(file)
getbaseclasses(line)
getbaseclasses(insertion_point)
getbaseclasses(single_class_browser)
getbaseclasses(project_document)
getbaseclasses(symbol_browser)
getbaseclasses(editor_document)
getbaseclasses(file_compare_document)
getbaseclasses(class_browser)
getbaseclasses(subtarget)
getbaseclasses(message_document)
getbaseclasses(project_inspector)
getbaseclasses(text_document)
getbaseclasses(catalog_document)
getbaseclasses(class_hierarchy)
getbaseclasses(target)
getbaseclasses(build_progress_document)
getbaseclasses(target_file)
getbaseclasses(ToolServer_worksheet)
getbaseclasses(single_class_hierarchy)
getbaseclasses(File_Mapping)
getbaseclasses(browser_catalog)
getbaseclasses(Build_Settings)
getbaseclasses(ProjectFile)
getbaseclasses(VCS_Setup)
getbaseclasses(data_member)
getbaseclasses(Shielded_Folder)
getbaseclasses(Custom_Keywords)
getbaseclasses(Path_Information)
getbaseclasses(Segment)
getbaseclasses(Source_Tree)
getbaseclasses(Access_Paths)
getbaseclasses(Debugger_Windowing)
getbaseclasses(Relative_Path)
getbaseclasses(Environment_Variable)
getbaseclasses(base_class)
getbaseclasses(Debugger_Display)
getbaseclasses(Build_Extras)
getbaseclasses(Error_Information)
getbaseclasses(Editor)
getbaseclasses(Shielded_Folders)
getbaseclasses(Extras)
getbaseclasses(File_Mappings)
getbaseclasses(Function_Information)
getbaseclasses(Debugger_Target)
getbaseclasses(Syntax_Coloring)
getbaseclasses(class_)
getbaseclasses(Global_Source_Trees)
getbaseclasses(Target_Settings)
getbaseclasses(Debugger_Global)
getbaseclasses(member_function)
getbaseclasses(Runtime_Settings)
getbaseclasses(Plugin_Settings)
getbaseclasses(Browser_Coloring)
getbaseclasses(Font)
getbaseclasses(Target_Source_Trees)
#
# Indices of types declared in this module
#
_classdeclarations = {
'cha ' : character,
'csel' : selection_2d_object,
'capp' : application,
'docu' : document,
'ctxt' : text,
'cwin' : window,
'file' : file,
'clin' : line,
'cins' : insertion_point,
'1BRW' : single_class_browser,
'PRJD' : project_document,
'SYMB' : symbol_browser,
'EDIT' : editor_document,
'COMP' : file_compare_document,
'BROW' : class_browser,
'SBTG' : subtarget,
'MSSG' : message_document,
'INSP' : project_inspector,
'TXTD' : text_document,
'CTLG' : catalog_document,
'HIER' : class_hierarchy,
'TRGT' : target,
'PRGS' : build_progress_document,
'SRCF' : target_file,
'TOOL' : ToolServer_worksheet,
'1HIR' : single_class_hierarchy,
'FMap' : File_Mapping,
'Cata' : browser_catalog,
'BSTG' : Build_Settings,
'SrcF' : ProjectFile,
'VCSs' : VCS_Setup,
'DtMb' : data_member,
'SFit' : Shielded_Folder,
'CUKW' : Custom_Keywords,
'PInf' : Path_Information,
'Seg ' : Segment,
'SrcT' : Source_Tree,
'PATH' : Access_Paths,
'DbWN' : Debugger_Windowing,
'RlPt' : Relative_Path,
'EnvV' : Environment_Variable,
'BsCl' : base_class,
'DbDS' : Debugger_Display,
'LXTR' : Build_Extras,
'ErrM' : Error_Information,
'EDTR' : Editor,
'SHFL' : Shielded_Folders,
'GXTR' : Extras,
'FLMP' : File_Mappings,
'FDef' : Function_Information,
'DbTG' : Debugger_Target,
'SNTX' : Syntax_Coloring,
'Clas' : class_,
'GSTs' : Global_Source_Trees,
'TARG' : Target_Settings,
'DbGL' : Debugger_Global,
'MbFn' : member_function,
'RSTG' : Runtime_Settings,
'PSTG' : Plugin_Settings,
'BRKW' : Browser_Coloring,
'mFNT' : Font,
'TSTs' : Target_Source_Trees,
}
class CodeWarrior(CodeWarrior_suite_Events,
Standard_Suite_Events,
Metrowerks_Shell_Suite_Events,
Required_Events,
aetools.TalkTo):
_signature = 'CWIE'
_moduleName = 'CodeWarrior'
|
mit
|
Lyleo/OmniMarkupPreviewer
|
OmniMarkupLib/Renderers/libs/python3/genshi/input.py
|
2
|
16564
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Support for constructing markup streams from files, strings, or other
sources.
"""
from itertools import chain
import codecs
import html.entities as entities
import html.parser as html
from xml.parsers import expat
from genshi.core import Attrs, QName, Stream, stripentities
from genshi.core import START, END, XML_DECL, DOCTYPE, TEXT, START_NS, \
END_NS, START_CDATA, END_CDATA, PI, COMMENT
from genshi.compat import StringIO, BytesIO
__all__ = ['ET', 'ParseError', 'XMLParser', 'XML', 'HTMLParser', 'HTML']
__docformat__ = 'restructuredtext en'
def ET(element):
"""Convert a given ElementTree element to a markup stream.
:param element: an ElementTree element
:return: a markup stream
"""
tag_name = QName(element.tag.lstrip('{'))
attrs = Attrs([(QName(attr.lstrip('{')), value)
for attr, value in list(element.items())])
yield START, (tag_name, attrs), (None, -1, -1)
if element.text:
yield TEXT, element.text, (None, -1, -1)
for child in element.getchildren():
for item in ET(child):
yield item
yield END, tag_name, (None, -1, -1)
if element.tail:
yield TEXT, element.tail, (None, -1, -1)
class ParseError(Exception):
"""Exception raised when fatal syntax errors are found in the input being
parsed.
"""
def __init__(self, message, filename=None, lineno=-1, offset=-1):
"""Exception initializer.
:param message: the error message from the parser
:param filename: the path to the file that was parsed
:param lineno: the number of the line on which the error was encountered
:param offset: the column number where the error was encountered
"""
self.msg = message
if filename:
message += ', in ' + filename
Exception.__init__(self, message)
self.filename = filename or '<string>'
self.lineno = lineno
self.offset = offset
class XMLParser(object):
"""Generator-based XML parser based on roughly equivalent code in
Kid/ElementTree.
The parsing is initiated by iterating over the parser object:
>>> parser = XMLParser(StringIO('<root id="2"><child>Foo</child></root>'))
>>> for kind, data, pos in parser:
... print(('%s %s' % (kind, data)))
START (QName('root'), Attrs([(QName('id'), '2')]))
START (QName('child'), Attrs())
TEXT Foo
END child
END root
"""
_entitydefs = ['<!ENTITY %s "&#%d;">' % (name, value) for name, value in
list(entities.name2codepoint.items())]
_external_dtd = '\n'.join(_entitydefs).encode('utf-8')
def __init__(self, source, filename=None, encoding=None):
"""Initialize the parser for the given XML input.
:param source: the XML text as a file-like object
:param filename: the name of the file, if appropriate
:param encoding: the encoding of the file; if not specified, the
encoding is assumed to be ASCII, UTF-8, or UTF-16, or
whatever the encoding specified in the XML declaration
(if any)
"""
self.source = source
self.filename = filename
# Setup the Expat parser
parser = expat.ParserCreate(encoding, '}')
parser.buffer_text = True
# Python 3 does not have returns_unicode
if hasattr(parser, 'returns_unicode'):
parser.returns_unicode = True
parser.ordered_attributes = True
parser.StartElementHandler = self._handle_start
parser.EndElementHandler = self._handle_end
parser.CharacterDataHandler = self._handle_data
parser.StartDoctypeDeclHandler = self._handle_doctype
parser.StartNamespaceDeclHandler = self._handle_start_ns
parser.EndNamespaceDeclHandler = self._handle_end_ns
parser.StartCdataSectionHandler = self._handle_start_cdata
parser.EndCdataSectionHandler = self._handle_end_cdata
parser.ProcessingInstructionHandler = self._handle_pi
parser.XmlDeclHandler = self._handle_xml_decl
parser.CommentHandler = self._handle_comment
# Tell Expat that we'll handle non-XML entities ourselves
# (in _handle_other)
parser.DefaultHandler = self._handle_other
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.UseForeignDTD()
parser.ExternalEntityRefHandler = self._build_foreign
self.expat = parser
self._queue = []
def parse(self):
"""Generator that parses the XML source, yielding markup events.
:return: a markup event stream
:raises ParseError: if the XML text is not well formed
"""
def _generate():
try:
bufsize = 4 * 1024 # 4K
done = False
while 1:
while not done and len(self._queue) == 0:
data = self.source.read(bufsize)
if not data: # end of data
if hasattr(self, 'expat'):
self.expat.Parse('', True)
del self.expat # get rid of circular references
done = True
else:
if isinstance(data, str):
data = data.encode('utf-8')
self.expat.Parse(data, False)
for event in self._queue:
yield event
self._queue = []
if done:
break
except expat.ExpatError as e:
msg = str(e)
raise ParseError(msg, self.filename, e.lineno, e.offset)
return Stream(_generate()).filter(_coalesce)
def __iter__(self):
return iter(self.parse())
def _build_foreign(self, context, base, sysid, pubid):
parser = self.expat.ExternalEntityParserCreate(context)
parser.ParseFile(BytesIO(self._external_dtd))
return 1
def _enqueue(self, kind, data=None, pos=None):
if pos is None:
pos = self._getpos()
if kind is TEXT:
# Expat reports the *end* of the text event as current position. We
# try to fix that up here as much as possible. Unfortunately, the
# offset is only valid for single-line text. For multi-line text,
# it is apparently not possible to determine at what offset it
# started
if '\n' in data:
lines = data.splitlines()
lineno = pos[1] - len(lines) + 1
offset = -1
else:
lineno = pos[1]
offset = pos[2] - len(data)
pos = (pos[0], lineno, offset)
self._queue.append((kind, data, pos))
def _getpos_unknown(self):
return (self.filename, -1, -1)
def _getpos(self):
return (self.filename, self.expat.CurrentLineNumber,
self.expat.CurrentColumnNumber)
def _handle_start(self, tag, attrib):
attrs = Attrs([(QName(name), value) for name, value in
zip(*[iter(attrib)] * 2)])
self._enqueue(START, (QName(tag), attrs))
def _handle_end(self, tag):
self._enqueue(END, QName(tag))
def _handle_data(self, text):
self._enqueue(TEXT, text)
def _handle_xml_decl(self, version, encoding, standalone):
self._enqueue(XML_DECL, (version, encoding, standalone))
def _handle_doctype(self, name, sysid, pubid, has_internal_subset):
self._enqueue(DOCTYPE, (name, pubid, sysid))
def _handle_start_ns(self, prefix, uri):
self._enqueue(START_NS, (prefix or '', uri))
def _handle_end_ns(self, prefix):
self._enqueue(END_NS, prefix or '')
def _handle_start_cdata(self):
self._enqueue(START_CDATA)
def _handle_end_cdata(self):
self._enqueue(END_CDATA)
def _handle_pi(self, target, data):
self._enqueue(PI, (target, data))
def _handle_comment(self, text):
self._enqueue(COMMENT, text)
def _handle_other(self, text):
if text.startswith('&'):
# deal with undefined entities
try:
text = chr(entities.name2codepoint[text[1:-1]])
self._enqueue(TEXT, text)
except KeyError:
filename, lineno, offset = self._getpos()
error = expat.error('undefined entity "%s": line %d, column %d'
% (text, lineno, offset))
error.code = expat.errors.XML_ERROR_UNDEFINED_ENTITY
error.lineno = lineno
error.offset = offset
raise error
def XML(text):
"""Parse the given XML source and return a markup stream.
Unlike with `XMLParser`, the returned stream is reusable, meaning it can be
iterated over multiple times:
>>> xml = XML('<doc><elem>Foo</elem><elem>Bar</elem></doc>')
>>> print(xml)
<doc><elem>Foo</elem><elem>Bar</elem></doc>
>>> print((xml.select('elem')))
<elem>Foo</elem><elem>Bar</elem>
>>> print((xml.select('elem/text()')))
FooBar
:param text: the XML source
:return: the parsed XML event stream
:raises ParseError: if the XML text is not well-formed
"""
return Stream(list(XMLParser(StringIO(text))))
class HTMLParser(html.HTMLParser, object):
"""Parser for HTML input based on the Python `HTMLParser` module.
This class provides the same interface for generating stream events as
`XMLParser`, and attempts to automatically balance tags.
The parsing is initiated by iterating over the parser object:
>>> parser = HTMLParser(BytesIO('<UL compact><LI>Foo</UL>'.encode('utf-8')), encoding='utf-8')
>>> for kind, data, pos in parser:
... print(('%s %s' % (kind, data)))
START (QName('ul'), Attrs([(QName('compact'), 'compact')]))
START (QName('li'), Attrs())
TEXT Foo
END li
END ul
"""
_EMPTY_ELEMS = frozenset(['area', 'base', 'basefont', 'br', 'col', 'frame',
'hr', 'img', 'input', 'isindex', 'link', 'meta',
'param'])
def __init__(self, source, filename=None, encoding=None):
"""Initialize the parser for the given HTML input.
:param source: the HTML text as a file-like object
:param filename: the name of the file, if known
:param encoding: the encoding of the file; ignored if the input is unicode
"""
html.HTMLParser.__init__(self)
self.source = source
self.filename = filename
self.encoding = encoding
self._queue = []
self._open_tags = []
def parse(self):
"""Generator that parses the HTML source, yielding markup events.
:return: a markup event stream
:raises ParseError: if the HTML text is not well formed
"""
def _generate():
if self.encoding:
reader = codecs.getreader(self.encoding)
source = reader(self.source)
else:
source = self.source
try:
bufsize = 4 * 1024 # 4K
done = False
while 1:
while not done and len(self._queue) == 0:
data = source.read(bufsize)
if not data: # end of data
self.close()
done = True
else:
if not isinstance(data, str):
raise UnicodeError("source returned bytes, but no encoding specified")
self.feed(data)
for kind, data, pos in self._queue:
yield kind, data, pos
self._queue = []
if done:
open_tags = self._open_tags
open_tags.reverse()
for tag in open_tags:
yield END, QName(tag), pos
break
except html.HTMLParseError as e:
msg = '%s: line %d, column %d' % (e.msg, e.lineno, e.offset)
raise ParseError(msg, self.filename, e.lineno, e.offset)
return Stream(_generate()).filter(_coalesce)
def __iter__(self):
return iter(self.parse())
def _enqueue(self, kind, data, pos=None):
if pos is None:
pos = self._getpos()
self._queue.append((kind, data, pos))
def _getpos(self):
lineno, column = self.getpos()
return (self.filename, lineno, column)
def handle_starttag(self, tag, attrib):
fixed_attrib = []
for name, value in attrib: # Fixup minimized attributes
if value is None:
value = name
fixed_attrib.append((QName(name), stripentities(value)))
self._enqueue(START, (QName(tag), Attrs(fixed_attrib)))
if tag in self._EMPTY_ELEMS:
self._enqueue(END, QName(tag))
else:
self._open_tags.append(tag)
def handle_endtag(self, tag):
if tag not in self._EMPTY_ELEMS:
while self._open_tags:
open_tag = self._open_tags.pop()
self._enqueue(END, QName(open_tag))
if open_tag.lower() == tag.lower():
break
def handle_data(self, text):
self._enqueue(TEXT, text)
def handle_charref(self, name):
if name.lower().startswith('x'):
text = chr(int(name[1:], 16))
else:
text = chr(int(name))
self._enqueue(TEXT, text)
def handle_entityref(self, name):
try:
text = chr(entities.name2codepoint[name])
except KeyError:
text = '&%s;' % name
self._enqueue(TEXT, text)
def handle_pi(self, data):
if data.endswith('?'):
data = data[:-1]
try:
target, data = data.split(None, 1)
except ValueError:
# PI with no data
target = data
data = ''
self._enqueue(PI, (target.strip(), data.strip()))
def handle_comment(self, text):
self._enqueue(COMMENT, text)
def HTML(text, encoding=None):
"""Parse the given HTML source and return a markup stream.
Unlike with `HTMLParser`, the returned stream is reusable, meaning it can be
iterated over multiple times:
>>> html = HTML('<body><h1>Foo</h1></body>', encoding='utf-8')
>>> print(html)
<body><h1>Foo</h1></body>
>>> print((html.select('h1')))
<h1>Foo</h1>
>>> print((html.select('h1/text()')))
Foo
:param text: the HTML source
:return: the parsed XML event stream
:raises ParseError: if the HTML text is not well-formed, and error recovery
fails
"""
if isinstance(text, str):
# If it's unicode text the encoding should be set to None.
# The option to pass in an incorrect encoding is for ease
# of writing doctests that work in both Python 2.x and 3.x.
return Stream(list(HTMLParser(StringIO(text), encoding=None)))
return Stream(list(HTMLParser(BytesIO(text), encoding=encoding)))
def _coalesce(stream):
"""Coalesces adjacent TEXT events into a single event."""
textbuf = []
textpos = None
for kind, data, pos in chain(stream, [(None, None, None)]):
if kind is TEXT:
textbuf.append(data)
if textpos is None:
textpos = pos
else:
if textbuf:
yield TEXT, ''.join(textbuf), textpos
del textbuf[:]
textpos = None
if kind:
yield kind, data, pos
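As a usage note, the _coalesce filter applied by both parsers merges adjacent TEXT events, so entity and character references emitted separately by the handlers come back as a single event. A small sketch (assuming this module is importable as genshi.input):

from genshi.input import HTML
from genshi.core import TEXT

stream = HTML('<p>AT&amp;T</p>')
texts = [data for kind, data, pos in stream if kind is TEXT]
# texts == ['AT&T']: 'AT', '&' and 'T' were coalesced into one TEXT event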
|
mit
|
vladsaveliev/bcbio-nextgen
|
bcbio/pipeline/main.py
|
1
|
27548
|
"""Main entry point for distributed next-gen sequencing pipelines.
Handles running the full pipeline based on instructions
"""
from __future__ import print_function
from collections import defaultdict
import copy
import os
import sys
import resource
import tempfile
import toolz as tz
from bcbio import log, heterogeneity, hla, structural, utils
from bcbio.cwl.inspect import initialize_watcher
from bcbio.distributed import prun
from bcbio.distributed.transaction import tx_tmpdir
from bcbio.log import logger, DEFAULT_LOG_DIR
from bcbio.ngsalign import alignprep
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import (archive, config_utils, disambiguate, region,
run_info, qcsummary, rnaseq)
from bcbio.provenance import profile, system
from bcbio.variation import (ensemble, genotype, population, validate, joint,
peddy)
from bcbio.chipseq import peaks, atac
def run_main(workdir, config_file=None, fc_dir=None, run_info_yaml=None,
parallel=None, workflow=None):
"""Run variant analysis, handling command line options.
"""
# Set environment to standard to use periods for decimals and avoid localization
locale_to_use = utils.get_locale()
os.environ["LC_ALL"] = locale_to_use
os.environ["LC"] = locale_to_use
os.environ["LANG"] = locale_to_use
workdir = utils.safe_makedir(os.path.abspath(workdir))
os.chdir(workdir)
config, config_file = config_utils.load_system_config(config_file, workdir)
parallel = log.create_base_logger(config, parallel)
log.setup_local_logging(config, parallel)
logger.info(f"System YAML configuration: {os.path.abspath(config_file)}.")
logger.info(f"Locale set to {locale_to_use}.")
if config.get("log_dir", None) is None:
config["log_dir"] = os.path.join(workdir, DEFAULT_LOG_DIR)
if parallel["type"] in ["local", "clusterk"]:
_setup_resources()
_run_toplevel(config, config_file, workdir, parallel,
fc_dir, run_info_yaml)
elif parallel["type"] == "ipython":
assert parallel["scheduler"] is not None, "IPython parallel requires a specified scheduler (-s)"
if parallel["scheduler"] != "sge":
assert parallel["queue"] is not None, "IPython parallel requires a specified queue (-q)"
elif not parallel["queue"]:
parallel["queue"] = ""
_run_toplevel(config, config_file, workdir, parallel,
fc_dir, run_info_yaml)
else:
raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
def _setup_resources():
"""Attempt to increase resource limits up to hard limits.
    This lets us avoid running out of file handles by raising the soft
    limit up to the hard limit where possible.
"""
target_procs = 10240
cur_proc, max_proc = resource.getrlimit(resource.RLIMIT_NPROC)
target_proc = min(max_proc, target_procs) if max_proc > 0 else target_procs
resource.setrlimit(resource.RLIMIT_NPROC, (max(cur_proc, target_proc), max_proc))
cur_hdls, max_hdls = resource.getrlimit(resource.RLIMIT_NOFILE)
target_hdls = min(max_hdls, target_procs) if max_hdls > 0 else target_procs
resource.setrlimit(resource.RLIMIT_NOFILE, (max(cur_hdls, target_hdls), max_hdls))
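# A minimal sketch of the soft-limit raise pattern used above; the helper
# name is ours, not part of bcbio, and assumes a POSIX resource module.
def _raise_soft_limit(kind, target):
    """Raise the soft limit for `kind` toward `target`, capped at the hard limit."""
    soft, hard = resource.getrlimit(kind)
    new_soft = min(target, hard) if hard > 0 else target
    resource.setrlimit(kind, (max(soft, new_soft), hard))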
def _run_toplevel(config, config_file, work_dir, parallel,
fc_dir=None, run_info_yaml=None):
"""
Run toplevel analysis, processing a set of input files.
config_file -- Main YAML configuration file with system parameters
fc_dir -- Directory of fastq files to process
run_info_yaml -- YAML configuration file specifying inputs to process
"""
dirs = run_info.setup_directories(work_dir, fc_dir, config, config_file)
config_file = os.path.join(dirs["config"], os.path.basename(config_file))
pipelines, config = _pair_samples_with_pipelines(run_info_yaml, config)
system.write_info(dirs, parallel, config)
with tx_tmpdir(config if parallel.get("type") == "local" else None) as tmpdir:
tempfile.tempdir = tmpdir
for pipeline, samples in pipelines.items():
for xs in pipeline(config, run_info_yaml, parallel, dirs, samples):
pass
# ## Generic pipeline framework
def _wres(parallel, progs, fresources=None, ensure_mem=None):
"""Add resource information to the parallel environment on required programs and files.
Enables spinning up required machines and operating in non-shared filesystem
environments.
progs -- Third party tools used in processing
fresources -- Required file-based resources needed. These will be transferred on non-shared
filesystems.
ensure_mem -- Dictionary of required minimum memory for programs used. Ensures
enough memory gets allocated on low-core machines.
"""
parallel = copy.deepcopy(parallel)
parallel["progs"] = progs
if fresources:
parallel["fresources"] = fresources
if ensure_mem:
parallel["ensure_mem"] = ensure_mem
return parallel
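# Example (sketch): _wres({"type": "local"}, ["gatk"], ensure_mem={"gatk": 4})
# returns a new dict like
#   {"type": "local", "progs": ["gatk"], "ensure_mem": {"gatk": 4}}
# leaving the caller's parallel dict untouched (it is deep-copied).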
def variant2pipeline(config, run_info_yaml, parallel, dirs, samples):
## Alignment and preparation requiring the entire input file (multicore cluster)
# Assign GATK supplied memory if required for post-process recalibration
align_programs = ["aligner", "samtools", "sambamba"]
if any(tz.get_in(["algorithm", "recalibrate"], utils.to_single_data(d)) in [True, "gatk"] for d in samples):
align_programs.append("gatk")
with prun.start(_wres(parallel, align_programs,
(["reference", "fasta"], ["reference", "aligner"], ["files"])),
samples, config, dirs, "multicore",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("alignment preparation", dirs):
samples = run_parallel("prep_align_inputs", samples)
samples = run_parallel("disambiguate_split", [samples])
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
samples = disambiguate.resolve(samples, run_parallel)
samples = alignprep.merge_split_alignments(samples, run_parallel)
with profile.report("callable regions", dirs):
samples = run_parallel("prep_samples", [samples])
samples = run_parallel("postprocess_alignment", samples)
samples = run_parallel("combine_sample_regions", [samples])
samples = run_parallel("calculate_sv_bins", [samples])
samples = run_parallel("calculate_sv_coverage", samples)
samples = run_parallel("normalize_sv_coverage", [samples])
samples = region.clean_sample_data(samples)
with profile.report("hla typing", dirs):
samples = hla.run(samples, run_parallel)
## Variant calling on sub-regions of the input file (full cluster)
with prun.start(_wres(parallel, ["gatk", "picard", "variantcaller"]),
samples, config, dirs, "full",
multiplier=region.get_max_counts(samples), max_multicore=1) as run_parallel:
with profile.report("alignment post-processing", dirs):
samples = region.parallel_prep_region(samples, run_parallel)
with profile.report("variant calling", dirs):
samples = genotype.parallel_variantcall_region(samples, run_parallel)
STEP(samples)
with profile.report("joint squaring off/backfilling", dirs):
samples = joint.square_off(samples, run_parallel)
## Finalize variants, BAMs and population databases (per-sample multicore cluster)
with prun.start(_wres(parallel, ["gatk", "gatk-vqsr", "snpeff", "bcbio_variation",
"gemini", "samtools", "fastqc", "sambamba",
"bcbio-variation-recall", "qsignature",
"svcaller", "kraken", "preseq"]),
samples, config, dirs, "multicore2",
multiplier=structural.parallel_multiplier(samples)) as run_parallel:
with profile.report("variant post-processing", dirs):
samples = run_parallel("postprocess_variants", samples)
STEP(samples)
samples = run_parallel("split_variants_by_sample", samples)
with profile.report("prepped BAM merging", dirs):
samples = region.delayed_bamprep_merge(samples, run_parallel)
with profile.report("validation", dirs):
samples = run_parallel("compare_to_rm", samples)
samples = genotype.combine_multiple_callers(samples)
with profile.report("ensemble calling", dirs):
samples = ensemble.combine_calls_parallel(samples, run_parallel)
STEP(samples)
with profile.report("validation summary", dirs):
samples = validate.summarize_grading(samples)
with profile.report("structural variation", dirs):
samples = structural.run(samples, run_parallel, "initial")
STEP(samples)
with profile.report("structural variation", dirs):
samples = structural.run(samples, run_parallel, "standard")
STEP(samples)
with profile.report("structural variation ensemble", dirs):
samples = structural.run(samples, run_parallel, "ensemble")
STEP(samples)
with profile.report("structural variation validation", dirs):
samples = run_parallel("validate_sv", samples)
STEP(samples)
with profile.report("heterogeneity", dirs):
samples = heterogeneity.run(samples, run_parallel)
STEP(samples)
with profile.report("population database", dirs):
samples = population.prep_db_parallel(samples, run_parallel)
STEP(samples)
with profile.report("peddy check", dirs):
samples = peddy.run_peddy_parallel(samples, run_parallel)
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
STEP(samples)
with profile.report("archive", dirs):
samples = archive.compress(samples, run_parallel)
STEP(samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def _debug_samples(i, samples):
print("---", i, len(samples))
for sample in (utils.to_single_data(x) for x in samples):
print(" ", sample["description"], sample.get("region"), \
utils.get_in(sample, ("config", "algorithm", "variantcaller")), \
utils.get_in(sample, ("config", "algorithm", "jointcaller")), \
utils.get_in(sample, ("metadata", "batch")), \
[x.get("variantcaller") for x in sample.get("variants", [])], \
sample.get("work_bam"), \
sample.get("vrn_file"))
def STEP(samples, title=None):
if title:
print(title + ':')
import pprint
pprint.pprint([(s[0]['description'], s[0]['metadata']['phenotype'], str(s[0]['config']['algorithm']['qc'])) for s in samples])
def standardpipeline(config, run_info_yaml, parallel, dirs, samples):
## Alignment and preparation requiring the entire input file (multicore cluster)
with prun.start(_wres(parallel, ["aligner", "samtools", "sambamba"]),
samples, config, dirs, "multicore") as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
with profile.report("callable regions", dirs):
samples = run_parallel("prep_samples", [samples])
samples = run_parallel("postprocess_alignment", samples)
samples = run_parallel("combine_sample_regions", [samples])
samples = region.clean_sample_data(samples)
## Quality control
with prun.start(_wres(parallel, ["fastqc", "qsignature", "kraken", "gatk", "samtools", "preseq"]),
samples, config, dirs, "multicore2") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def rnaseqpipeline(config, run_info_yaml, parallel, dirs, samples):
samples = rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples)
with prun.start(_wres(parallel, ["aligner", "picard", "samtools"],
ensure_mem={"tophat": 10, "tophat2": 10, "star": 2, "hisat2": 8}),
samples, config, dirs, "alignment",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("alignment", dirs):
samples = run_parallel("disambiguate_split", [samples])
samples = run_parallel("process_alignment", samples)
with prun.start(_wres(parallel, ["samtools", "cufflinks"]),
samples, config, dirs, "rnaseqcount") as run_parallel:
with profile.report("disambiguation", dirs):
samples = disambiguate.resolve(samples, run_parallel)
with profile.report("transcript assembly", dirs):
samples = rnaseq.assemble_transcripts(run_parallel, samples)
with profile.report("estimate expression (threaded)", dirs):
samples = rnaseq.quantitate_expression_parallel(samples, run_parallel)
with prun.start(_wres(parallel, ["dexseq", "express"]), samples, config,
dirs, "rnaseqcount-singlethread", max_multicore=1) as run_parallel:
with profile.report("estimate expression (single threaded)", dirs):
samples = rnaseq.quantitate_expression_noparallel(samples, run_parallel)
samples = rnaseq.combine_files(samples)
with prun.start(_wres(parallel, ["gatk", "vardict"]), samples, config,
dirs, "rnaseq-variation") as run_parallel:
with profile.report("RNA-seq variant calling", dirs):
samples = rnaseq.rnaseq_variant_calling(samples, run_parallel)
with prun.start(_wres(parallel, ["samtools", "fastqc", "qualimap",
"kraken", "gatk", "preseq"], ensure_mem={"qualimap": 4}),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
with profile.report("bcbioRNAseq loading", dirs):
tools_on = dd.get_in_samples(samples, dd.get_tools_on)
bcbiornaseq_on = tools_on and "bcbiornaseq" in tools_on
if bcbiornaseq_on and len(samples) < 3:
logger.warn("bcbioRNASeq needs at least three samples total, skipping.")
else:
run_parallel("run_bcbiornaseqload", [sample])
logger.info("Timing: finished")
return samples
def fastrnaseqpipeline(config, run_info_yaml, parallel, dirs, samples):
samples = rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples)
ww = initialize_watcher(samples)
with prun.start(_wres(parallel, ["samtools"]), samples, config,
dirs, "fastrnaseq") as run_parallel:
with profile.report("fastrnaseq", dirs):
samples = rnaseq.fast_rnaseq(samples, run_parallel)
ww.report("fastrnaseq", samples)
samples = rnaseq.combine_files(samples)
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
ww.report("qcsummary", samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
            for sample in samples:
                run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def singlecellrnaseqpipeline(config, run_info_yaml, parallel, dirs, samples):
samples = rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples)
with prun.start(_wres(parallel, ["samtools", "rapmap"]), samples, config,
dirs, "singlecell-rnaseq") as run_parallel:
with profile.report("singlecell-rnaseq", dirs):
samples = rnaseq.singlecell_rnaseq(samples, run_parallel)
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
            for sample in samples:
                run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def smallrnaseqpipeline(config, run_info_yaml, parallel, dirs, samples):
# causes a circular import at the top level
from bcbio.srna.group import report as srna_report
samples = rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples)
with prun.start(_wres(parallel, ["aligner", "picard", "samtools"],
ensure_mem={"bowtie": 8, "bowtie2": 8, "star": 2}),
[samples[0]], config, dirs, "alignment") as run_parallel:
with profile.report("prepare", dirs):
samples = run_parallel("seqcluster_prepare", [samples])
with profile.report("seqcluster alignment", dirs):
samples = run_parallel("srna_alignment", [samples])
with prun.start(_wres(parallel, ["aligner", "picard", "samtools"],
ensure_mem={"tophat": 10, "tophat2": 10, "star": 2, "hisat2": 8}),
samples, config, dirs, "alignment_samples",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
with prun.start(_wres(parallel, ["picard", "miraligner"]),
samples, config, dirs, "annotation") as run_parallel:
with profile.report("small RNA annotation", dirs):
samples = run_parallel("srna_annotation", samples)
with prun.start(_wres(parallel, ["seqcluster", "mirge"],
ensure_mem={"seqcluster": 8}),
[samples[0]], config, dirs, "cluster") as run_parallel:
with profile.report("cluster", dirs):
samples = run_parallel("seqcluster_cluster", [samples])
with prun.start(_wres(parallel, ["picard", "fastqc"]),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("report", dirs):
srna_report(samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
return samples
def chipseqpipeline(config, run_info_yaml, parallel, dirs, samples):
with prun.start(_wres(parallel, ["aligner", "picard"]),
samples, config, dirs, "multicore",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("alignment", dirs):
samples = run_parallel("prepare_sample", samples)
samples = run_parallel("trim_sample", samples)
samples = run_parallel("disambiguate_split", [samples])
samples = run_parallel("process_alignment", samples)
with profile.report("disambiguation", dirs):
samples = disambiguate.resolve(samples, run_parallel)
samples = run_parallel("clean_chipseq_alignment", samples)
with prun.start(_wres(parallel, ["peakcaller"]),
samples, config, dirs, "peakcalling",
                    multiplier=peaks._get_multiplier(samples)) as run_parallel:
with profile.report("peakcalling", dirs):
samples = peaks.peakcall_prepare(samples, run_parallel)
samples = peaks.call_consensus(samples)
samples = run_parallel("run_chipseq_count", samples)
samples = peaks.create_peaktable(samples)
with prun.start(_wres(parallel, ["picard", "fastqc"]),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
samples = atac.create_ataqv_report(samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def wgbsseqpipeline(config, run_info_yaml, parallel, dirs, samples):
with prun.start(_wres(parallel, ["fastqc", "picard"], ensure_mem={"fastqc" : 4}),
samples, config, dirs, "trimming") as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
samples = run_parallel("prepare_sample", samples)
samples = run_parallel("trim_bs_sample", samples)
with prun.start(_wres(parallel, ["aligner", "bismark", "picard", "samtools"]),
samples, config, dirs, "multicore",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
with prun.start(_wres(parallel, ['samtools']), samples, config, dirs,
'deduplication') as run_parallel:
with profile.report('deduplicate', dirs):
samples = run_parallel('deduplicate_bismark', samples)
with prun.start(_wres(parallel, ["caller"], ensure_mem={"caller": 5}),
samples, config, dirs, "multicore2",
multiplier=24) as run_parallel:
with profile.report("cpg calling", dirs):
samples = run_parallel("cpg_calling", samples)
# with prun.start(_wres(parallel, ["picard", "fastqc", "samtools"]),
# samples, config, dirs, "qc") as run_parallel:
# with profile.report("quality control", dirs):
# samples = qcsummary.generate_parallel(samples, run_parallel)
return samples
def rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples):
"""
    Organizes RNA-seq and small RNA-seq samples, converting from BAM and
    trimming adapters when necessary.
"""
pipeline = dd.get_in_samples(samples, dd.get_analysis)
trim_reads_set = any([tz.get_in(["algorithm", "trim_reads"], d) for d in dd.sample_data_iterator(samples)])
resources = ["picard"]
needs_trimming = (_is_smallrnaseq(pipeline) or trim_reads_set)
if needs_trimming:
resources.append("atropos")
with prun.start(_wres(parallel, resources),
samples, config, dirs, "trimming",
max_multicore=1 if not needs_trimming else None) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
samples = run_parallel("prepare_sample", samples)
if needs_trimming:
with profile.report("adapter trimming", dirs):
if _is_smallrnaseq(pipeline):
samples = run_parallel("trim_srna_sample", samples)
else:
samples = run_parallel("trim_sample", samples)
return samples
def _get_pipeline(item):
from bcbio.log import logger
analysis_type = item.get("analysis", "").lower()
if analysis_type not in SUPPORTED_PIPELINES:
logger.error("Cannot determine which type of analysis to run, "
"set in the run_info under details.")
sys.exit(1)
else:
return SUPPORTED_PIPELINES[analysis_type]
def _pair_samples_with_pipelines(run_info_yaml, config):
"""Map samples defined in input file to pipelines to run.
"""
samples = config_utils.load_config(run_info_yaml)
if isinstance(samples, dict):
resources = samples.pop("resources")
samples = samples["details"]
else:
resources = {}
ready_samples = []
for sample in samples:
if "files" in sample:
del sample["files"]
# add any resources to this item to recalculate global configuration
usample = copy.deepcopy(sample)
usample.pop("algorithm", None)
if "resources" not in usample:
usample["resources"] = {}
for prog, pkvs in resources.items():
if prog not in usample["resources"]:
usample["resources"][prog] = {}
if pkvs is not None:
for key, val in pkvs.items():
usample["resources"][prog][key] = val
config = config_utils.update_w_custom(config, usample)
sample["resources"] = {}
ready_samples.append(sample)
paired = [(x, _get_pipeline(x)) for x in ready_samples]
d = defaultdict(list)
for x in paired:
d[x[1]].append([x[0]])
return d, config
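# Example (sketch): two "variant2" samples s1, s2 and one "rna-seq" sample s3
# yield {variant2pipeline: [[s1], [s2]], rnaseqpipeline: [[s3]]}, so each
# pipeline later receives its samples individually wrapped in lists.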
SUPPORTED_PIPELINES = {"variant2": variant2pipeline,
"snp calling": variant2pipeline,
"variant": variant2pipeline,
"standard": standardpipeline,
"minimal": standardpipeline,
"rna-seq": rnaseqpipeline,
"smallrna-seq": smallrnaseqpipeline,
"chip-seq": chipseqpipeline,
"wgbs-seq": wgbsseqpipeline,
"fastrna-seq": fastrnaseqpipeline,
"scrna-seq": singlecellrnaseqpipeline}
def _is_smallrnaseq(pipeline):
return pipeline.lower() == "smallrna-seq"
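# Example (sketch): an input item with {"analysis": "RNA-seq"} is lowercased
# by _get_pipeline and dispatched to rnaseqpipeline via SUPPORTED_PIPELINES.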
|
mit
|
justinvforvendetta/electrum-pkb
|
lib/asn1tinydecoder.py
|
15
|
3835
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
# This is a simple and fast ASN1 decoder without external libraries.
#
# In order to browse through the ASN1 structure you need only 3
# functions allowing you to navigate:
# asn1_node_root(...), asn1_node_next(...) and asn1_node_first_child(...)
#
####################### BEGIN ASN1 DECODER ############################
# Author: Jens Getreu, 8.11.2014
##### NAVIGATE
# The following 4 functions are all you need to parse an ASN1 structure
# gets the first ASN1 structure in der
def asn1_node_root(der):
return asn1_read_length(der,0)
# gets the next ASN1 structure following (ixs,ixf,ixl)
def asn1_node_next(der, (ixs,ixf,ixl)):
return asn1_read_length(der,ixl+1)
# opens the container (ixs,ixf,ixl) and returns the first ASN1 inside
def asn1_node_first_child(der, (ixs,ixf,ixl)):
if ord(der[ixs]) & 0x20 != 0x20:
raise ValueError('Error: can only open constructed types. '
+'Found type: 0x'+der[ixs].encode("hex"))
return asn1_read_length(der,ixf)
# is true if one ASN1 chunk is inside another chunk.
def asn1_node_is_child_of((ixs,ixf,ixl), (jxs,jxf,jxl)):
return ( (ixf <= jxs ) and (jxl <= ixl) ) or \
( (jxf <= ixs ) and (ixl <= jxl) )
##### END NAVIGATE
##### ACCESS PRIMITIVES
# get content and verify type byte
def asn1_get_value_of_type(der,(ixs,ixf,ixl),asn1_type):
    asn1_type_table = {
        'BOOLEAN': 0x01, 'INTEGER': 0x02,
        'BIT STRING': 0x03, 'OCTET STRING': 0x04,
        'NULL': 0x05, 'OBJECT IDENTIFIER': 0x06,
        'SEQUENCE': 0x30, 'SET': 0x31,
        'PrintableString': 0x13, 'IA5String': 0x16,
        'UTCTime': 0x17, 'ENUMERATED': 0x0A,
        'UTF8String': 0x0C,
    }
if asn1_type_table[asn1_type] != ord(der[ixs]):
raise ValueError('Error: Expected type was: '+
hex(asn1_type_table[asn1_type])+
' Found: 0x'+der[ixs].encode('hex'))
return der[ixf:ixl+1]
# get value
def asn1_get_value(der,(ixs,ixf,ixl)):
return der[ixf:ixl+1]
# get type+length+value
def asn1_get_all(der,(ixs,ixf,ixl)):
return der[ixs:ixl+1]
##### END ACCESS PRIMITIVES
##### HELPER FUNCTIONS
# converter
def bitstr_to_bytestr(bitstr):
if bitstr[0] != '\x00':
raise ValueError('Error: only 00 padded bitstr can be converted to bytestr!')
return bitstr[1:]
# converter
def bytestr_to_int(s):
# converts bytestring to integer
i = 0
for char in s:
i <<= 8
i |= ord(char)
return i
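# e.g. bytestr_to_int('\x01\x00') == 256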
# ix points to the first byte of the asn1 structure
# Returns a triple: (first byte, first content byte, last content byte).
def asn1_read_length(der, ix):
    first = ord(der[ix+1])
    if (first & 0x80) == 0:
        length = first
        ix_first_content_byte = ix + 2
        ix_last_content_byte = ix_first_content_byte + length - 1
    else:
        lengthbytes = first & 0x7F
        length = bytestr_to_int(der[ix+2:ix+2+lengthbytes])
        ix_first_content_byte = ix + 2 + lengthbytes
        ix_last_content_byte = ix_first_content_byte + length - 1
    return (ix, ix_first_content_byte, ix_last_content_byte)
##### END HELPER FUNCTIONS
####################### END ASN1 DECODER ############################
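# Usage sketch (ours, not part of the original decoder): walk a hand-built
# DER SEQUENCE containing the two INTEGERs 5 and 7, using Python 2 byte
# strings as the functions above expect.
if __name__ == '__main__':
    der = '\x30\x06\x02\x01\x05\x02\x01\x07'
    root = asn1_node_root(der)                # (0, 2, 7): the whole SEQUENCE
    first = asn1_node_first_child(der, root)  # (2, 4, 4): first INTEGER
    print bytestr_to_int(asn1_get_value_of_type(der, first, 'INTEGER'))   # 5
    second = asn1_node_next(der, first)       # (5, 7, 7): second INTEGER
    print bytestr_to_int(asn1_get_value_of_type(der, second, 'INTEGER'))  # 7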
|
gpl-3.0
|
p4datasystems/CarnotKEdist
|
dist/Lib/test/test_cmath.py
|
10
|
19937
|
from test.test_support import run_unittest, verbose
from test.test_math import parse_testfile, test_file
import unittest
import cmath, math
from cmath import phase, polar, rect, pi
INF = float('inf')
NAN = float('nan')
complex_zeros = [complex(x, y) for x in [0.0, -0.0] for y in [0.0, -0.0]]
complex_infinities = [complex(x, y) for x, y in [
(INF, 0.0), # 1st quadrant
(INF, 2.3),
(INF, INF),
(2.3, INF),
(0.0, INF),
(-0.0, INF), # 2nd quadrant
(-2.3, INF),
(-INF, INF),
(-INF, 2.3),
(-INF, 0.0),
(-INF, -0.0), # 3rd quadrant
(-INF, -2.3),
(-INF, -INF),
(-2.3, -INF),
(-0.0, -INF),
(0.0, -INF), # 4th quadrant
(2.3, -INF),
(INF, -INF),
(INF, -2.3),
(INF, -0.0)
]]
complex_nans = [complex(x, y) for x, y in [
(NAN, -INF),
(NAN, -2.3),
(NAN, -0.0),
(NAN, 0.0),
(NAN, 2.3),
(NAN, INF),
(-INF, NAN),
(-2.3, NAN),
(-0.0, NAN),
(0.0, NAN),
(2.3, NAN),
(INF, NAN)
]]
class CMathTests(unittest.TestCase):
# list of all functions in cmath
test_functions = [getattr(cmath, fname) for fname in [
'acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh',
'cos', 'cosh', 'exp', 'log', 'log10', 'sin', 'sinh',
'sqrt', 'tan', 'tanh']]
# test first and second arguments independently for 2-argument log
test_functions.append(lambda x : cmath.log(x, 1729. + 0j))
test_functions.append(lambda x : cmath.log(14.-27j, x))
def setUp(self):
self.test_values = open(test_file)
def tearDown(self):
self.test_values.close()
def rAssertAlmostEqual(self, a, b, rel_err = 2e-15, abs_err = 5e-323,
msg=None):
"""Fail if the two floating-point numbers are not almost equal.
Determine whether floating-point values a and b are equal to within
a (small) rounding error. The default values for rel_err and
abs_err are chosen to be suitable for platforms where a float is
represented by an IEEE 754 double. They allow an error of between
9 and 19 ulps.
"""
# special values testing
if math.isnan(a):
if math.isnan(b):
return
self.fail(msg or '{!r} should be nan'.format(b))
if math.isinf(a):
if a == b:
return
self.fail(msg or 'finite result where infinity expected: '
'expected {!r}, got {!r}'.format(a, b))
# if both a and b are zero, check whether they have the same sign
# (in theory there are examples where it would be legitimate for a
# and b to have opposite signs; in practice these hardly ever
# occur).
if not a and not b:
if math.copysign(1., a) != math.copysign(1., b):
self.fail(msg or 'zero has wrong sign: expected {!r}, '
'got {!r}'.format(a, b))
# if a-b overflows, or b is infinite, return False. Again, in
# theory there are examples where a is within a few ulps of the
# max representable float, and then b could legitimately be
# infinite. In practice these examples are rare.
try:
absolute_error = abs(b-a)
except OverflowError:
pass
else:
# test passes if either the absolute error or the relative
# error is sufficiently small. The defaults amount to an
# error of between 9 ulps and 19 ulps on an IEEE-754 compliant
# machine.
if absolute_error <= max(abs_err, rel_err * abs(a)):
return
self.fail(msg or
'{!r} and {!r} are not sufficiently close'.format(a, b))
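    # Example (sketch): with the defaults, rAssertAlmostEqual(1.0, 1.0 + 1e-15)
    # passes (relative error within 2e-15), while rAssertAlmostEqual(1.0, 1.0 + 1e-13)
    # fails, since 1e-13 exceeds both abs_err and rel_err * abs(a).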
def test_constants(self):
e_expected = 2.71828182845904523536
pi_expected = 3.14159265358979323846
self.assertAlmostEqual(cmath.pi, pi_expected, places=9,
msg="cmath.pi is {}; should be {}".format(cmath.pi, pi_expected))
self.assertAlmostEqual(cmath.e, e_expected, places=9,
msg="cmath.e is {}; should be {}".format(cmath.e, e_expected))
def test_user_object(self):
# Test automatic calling of __complex__ and __float__ by cmath
# functions
# some random values to use as test values; we avoid values
# for which any of the functions in cmath is undefined
# (i.e. 0., 1., -1., 1j, -1j) or would cause overflow
cx_arg = 4.419414439 + 1.497100113j
flt_arg = -6.131677725
# a variety of non-complex numbers, used to check that
# non-complex return values from __complex__ give an error
non_complexes = ["not complex", 1, 5L, 2., None,
object(), NotImplemented]
# Now we introduce a variety of classes whose instances might
# end up being passed to the cmath functions
# usual case: new-style class implementing __complex__
class MyComplex(object):
def __init__(self, value):
self.value = value
def __complex__(self):
return self.value
# old-style class implementing __complex__
class MyComplexOS:
def __init__(self, value):
self.value = value
def __complex__(self):
return self.value
# classes for which __complex__ raises an exception
class SomeException(Exception):
pass
class MyComplexException(object):
def __complex__(self):
raise SomeException
class MyComplexExceptionOS:
def __complex__(self):
raise SomeException
# some classes not providing __float__ or __complex__
class NeitherComplexNorFloat(object):
pass
class NeitherComplexNorFloatOS:
pass
class MyInt(object):
def __int__(self): return 2
def __long__(self): return 2L
def __index__(self): return 2
class MyIntOS:
def __int__(self): return 2
def __long__(self): return 2L
def __index__(self): return 2
# other possible combinations of __float__ and __complex__
# that should work
class FloatAndComplex(object):
def __float__(self):
return flt_arg
def __complex__(self):
return cx_arg
class FloatAndComplexOS:
def __float__(self):
return flt_arg
def __complex__(self):
return cx_arg
class JustFloat(object):
def __float__(self):
return flt_arg
class JustFloatOS:
def __float__(self):
return flt_arg
for f in self.test_functions:
# usual usage
self.assertEqual(f(MyComplex(cx_arg)), f(cx_arg))
self.assertEqual(f(MyComplexOS(cx_arg)), f(cx_arg))
# other combinations of __float__ and __complex__
self.assertEqual(f(FloatAndComplex()), f(cx_arg))
self.assertEqual(f(FloatAndComplexOS()), f(cx_arg))
self.assertEqual(f(JustFloat()), f(flt_arg))
self.assertEqual(f(JustFloatOS()), f(flt_arg))
# TypeError should be raised for classes not providing
# either __complex__ or __float__, even if they provide
# __int__, __long__ or __index__. An old-style class
# currently raises AttributeError instead of a TypeError;
# this could be considered a bug.
self.assertRaises(TypeError, f, NeitherComplexNorFloat())
self.assertRaises(TypeError, f, MyInt())
self.assertRaises(Exception, f, NeitherComplexNorFloatOS())
self.assertRaises(Exception, f, MyIntOS())
# non-complex return value from __complex__ -> TypeError
for bad_complex in non_complexes:
self.assertRaises(TypeError, f, MyComplex(bad_complex))
self.assertRaises(TypeError, f, MyComplexOS(bad_complex))
# exceptions in __complex__ should be propagated correctly
self.assertRaises(SomeException, f, MyComplexException())
self.assertRaises(SomeException, f, MyComplexExceptionOS())
def test_input_type(self):
# ints and longs should be acceptable inputs to all cmath
# functions, by virtue of providing a __float__ method
for f in self.test_functions:
for arg in [2, 2L, 2.]:
self.assertEqual(f(arg), f(arg.__float__()))
# but strings should give a TypeError
for f in self.test_functions:
for arg in ["a", "long_string", "0", "1j", ""]:
self.assertRaises(TypeError, f, arg)
def test_cmath_matches_math(self):
# check that corresponding cmath and math functions are equal
# for floats in the appropriate range
# test_values in (0, 1)
test_values = [0.01, 0.1, 0.2, 0.5, 0.9, 0.99]
# test_values for functions defined on [-1., 1.]
unit_interval = test_values + [-x for x in test_values] + \
[0., 1., -1.]
# test_values for acosh, atanh
ge_one = [1.] + [1./x for x in test_values]
unit_open = test_values + [-x for x in test_values] + [0.]
# test_values for log, log10, sqrt
positive = test_values + ge_one
nonnegative = [0.] + positive
# test_values for functions defined on the whole real line
real_line = [0.] + positive + [-x for x in positive]
test_functions = {
'acos' : unit_interval,
'asin' : unit_interval,
'atan' : real_line,
'acosh' : ge_one, # Jython added
'asinh' : real_line, # Jython added
'atanh' : unit_open, # Jython added
'cos' : real_line,
'cosh' : real_line,
'exp' : real_line,
'log' : positive,
'log10' : positive,
'sin' : real_line,
'sinh' : real_line,
'sqrt' : nonnegative,
'tan' : real_line,
'tanh' : real_line}
for fn, values in test_functions.items():
float_fn = getattr(math, fn)
complex_fn = getattr(cmath, fn)
for v in values:
z = complex_fn(v)
self.rAssertAlmostEqual(float_fn(v), z.real)
self.assertEqual(0., z.imag)
# test two-argument version of log with various bases
for base in [0.5, 2., 10.]:
for v in positive:
z = cmath.log(v, base)
self.rAssertAlmostEqual(math.log(v, base), z.real)
self.assertEqual(0., z.imag)
def test_specific_values(self):
if not float.__getformat__("double").startswith("IEEE"):
self.skipTest('needs IEEE double')
def rect_complex(z):
"""Wrapped version of rect that accepts a complex number instead of
two float arguments."""
return cmath.rect(z.real, z.imag)
def polar_complex(z):
"""Wrapped version of polar that returns a complex number instead of
two floats."""
return complex(*polar(z))
raised_fmt = '\n' \
'{}: {}(complex({!r}, {!r}))\n' \
'Raised: {!r}\n' \
'Expected: complex({!r}, {!r})'
not_raised_fmt = '\n' \
'{} not raised in test {}: {}(complex({!r}, {!r}))\n' \
'Received: complex({!r}, {!r})'
value_fmt = '\n' \
'{}: {}(complex({!r}, {!r}))\n' \
'Expected: complex({!r}, {!r})\n' \
'Received: complex({!r}, {!r})\n' \
'Received value insufficiently close to expected value.'
failures = 0
for id, fn, ar, ai, er, ei, flags in parse_testfile(test_file):
arg = complex(ar, ai)
if fn == 'rect':
function = rect_complex
elif fn == 'polar':
function = polar_complex
else:
function = getattr(cmath, fn)
try:
# Catch and count failing test cases locally
if 'divide-by-zero' in flags or 'invalid' in flags:
try:
actual = function(arg)
except ValueError:
pass
else:
failures += 1
self.fail(not_raised_fmt.format('ValueError',
id, fn, ar, ai, actual.real, actual.imag))
elif 'overflow' in flags:
try:
actual = function(arg)
except OverflowError:
pass
else:
failures += 1
self.fail(not_raised_fmt.format('OverflowError',
id, fn, ar, ai, actual.real, actual.imag))
else :
actual = function(arg)
# Make sign of expected conform to actual, where ignored.
exr, exi = er, ei
if 'ignore-real-sign' in flags:
exr = math.copysign(er, actual.real)
if 'ignore-imag-sign' in flags:
exi = math.copysign(ei, actual.imag)
# for the real part of the log function, we allow an
# absolute error of up to 2e-15.
if fn in ('log', 'log10'):
real_abs_err = 2e-15
else:
real_abs_err = 5e-323
error_message = value_fmt.format(id, fn, ar, ai, er, ei,
actual.real, actual.imag)
self.rAssertAlmostEqual(exr, actual.real,
abs_err=real_abs_err,
msg=error_message)
self.rAssertAlmostEqual(exi, actual.imag,
msg=error_message)
except AssertionError as ex:
failures += 1
if verbose :
print(ex)
except BaseException as ex:
failures += 1
if verbose :
print(raised_fmt.format(id, fn, ar, ai, ex, er, ei))
if failures :
self.fail('{} discrepancies'.format(failures))
def assertCISEqual(self, a, b):
eps = 1E-7
if abs(a[0] - b[0]) > eps or abs(a[1] - b[1]) > eps:
            self.fail((a, b))
def test_polar(self):
self.assertCISEqual(polar(0), (0., 0.))
self.assertCISEqual(polar(1.), (1., 0.))
self.assertCISEqual(polar(-1.), (1., pi))
self.assertCISEqual(polar(1j), (1., pi/2))
self.assertCISEqual(polar(-1j), (1., -pi/2))
def test_phase(self):
self.assertAlmostEqual(phase(0), 0.)
self.assertAlmostEqual(phase(1.), 0.)
self.assertAlmostEqual(phase(-1.), pi)
self.assertAlmostEqual(phase(-1.+1E-300j), pi)
self.assertAlmostEqual(phase(-1.-1E-300j), -pi)
self.assertAlmostEqual(phase(1j), pi/2)
self.assertAlmostEqual(phase(-1j), -pi/2)
# zeros
self.assertEqual(phase(complex(0.0, 0.0)), 0.0)
self.assertEqual(phase(complex(0.0, -0.0)), -0.0)
self.assertEqual(phase(complex(-0.0, 0.0)), pi)
self.assertEqual(phase(complex(-0.0, -0.0)), -pi)
# infinities
self.assertAlmostEqual(phase(complex(-INF, -0.0)), -pi)
self.assertAlmostEqual(phase(complex(-INF, -2.3)), -pi)
self.assertAlmostEqual(phase(complex(-INF, -INF)), -0.75*pi)
self.assertAlmostEqual(phase(complex(-2.3, -INF)), -pi/2)
self.assertAlmostEqual(phase(complex(-0.0, -INF)), -pi/2)
self.assertAlmostEqual(phase(complex(0.0, -INF)), -pi/2)
self.assertAlmostEqual(phase(complex(2.3, -INF)), -pi/2)
self.assertAlmostEqual(phase(complex(INF, -INF)), -pi/4)
self.assertEqual(phase(complex(INF, -2.3)), -0.0)
self.assertEqual(phase(complex(INF, -0.0)), -0.0)
self.assertEqual(phase(complex(INF, 0.0)), 0.0)
self.assertEqual(phase(complex(INF, 2.3)), 0.0)
self.assertAlmostEqual(phase(complex(INF, INF)), pi/4)
self.assertAlmostEqual(phase(complex(2.3, INF)), pi/2)
self.assertAlmostEqual(phase(complex(0.0, INF)), pi/2)
self.assertAlmostEqual(phase(complex(-0.0, INF)), pi/2)
self.assertAlmostEqual(phase(complex(-2.3, INF)), pi/2)
self.assertAlmostEqual(phase(complex(-INF, INF)), 0.75*pi)
self.assertAlmostEqual(phase(complex(-INF, 2.3)), pi)
self.assertAlmostEqual(phase(complex(-INF, 0.0)), pi)
# real or imaginary part NaN
for z in complex_nans:
self.assertTrue(math.isnan(phase(z)))
def test_abs(self):
# zeros
for z in complex_zeros:
self.assertEqual(abs(z), 0.0)
# infinities
for z in complex_infinities:
self.assertEqual(abs(z), INF)
# real or imaginary part NaN
self.assertEqual(abs(complex(NAN, -INF)), INF)
self.assertTrue(math.isnan(abs(complex(NAN, -2.3))))
self.assertTrue(math.isnan(abs(complex(NAN, -0.0))))
self.assertTrue(math.isnan(abs(complex(NAN, 0.0))))
self.assertTrue(math.isnan(abs(complex(NAN, 2.3))))
self.assertEqual(abs(complex(NAN, INF)), INF)
self.assertEqual(abs(complex(-INF, NAN)), INF)
self.assertTrue(math.isnan(abs(complex(-2.3, NAN))))
self.assertTrue(math.isnan(abs(complex(-0.0, NAN))))
self.assertTrue(math.isnan(abs(complex(0.0, NAN))))
self.assertTrue(math.isnan(abs(complex(2.3, NAN))))
self.assertEqual(abs(complex(INF, NAN)), INF)
self.assertTrue(math.isnan(abs(complex(NAN, NAN))))
# result overflows
if float.__getformat__("double").startswith("IEEE"):
self.assertRaises(OverflowError, abs, complex(1.4e308, 1.4e308))
def assertCEqual(self, a, b):
eps = 1E-7
if abs(a.real - b[0]) > eps or abs(a.imag - b[1]) > eps:
            self.fail((a, b))
def test_rect(self):
self.assertCEqual(rect(0, 0), (0, 0))
self.assertCEqual(rect(1, 0), (1., 0))
self.assertCEqual(rect(1, -pi), (-1., 0))
self.assertCEqual(rect(1, pi/2), (0, 1.))
self.assertCEqual(rect(1, -pi/2), (0, -1.))
def test_isnan(self):
self.assertFalse(cmath.isnan(1))
self.assertFalse(cmath.isnan(1j))
self.assertFalse(cmath.isnan(INF))
self.assertTrue(cmath.isnan(NAN))
self.assertTrue(cmath.isnan(complex(NAN, 0)))
self.assertTrue(cmath.isnan(complex(0, NAN)))
self.assertTrue(cmath.isnan(complex(NAN, NAN)))
self.assertTrue(cmath.isnan(complex(NAN, INF)))
self.assertTrue(cmath.isnan(complex(INF, NAN)))
def test_isinf(self):
self.assertFalse(cmath.isinf(1))
self.assertFalse(cmath.isinf(1j))
self.assertFalse(cmath.isinf(NAN))
self.assertTrue(cmath.isinf(INF))
self.assertTrue(cmath.isinf(complex(INF, 0)))
self.assertTrue(cmath.isinf(complex(0, INF)))
self.assertTrue(cmath.isinf(complex(INF, INF)))
self.assertTrue(cmath.isinf(complex(NAN, INF)))
self.assertTrue(cmath.isinf(complex(INF, NAN)))
def test_main():
run_unittest(CMathTests)
if __name__ == "__main__":
test_main()
|
apache-2.0
|
tbombach/autorest
|
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyBoolean/autorestbooltestservice/auto_rest_bool_test_service.py
|
8
|
2187
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from .operations.bool_model_operations import BoolModelOperations
from . import models
class AutoRestBoolTestServiceConfiguration(Configuration):
"""Configuration for AutoRestBoolTestService
Note that all parameters used to create this instance are saved as instance
attributes.
:param str base_url: Service URL
:param str filepath: Existing config
"""
def __init__(
self, base_url=None, filepath=None):
if not base_url:
base_url = 'http://localhost'
super(AutoRestBoolTestServiceConfiguration, self).__init__(base_url, filepath)
self.add_user_agent('autorestbooltestservice/{}'.format(VERSION))
class AutoRestBoolTestService(object):
"""Test Infrastructure for AutoRest
:ivar config: Configuration for client.
:vartype config: AutoRestBoolTestServiceConfiguration
:ivar bool_model: BoolModel operations
:vartype bool_model: .operations.BoolModelOperations
:param str base_url: Service URL
:param str filepath: Existing config
"""
def __init__(
self, base_url=None, filepath=None):
self.config = AutoRestBoolTestServiceConfiguration(base_url, filepath)
self._client = ServiceClient(None, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.bool_model = BoolModelOperations(
self._client, self.config, self._serialize, self._deserialize)
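# Usage sketch (ours; the operation names on BoolModelOperations come from
# the generated code and are assumed here, e.g. get_true):
#
#     client = AutoRestBoolTestService(base_url='http://localhost:3000')
#     result = client.bool_model.get_true()   # expected: True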
|
mit
|
trbs/kafka-python
|
kafka/consumer/multiprocess.py
|
15
|
10401
|
from __future__ import absolute_import
from collections import namedtuple
import logging
from multiprocessing import Process, Manager as MPManager
try:
from Queue import Empty, Full # python 3
except ImportError:
from queue import Empty, Full # python 2
import time
from .base import (
Consumer,
AUTO_COMMIT_MSG_COUNT, AUTO_COMMIT_INTERVAL,
NO_MESSAGES_WAIT_TIME_SECONDS,
FULL_QUEUE_WAIT_TIME_SECONDS
)
from .simple import SimpleConsumer
log = logging.getLogger(__name__)
Events = namedtuple("Events", ["start", "pause", "exit"])
def _mp_consume(client, group, topic, queue, size, events, **consumer_options):
"""
A child process worker which consumes messages based on the
notifications given by the controller process
    NOTE: Ideally, this should have been a method inside the Consumer
    class. However, the multiprocessing module has issues on Windows. The
    functionality breaks unless this function is kept outside of a class
"""
# Make the child processes open separate socket connections
client.reinit()
# We will start consumers without auto-commit. Auto-commit will be
# done by the master controller process.
consumer = SimpleConsumer(client, group, topic,
auto_commit=False,
auto_commit_every_n=None,
auto_commit_every_t=None,
**consumer_options)
# Ensure that the consumer provides the partition information
consumer.provide_partition_info()
while True:
        # Wait till the controller signals us to start consumption
events.start.wait()
# If we are asked to quit, do so
if events.exit.is_set():
break
# Consume messages and add them to the queue. If the controller
# indicates a specific number of messages, follow that advice
count = 0
message = consumer.get_message()
if message:
while True:
try:
queue.put(message, timeout=FULL_QUEUE_WAIT_TIME_SECONDS)
break
except Full:
if events.exit.is_set(): break
count += 1
                # We have reached the required size. The controller might have
                # more than it needs. Wait for a while.
# Without this logic, it is possible that we run into a big
# loop consuming all available messages before the controller
# can reset the 'start' event
if count == size.value:
events.pause.wait()
else:
# In case we did not receive any message, give up the CPU for
# a while before we try again
time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)
consumer.stop()
class MultiProcessConsumer(Consumer):
"""
A consumer implementation that consumes partitions for a topic in
parallel using multiple processes
Arguments:
client: a connected KafkaClient
group: a name for this consumer, used for offset storage and must be unique
If you are connecting to a server that does not support offset
commit/fetch (any prior to 0.8.1.1), then you *must* set this to None
topic: the topic to consume
Keyword Arguments:
partitions: An optional list of partitions to consume the data from
auto_commit: default True. Whether or not to auto commit the offsets
auto_commit_every_n: default 100. How many messages to consume
before a commit
auto_commit_every_t: default 5000. How much time (in milliseconds) to
wait before commit
num_procs: Number of processes to start for consuming messages.
The available partitions will be divided among these processes
partitions_per_proc: Number of partitions to be allocated per process
(overrides num_procs)
Auto commit details:
If both auto_commit_every_n and auto_commit_every_t are set, they will
reset one another when one is triggered. These triggers simply call the
commit method on this class. A manual call to commit will also reset
these triggers
"""
def __init__(self, client, group, topic,
partitions=None,
auto_commit=True,
auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
auto_commit_every_t=AUTO_COMMIT_INTERVAL,
num_procs=1,
partitions_per_proc=0,
**simple_consumer_options):
# Initiate the base consumer class
super(MultiProcessConsumer, self).__init__(
client, group, topic,
partitions=partitions,
auto_commit=auto_commit,
auto_commit_every_n=auto_commit_every_n,
auto_commit_every_t=auto_commit_every_t)
# Variables for managing and controlling the data flow from
# consumer child process to master
manager = MPManager()
self.queue = manager.Queue(1024) # Child consumers dump messages into this
self.events = Events(
start = manager.Event(), # Indicates the consumers to start fetch
exit = manager.Event(), # Requests the consumers to shutdown
pause = manager.Event()) # Requests the consumers to pause fetch
self.size = manager.Value('i', 0) # Indicator of number of messages to fetch
# dict.keys() returns a view in py3 + it's not a thread-safe operation
# http://blog.labix.org/2008/06/27/watch-out-for-listdictkeys-in-python-3
# It's safer to copy dict as it only runs during the init.
partitions = list(self.offsets.copy().keys())
# By default, start one consumer process for all partitions
# The logic below ensures that
# * we do not cross the num_procs limit
# * we have an even distribution of partitions among processes
if partitions_per_proc:
            num_procs = len(partitions) // partitions_per_proc
if num_procs * partitions_per_proc < len(partitions):
num_procs += 1
# The final set of chunks
chunks = [partitions[proc::num_procs] for proc in range(num_procs)]
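        # e.g. 5 partitions and num_procs=2 -> chunks == [[p0, p2, p4], [p1, p3]]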
self.procs = []
for chunk in chunks:
options = {'partitions': list(chunk)}
if simple_consumer_options:
simple_consumer_options.pop('partitions', None)
options.update(simple_consumer_options)
args = (client.copy(), self.group, self.topic, self.queue,
self.size, self.events)
proc = Process(target=_mp_consume, args=args, kwargs=options)
proc.daemon = True
proc.start()
self.procs.append(proc)
def __repr__(self):
return '<MultiProcessConsumer group=%s, topic=%s, consumers=%d>' % \
(self.group, self.topic, len(self.procs))
def stop(self):
# Set exit and start off all waiting consumers
self.events.exit.set()
self.events.pause.set()
self.events.start.set()
for proc in self.procs:
proc.join()
proc.terminate()
super(MultiProcessConsumer, self).stop()
def __iter__(self):
"""
Iterator to consume the messages available on this consumer
"""
# Trigger the consumer procs to start off.
# We will iterate till there are no more messages available
self.size.value = 0
self.events.pause.set()
while True:
self.events.start.set()
try:
# We will block for a small while so that the consumers get
# a chance to run and put some messages in the queue
# TODO: This is a hack and will make the consumer block for
# at least one second. Need to find a better way of doing this
partition, message = self.queue.get(block=True, timeout=1)
except Empty:
break
# Count, check and commit messages if necessary
self.offsets[partition] = message.offset + 1
self.events.start.clear()
self.count_since_commit += 1
self._auto_commit()
yield message
self.events.start.clear()
def get_messages(self, count=1, block=True, timeout=10):
"""
Fetch the specified number of messages
Keyword Arguments:
count: Indicates the maximum number of messages to be fetched
block: If True, the API will block till some messages are fetched.
timeout: If block is True, the function will block for the specified
time (in seconds) until count messages is fetched. If None,
it will block forever.
"""
messages = []
# Give a size hint to the consumers. Each consumer process will fetch
# a maximum of "count" messages. This will fetch more messages than
# necessary, but these will not be committed to kafka. Also, the extra
# messages can be provided in subsequent runs
self.size.value = count
self.events.pause.clear()
if timeout is not None:
max_time = time.time() + timeout
new_offsets = {}
while count > 0 and (timeout is None or timeout > 0):
# Trigger consumption only if the queue is empty
# By doing this, we will ensure that consumers do not
# go into overdrive and keep consuming thousands of
# messages when the user might need only a few
if self.queue.empty():
self.events.start.set()
try:
partition, message = self.queue.get(block, timeout)
except Empty:
break
messages.append(message)
new_offsets[partition] = message.offset + 1
count -= 1
if timeout is not None:
timeout = max_time - time.time()
self.size.value = 0
self.events.start.clear()
self.events.pause.set()
# Update and commit offsets if necessary
self.offsets.update(new_offsets)
self.count_since_commit += len(messages)
self._auto_commit()
return messages
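# Usage sketch (ours), assuming a reachable broker and the legacy
# KafkaClient API shipped alongside this consumer:
#
#     from kafka import KafkaClient
#     client = KafkaClient('localhost:9092')
#     consumer = MultiProcessConsumer(client, b'my-group', b'my-topic',
#                                     num_procs=2)
#     for message in consumer:   # iteration yields messages; offsets tracked
#         print(message)
#     consumer.stop()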
|
apache-2.0
|
jtimberman/omnibus
|
source/libxml2-2.7.7/python/tests/xpathret.py
|
87
|
1312
|
#!/usr/bin/python -u
import sys
import libxml2
#memory debug specific
libxml2.debugMemory(1)
#
# A document hosting the nodes returned from the extension function
#
mydoc = libxml2.newDoc("1.0")
def foo(ctx, str):
global mydoc
#
# test returning a node set works as expected
#
parent = mydoc.newDocNode(None, 'p', None)
mydoc.addChild(parent)
node = mydoc.newDocText(str)
parent.addChild(node)
return [parent]
doc = libxml2.parseFile("tst.xml")
ctxt = doc.xpathNewContext()
libxml2.registerXPathFunction(ctxt._o, "foo", None, foo)
res = ctxt.xpathEval("foo('hello')")
if type(res) != type([]):
print "Failed to return a nodeset"
sys.exit(1)
if len(res) != 1:
print "Unexpected nodeset size"
sys.exit(1)
node = res[0]
if node.name != 'p':
print "Unexpected nodeset element result"
sys.exit(1)
node = node.children
if node.type != 'text':
print "Unexpected nodeset element children type"
sys.exit(1)
if node.content != 'hello':
print "Unexpected nodeset element children content"
sys.exit(1)
doc.freeDoc()
mydoc.freeDoc()
ctxt.xpathFreeContext()
#memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
|
apache-2.0
|
OpenEdgeComputing/elijah-openstack
|
compute/cloudlet_driver.py
|
2
|
39971
|
# Elijah: Cloudlet Infrastructure for Mobile Computing
#
# Author: Kiryong Ha <krha@cmu.edu>
#
# Copyright (C) 2011-2014 Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import uuid
import hashlib
import subprocess
import select
import shutil
import StringIO
from urlparse import urlsplit
from tempfile import mkdtemp # replace it to util.tempdir
from nova.virt.libvirt import blockinfo
from nova.compute import power_state
from nova import exception
from nova import utils
from nova.virt import driver
from nova.virt.libvirt import utils as libvirt_utils
from nova.image import glance
from nova.compute import task_states
from nova.openstack.common import fileutils
try:
# icehouse
from nova.openstack.common.gettextutils import _
except ImportError as e:
# kilo
from nova.i18n import _
from nova.openstack.common import loopingcall
from nova.virt.libvirt import driver as libvirt_driver
from nova.compute.cloudlet_api import CloudletAPI
from xml.etree import ElementTree
from elijah.provisioning import synthesis
from elijah.provisioning import handoff
try:
from elijah.provisioning import msgpack
except ImportError as e:
import msgpack
from elijah.provisioning import compression
from elijah.provisioning.package import VMOverlayPackage
from elijah.provisioning.configuration import Const as Cloudlet_Const
from elijah.provisioning.configuration import Options
import logging
LOG = logging.getLogger(__name__)
synthesis.LOG = LOG # overwrite cloudlet's own log
class CloudletDriver(libvirt_driver.LibvirtDriver):
def __init__(self, read_only=False):
super(CloudletDriver, self).__init__(read_only)
# manage VM overlay list
self.resumed_vm_dict = dict()
# manage synthesized VM list
self.synthesized_vm_dics = dict()
def _get_snapshot_metadata(self, virt_dom, context, instance, snapshot_id):
_image_service = glance.get_remote_image_service(context, snapshot_id)
snapshot_image_service, snapshot_image_id = _image_service
snapshot = snapshot_image_service.show(context, snapshot_image_id)
metadata = {'is_public': True,
'status': 'active',
'name': snapshot['name'],
'properties': {
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance['project_id'],
'ramdisk_id': instance['ramdisk_id'],
}
}
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
try:
base = image_service.show(context, image_id)
except exception.ImageNotFound:
base = {}
if 'architecture' in base.get('properties', {}):
arch = base['properties']['architecture']
metadata['properties']['architecture'] = arch
metadata['disk_format'] = 'raw'
metadata['container_format'] = base.get('container_format', 'bare')
return metadata
def _update_to_glance(self, context, image_service, filepath,
meta_id, metadata):
with libvirt_utils.file_open(filepath) as image_file:
image_service.update(context,
meta_id,
metadata,
image_file)
@exception.wrap_exception()
def cloudlet_base(self, context, instance, vm_name,
disk_meta_id, memory_meta_id,
diskhash_meta_id, memoryhash_meta_id, update_task_state):
"""create base vm and save it to glance
"""
try:
if hasattr(self, "_lookup_by_name"):
# icehouse
virt_dom = self._lookup_by_name(instance['name'])
else:
# kilo
virt_dom = self._host.get_domain(instance)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
# pause VM
self.pause(instance)
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
disk_metadata = self._get_snapshot_metadata(virt_dom, context,
instance, disk_meta_id)
mem_metadata = self._get_snapshot_metadata(virt_dom, context,
instance, memory_meta_id)
diskhash_metadata = self._get_snapshot_metadata(
virt_dom, context, instance, diskhash_meta_id)
memhash_metadata = self._get_snapshot_metadata(
virt_dom, context, instance, memoryhash_meta_id)
disk_path = libvirt_utils.find_disk(virt_dom)
source_format = libvirt_utils.get_disk_type(disk_path)
snapshot_name = uuid.uuid4().hex
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = libvirt_driver.LIBVIRT_POWER_STATE[state]
# creating base vm requires cold snapshotting
snapshot_backend = self.image_backend.snapshot(
disk_path,
image_type=source_format)
LOG.info(_("Beginning cold snapshot process"),
instance=instance)
        # not available in Icehouse
# snapshot_backend.snapshot_create()
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD,
expected_state=None)
snapshot_directory = libvirt_driver.CONF.libvirt.snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
# At this point, base vm should be "raw" format
snapshot_backend.snapshot_extract(out_path, "raw")
finally:
                # The snapshotting logic changed in Icehouse:
                # snapshot_create and snapshot_delete no longer exist;
                # snapshot_extract replaces both operations.
# snapshot_backend.snapshot_delete()
LOG.info(_("Snapshot extracted, beginning image upload"),
instance=instance)
            # generate memory snapshot and hash lists
basemem_path = os.path.join(tmpdir, snapshot_name+"-mem")
diskhash_path = os.path.join(tmpdir, snapshot_name+"-disk_hash")
memhash_path = os.path.join(tmpdir, snapshot_name+"-mem_hash")
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
synthesis._create_baseVM(self._conn,
virt_dom,
out_path,
basemem_path,
diskhash_path,
memhash_path,
nova_util=libvirt_utils)
self._update_to_glance(context, image_service, out_path,
disk_meta_id, disk_metadata)
LOG.info(_("Base disk upload complete"), instance=instance)
self._update_to_glance(context, image_service, basemem_path,
memory_meta_id, mem_metadata)
LOG.info(_("Base memory image upload complete"), instance=instance)
        self._update_to_glance(context, image_service, diskhash_path,
                               diskhash_meta_id, diskhash_metadata)
        LOG.info(_("Base disk hash upload complete"), instance=instance)
        self._update_to_glance(context, image_service, memhash_path,
                               memoryhash_meta_id, memhash_metadata)
        LOG.info(_("Base memory hash upload complete"), instance=instance)
def _create_network_only(self, xml, instance, network_info,
block_device_info=None):
"""Only perform network setup but skip set-up for domain (vm instance)
because cloudlet code takes care of domain
"""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(
libvirt_driver.CONF.libvirt.virt_type, disk_dev
),
'type': 'disk',
}
self.volume_driver_method('connect_volume',
connection_info,
disk_info)
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
self.firewall_driver.apply_instance_filter(instance, network_info)
def create_overlay_vm(self, context, instance,
overlay_name, overlay_id, update_task_state):
try:
if hasattr(self, "_lookup_by_name"):
# icehouse
virt_dom = self._lookup_by_name(instance['name'])
else:
# kilo
virt_dom = self._host.get_domain(instance)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
# make sure base vm is cached
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
image_meta = image_service.show(context, image_id)
base_sha256_uuid, memory_snap_id, diskhash_snap_id, memhash_snap_id = \
self._get_basevm_meta_info(image_meta)
self._get_cache_image(context, instance, image_meta['id'])
self._get_cache_image(context, instance, memory_snap_id)
self._get_cache_image(context, instance, diskhash_snap_id)
self._get_cache_image(context, instance, memhash_snap_id)
# pause VM
self.pause(instance)
# create VM overlay
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
meta_metadata = self._get_snapshot_metadata(virt_dom, context,
instance, overlay_id)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD,
expected_state=None)
vm_overlay = self.resumed_vm_dict.get(instance['uuid'], None)
if vm_overlay is None:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
del self.resumed_vm_dict[instance['uuid']]
vm_overlay.create_overlay()
overlay_zip = vm_overlay.overlay_zipfile
LOG.info("overlay : %s" % str(overlay_zip))
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
# export to glance
self._update_to_glance(context, image_service, overlay_zip,
overlay_id, meta_metadata)
LOG.info(_("overlay_vm upload complete"), instance=instance)
if os.path.exists(overlay_zip):
os.remove(overlay_zip)
def perform_vmhandoff(self, context, instance, handoff_url,
update_task_state, residue_glance_id=None):
try:
if hasattr(self, "_lookup_by_name"):
# icehouse
virt_dom = self._lookup_by_name(instance['name'])
else:
# kilo
virt_dom = self._host.get_domain(instance)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
synthesized_vm = self.synthesized_vm_dics.get(instance['uuid'], None)
if synthesized_vm is None:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
# get the file path for Base VM and VM overlay
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
image_meta = image_service.show(context, image_id)
base_sha256_uuid, memory_snap_id, diskhash_snap_id, memhash_snap_id = \
self._get_basevm_meta_info(image_meta)
basedisk_path = self._get_cache_image(
context, instance, image_meta['id'])
basemem_path = self._get_cache_image(context, instance, memory_snap_id)
diskhash_path = self._get_cache_image(
context, instance, diskhash_snap_id)
memhash_path = self._get_cache_image(
context, instance, memhash_snap_id)
base_vm_paths = [basedisk_path, basemem_path,
diskhash_path, memhash_path]
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD,
expected_state=None)
try:
residue_filepath = self._handoff_send(
base_vm_paths, base_sha256_uuid, synthesized_vm, handoff_url
)
except handoff.HandoffError as e:
msg = "failed to perform VM handoff:\n"
msg += str(e)
raise exception.ImageNotFound(msg)
del self.synthesized_vm_dics[instance['uuid']]
if residue_filepath:
LOG.info("residue saved at %s" % residue_filepath)
if residue_filepath and residue_glance_id:
# export to glance
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
meta_metadata = self._get_snapshot_metadata(
virt_dom,
context,
instance,
residue_glance_id)
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
self._update_to_glance(context, image_service, residue_filepath,
residue_glance_id, meta_metadata)
# clean up
LOG.info(_("VM residue upload complete"), instance=instance)
if residue_filepath and os.path.exists(residue_filepath):
os.remove(residue_filepath)
def _handoff_send(self, base_vm_paths, base_hashvalue,
synthesized_vm, handoff_url):
"""
"""
# preload basevm hash dictionary for creating residue
(basedisk_path, basemem_path,
diskhash_path, memhash_path) = base_vm_paths
preload_thread = handoff.PreloadResidueData(diskhash_path, memhash_path)
preload_thread.start()
preload_thread.join()
options = Options()
options.TRIM_SUPPORT = True
options.FREE_SUPPORT = True
options.DISK_ONLY = False
# Set up temp file path for data structure and residue
residue_tmp_dir = mkdtemp(prefix="cloudlet-residue-")
handoff_send_datafile = os.path.join(residue_tmp_dir, "handoff_data")
residue_zipfile = None
dest_handoff_url = handoff_url
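        # For a file:// destination the residue is packaged into a local
        # zipfile; for any other scheme the handoff process streams the data
        # to the destination URL directly.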
parsed_handoff_url = urlsplit(handoff_url)
if parsed_handoff_url.scheme == "file":
residue_zipfile = os.path.join(
residue_tmp_dir, Cloudlet_Const.OVERLAY_ZIP)
dest_handoff_url = "file://%s" % os.path.abspath(residue_zipfile)
        # handoff mode (TODO: make it serializable)
handoff_mode = None # use default
# data structure for handoff sending
handoff_ds_send = handoff.HandoffDataSend()
LOG.debug("save handoff data to %s" % handoff_send_datafile)
if hasattr(self, "uri"):
# icehouse
libvirt_uri = self.uri()
else:
# kilo
libvirt_uri = self._uri()
handoff_ds_send.save_data(
base_vm_paths, base_hashvalue,
preload_thread.basedisk_hashdict,
preload_thread.basemem_hashdict,
options, dest_handoff_url, handoff_mode,
synthesized_vm.fuse.mountpoint, synthesized_vm.qemu_logfile,
synthesized_vm.qmp_channel, synthesized_vm.machine.ID(),
synthesized_vm.fuse.modified_disk_chunks, libvirt_uri,
)
LOG.debug("start handoff send process")
handoff_ds_send.to_file(handoff_send_datafile)
cmd = ["/usr/local/bin/handoff-proc", "%s" % handoff_send_datafile]
LOG.debug("subprocess: %s" % cmd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, close_fds=True)
        def _wait_for_handoff_send(print_log=False):
            """Called at an interval until the handoff send process finishes."""
            returncode = proc.poll()
            if returncode is None:
                # keep draining stdout so the child process does not block
                LOG.debug("waiting for handoff send to finish")
                in_ready, _, _ = select.select([proc.stdout], [], [])
                try:
                    buf = os.read(proc.stdout.fileno(), 1024*100)
                    if print_log:
                        LOG.debug(buf)
                except OSError as e:
                    if e.errno == errno.EAGAIN or e.errno == errno.EWOULDBLOCK:
                        return
            else:
                raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(
_wait_for_handoff_send,
print_log=True)
timer.start(interval=0.5).wait()
LOG.info("Handoff send finishes")
return residue_zipfile
def _get_cache_image(self, context, instance, snapshot_id, suffix=''):
def basepath(fname='', suffix=suffix):
return os.path.join(libvirt_utils.get_instance_path(instance),
fname + suffix)
def raw(fname, image_type='raw'):
return self.image_backend.image(instance, fname, image_type)
# ensure directories exist and are writable
fileutils.ensure_tree(basepath(suffix=''))
fname = hashlib.sha1(snapshot_id).hexdigest()
LOG.debug(_("cloudlet, caching file at %s" % fname))
size = instance['root_gb'] * 1024 * 1024 * 1024
if size == 0:
size = None
raw('disk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
size=size,
image_id=snapshot_id,
user_id=instance['user_id'],
project_id=instance['project_id'])
# from cache method at virt/libvirt/imagebackend.py
abspath = os.path.join(
libvirt_driver.CONF.instances_path,
libvirt_driver.CONF.image_cache_subdirectory_name,
fname)
return abspath
def _polish_VM_configuration(self, xml):
# remove cpu element
cpu_element = xml.find("cpu")
if cpu_element is not None:
xml.remove(cpu_element)
# TODO: Handle console/serial element properly
device_element = xml.find("devices")
console_elements = device_element.findall("console")
for console_element in console_elements:
device_element.remove(console_element)
serial_elements = device_element.findall("serial")
for serial_element in serial_elements:
device_element.remove(serial_element)
# remove O_DIRECT option since FUSE does not support it
disk_elements = xml.findall('devices/disk')
for disk_element in disk_elements:
disk_type = disk_element.attrib['device']
if disk_type == 'disk':
hdd_driver = disk_element.find("driver")
if hdd_driver is not None and hdd_driver.get("cache", None) is not None:
del hdd_driver.attrib['cache']
xml_str = ElementTree.tostring(xml)
return xml_str
def _get_basevm_meta_info(self, image_meta):
# get memory_snapshot_id for resume case
base_sha256_uuid = None
memory_snap_id = None
diskhash_snap_id = None
memhash_snap_id = None
meta_data = image_meta.get('properties', None)
if meta_data and meta_data.get(CloudletAPI.IMAGE_TYPE_BASE_MEM):
base_sha256_uuid = str(
meta_data.get(CloudletAPI.PROPERTY_KEY_BASE_UUID))
memory_snap_id = str(
meta_data.get(CloudletAPI.IMAGE_TYPE_BASE_MEM))
diskhash_snap_id = str(
meta_data.get(CloudletAPI.IMAGE_TYPE_BASE_DISK_HASH))
memhash_snap_id = str(
meta_data.get(CloudletAPI.IMAGE_TYPE_BASE_MEM_HASH))
LOG.debug(
_("cloudlet, get base sha256 uuid: %s" % str(base_sha256_uuid)))
LOG.debug(
_("cloudlet, get memory_snapshot_id: %s" % str(memory_snap_id)))
LOG.debug(
_("cloudlet, get disk_hash_id: %s" % str(diskhash_snap_id)))
LOG.debug(
_("cloudlet, get memory_hash_id: %s" % str(memhash_snap_id)))
return base_sha256_uuid, memory_snap_id, diskhash_snap_id, memhash_snap_id
def _get_VM_overlay_url(self, instance):
# get overlay from instance metadata for synthesis case
overlay_url = None
instance_meta = instance.get('metadata', None)
if instance_meta is not None:
for (key, value) in instance_meta.iteritems():
if key == "overlay_url":
overlay_url = value
return overlay_url
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""overwrite original libvirt_driver's spawn method
"""
# add metadata to the instance
def _append_metadata(target_instance, metadata_dict):
original_meta = target_instance.get('metadata', None) or list()
original_meta.append(metadata_dict)
target_instance['metadata'] = original_meta
# get meta info related to VM synthesis
base_sha256_uuid, memory_snap_id, diskhash_snap_id, memhash_snap_id = \
self._get_basevm_meta_info(image_meta)
overlay_url = None
handoff_info = None
instance_meta = instance.get('metadata', None)
if instance_meta is not None:
if "overlay_url" in instance_meta.keys():
overlay_url = instance_meta.get("overlay_url")
if "handoff_info" in instance_meta.keys():
handoff_info = instance_meta.get("handoff_info")
# original openstack logic
disk_info = blockinfo.get_disk_info(
libvirt_driver.CONF.libvirt.virt_type,
instance,
block_device_info,
image_meta)
if hasattr(self, 'to_xml'): # icehouse
xml = self.to_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)
elif hasattr(self, '_get_guest_xml'): # kilo
xml = self._get_guest_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)
# handle xml configuration to make a portable VM
xml_obj = ElementTree.fromstring(xml)
xml = self._polish_VM_configuration(xml_obj)
# avoid injecting key, password, and metadata since we're resuming VM
original_inject_password = libvirt_driver.CONF.libvirt.inject_password
original_inject_key = libvirt_driver.CONF.libvirt.inject_key
original_metadata = instance.get('metadata')
libvirt_driver.CONF.libvirt.inject_password = None
libvirt_driver.CONF.libvirt.inject_key = None
instance['metadata'] = {}
self._create_image(context, instance,
disk_info['mapping'],
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
# revert back the configuration
libvirt_driver.CONF.libvirt.inject_password = original_inject_password
libvirt_driver.CONF.libvirt.inject_key = original_inject_key
instance['metadata'] = original_metadata
if (overlay_url is not None) and (handoff_info is None):
# spawn instance using VM synthesis
LOG.debug(_('cloudlet, synthesis start'))
# append metadata to the instance
self._create_network_only(xml, instance, network_info,
block_device_info)
synthesized_vm = self._spawn_using_synthesis(context, instance,
xml, image_meta,
overlay_url)
instance_uuid = str(instance.get('uuid', ''))
self.synthesized_vm_dics[instance_uuid] = synthesized_vm
elif handoff_info is not None:
# spawn instance using VM handoff
LOG.debug(_('cloudlet, Handoff start'))
self._create_network_only(xml, instance, network_info,
block_device_info)
synthesized_vm = self._spawn_using_handoff(context, instance,
xml, image_meta,
handoff_info)
            instance_uuid = str(instance.get('uuid', ''))
            self.synthesized_vm_dics[instance_uuid] = synthesized_vm
elif memory_snap_id is not None:
# resume from memory snapshot
LOG.debug(_('cloudlet, resume from memory snapshot'))
# append metadata to the instance
basedisk_path = self._get_cache_image(context, instance,
image_meta['id'])
basemem_path = self._get_cache_image(context, instance,
memory_snap_id)
diskhash_path = self._get_cache_image(context, instance,
diskhash_snap_id)
memhash_path = self._get_cache_image(context, instance,
memhash_snap_id)
LOG.debug(_('cloudlet, creating network'))
self._create_network_only(xml, instance, network_info,
block_device_info)
LOG.debug(_('cloudlet, resuming base vm'))
self.resume_basevm(instance, xml, basedisk_path, basemem_path,
diskhash_path, memhash_path, base_sha256_uuid)
else:
self._create_domain_and_network(context,
xml,
instance,
network_info,
block_device_info)
LOG.debug(_("Instance is running"), instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance).state
if state == power_state.RUNNING:
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
LOG.info(_("Instance spawned successfully."),
instance=instance)
def _destroy(self, instance):
"""overwrite original libvirt_driver's _destroy method
"""
super(CloudletDriver, self)._destroy(instance)
# get meta info related to VM synthesis
instance_uuid = str(instance.get('uuid', ''))
# check resumed base VM list
vm_overlay = self.resumed_vm_dict.get(instance_uuid, None)
if vm_overlay is not None:
vm_overlay.terminate()
del self.resumed_vm_dict[instance['uuid']]
# check synthesized VM list
synthesized_VM = self.synthesized_vm_dics.get(instance_uuid, None)
if synthesized_VM is not None:
LOG.info(_("Deallocate all resources of synthesized VM"),
instance=instance)
if hasattr(synthesized_VM, 'machine'):
# intentionally avoid terminating VM at synthesis code
# since OpenStack will do that
synthesized_VM.machine = None
synthesized_VM.terminate()
del self.synthesized_vm_dics[instance_uuid]
def resume_basevm(self, instance, xml, base_disk, base_memory,
base_diskmeta, base_memmeta, base_hashvalue):
""" resume base vm to create overlay vm
"""
options = synthesis.Options()
options.TRIM_SUPPORT = True
options.FREE_SUPPORT = False
options.XRAY_SUPPORT = False
options.DISK_ONLY = False
options.ZIP_CONTAINER = True
vm_overlay = synthesis.VM_Overlay(base_disk, options,
base_mem=base_memory,
base_diskmeta=base_diskmeta,
base_memmeta=base_memmeta,
base_hashvalue=base_hashvalue,
nova_xml=xml,
nova_util=libvirt_utils,
nova_conn=self._conn)
virt_dom = vm_overlay.resume_basevm()
self.resumed_vm_dict[instance['uuid']] = vm_overlay
synthesis.rettach_nic(virt_dom, vm_overlay.old_xml_str, xml)
def _spawn_using_synthesis(self, context, instance, xml,
image_meta, overlay_url):
# download vm overlay
overlay_package = VMOverlayPackage(overlay_url)
meta_raw = overlay_package.read_meta()
meta_info = msgpack.unpackb(meta_raw)
basevm_sha256 = meta_info.get(Cloudlet_Const.META_BASE_VM_SHA256, None)
image_properties = image_meta.get("properties", None)
if image_properties is None:
msg = "image does not have properties for cloudlet metadata"
raise exception.ImageNotFound(msg)
image_sha256 = image_properties.get(CloudletAPI.PROPERTY_KEY_BASE_UUID)
# check basevm
if basevm_sha256 != image_sha256:
msg = "requested base vm is not compatible with openstack base disk %s != %s" \
% (basevm_sha256, image_sha256)
raise exception.ImageNotFound(msg)
memory_snap_id = str(
image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_MEM))
diskhash_snap_id = str(
image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_DISK_HASH))
memhash_snap_id = str(
image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_MEM_HASH))
basedisk_path = self._get_cache_image(context, instance,
image_meta['id'])
basemem_path = self._get_cache_image(context, instance, memory_snap_id)
diskhash_path = self._get_cache_image(context, instance,
diskhash_snap_id)
memhash_path = self._get_cache_image(context, instance, memhash_snap_id)
# download blob
fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
decomp_overlay = os.path.join(libvirt_utils.get_instance_path(instance),
'decomp_overlay')
meta_info = compression.decomp_overlayzip(overlay_url, decomp_overlay)
# recover VM
launch_disk, launch_mem, fuse, delta_proc, fuse_proc = \
synthesis.recover_launchVM(basedisk_path, meta_info,
decomp_overlay,
base_mem=basemem_path,
base_diskmeta=diskhash_path,
base_memmeta=memhash_path)
# resume VM
LOG.info(_("Starting VM synthesis"), instance=instance)
synthesized_vm = synthesis.SynthesizedVM(launch_disk, launch_mem, fuse,
disk_only=False,
qemu_args=False,
nova_xml=xml,
nova_conn=self._conn,
nova_util=libvirt_utils
)
        # run the delta and fuse processes to completion before resuming
        # (non-threaded resume)
delta_proc.start()
fuse_proc.start()
delta_proc.join()
fuse_proc.join()
LOG.info(_("Finish VM synthesis"), instance=instance)
synthesized_vm.resume()
        # re-attach NIC
synthesis.rettach_nic(synthesized_vm.machine,
synthesized_vm.old_xml_str, xml)
return synthesized_vm
def _spawn_using_handoff(self, context, instance, xml,
image_meta, handoff_info):
image_properties = image_meta.get("properties", None)
memory_snap_id = str(
image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_MEM))
diskhash_snap_id = str(
image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_DISK_HASH))
memhash_snap_id = str(
image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_MEM_HASH))
basedisk_path = self._get_cache_image(context, instance,
image_meta['id'])
basemem_path = self._get_cache_image(context, instance, memory_snap_id)
diskhash_path = self._get_cache_image(context, instance,
diskhash_snap_id)
memhash_path = self._get_cache_image(context, instance,
memhash_snap_id)
base_vm_paths = [basedisk_path, basemem_path,
diskhash_path, memhash_path]
image_sha256 = image_properties.get(CloudletAPI.PROPERTY_KEY_BASE_UUID)
snapshot_directory = libvirt_driver.CONF.libvirt.snapshots_directory
fileutils.ensure_tree(snapshot_directory)
synthesized_vm = None
with utils.tempdir(dir=snapshot_directory) as tmpdir:
uuidhex = uuid.uuid4().hex
launch_diskpath = os.path.join(tmpdir, uuidhex + "-launch-disk")
launch_memorypath = os.path.join(
tmpdir, uuidhex + "-launch-memory")
tmp_dir = mkdtemp(prefix="cloudlet-residue-")
handoff_recv_datafile = os.path.join(tmp_dir, "handoff-data")
# recv handoff data and synthesize disk img and memory snapshot
try:
ret_values = self._handoff_recv(base_vm_paths, image_sha256,
handoff_recv_datafile,
launch_diskpath,
launch_memorypath)
# start VM
launch_disk_size, launch_memory_size, \
disk_overlay_map, memory_overlay_map = ret_values
synthesized_vm = self._handoff_launch_vm(
xml, basedisk_path, basemem_path,
launch_diskpath, launch_memorypath,
int(launch_disk_size), int(launch_memory_size),
disk_overlay_map, memory_overlay_map,
)
                # re-attach NIC
synthesis.rettach_nic(synthesized_vm.machine,
synthesized_vm.old_xml_str, xml)
except handoff.HandoffError as e:
msg = "failed to perform VM handoff:\n"
msg += str(e)
raise exception.ImageNotFound(msg)
finally:
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
if os.path.exists(launch_diskpath):
os.remove(launch_diskpath)
if os.path.exists(launch_memorypath):
os.remove(launch_memorypath)
return synthesized_vm
def _handoff_recv(self, base_vm_paths, base_hashvalue,
handoff_recv_datafile, launch_diskpath,
launch_memorypath):
# data structure for handoff receiving
handoff_ds_recv = handoff.HandoffDataRecv()
handoff_ds_recv.save_data(
base_vm_paths, base_hashvalue,
launch_diskpath, launch_memorypath
)
handoff_ds_recv.to_file(handoff_recv_datafile)
LOG.debug("start handoff recv process")
cmd = ["/usr/local/bin/handoff-server-proc", "-d",
"%s" % handoff_recv_datafile]
LOG.debug("subprocess: %s" % cmd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, close_fds=True)
stdout_buf, _ = proc.communicate()
LOG.info("Handoff recv finishes")
returncode = proc.poll()
        if returncode != 0:
msg = "Failed to receive handoff data"
raise handoff.HandoffError(msg)
        # parse output: this will be fixed in the cloudlet daemon
keyword, disksize, memorysize, disk_overlay_map, memory_overlay_map =\
stdout_buf.split("\n")[-1].split("\t")
if keyword.lower() != "openstack":
raise handoff.HandoffError("Failed to parse returned data")
return disksize, memorysize, disk_overlay_map, memory_overlay_map
def _handoff_launch_vm(self, libvirt_xml, base_diskpath, base_mempath,
launch_disk, launch_memory,
launch_disk_size, launch_memory_size,
disk_overlay_map, memory_overlay_map):
        # We told FUSE that everything is ready, so we must wait until
        # delta_proc finishes. We cannot start the VM before delta_proc
        # finishes because we do not know what will be modified later.
fuse = synthesis.run_fuse(
Cloudlet_Const.CLOUDLETFS_PATH, Cloudlet_Const.CHUNK_SIZE,
base_diskpath, launch_disk_size, base_mempath, launch_memory_size,
resumed_disk=launch_disk, disk_overlay_map=disk_overlay_map,
resumed_memory=launch_memory, memory_overlay_map=memory_overlay_map,
valid_bit=1
)
synthesized_vm = synthesis.SynthesizedVM(
launch_disk, launch_memory, fuse,
disk_only=False, qemu_args=None,
nova_xml=libvirt_xml,
nova_conn=self._conn,
nova_util=libvirt_utils
)
synthesized_vm.resume()
return synthesized_vm
|
apache-2.0
|
BichenWuUCB/squeezeDet
|
src/dataset/kitti.py
|
1
|
10284
|
# Author: Bichen Wu (bichen@berkeley.edu) 08/25/2016
"""Image data base class for kitti"""
import cv2
import os
import numpy as np
import subprocess
from dataset.imdb import imdb
from utils.util import bbox_transform_inv, batch_iou
class kitti(imdb):
def __init__(self, image_set, data_path, mc):
imdb.__init__(self, 'kitti_'+image_set, mc)
self._image_set = image_set
self._data_root_path = data_path
self._image_path = os.path.join(self._data_root_path, 'training', 'image_2')
self._label_path = os.path.join(self._data_root_path, 'training', 'label_2')
self._classes = self.mc.CLASS_NAMES
self._class_to_idx = dict(zip(self.classes, xrange(self.num_classes)))
# a list of string indices of images in the directory
self._image_idx = self._load_image_set_idx()
# a dict of image_idx -> [[cx, cy, w, h, cls_idx]]. x,y,w,h are not divided by
# the image width and height
self._rois = self._load_kitti_annotation()
## batch reader ##
self._perm_idx = None
self._cur_idx = 0
# TODO(bichen): add a random seed as parameter
self._shuffle_image_idx()
self._eval_tool = './src/dataset/kitti-eval/cpp/evaluate_object'
def _load_image_set_idx(self):
image_set_file = os.path.join(
self._data_root_path, 'ImageSets', self._image_set+'.txt')
assert os.path.exists(image_set_file), \
'File does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_idx = [x.strip() for x in f.readlines()]
return image_idx
def _image_path_at(self, idx):
image_path = os.path.join(self._image_path, idx+'.png')
assert os.path.exists(image_path), \
'Image does not exist: {}'.format(image_path)
return image_path
def _load_kitti_annotation(self):
def _get_obj_level(obj):
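      # KITTI difficulty levels, derived from bounding-box height, truncation
      # and occlusion: 1 = easy, 2 = moderate, 3 = hard, 4 = excluded from
      # evaluation.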
height = float(obj[7]) - float(obj[5]) + 1
truncation = float(obj[1])
occlusion = float(obj[2])
if height >= 40 and truncation <= 0.15 and occlusion <= 0:
return 1
elif height >= 25 and truncation <= 0.3 and occlusion <= 1:
return 2
elif height >= 25 and truncation <= 0.5 and occlusion <= 2:
return 3
else:
return 4
idx2annotation = {}
for index in self._image_idx:
filename = os.path.join(self._label_path, index+'.txt')
with open(filename, 'r') as f:
        lines = f.readlines()
bboxes = []
for line in lines:
obj = line.strip().split(' ')
        try:
          cls = self._class_to_idx[obj[0].lower().strip()]
        except KeyError:
          # skip labels whose class is not in mc.CLASS_NAMES
          continue
if self.mc.EXCLUDE_HARD_EXAMPLES and _get_obj_level(obj) > 3:
continue
xmin = float(obj[4])
ymin = float(obj[5])
xmax = float(obj[6])
ymax = float(obj[7])
assert xmin >= 0.0 and xmin <= xmax, \
'Invalid bounding box x-coord xmin {} or xmax {} at {}.txt' \
.format(xmin, xmax, index)
assert ymin >= 0.0 and ymin <= ymax, \
'Invalid bounding box y-coord ymin {} or ymax {} at {}.txt' \
.format(ymin, ymax, index)
x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])
bboxes.append([x, y, w, h, cls])
idx2annotation[index] = bboxes
return idx2annotation
def evaluate_detections(self, eval_dir, global_step, all_boxes):
"""Evaluate detection results.
Args:
eval_dir: directory to write evaluation logs
global_step: step of the checkpoint
all_boxes: all_boxes[cls][image] = N x 5 arrays of
[xmin, ymin, xmax, ymax, score]
Returns:
aps: array of average precisions.
names: class names corresponding to each ap
"""
det_file_dir = os.path.join(
eval_dir, 'detection_files_{:s}'.format(global_step), 'data')
if not os.path.isdir(det_file_dir):
os.makedirs(det_file_dir)
for im_idx, index in enumerate(self._image_idx):
filename = os.path.join(det_file_dir, index+'.txt')
with open(filename, 'wt') as f:
for cls_idx, cls in enumerate(self._classes):
dets = all_boxes[cls_idx][im_idx]
for k in xrange(len(dets)):
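            # KITTI label format per line: type, truncated, occluded, alpha,
            # bbox (xmin ymin xmax ymax), 3D dimensions, 3D location,
            # rotation_y, score. Only type, bbox and score carry real values
            # here; the remaining fields are written as placeholders.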
f.write(
'{:s} -1 -1 0.0 {:.2f} {:.2f} {:.2f} {:.2f} 0.0 0.0 0.0 0.0 0.0 '
'0.0 0.0 {:.3f}\n'.format(
cls.lower(), dets[k][0], dets[k][1], dets[k][2], dets[k][3],
dets[k][4])
)
cmd = self._eval_tool + ' ' \
+ os.path.join(self._data_root_path, 'training') + ' ' \
+ os.path.join(self._data_root_path, 'ImageSets',
self._image_set+'.txt') + ' ' \
+ os.path.dirname(det_file_dir) + ' ' + str(len(self._image_idx))
print('Running: {}'.format(cmd))
status = subprocess.call(cmd, shell=True)
aps = []
names = []
for cls in self._classes:
det_file_name = os.path.join(
os.path.dirname(det_file_dir), 'stats_{:s}_ap.txt'.format(cls))
if os.path.exists(det_file_name):
with open(det_file_name, 'r') as f:
lines = f.readlines()
assert len(lines) == 3, \
'Line number of {} should be 3'.format(det_file_name)
aps.append(float(lines[0].split('=')[1].strip()))
aps.append(float(lines[1].split('=')[1].strip()))
aps.append(float(lines[2].split('=')[1].strip()))
else:
aps.extend([0.0, 0.0, 0.0])
names.append(cls+'_easy')
names.append(cls+'_medium')
names.append(cls+'_hard')
return aps, names
def do_detection_analysis_in_eval(self, eval_dir, global_step):
det_file_dir = os.path.join(
eval_dir, 'detection_files_{:s}'.format(global_step), 'data')
det_error_dir = os.path.join(
eval_dir, 'detection_files_{:s}'.format(global_step),
'error_analysis')
if not os.path.exists(det_error_dir):
os.makedirs(det_error_dir)
det_error_file = os.path.join(det_error_dir, 'det_error_file.txt')
stats = self.analyze_detections(det_file_dir, det_error_file)
ims = self.visualize_detections(
image_dir=self._image_path,
image_format='.png',
det_error_file=det_error_file,
output_image_dir=det_error_dir,
num_det_per_type=10
)
return stats, ims
def analyze_detections(self, detection_file_dir, det_error_file):
def _save_detection(f, idx, error_type, det, score):
f.write(
'{:s} {:s} {:.1f} {:.1f} {:.1f} {:.1f} {:s} {:.3f}\n'.format(
idx, error_type,
det[0]-det[2]/2., det[1]-det[3]/2.,
det[0]+det[2]/2., det[1]+det[3]/2.,
self._classes[int(det[4])],
score
)
)
# load detections
self._det_rois = {}
for idx in self._image_idx:
det_file_name = os.path.join(detection_file_dir, idx+'.txt')
with open(det_file_name) as f:
        lines = f.readlines()
bboxes = []
for line in lines:
obj = line.strip().split(' ')
cls = self._class_to_idx[obj[0].lower().strip()]
xmin = float(obj[4])
ymin = float(obj[5])
xmax = float(obj[6])
ymax = float(obj[7])
score = float(obj[-1])
x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])
bboxes.append([x, y, w, h, cls, score])
bboxes.sort(key=lambda x: x[-1], reverse=True)
self._det_rois[idx] = bboxes
# do error analysis
num_objs = 0.
num_dets = 0.
num_correct = 0.
num_loc_error = 0.
num_cls_error = 0.
num_bg_error = 0.
num_repeated_error = 0.
num_detected_obj = 0.
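    # Error taxonomy used below (thresholds on IoU with the best-matching
    # ground truth): correct = IoU >= 0.5, right class, first match;
    # repeated = right class but ground truth already matched; loc = right
    # class with 0.1 < IoU < 0.5; cls = IoU > 0.1 but wrong class;
    # bg = IoU <= 0.1 (background false positive).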
with open(det_error_file, 'w') as f:
for idx in self._image_idx:
gt_bboxes = np.array(self._rois[idx])
num_objs += len(gt_bboxes)
detected = [False]*len(gt_bboxes)
det_bboxes = self._det_rois[idx]
if len(gt_bboxes) < 1:
continue
for i, det in enumerate(det_bboxes):
if i < len(gt_bboxes):
num_dets += 1
ious = batch_iou(gt_bboxes[:, :4], det[:4])
max_iou = np.max(ious)
gt_idx = np.argmax(ious)
if max_iou > 0.1:
if gt_bboxes[gt_idx, 4] == det[4]:
if max_iou >= 0.5:
if i < len(gt_bboxes):
if not detected[gt_idx]:
num_correct += 1
detected[gt_idx] = True
else:
num_repeated_error += 1
else:
if i < len(gt_bboxes):
num_loc_error += 1
_save_detection(f, idx, 'loc', det, det[5])
else:
if i < len(gt_bboxes):
num_cls_error += 1
_save_detection(f, idx, 'cls', det, det[5])
else:
if i < len(gt_bboxes):
num_bg_error += 1
_save_detection(f, idx, 'bg', det, det[5])
for i, gt in enumerate(gt_bboxes):
if not detected[i]:
_save_detection(f, idx, 'missed', gt, -1.0)
num_detected_obj += sum(detected)
print ('Detection Analysis:')
print (' Number of detections: {}'.format(num_dets))
print (' Number of objects: {}'.format(num_objs))
print (' Percentage of correct detections: {}'.format(
num_correct/num_dets))
print (' Percentage of localization error: {}'.format(
num_loc_error/num_dets))
print (' Percentage of classification error: {}'.format(
num_cls_error/num_dets))
print (' Percentage of background error: {}'.format(
num_bg_error/num_dets))
print (' Percentage of repeated detections: {}'.format(
num_repeated_error/num_dets))
print (' Recall: {}'.format(
num_detected_obj/num_objs))
out = {}
out['num of detections'] = num_dets
out['num of objects'] = num_objs
out['% correct detections'] = num_correct/num_dets
out['% localization error'] = num_loc_error/num_dets
out['% classification error'] = num_cls_error/num_dets
out['% background error'] = num_bg_error/num_dets
out['% repeated error'] = num_repeated_error/num_dets
out['% recall'] = num_detected_obj/num_objs
return out
|
bsd-2-clause
|
infinitecoin/infinitecoin
|
contrib/bitrpc/bitrpc.py
|
2348
|
7835
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
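# Usage (illustrative): python bitrpc.py <command>, e.g. python bitrpc.py getinfo
# The first command-line argument selects the RPC call; most commands then
# prompt interactively for their parameters.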
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
        gen = raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
mit
|
2uller/LotF
|
App/Lib/encodings/gb18030.py
|
61
|
1070
|
#
# gb18030.py: Python Unicode Codec for GB18030
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gb18030')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='gb18030',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
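# Illustrative use once this codec is registered through the encodings
# package (assuming `data` holds GB18030-encoded bytes):
#   text = data.decode('gb18030')
#   raw = text.encode('gb18030')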
|
gpl-2.0
|
0x7E/ubuntu-tweak
|
ubuntutweak/tweaks/sound.py
|
4
|
3087
|
# Ubuntu Tweak - Ubuntu Configuration Tool
#
# Copyright (C) 2007-2012 Tualatrix Chou <tualatrix@gmail.com>
#
# Ubuntu Tweak is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Ubuntu Tweak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ubuntu Tweak; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import os
from gi.repository import Gtk, Gio
from ubuntutweak.utils import walk_directories
from ubuntutweak.gui.containers import ListPack, GridPack
from ubuntutweak.modules import TweakModule
from ubuntutweak.factory import WidgetFactory
from ubuntutweak.settings.configsettings import SystemConfigSetting
class Sound(TweakModule):
__title__ = _('Sound')
__desc__ = _('Set the sound theme for Ubuntu')
__icon__ = 'sound'
__category__ = 'appearance'
utext_event_sounds = _('Event sounds:')
utext_input_feedback = _('Input feedback sounds:')
utext_sound_theme = _('Sound theme:')
utext_login_sound = _('Play login sound:')
def __init__(self):
TweakModule.__init__(self)
valid_themes = self._get_valid_themes()
theme_box = GridPack(
WidgetFactory.create('Switch',
label=self.utext_event_sounds,
key='org.gnome.desktop.sound.event-sounds',
backend='gsettings'),
WidgetFactory.create('Switch',
label=self.utext_login_sound,
key='50_unity-greeter.gschema.override::com.canonical.unity-greeter#play-ready-sound',
backend='systemconfig'),
WidgetFactory.create('Switch',
label=self.utext_input_feedback,
key='org.gnome.desktop.sound.input-feedback-sounds',
backend='gsettings'),
WidgetFactory.create('ComboBox',
label=self.utext_sound_theme,
key='org.gnome.desktop.sound.theme-name',
backend='gsettings',
texts=valid_themes,
values=valid_themes),
)
self.add_start(theme_box, False, False, 0)
def _get_valid_themes(self):
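        # A sound theme directory is considered valid when it contains an
        # index.theme file; both the system-wide and the per-user sound
        # directories below are searched.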
dirs = ( '/usr/share/sounds',
os.path.join(os.path.expanduser("~"), ".sounds"))
valid = walk_directories(dirs, lambda d:
os.path.exists(os.path.join(d, "index.theme")))
valid.sort()
return valid
|
gpl-2.0
|
LTLMoP/slugs
|
tools/translateSafetySlugsSpecToSyntCompAIGFormat.py
|
6
|
12021
|
#!/usr/bin/python
#
import random
import os
import sys
import subprocess, md5, time
# =====================================================
# Option configuration
# =====================================================
slugsExecutableAndBasicOptions = sys.argv[0][0:sys.argv[0].rfind("tools/translateSafetySlugsSpecToSyntCompAIGFormat.py")]+"src/slugs"
slugsReturnFile = "/tmp/check_"+str(os.getpid())+".slugsreturn"
# =====================================================
# Slugs -> AIG Encoder, used by the main function
# =====================================================
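# AIGER literal convention used throughout this encoder: variable v is the
# even literal 2*v and its negation the odd literal 2*v+1, so XOR-ing a
# literal with 1 negates it; the constants FALSE and TRUE are the literals
# 0 and 1, respectively.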
def recurseMakeAIGEncodingOfBooleanFormula(formula,variables,varNumbersUsedSoFar,nofGates,memoryTable,lines):
# Cut off next token
posSpace = formula.find(" ")
if posSpace==-1:
firstToken = formula
restOfFormula = ""
else:
firstToken = formula[0:posSpace]
restOfFormula = formula[posSpace+1:]
# Process token
if firstToken=="1":
return (restOfFormula,1,varNumbersUsedSoFar,nofGates)
elif firstToken=="0":
return (restOfFormula,0,varNumbersUsedSoFar,nofGates)
if firstToken=="!":
(restOfFormula,resultingGateNumber,varNumbersUsedSoFar,nofGates) = recurseMakeAIGEncodingOfBooleanFormula(restOfFormula,variables,varNumbersUsedSoFar,nofGates,memoryTable,lines)
return (restOfFormula,resultingGateNumber ^ 1,varNumbersUsedSoFar,nofGates)
elif firstToken=="|":
(restOfFormula,resultingGateNumber,varNumbersUsedSoFar,nofGates) = recurseMakeAIGEncodingOfBooleanFormula(restOfFormula,variables,varNumbersUsedSoFar,nofGates,memoryTable,lines)
(restOfFormula,resultingGateNumber2,varNumbersUsedSoFar,nofGates) = recurseMakeAIGEncodingOfBooleanFormula(restOfFormula,variables,varNumbersUsedSoFar,nofGates,memoryTable,lines)
varNumbersUsedSoFar += 1
nofGates += 1
lines.append(str(varNumbersUsedSoFar*2)+" "+str(resultingGateNumber ^ 1)+" "+str(resultingGateNumber2 ^ 1))
return (restOfFormula,varNumbersUsedSoFar*2 ^ 1,varNumbersUsedSoFar,nofGates)
elif firstToken=="^":
(restOfFormula,resultingGateNumber,varNumbersUsedSoFar,nofGates) = recurseMakeAIGEncodingOfBooleanFormula(restOfFormula,variables,varNumbersUsedSoFar,nofGates,memoryTable,lines)
(restOfFormula,resultingGateNumber2,varNumbersUsedSoFar,nofGates) = recurseMakeAIGEncodingOfBooleanFormula(restOfFormula,variables,varNumbersUsedSoFar,nofGates,memoryTable,lines)
lines.append(str(varNumbersUsedSoFar*2+2)+" "+str(resultingGateNumber ^ 1)+" "+str(resultingGateNumber2 ^ 1))
lines.append(str(varNumbersUsedSoFar*2+4)+" "+str(resultingGateNumber)+" "+str(resultingGateNumber2))
lines.append(str(varNumbersUsedSoFar*2+6)+" "+str(varNumbersUsedSoFar*2+3)+" "+str(varNumbersUsedSoFar*2+5))
varNumbersUsedSoFar += 3
nofGates += 3
return (restOfFormula,varNumbersUsedSoFar*2,varNumbersUsedSoFar,nofGates)
elif firstToken=="&":
(restOfFormula,resultingGateNumber,varNumbersUsedSoFar,nofGates) = recurseMakeAIGEncodingOfBooleanFormula(restOfFormula,variables,varNumbersUsedSoFar,nofGates,memoryTable,lines)
(restOfFormula,resultingGateNumber2,varNumbersUsedSoFar,nofGates) = recurseMakeAIGEncodingOfBooleanFormula(restOfFormula,variables,varNumbersUsedSoFar,nofGates,memoryTable,lines)
varNumbersUsedSoFar += 1
nofGates += 1
lines.append(str(varNumbersUsedSoFar*2)+" "+str(resultingGateNumber)+" "+str(resultingGateNumber2))
return (restOfFormula,varNumbersUsedSoFar*2,varNumbersUsedSoFar,nofGates)
elif firstToken=="$":
posSpace = restOfFormula.find(" ")
nofParts = int(restOfFormula[0:posSpace])
restOfFormula = restOfFormula[posSpace+1:]
memoryTable = []
for i in xrange(0,nofParts):
(restOfFormula,resultingGate,varNumbersUsedSoFar,nofGates) = recurseMakeAIGEncodingOfBooleanFormula(restOfFormula,variables,varNumbersUsedSoFar,nofGates,memoryTable,lines)
memoryTable.append(resultingGate)
return (restOfFormula,memoryTable[len(memoryTable)-1],varNumbersUsedSoFar,nofGates)
elif firstToken=="?":
posSpace = restOfFormula.find(" ")
if posSpace>-1:
part = int(restOfFormula[0:posSpace])
restOfFormula = restOfFormula[posSpace+1:]
else:
part = int(restOfFormula)
restOfFormula = ""
return (restOfFormula,memoryTable[part],varNumbersUsedSoFar,nofGates)
else:
        # Otherwise the token is an atomic proposition (AP): look up its literal
return (restOfFormula,variables[firstToken],varNumbersUsedSoFar,nofGates)
# =====================================================
# Main function
# =====================================================
def produceAIGFileFromSlugsFile(filename):
slugsFile = open(filename,"r")
slugsLines = slugsFile.readlines()
slugsFile.close()
# Read the categories
categories = {"[INPUT]":[],"[OUTPUT]":[],"[ENV_INIT]":[],"[ENV_LIVENESS]":[],"[ENV_TRANS]":[],"[SYS_INIT]":[],"[SYS_LIVENESS]":[],"[SYS_TRANS]":[]}
currentCategory = ""
lastCR = False
propNr = 1
for line in slugsLines:
line = line.strip()
if line.startswith("#"):
pass
elif line.startswith("["):
currentCategory = line
elif len(line)>0:
categories[currentCategory].append(line)
    # Check the absence of liveness conditions.
if len(categories["[ENV_LIVENESS]"])>0:
raise Exception("Cannot translate specifications that comprise liveness assumptions")
if len(categories["[SYS_LIVENESS]"])>0:
raise Exception("Cannot translate specifications that comprise liveness guarantees")
# Fill up condition lists with "1" whenever needed - w.l.o.g, we assume to have at least one property of every supported type
for cat in ["[ENV_INIT]","[SYS_INIT]","[ENV_TRANS]","[SYS_TRANS]"]:
if len(categories[cat])==0:
categories[cat].append("1")
    # Counter for the last variable used so far; initialized to account for
    # the "checking gates". Also keep a list of the AIG file lines emitted so
    # far and reserve some gate numbers for encoding the GR(1)-spec semantics.
varNumbersUsedSoFar = 1
nofGates = 0
lines = []
# Put one special latch at the beginning
latchOutputIsNotFirstRound = varNumbersUsedSoFar*2+2
varNumbersUsedSoFar += 1
# Assign variable numbers to input and output variables
variables = {}
for cat in ["[INPUT]","[OUTPUT]"]:
for x in categories[cat]:
varNumbersUsedSoFar += 1
variables[x] = varNumbersUsedSoFar*2
lines.append(str(varNumbersUsedSoFar*2))
# Put some special latches later
latchOutputIsAssumptionsAlreadyViolated = varNumbersUsedSoFar*2+2
gateNumberAssumptionFailure = varNumbersUsedSoFar*2+4
gateNumberGuaranteeFailure = varNumbersUsedSoFar*2+6
varNumbersUsedSoFar += 3
# Declare one "GR(1) semantics" latch here.
lines.append(str(latchOutputIsNotFirstRound)+" 1")
    # Prepare latches for the last input/output values. Also register the
    # latches in the "variables" dictionary so that they can be used in
    # "recurseMakeAIGEncodingOfBooleanFormula".
oldValueLatches = {}
# Also create variable array with pre- and post-bits
variablesInTrans = {}
for cat in ["[INPUT]","[OUTPUT]"]:
for x in categories[cat]:
varNumbersUsedSoFar += 1
oldValueLatches[x] = varNumbersUsedSoFar*2
variablesInTrans[x] = varNumbersUsedSoFar*2
variablesInTrans[x+"'"] = variables[x]
lines.append(str(varNumbersUsedSoFar*2)+" "+str(variables[x]))
# Put one special latch definition here
lines.append(str(latchOutputIsAssumptionsAlreadyViolated)+" "+str(gateNumberAssumptionFailure ^ 1))
# Now the output lines
lines.append(str(gateNumberGuaranteeFailure))
# Compute the safety init assumptions
    initAssumptions = "& "*(len(categories["[ENV_INIT]"])-1)+" ".join(categories["[ENV_INIT]"])
    (restOfFormula,resultingGateNumberEnvInit,varNumbersUsedSoFar,nofGates) = recurseMakeAIGEncodingOfBooleanFormula(initAssumptions,variables,varNumbersUsedSoFar,nofGates,[],lines)
assert len(restOfFormula)==0
# Compute the safety trans assumptions
    transAssumptions = "& "*(len(categories["[ENV_TRANS]"])-1)+" ".join(categories["[ENV_TRANS]"])
    (restOfFormula,resultingGateNumberEnvTrans,varNumbersUsedSoFar,nofGates) = recurseMakeAIGEncodingOfBooleanFormula(transAssumptions,variablesInTrans,varNumbersUsedSoFar,nofGates,[],lines)
assert len(restOfFormula)==0
# Update assumption violations - note that "gateNumberAssumptionFailure" should get the negation
lines.append(str(gateNumberAssumptionFailure)+" "+str(latchOutputIsAssumptionsAlreadyViolated ^ 1)+" "+str(varNumbersUsedSoFar*2+2))
lines.append(str(varNumbersUsedSoFar*2+2)+" "+str(varNumbersUsedSoFar*2+5)+" "+str(varNumbersUsedSoFar*2+7))
lines.append(str(varNumbersUsedSoFar*2+4)+" "+str(resultingGateNumberEnvInit ^ 1)+" "+str(latchOutputIsNotFirstRound ^ 1))
lines.append(str(varNumbersUsedSoFar*2+6)+" "+str(resultingGateNumberEnvTrans ^ 1)+" "+str(latchOutputIsNotFirstRound))
nofGates += 4
varNumbersUsedSoFar += 3
# Compute the safety init guarantees
initGuarantees = "& "*(len(categories["[SYS_INIT]"])-1)+" ".join(categories["[SYS_INIT]"])
(restOfFormula,resultingGateNumberSysInit,varNumbersUsedSoFar,nofGates) = recurseMakeAIGEncodingOfBooleanFormula(initGuarantees,variables,varNumbersUsedSoFar,nofGates,[],lines)
assert len(restOfFormula)==0
# Compute the safety trans guarantees
transGuarantees = "& "*(len(categories["[SYS_TRANS]"])-1)+" ".join(categories["[SYS_TRANS]"])
(restOfFormula,resultingGateNumberSysTrans,varNumbersUsedSoFar,nofGates) = recurseMakeAIGEncodingOfBooleanFormula(transGuarantees,variablesInTrans,varNumbersUsedSoFar,nofGates,[],lines)
assert len(restOfFormula)==0
# Detect guarantee violations - exclude cases in which an assumption has already been violated
lines.append(str(gateNumberGuaranteeFailure)+" "+str(gateNumberAssumptionFailure)+" "+str(varNumbersUsedSoFar*2+3))
lines.append(str(varNumbersUsedSoFar*2+2)+" "+str(varNumbersUsedSoFar*2+5)+" "+str(varNumbersUsedSoFar*2+7))
lines.append(str(varNumbersUsedSoFar*2+4)+" "+str(resultingGateNumberSysInit ^ 1)+" "+str(latchOutputIsNotFirstRound ^ 1))
lines.append(str(varNumbersUsedSoFar*2+6)+" "+str(resultingGateNumberSysTrans ^ 1)+" "+str(latchOutputIsNotFirstRound))
nofGates += 4
varNumbersUsedSoFar += 3
# Print header and payload
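    # AIGER ASCII header fields: "aag M I L O A" = maximum variable index,
    # number of inputs, latches, outputs, and AND gates.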
print "aag",str(varNumbersUsedSoFar*2),str(len(variables)),str(len(variables)+2),"1",str(nofGates)
for line in lines:
print line
# Print symbols
inNumberSoFar = 0
for cat,prefix in [("[INPUT]",""),("[OUTPUT]","controllable_")]:
for x in categories[cat]:
print "i"+str(inNumberSoFar)+" "+prefix+x
inNumberSoFar += 1
print "l0 IsNotFirstRound"
inNumberSoFar = 1
for cat in ["[INPUT]","[OUTPUT]"]:
for x in categories[cat]:
print "l"+str(inNumberSoFar)+" prev_"+x
inNumberSoFar += 1
print "l"+str(inNumberSoFar)+" AssumptionsAlreadyViolated"
print "o0 err"
print "c"
print "This input file has been automatically translated by 'translateSafetySlugsSpecToSyntCompAIGFormat.py' from the following input file:"
for line in slugsLines:
if len(line.strip())>0:
sys.stdout.write(line)
print ""
# =====================================================
# Run as main program
# =====================================================
if __name__ == "__main__":
    # Take the slugs file name from the command line
if len(sys.argv)>1:
slugsFile = sys.argv[1]
else:
print >>sys.stderr,"Error: Expected slugs file name as input."
sys.exit(1)
produceAIGFileFromSlugsFile(slugsFile)
|
bsd-3-clause
|
gvb/odoo
|
addons/purchase_analytic_plans/purchase_analytic_plans.py
|
378
|
1667
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class purchase_order_line(osv.osv):
_name='purchase.order.line'
_inherit='purchase.order.line'
_columns = {
'analytics_id':fields.many2one('account.analytic.plan.instance','Analytic Distribution'),
}
class purchase_order(osv.osv):
_name='purchase.order'
_inherit='purchase.order'
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
res = super(purchase_order, self)._prepare_inv_line(cr, uid, account_id, order_line, context=context)
res['analytics_id'] = order_line.analytics_id.id
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
ryfeus/lambda-packs
|
Tensorflow/source/tensorflow/contrib/nn/python/ops/cross_entropy.py
|
104
|
7764
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deprecated Wrappers for Neural Net (NN) Cross Entropy Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import nn
# TODO(b/33392402): Formally deprecate this API.
# After LSC (see b/33392402#comment1), this API will be deprecated and callers
# will be suggested to use the (updated version of)
# tf.nn.softmax_cross_entropy_with_logits.
def deprecated_flipped_softmax_cross_entropy_with_logits(logits,
labels,
dim=-1,
name=None):
"""Computes softmax cross entropy between `logits` and `labels`.
  This function differs from tf.nn.softmax_cross_entropy_with_logits only in
  the argument order.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
`logits` and `labels` must have the same shape `[batch_size, num_classes]`
and the same dtype (either `float16`, `float32`, or `float64`).
Args:
logits: Unscaled log probabilities.
labels: Each row `labels[i]` must be a valid probability distribution.
dim: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
Returns:
A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the
softmax cross entropy loss.
"""
return nn.softmax_cross_entropy_with_logits(
labels=labels, logits=logits, dim=dim, name=name)
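# A minimal usage sketch (illustrative, not part of the original module;
# assumes `import tensorflow as tf`): the wrapper takes logits first and
# labels second, the reverse of tf.nn.softmax_cross_entropy_with_logits.
#
#   logits = tf.constant([[2.0, 1.0, 0.1]])
#   labels = tf.constant([[1.0, 0.0, 0.0]])   # one valid distribution per row
#   loss = deprecated_flipped_softmax_cross_entropy_with_logits(logits, labels)
#   # equivalent to:
#   # tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)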
# TODO(b/33392402): Formally deprecate this API.
# After LSC (see b/33392402#comment1), this API will be deprecated and callers
# will be suggested to use the (updated version of)
# tf.nn.sparse_softmax_cross_entropy_with_logits.
def deprecated_flipped_sparse_softmax_cross_entropy_with_logits(logits,
labels,
name=None):
"""Computes sparse softmax cross entropy between `logits` and `labels`.
  This function differs from tf.nn.sparse_softmax_cross_entropy_with_logits
  only in the argument order.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** For this operation, the probability of a given label is considered
exclusive. That is, soft classes are not allowed, and the `labels` vector
must provide a single specific index for the true class for each row of
`logits` (each minibatch entry). For soft softmax classification with
a probability distribution for each entry, see
`softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a softmax
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits of shape `[batch_size, num_classes]` and
labels of shape `[batch_size]`. But higher dimensions are supported.
Args:
logits: Unscaled log probabilities of rank `r` and shape
`[d_0, d_1, ..., d_{r-2}, num_classes]` and dtype `float32` or `float64`.
labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-2}]` and dtype `int32` or
`int64`. Each entry in `labels` must be an index in `[0, num_classes)`.
Other values will raise an exception when this op is run on CPU, and
      return `NaN` for the corresponding loss and gradient rows on GPU.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `labels` and of the same type as `logits`
with the softmax cross entropy loss.
Raises:
ValueError: If logits are scalars (need to have rank >= 1) or if the rank
      of the labels is not equal to the rank of the logits minus one.
"""
return nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name=name)
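# A minimal usage sketch (illustrative; assumes `import tensorflow as tf`):
# here `labels` holds integer class indices rather than one-hot rows.
#
#   logits = tf.constant([[2.0, 1.0, 0.1], [0.5, 2.5, 0.3]])
#   labels = tf.constant([0, 1])   # shape [batch_size], values in [0, num_classes)
#   loss = deprecated_flipped_sparse_softmax_cross_entropy_with_logits(
#       logits, labels)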
# TODO(b/33392402): Formally deprecate this API.
# After LSC (see b/33392402#comment1), this API will be deprecated and callers
# will be suggested to use the (updated version of)
# tf.nn.sigmoid_cross_entropy_with_logits.
def deprecated_flipped_sigmoid_cross_entropy_with_logits(logits,
targets,
name=None):
"""Computes sigmoid cross entropy given `logits`.
  This function differs from tf.nn.sigmoid_cross_entropy_with_logits only in
  the argument order.
Measures the probability error in discrete classification tasks in which each
class is independent and not mutually exclusive. For instance, one could
perform multilabel classification where a picture can contain both an elephant
and a dog at the same time.
For brevity, let `x = logits`, `z = targets`. The logistic loss is
z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
= z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
= z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
      = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
= (1 - z) * x + log(1 + exp(-x))
= x - x * z + log(1 + exp(-x))
For x < 0, to avoid overflow in exp(-x), we reformulate the above
x - x * z + log(1 + exp(-x))
= log(exp(x)) - x * z + log(1 + exp(-x))
= - x * z + log(1 + exp(x))
Hence, to ensure stability and avoid overflow, the implementation uses this
equivalent formulation
max(x, 0) - x * z + log(1 + exp(-abs(x)))
`logits` and `targets` must have the same type and shape.
Args:
logits: A `Tensor` of type `float32` or `float64`.
targets: A `Tensor` of the same type and shape as `logits`.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `logits` with the componentwise
logistic losses.
Raises:
ValueError: If `logits` and `targets` do not have the same shape.
"""
return nn.sigmoid_cross_entropy_with_logits(
labels=targets, logits=logits, name=name)
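# A quick numeric check of the stable formulation above (illustrative, plain
# Python; x = logit, z = target):
#
#   import math
#   x, z = -3.0, 0.0
#   naive  = x - x * z + math.log(1 + math.exp(-x))               # ~0.0486
#   stable = max(x, 0) - x * z + math.log(1 + math.exp(-abs(x)))  # ~0.0486
#   # For strongly negative x (say x = -1000), exp(-x) overflows in the naive
#   # form, while the max/abs form stays finite.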
|
mit
|
blaiseli/p4-phylogenetics
|
p4/nexussets.py
|
1
|
69575
|
import os
import sys
import string
import array
import types
import copy
from var import var
# Don't bother with NexusToken2, because sets blocks are small
from nexustoken import nexusSkipPastNextSemiColon, safeNextTok
import func
from p4exceptions import P4Error
# [Examples from the paup manual,
# but note the bad charpartition subset names '1' and '2'. P4 would not allow those names.]
# charset coding = 2-457 660-896;
# charset noncoding = 1 458-659 897-898;
# charpartition gfunc = 1:coding, 2:noncoding;
# Notes from MadSwofMad97.
# TaxSet taxset-name [({Standard | Vector})] = taxon-set; # standard is default
# TaxPartition partition-name [([{[No]Tokens}] # tokens is default
# [{standard|vector}])] # standard is default
# = subset-name:taxon-set [, subset-name:taxon-set...];
# eg TaxSet outgroup=1-4;
# TaxSet beetles=Omma-.;
#
# taxpartition populations=1:1-3, 2:4-6, 3:7 8; # note bad taxpartition names 1, 2, 3
# taxpartition populations (vector notokens) = 11122233;
#
class CaseInsensitiveDict(dict):
"""A dictionary that is case insensitive, for Nexus"""
def __init__(self, default=None):
dict.__init__(self)
self.default = default
#self.keyDict = {}
def __setitem__(self, key, val):
if type(key) != types.StringType:
gm = ["CaseInsensitiveDict()"]
gm.append("The key must be a string. Got '%s'" % key)
raise P4Error(gm)
lowKey = string.lower(key)
dict.__setitem__(self, lowKey, val)
#self.keyDict[string.lower(key)] = key
def __getitem__(self, key):
if type(key) != types.StringType:
gm = ["CaseInsensitiveDict()"]
gm.append("The key must be a string. Got '%s'" % key)
raise P4Error(gm)
lowKey = string.lower(key)
try:
return dict.__getitem__(self, lowKey)
except KeyError:
return self.default
    def get(self, key, *args):
        if not args:
            args = (self.default,)
        # Lower-case the key so that get() is case-insensitive, matching
        # __getitem__() above.
        return dict.get(self, string.lower(key), *args)
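# Usage sketch (illustrative):
#   d = CaseInsensitiveDict()
#   d['Pos1'] = 7
#   d['pos1']       -> 7
#   d.get('POS1')   -> 7   (get() lower-cases its key, like __getitem__)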
#########################################################################
# CLASS NexusSets
#########################################################################
class NexusSets(object):
"""A container for Nexus CharSet, CharPartition, and TaxSet objects.
When the first Nexus sets block is read, a NexusSets object is
made and saved as ``var.nexusSets``. ``CharSet``, ``TaxSet``, and
``CharPartition`` objects are placed in it, as they are
read/created. TaxPartition commands are not implemented. Here is
a simple nexus sets block that only has charsets::
#nexus
begin sets;
charset pos1 = 1-.\\3;
charset pos2 = 2-.\\3;
charset pos3 = 3-.\\3;
end;
To get the third positions only, you could say::
read('myAlignment.phy')
a = var.alignments[0]
read('mySets.nex') # the sets block above
b = a.subsetUsingCharSet('pos3')
What happens above when the mySets.nex file is read is that a
NexusSets object is created as ``var.nexusSets`` and populated
with the three charsets as CharSet objects. Then when you asked
for a subset, a copy of that NexusSets object was made and applied
to the alignment.
Notice that the length of the alignment is not part of the
information in the sets block, and so things remain undefined
in ``var.nexusSets`` until the nexus sets are applied to a
particular alignment. One consequence of this somewhat awkward
system is that the same charsets could then be applied to another
alignment of a different size::
read('myAlignment.phy')
aA = var.alignments[0]
read('anotherAlignment.nex')
aB = var.alignments[1]
read('mySets.nex') # the sets block above
bA = aA.subsetUsingCharSet('pos3')
bB = aB.subsetUsingCharSet('pos3')
In the above example, ``bA.nexusSets`` and ``bB.nexusSets`` are
both derived from ``var.nexusSets`` but are independent of it, and
different from each other.
So when an Alignment (or Tree object) wants to use ``var.nexusSets``, it
makes a copy of it, and attaches the copy as
theAlignment.nexusSets or theTree.nexusSets
Here is another example, including a ``charPartition`` definition::
begin sets;
charset gene1 = 1-213;
charset gene2 = 214-497;
charPartition cpName = gene1:gene1, gene2:gene2;
end;
For an alignment, you can then set a **character partition** by ::
a.setCharPartition(cpName)
Do this *before* you make a Data object, to partition the alignment.
You can also use charsets to extract subsets, eg via::
b = a.subsetUsingCharSet(csName)
Setting a charPartition or asking for a subset will trigger
applying ``var.nexusSets`` to the alignment, but you can also do
it explicitly, by::
myTree.setNexusSets()
NexusSets knows about predefined 'constant', 'gapped', and
'remainder' charsets. It does not know about 'missambig' or
'uninf' charsets.
NexusSets can either be in the default standard format or in
vector format -- you can change them to vector format with the ::
mySet.vectorize()
method, and you can change them to standard format with the ::
mySet.standardize()
method. For taxSets, you can use actual tax names (rather than
numbers or ranges) by invoking the method::
myTaxSet.setUseTaxNames()
which sets the attribute 'useTaxNames' to True, and puts the
taxNames for the taxSet in the ::
taxSet.taxNames
list, which might be handy.
You can see the current state of a NexusSets object using ::
myNexusSets.dump()
It can also be written out as a nexus sets block. If an Alignment object
has a ``nexusSets`` attribute then if you ask the alignment to write
itself to a nexus file then the Alignment.nexusSets is also
written. If you would rather it not be written, delete it first.
If you would rather it be written to a separate file, do that
first and then delete it.
One nice thing about taxsets is that :meth:`Tree.Tree.tv` and
:meth:`Tree.Tree.btv` know about them and can display them.
"""
def __init__(self):
self.charSets = []
self.charSetsDict = CaseInsensitiveDict()
self.charSetLowNames = []
self.taxSets = []
self.taxSetsDict = CaseInsensitiveDict()
self.taxSetLowNames = []
self.charPartitions = []
self.charPartitionsDict = CaseInsensitiveDict()
self.charPartitionLowNames = []
self.charPartition = None
#self.alignment = None
self.aligNChar = None
        self.taxNames = []
        self.lowTaxNames = []
self.nTax = None
self.predefinedCharSetLowNames = ['constant', 'gapped']
# The nexus format defines several "predefined" charSets.
# For all datatypes:
# constant
# gapped
# missambig
# remainder
# uninf
# I only have implemented 2-- constant and gapped. The
# 'remainder' charSet is handled by p4, but not as a CharSet
# object, since its content depends on the context.
cS = CharSet(self)
cS.num = -1
cS.name = 'constant'
cS.lowName = 'constant'
cS.format = 'vector'
# self.charSets.append(cS)
self.constant = cS
self.charSetsDict['constant'] = self.constant
cS = CharSet(self)
cS.num = -1
cS.name = 'gapped'
cS.lowName = 'gapped'
cS.format = 'vector'
# self.charSets.append(cS)
self.gapped = cS
self.charSetsDict['gapped'] = self.gapped
def _continueReadingFromNexusFile(self, flob):
gm = ['NexusSets._continueReadingFromNexusFile()']
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
if 0:
print gm[0]
print ' var.nexus_doFastNextTok = %s' % var.nexus_doFastNextTok
nexusSkipPastNextSemiColon(flob)
commandName = safeNextTok(flob, gm[0])
lowCommandName = string.lower(commandName)
# print 'got lowCommandName = %s' % lowCommandName
while lowCommandName not in [None, 'end', 'endblock']:
# print "Got lowCommandName '%s'" % lowCommandName
if lowCommandName == 'charset':
self._readCharSetCommand(flob)
elif lowCommandName == 'charpartition':
self._readCharPartitionCommand(flob)
elif lowCommandName == 'taxset':
self._readTaxSetCommand(flob)
elif lowCommandName == 'taxpartition':
print
print gm[0]
if len(gm) > 1:
print gm[1]
print " Sorry-- taxpartition is not implemented."
nexusSkipPastNextSemiColon(flob)
else:
gm.append("Got unrecognized sets block command '%s'" %
commandName)
raise P4Error(gm)
commandName = safeNextTok(
flob, 'NexusSets.continueReadingFromNexusFile()')
lowCommandName = string.lower(commandName)
def _readCharSetCommand(self, flob):
# We have just read 'charset'. The next thing we expect is the charset
# name.
gm = ['NexusSets._readCharSetCommand()']
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
name = func.nexusUnquoteName(
safeNextTok(flob, 'NexusSets: _readCharSetCommand'))
# print "readCharSetCommand: got name '%s'" % name
lowName = string.lower(name)
if not func.nexusCheckName(lowName):
gm.append("Bad charSet name '%s'" % name)
raise P4Error(gm)
# Check for duped names
if lowName in self.charSetLowNames:
gm.append("Duplicated charSet name '%s'" % name)
raise P4Error(gm)
elif lowName in self.predefinedCharSetLowNames:
gm.append(
"You cannot use the name '%s' -- it is predefined." % name)
raise P4Error(gm)
cs = CharSet(self)
cs.name = name
cs.lowName = lowName
cs.readTaxOrCharSetDefinition(flob)
cs.num = len(self.charSets)
self.charSets.append(cs)
self.charSetsDict[name] = cs
self.charSetLowNames.append(cs.lowName)
def _readTaxSetCommand(self, flob):
# We have just read 'taxset'. The next thing we expect is the taxset
# name.
gm = ['NexusSets._readTaxSetCommand()']
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
name = func.nexusUnquoteName(
safeNextTok(flob, 'NexusSets: readTaxSetCommand'))
# print "readTaxSetCommand: got name '%s'" % name
lowName = string.lower(name)
if not func.nexusCheckName(lowName):
gm.append("Bad taxSet name '%s'" % name)
raise P4Error(gm)
# Check for duped names
if lowName in self.taxSetLowNames:
gm.append("Duplicated taxSet name '%s'" % name)
raise P4Error(gm)
ts = TaxSet(self)
ts.name = name
ts.lowName = lowName
ts.readTaxOrCharSetDefinition(flob)
ts.num = len(self.taxSets)
self.taxSets.append(ts)
self.taxSetsDict[name] = ts
self.taxSetLowNames.append(ts.lowName)
def _readCharPartitionCommand(self, flob):
gm = ['NexusSets._readCharPartitionCommand()']
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
name = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
# print "readCharPartitionCommand: got name '%s'" % name
lowName = string.lower(name)
if not func.nexusCheckName(lowName):
gm.append("Bad charPartition name '%s'" % name)
if lowName in self.charPartitionLowNames:
gm.append("Duplicated charPartition name '%s'" % name)
raise P4Error(gm)
cp = CharPartition(self)
cp.name = name
cp.lowName = lowName
cp._readCharPartitionDefinition(flob)
self.charPartitions.append(cp)
self.charPartitionsDict[name] = cp
self.charPartitionLowNames.append(cp.lowName)
def dump(self):
print " NexusSets dump"
if self.constant:
print " Predefined char set 'constant'"
self.constant.dump()
if self.gapped:
print " Predefined char set 'gapped'"
self.gapped.dump()
print " There are %i non-predefined char sets" % len(self.charSets)
for cs in self.charSets:
cs.dump()
print " There are %i tax sets" % len(self.taxSets)
for ts in self.taxSets:
ts.dump()
print " There are %i char partitions" % len(self.charPartitions)
for cp in self.charPartitions:
cp.dump()
if self.charPartition:
print " self.charPartition.name is %s" % func.nexusFixNameIfQuotesAreNeeded(self.charPartition.name)
else:
print " There is no self.charPartition"
def write(self):
"""Write self in Nexus format to stdout."""
self.writeNexusToOpenFile(sys.stdout)
def writeNexus(self, fName=None):
"""Write self in Nexus format to stdout or a file."""
if fName:
f = file(fName, 'w')
else:
f = sys.stdout
f.write('#nexus\n\n')
self.writeNexusToOpenFile(f)
if fName:
f.close()
def writeNexusToOpenFile(self, flob):
"""This only writes non-trivial stuff.
Ie if self has only constant and gapped charsets, then it does
not write anything."""
if self.charSets or self.charPartitions or self.taxSets:
flob.write('begin sets;\n')
for cs in self.charSets:
cs.writeNexusToOpenFile(flob)
for cp in self.charPartitions:
cp.writeNexusToOpenFile(flob)
for ts in self.taxSets:
ts.writeNexusToOpenFile(flob)
flob.write('end;\n\n')
def newCharSet(self, name, mask=None):
cs = CharSet(self)
        cs.name = name
        cs.lowName = name.lower()
cs.num = len(self.charSets)
if mask:
cs.format = 'vector'
cs.mask = mask
else:
pass
self.charSets.append(cs)
self.charSetsDict[cs.name] = cs
def dupeCharSet(self, existingCharSetName, newName):
theCS = self.charSetsDict.get(existingCharSetName)
if not theCS:
raise P4Error(
"NexusSets.dupeCharSet() -- can't find char set '%s'" % existingCharSetName)
cs = CharSet(self)
        cs.name = newName
        cs.lowName = newName.lower()
cs.num = len(self.charSets)
self.charSets.append(cs)
self.charSetsDict[cs.name] = cs
cs.format = theCS.format
cs.triplets = copy.deepcopy(theCS.triplets) # its a list of lists
cs.tokens = theCS.tokens[:]
cs.mask = theCS.mask
cs.aligNChar = theCS.aligNChar
class TaxOrCharSet(object):
def __init__(self, theNexusSets):
self.nexusSets = theNexusSets
self.num = -1
self.name = None
self.lowName = None
self._format = 'standard' # or 'vector' So it should be a property.
self.triplets = []
self.tokens = []
self.mask = None
self.className = 'TaxOrCharSet'
self.lowTaxNames = []
self.taxNames = []
self.useTaxNames = None # undecided
def _getFormat(self):
return self._format
def _setFormat(self, newFormat):
assert newFormat in ['standard', 'vector']
self._format = newFormat
format = property(_getFormat, _setFormat)
def dump(self):
print " %s %i" % (self.className, self.num)
print " name: %s" % self.name
if hasattr(self, 'aligNChar'):
print " aligNChar: %s" % self.aligNChar
print " format: %s" % self.format
if hasattr(self, 'useTaxNames'):
print " useTaxNames: %s" % self.useTaxNames
print " triplets: "
for t in self.triplets:
print " %s" % t
if hasattr(self, 'numberTriplets'):
print " numberTriplets: "
for t in self.numberTriplets:
print " %s" % t
print " tokens: %s" % self.tokens
print " mask: %s" % self.mask
if self.mask:
print " mask 1s-count: %s" % self.mask.count('1')
def readTaxOrCharSetDefinition(self, flob):
        gm = ['%s.readTaxOrCharSetDefinition()' % self.className]
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
tok = safeNextTok(flob, gm[0])
lowTok = string.lower(tok)
# print "readTaxSetDefinition: get tok '%s'" % tok
if lowTok == '=':
pass
elif lowTok == '(':
#['standard', 'vector']:
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
if lowTok == 'standard':
pass
elif lowTok == 'vector':
self.format = 'vector'
else:
gm.append("Unexpected '%s'" % tok)
gm.append("(I was expecting either 'standard' or")
gm.append("'vector' following the parenthesis.)")
raise P4Error(gm)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
if tok == ')':
pass
else:
gm.append("Unexpected '%s'" % tok)
                gm.append(
                    "(I was expecting a closing parenthesis after '%s')" % self.format)
raise P4Error(gm)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
if tok != '=':
gm.append("Unexpected '%s'" % tok)
gm.append("I was expecting an '=' after '(%s)'" % self.format)
raise P4Error(gm)
else:
gm.append("Unexpected '%s'" % tok)
raise P4Error(gm)
# Now we are on the other side of the '='
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
while lowTok not in [None, ';', 'end', 'endblock']:
self.tokens.append(tok)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
if self.format == 'vector':
self.mask = string.join(self.tokens, '')
self.tokens = []
for i in range(len(self.mask)):
if self.mask[i] not in ['0', '1']:
gm.append("%s '%s', vector format" %
(self.className, self.name))
gm.append("The vector must be all zeros or ones.")
raise P4Error(gm)
# print self.mask
# do a once-over sanity check, and convert integer strings to ints
# print "xx1 self.tokens is now %s" % self.tokens
for tokNum in range(len(self.tokens)):
tok = self.tokens[tokNum]
lowTok = string.lower(tok)
if lowTok in ['.', 'all', '-', '\\']:
pass
elif self.className == 'CharSet' and lowTok in self.nexusSets.charSetLowNames:
# print " xx3 %s is an existing charSet" % tok
pass
elif self.className == 'CharSet' and lowTok in self.nexusSets.predefinedCharSetLowNames:
# print " xx3 %s is a pre-defined charSet" % tok
pass
elif self.className == 'TaxSet' and lowTok in self.nexusSets.taxSetLowNames:
# print " xx4 %s is an existing taxSet" % tok
pass
else:
# print " xx5"
try:
intTok = int(tok)
self.tokens[tokNum] = intTok
except ValueError:
if self.className == 'TaxSet':
pass
elif self.className == 'CharSet':
gm.append("I don't understand the token '%s'" % tok)
raise P4Error(gm)
# Now I want to make a list of triplets representing eg 23-87\3
# first item = 23, second item = 87, third = 3
# not all will exist for each part of the char definition.
tokNum = 0
self.triplets = []
while tokNum < len(self.tokens):
tok = self.tokens[tokNum]
# print "Considering tok[%i] '%s'" % (tokNum, tok)
if type(tok) == type('str'):
lowTok = string.lower(tok)
else:
lowTok = None
            if (self.className == 'TaxSet' and lowTok in self.nexusSets.taxSetLowNames) or \
                    (self.className == 'CharSet' and lowTok in self.nexusSets.charSetLowNames):
aTriplet = [tok, None, None]
self.triplets.append(aTriplet)
tokNum += 1
if tokNum < len(self.tokens):
if self.tokens[tokNum] == '-':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"An existing tax or char set may not be followed by a '-'")
raise P4Error(gm)
if self.tokens[tokNum] == '\\':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"An existing tax or char set may not be followed by a '\\'")
raise P4Error(gm)
            elif lowTok == 'all':
aTriplet = [tok, None, None]
self.triplets.append(aTriplet)
tokNum += 1
if tokNum < len(self.tokens):
if self.tokens[tokNum] == '-':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"Tax or char set 'all' may not be followed by a '-'")
raise P4Error(gm)
if self.tokens[tokNum] == '\\':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"Tax or char set 'all' may not be followed by a '\\'")
raise P4Error(gm)
elif tok == '-':
gm.append("%s '%s' definition" % (self.className, self.name))
gm.append("Out of place '-'")
raise P4Error(gm)
elif tok == '\\':
gm.append("%s '%s' definition" % (self.className, self.name))
gm.append("Out of place '\\'")
raise P4Error(gm)
elif tok == '.':
aTriplet = [tok, None, None]
self.triplets.append(aTriplet)
tokNum += 1
if tokNum < len(self.tokens):
if self.tokens[tokNum] == '-':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"Tax or char set '.' may not be followed by a '-'")
raise P4Error(gm)
if self.tokens[tokNum] == '\\':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"Tax or char set '.' may not be followed by a '\\'")
raise P4Error(gm)
elif type(tok) == type(1) or type(tok) == type('str'):
aTriplet = [tok, None, None]
tokNum += 1
if tokNum < len(self.tokens):
if self.tokens[tokNum] == '-':
tokNum += 1
if tokNum < len(self.tokens):
# maybe '.'
if type(self.tokens[tokNum]) == type('str'):
aTriplet[1] = self.tokens[tokNum]
elif type(self.tokens[tokNum]) == type(1):
if type(aTriplet[0]) == type(1):
if self.tokens[tokNum] > aTriplet[0]:
aTriplet[1] = self.tokens[tokNum]
else:
gm.append(
"%s '%s' definition" % (self.className, self.name))
gm.append(
"If a range is defined by two numbers,")
# gm.append("(as it appears to be -- %s %s %s)" % (
# aTriplet[0], aTriplet[1],
# aTriplet[2]))
gm.append(
"the second number of a range must be bigger than")
gm.append("the first.")
raise P4Error(gm)
else:
aTriplet[1] = self.tokens[tokNum]
else:
raise P4Error(gm)
tokNum += 1
if tokNum < len(self.tokens):
if self.tokens[tokNum] == '\\':
tokNum += 1
if tokNum < len(self.tokens):
if type(self.tokens[tokNum]) == type(1):
aTriplet[2] = self.tokens[tokNum]
else:
gm.append(
"%s '%s' definition" % (self.className, self.name))
gm.append(
"Step value of a range must be a number")
gm.append("(Got '%s')" %
self.tokens[tokNum])
raise P4Error(gm)
tokNum += 1
self.triplets.append(aTriplet)
# print "xxy self.mask = %s" % self.mask
if not self.triplets and not self.mask:
if not var.allowEmptyCharSetsAndTaxSets:
gm.append("%s '%s' definition" % (self.className, self.name))
gm.append("Got no definition (no triplets or mask)")
gm.append("(Allow this by turning var.allowEmptyCharSetsAndTaxSets on)")
raise P4Error(gm)
if 0:
print gm[0]
print " Got self.triplets %s" % self.triplets
def setMask(self):
"""Set self.mask."""
gm = ["%s.setMask() name='%s'" % (self.className, self.name)]
if self.format == 'vector':
if self.mask:
pass
else:
gm.append("vector format, but no mask?")
raise P4Error(gm)
elif self.format == 'standard':
if 0:
print gm[0]
self.dump()
if not len(self.triplets):
if not var.allowEmptyCharSetsAndTaxSets:
gm.append(
"standard format, but we have no triplets? - no definition?")
gm.append("(Allow this by turning var.allowEmptyCharSetsAndTaxSets on.)")
raise P4Error(gm)
if self.className == 'CharSet':
thisMaskLen = self.aligNChar
existingSetNames = self.nexusSets.charSetLowNames
existingSets = self.nexusSets.charSets
theTriplets = self.triplets
elif self.className == 'TaxSet':
thisMaskLen = self.nexusSets.nTax
existingSetNames = self.nexusSets.taxSetLowNames
existingSets = self.nexusSets.taxSets
theTriplets = self.numberTriplets
mask = array.array('c', thisMaskLen * '0')
for aTriplet in theTriplets:
if 0:
print gm[0]
print " '%s' aTriplet=%s" % (self.name, aTriplet)
first = aTriplet[0]
second = aTriplet[1]
third = aTriplet[2]
lowFirst = None
lowSecond = None
if type(first) == type('str'):
lowFirst = string.lower(first)
if type(second) == type('str'):
lowSecond = string.lower(second)
# its a single, or an existing set, not a range
if first and not second:
if lowFirst:
if lowFirst == 'all':
for i in range(thisMaskLen):
mask[i] = '1'
if lowFirst in existingSetNames:
for aSet in existingSets:
if lowFirst == aSet.lowName:
if not aSet.mask:
aSet.setMask()
for j in range(thisMaskLen):
if aSet.mask[j] == '1':
mask[j] = '1'
# Maybe its a predefined charset --- constant or gapped
elif self.className == 'CharSet' and lowFirst in self.nexusSets.predefinedCharSetLowNames:
aSet = None
if lowFirst == 'constant':
aSet = self.nexusSets.constant
elif lowFirst == 'gapped':
aSet = self.nexusSets.gapped
assert aSet
for j in range(thisMaskLen):
if aSet.mask[j] == '1':
mask[j] = '1'
else:
gm.append("I don't know '%s'" % first)
raise P4Error(gm)
elif first == '.':
mask[-1] = '1'
elif type(first) == type(1):
if first > 0 and first <= thisMaskLen:
mask[first - 1] = '1'
else:
# This will have been checked before.
                            gm.append(
                                "Component '%s' is out of range of mask len (%s)" % (first, thisMaskLen))
raise P4Error(gm)
elif first and second:
# Its a range.
start = int(first)
if second == '.':
fin = len(mask)
else:
fin = int(second)
if third:
bystep = int(third)
# print "mask len %i, start-1 %i, fin %i, bystep %i" %
# (len(mask), (start-1), fin, bystep)
for spot in range(start - 1, fin, bystep):
mask[spot] = '1'
else:
for spot in range(start - 1, fin):
mask[spot] = '1'
# print " finished incorporating triplet %s into
# '%s' mask." % (aTriplet, self.name)
mask = mask.tostring()
# print "Got char set '%s' mask '%s'" % (self.name, mask)
self.mask = mask
def invertMask(self):
"""Change zeros to ones, and non-zeros to zero."""
gm = ['%s.invertMask()' % self.className]
if not self.mask:
self.dump()
gm.append("The charset has no mask")
raise P4Error(gm)
self.mask = list(self.mask)
for i in range(len(self.mask)):
if self.mask[i] == '0':
self.mask[i] = '1'
else:
self.mask[i] = '0'
self.mask = string.join(self.mask, '')
def write(self):
"""Write self in Nexus format to stdout."""
self.writeNexusToOpenFile(sys.stdout)
def writeNexus(self):
"""Write self in Nexus format to stdout."""
self.writeNexusToOpenFile(sys.stdout)
def writeNexusToOpenFile(self, flob):
if self.className == 'CharSet':
theSetName = 'charSet'
else:
theSetName = 'taxSet'
if self.format == 'standard':
flob.write(' %s %s =' % (theSetName, self.name))
if self.useTaxNames:
for tN in self.taxNames:
flob.write(" %s" % func.nexusFixNameIfQuotesAreNeeded(tN))
else:
# for i in self.tokens:
# flob.write(' %s' % i)
previousTok = None
for theTok in self.tokens:
if type(theTok) == types.StringType:
if theTok not in ['-', '\\']:
tok = func.nexusFixNameIfQuotesAreNeeded(theTok)
else:
tok = theTok
else:
tok = theTok
                    if previousTok != None:
                        # Tokens are either ints or strings.  Write '-' range
                        # separators without surrounding spaces, and separate
                        # all other tokens with a space.
                        if tok == '-' or previousTok == '-':
                            flob.write('%s' % tok)
                        else:
                            flob.write(' %s' % tok)
                        previousTok = tok
                    else:
                        flob.write(' %s' % tok)
                        previousTok = tok
flob.write(';\n')
elif self.format == 'vector':
flob.write(' %s %s (vector) = ' % (theSetName, self.name))
flob.write('%s;\n' % self.mask)
def vectorize(self):
if self.format == 'vector':
return
if not self.mask:
self.setMask()
#self.triplets = []
#self.tokens = []
self.format = 'vector'
def standardize(self):
if self.format == 'standard':
return
self.triplets = []
self.tokens = []
thisTriplet = []
for mPos in range(len(self.mask)):
# print "mPos=%i mask=%s thisTriplet=%s" % (mPos,
# self.mask[mPos], thisTriplet)
if self.mask[mPos] == '0':
if thisTriplet:
if thisTriplet[0] == mPos:
thisTriplet.append(None)
thisTriplet.append(None)
else:
thisTriplet.append(mPos)
thisTriplet.append(None)
# print " finished triplet -- %s" % thisTriplet
self.triplets.append(thisTriplet)
thisTriplet = []
else:
if thisTriplet:
pass
else:
thisTriplet.append(mPos + 1)
# print " started triplet -- %s" % thisTriplet
if thisTriplet:
if thisTriplet[0] == len(self.mask):
thisTriplet.append(None)
thisTriplet.append(None)
else:
thisTriplet.append(mPos + 1)
thisTriplet.append(None)
# print " finished last triplet -- %s" % thisTriplet
self.triplets.append(thisTriplet)
# print self.triplets
for triplet in self.triplets:
if triplet[1] == None:
self.tokens.append(triplet[0])
else:
self.tokens.append(triplet[0])
self.tokens.append('-')
self.tokens.append(triplet[1])
self.format = 'standard'
# self.dump()
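        # For example (illustrative): a vector-format set with mask '110111'
        # standardizes to triplets [[1, 2, None], [4, 6, None]], i.e. the
        # tokens "1 - 2 4 - 6", using 1-based positions.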
class CharSet(TaxOrCharSet):
def __init__(self, theNexusSets):
TaxOrCharSet.__init__(self, theNexusSets)
self.className = 'CharSet'
self.aligNChar = None
def getNChar(self):
self.setMask()
return self.mask.count('1')
def setAligNChar(self, aligNChar):
gm = ['CharSet.setAligNChar()']
# print "CharSet name=%s, format=%s, aligNChar=%i" % (self.name,
# self.format, aligNChar)
self.aligNChar = aligNChar
if self.format == 'standard':
for aTriplet in self.triplets:
first = aTriplet[0]
second = aTriplet[1]
third = aTriplet[2]
if first and not second: # its a single
if type(first) == type(1):
if first > 0 and first <= self.aligNChar:
pass
else:
gm.append("Charset '%s' definition" % self.name)
gm.append(
"Charset definition element '%s' is out of range" % first)
gm.append("(aligNChar = %i)" % self.aligNChar)
raise P4Error(gm)
pass
elif first and second: # its a range
try:
start = int(first)
except ValueError:
gm.append("Charset '%s' definition" % self.name)
gm.append(
"Can't parse definition element '%s'" % first)
raise P4Error(gm)
if second == '.':
fin = self.aligNChar
else:
try:
fin = int(second)
except ValueError:
gm.append("Charset '%s' definition" % self.name)
gm.append(
"Can't parse definition element '%s'" % second)
raise P4Error(gm)
if third:
try:
bystep = int(third)
except ValueError:
gm.append("Charset '%s' definition" % self.name)
gm.append(
"Can't parse definition element '%s'" % third)
raise P4Error(gm)
elif self.format == 'vector':
# print "charset %s, vector format %s, mask %s" % (self.name,
# self.format, self.mask)
if self.mask:
if len(self.mask) == self.aligNChar:
pass
else:
gm.append("len(self.mask) is %i, but aligNChar is %i" % (
len(self.mask), self.aligNChar))
raise P4Error(gm)
else:
gm.append("bad format %s" % self.format)
raise P4Error(gm)
class TaxSet(TaxOrCharSet):
def __init__(self, theNexusSets):
TaxOrCharSet.__init__(self, theNexusSets)
self.className = 'TaxSet'
self.numberTriplets = []
def setNumberTriplets(self):
gm = ['TaxSet.setNumberTriplets()']
if not self.nexusSets.lowTaxNames:
self.nexusSets.lowTaxNames = [
string.lower(txName) for txName in self.nexusSets.taxNames]
self.numberTriplets = []
# print "self.triplets = %s" % self.triplets
for tr in self.triplets:
# print "setNumberTriplets() tr=%s" % tr
numTr = []
for itemNum in range(2):
trItem = tr[itemNum]
# print " considering '%s'" % trItem
if trItem == None:
numTr.append(trItem)
elif type(trItem) == type(1):
numTr.append(trItem)
elif trItem == '.':
numTr.append(self.nexusSets.nTax)
else:
assert type(trItem) == type('str')
lowTrItem = string.lower(trItem)
if lowTrItem in self.nexusSets.taxSetLowNames:
numTr.append(trItem)
else:
if lowTrItem not in self.nexusSets.lowTaxNames:
gm.append("Triplet %s" % tr)
gm.append(
"'%s' is a string, but not in the taxNames." % trItem)
raise P4Error(gm)
theIndx = self.nexusSets.lowTaxNames.index(lowTrItem)
theIndx += 1
numTr.append(theIndx)
trItem = tr[2]
if trItem == None:
numTr.append(None)
else:
assert type(trItem) == type(1)
numTr.append(trItem)
assert len(numTr) == 3
# print numTr
first = numTr[0]
# first might be a pre-existing taxSet name
if type(first) == type('str'):
pass
else:
second = numTr[1]
assert type(first) == type(1) and first != 0
if type(second) == type(1):
assert second != 0
if second <= first:
gm.append("Triplet %s" % tr)
gm.append("Triplet expressed as numbers. %s" % numTr)
gm.append(
"This appears to be a range, but the second number")
gm.append("is not bigger than the first.")
raise P4Error(gm)
assert second <= self.nexusSets.nTax
assert first <= self.nexusSets.nTax
self.numberTriplets.append(numTr)
def setUseTaxNames(self):
if self.useTaxNames:
return
# if not self.mask:
# self.setMask()
if not self.taxNames:
for pos in range(len(self.mask)):
c = self.mask[pos]
if c == '1':
self.taxNames.append(self.nexusSets.taxNames[pos])
self.useTaxNames = True
class CharPartitionSubset(object):
def __init__(self):
self.name = None
self.lowName = None
self.tokens = []
self.mask = None
self.triplets = []
def dump(self):
print " -- CharPartitionSubset"
print " name: %s" % func.nexusFixNameIfQuotesAreNeeded(self.name)
print " triplets: "
for t in self.triplets:
print " %s" % t
print " tokens: %s" % self.tokens
# for t in self.tokens:
# print " %s" % t
print " mask: %s" % self.mask
def writeNexusToOpenFile(self, flob):
flob.write('%s:' % self.name)
# print self.tokens
# for i in self.tokens:
# flob.write(' %s' % i)
previousTok = None
for i in self.tokens:
if previousTok != None:
# tokens will be either ints or strings
previousType = type(previousTok)
# print "previousTok = %s, previousType = %s" % (previousTok,
# previousType)
if type(i) == previousType: # put in a space
flob.write(' %s' % i)
else: # no space
flob.write('%s' % i)
previousTok = i
else:
flob.write(' %s' % i)
previousTok = i
class CharPartition(object):
def __init__(self, theNexusSets):
self.nexusSets = theNexusSets
self.name = None
self.lowName = None
self.tokens = []
self.subsets = []
def _readCharPartitionDefinition(self, flob):
gm = ['CharPartition._readCharPartitionDefinition()']
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
while lowTok != '=':
if lowTok == '(':
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
while lowTok != ')':
if lowTok in ['notokens', 'vector']:
gm.append("Got charpartition modifier: '%s'" % tok)
gm.append("It is not implemented.")
gm.append(
"Only 'tokens' and 'standard' are implemented.")
raise P4Error(gm)
elif lowTok in ['tokens', 'standard']:
pass
else:
gm.append("Got charpartition modifier: '%s'" % tok)
gm.append("This is not understood.")
gm.append(
"(Only 'tokens' and 'standard' are implemented.)")
raise P4Error(gm)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
else:
gm.append("Got unexpected token: '%s'" % tok)
gm.append(
"I was expecting either an '=' or something in parentheses.")
raise P4Error(gm)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
while lowTok not in [None, ';', 'end', 'endblock']:
self.tokens.append(tok)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
# print "_readCharPartitionDefinition: tokens %s" % self.tokens
# Divide into CharPartitionSubset instances
i = 0
while i < len(self.tokens):
aSubset = CharPartitionSubset()
aSubset.name = self.tokens[i]
if not func.nexusCheckName(aSubset.name):
gm.append("CharPartition '%s' definition:" % self.name)
gm.append("Bad subset name (%s, I think)" % aSubset.name)
raise P4Error(gm)
aSubset.lowName = string.lower(aSubset.name)
i += 1
if i >= len(self.tokens):
gm.append("CharPartition '%s' definition:" % self.name)
gm.append(
"Subset name (%s) should be followed by a colon" % aSubset.name)
raise P4Error(gm)
if self.tokens[i] != ':':
gm.append("CharPartition '%s' definition:" % self.name)
gm.append(
"Subset name (%s) should be followed by a colon" % aSubset.name)
raise P4Error(gm)
i += 1
if i >= len(self.tokens):
gm.append("CharPartition '%s' definition:" % self.name)
gm.append(
"Subset name (%s) and colon should be followed" % aSubset.name)
gm.append(
"by a subset definition (charSet or charSet definition)")
raise P4Error(gm)
while i < len(self.tokens) and self.tokens[i] != ',':
aSubset.tokens.append(self.tokens[i])
i += 1
i += 1
self.subsets.append(aSubset)
# do a once-over sanity check,
# check for duplicated names
# and convert integer strings to ints
existingPartNames = []
for aSubset in self.subsets:
# print "Checking charPartitionPart '%s'" % aSubset.name
# print " existingPartNames '%s'" % existingPartNames
if aSubset.lowName in existingPartNames:
gm.append("CharPartition '%s' definition:" % self.name)
gm.append("Duplicated subset name (%s, I think)" %
aSubset.name)
raise P4Error(gm)
existingPartNames.append(aSubset.lowName)
for i in range(len(aSubset.tokens)):
tok = aSubset.tokens[i]
lowTok = string.lower(tok)
# print "considering '%s', ord(lowTok[0])=%i" % (lowTok,
# ord(lowTok[0]))
# Does not pick up '.'!!!!
if lowTok in ['.', 'all', '-', '\\', 'remainder']:
pass
elif lowTok in self.nexusSets.charSetLowNames:
pass
elif lowTok in self.nexusSets.predefinedCharSetLowNames:
pass
else:
# print " lowTok=%s, ord(lowTok[0])=%s, ord('.')=%s" % (
# lowTok, ord(lowTok[0]), ord('.'))
try:
intTok = int(tok)
aSubset.tokens[i] = intTok
except ValueError:
gm.append("CharPartition '%s' definition:" % self.name)
gm.append("Can't understand '%s' in subset '%s' definition" %
(tok, aSubset.name))
gm.append(
"(If you are using read('whatever'), and there are backslashes,")
gm.append(
"are you using raw strings, ie read(r'whatever')?)")
raise P4Error(gm)
def setSubsetMasks(self):
"""Make charParititionSubset.mask's appropriate to the Alignment.
This is called by theAlignment.setCharPartition().
"""
gm = ['CharPartition.setSubsetMasks()']
assert self.nexusSets.aligNChar
# Make a list of triplets representing eg 23-87\3
# first item = 23, second item = 87, third = 3
# Not all will exist for each part of the char definition.
for aSubset in self.subsets:
i = 0
aSubset.triplets = []
while i < len(aSubset.tokens):
tok = aSubset.tokens[i]
if type(tok) == type('string'):
lowTok = string.lower(tok)
else:
lowTok = None
# print "Doing triplets: looking at tok '%s'" % tok
                if (lowTok in self.nexusSets.charSetLowNames) or \
                        (lowTok in self.nexusSets.predefinedCharSetLowNames):
aTriplet = [lowTok, None, None]
aSubset.triplets.append(aTriplet)
i += 1
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '-':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"An existing char set may not be followed by a '-'")
raise P4Error(gm)
if aSubset.tokens[i] == '\\':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"An existing char set may not be followed by a '\\'")
raise P4Error(gm)
elif lowTok in ['all', 'remainder']:
aTriplet = [lowTok, None, None]
aSubset.triplets.append(aTriplet)
i += 1
if lowTok == 'remainder' and i < len(aSubset.tokens):
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Char set 'remainder' must be the last one in the charPartition definition")
raise P4Error(gm)
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '-':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Char set '%s' may not be followed by a '-'" % lowTok)
raise P4Error(gm)
if aSubset.tokens[i] == '\\':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Char set '%s' may not be followed by a '\\'" % lowTok)
raise P4Error(gm)
elif tok == '-':
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append("Out of place '-'")
raise P4Error(gm)
elif tok == '\\':
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append("Out of place '\\'")
raise P4Error(gm)
elif tok == '.':
aTriplet = [tok, None, None]
aSubset.triplets.append(aTriplet)
i += 1
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '-':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Char set '.' may not be followed by a '-'")
raise P4Error(gm)
if aSubset.tokens[i] == '\\':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Char set '.' may not be followed by a '\\'")
raise P4Error(gm)
elif type(tok) == type(1):
aTriplet = [tok, None, None]
i = i + 1
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '-':
i = i + 1
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '.':
aTriplet[1] = aSubset.tokens[i]
elif type(aSubset.tokens[i]) == type(1):
if aSubset.tokens[i] > aTriplet[0]:
aTriplet[1] = aSubset.tokens[i]
else:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append(
"Subset '%s' definition" % aSubset.name)
gm.append(
"Second number of a character range must be bigger than")
gm.append("the first.")
raise P4Error(gm)
else:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append(
"Subset '%s' definition" % aSubset.name)
gm.append(
"Second item of a character range must be either a")
gm.append(
"number or a '.'. I got '%s'" % aSubset.tokens[i])
raise P4Error(gm)
i = i + 1
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '\\':
i = i + 1
if i < len(aSubset.tokens):
if type(aSubset.tokens[i]) == type(1):
aTriplet[2] = aSubset.tokens[i]
else:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append(
"Subset '%s' definition" % aSubset.name)
gm.append(
"Step value of a range must be a number")
gm.append(
"(Got '%s')" % aSubset.tokens[i])
raise P4Error(gm)
i = i + 1
aSubset.triplets.append(aTriplet)
else:
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append("token '%s' is not understood." % tok)
raise P4Error(gm)
if 0:
print gm[0]
print "Got aSubset (%s) triplets %s" % (aSubset.name, aSubset.triplets)
# sys.exit()
aSubset.mask = array.array('c', self.nexusSets.aligNChar * '0')
for aTriplet in aSubset.triplets:
# print "setSubsetMasks() Looking at triplet '%s'" % aTriplet
first = aTriplet[0]
second = aTriplet[1]
third = aTriplet[2]
lowFirst = None
lowSecond = None
if type(first) == type('str'):
lowFirst = string.lower(first)
if type(second) == type('str'):
lowSecond = string.lower(second)
if first and not second: # its a single
# print "Got single: %s" % first
if lowFirst == 'all':
for i in range(self.nexusSets.aligNChar):
aSubset.mask[i] = '1'
elif lowFirst in self.nexusSets.predefinedCharSetLowNames:
theCS = None
if lowFirst == 'constant':
theCS = self.nexusSets.constant
elif lowFirst == 'gapped':
theCS = self.nexusSets.gapped
assert theCS
assert theCS.mask
for j in range(self.nexusSets.aligNChar):
if theCS.mask[j] == '1':
aSubset.mask[j] = '1'
elif lowFirst in self.nexusSets.charSetLowNames:
theCS = None
for cs in self.nexusSets.charSets:
if lowFirst == cs.lowName:
theCS = cs
break
assert theCS
assert theCS.mask
for j in range(self.nexusSets.aligNChar):
if theCS.mask[j] == '1':
aSubset.mask[j] = '1'
# Its legit to use this as a single char.
elif first == '.':
aSubset.mask[-1] = '1'
elif type(first) == type(1):
if first > 0 and first <= self.nexusSets.aligNChar:
aSubset.mask[first - 1] = '1'
else:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Charset definition element '%s' is out of range" % first)
gm.append("(aligNChar = %i)" %
self.nexusSets.aligNChar)
raise P4Error(gm)
elif lowFirst == 'remainder':
# print "Got first == remainder"
for i in range(self.nexusSets.aligNChar):
aSubset.mask[i] = '1'
# print "Got new aSubset.mask = %s" % aSubset.mask
for ss in self.subsets[:-1]:
if ss.mask:
# print "Previous mask: %s" % ss.mask
for j in range(self.nexusSets.aligNChar):
if ss.mask[j] == '1':
aSubset.mask[j] = '0'
else:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" %
aSubset.name)
gm.append(
"When implementing 'remainder' charset")
gm.append(
"Found that subset '%s' had no mask" % ss)
raise P4Error(gm)
else:
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Charset definition element '%s' is not understood" % first)
raise P4Error(gm)
elif first and second: # its a range
try:
start = int(first)
except ValueError:
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Can't parse definition element '%s'" % first)
raise P4Error(gm)
if second == '.':
fin = len(aSubset.mask)
else:
try:
fin = int(second)
except ValueError:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Can't parse definition element '%s'" % second)
raise P4Error(gm)
if third:
try:
bystep = int(third)
except ValueError:
                            gm.append(
                                "CharPartition '%s' definition" % self.name)
                            gm.append("Subset '%s' definition" % aSubset.name)
                            gm.append(
                                "Can't parse definition element '%s'" % third)
                            raise P4Error(gm)
for spot in range(start - 1, fin, bystep):
aSubset.mask[spot] = '1'
else:
for spot in range(start - 1, fin):
aSubset.mask[spot] = '1'
aSubset.mask = aSubset.mask.tostring()
# print "Got char subset '%s' mask '%s'" % (aSubset.name,
# aSubset.mask)
if aSubset.mask.count('1') == 0:
gm.append(
"The mask for charPartitionSubset '%s' is empty." % aSubset.name)
raise P4Error(gm)
def checkForOverlaps(self):
        gm = ['CharPartition.checkForOverlaps()']
unspanned = 0
for i in range(self.nexusSets.aligNChar):
sum = 0
for aSubset in self.subsets:
if aSubset.mask[i] == '1':
sum += 1
if sum > 1:
gm.append("Char partition '%s'" % self.name)
gm.append(
"The problem is that there are overlapping subsets in this")
gm.append(
"charpartition. The same position is in more than one subset.")
gm.append(
"Zero-based position %i, one-based position %i." % (i, i + 1))
raise P4Error(gm)
if sum < 1:
unspanned = 1
        if unspanned:
            gm.append("Char partition '%s'" % self.name)
            gm.append("You should be aware that this partition does not span")
            gm.append("the entire sequence. Hopefully that is intentional.")
            for line in gm:
                print line
def dump(self):
print " CharPartition: name: %s" % func.nexusFixNameIfQuotesAreNeeded(self.name)
# string.join(self.tokens)
print " tokens: %s" % self.tokens
# for t in self.tokens:
# print " %s" % t
print " number of subsets: %s" % len(self.subsets)
for aSubset in self.subsets:
aSubset.dump()
def writeNexusToOpenFile(self, flob):
flob.write(' charPartition %s = ' % self.name)
# print " [ %s subsets ] " % len(self.subsets)
for aSubset in self.subsets[:-1]:
aSubset.writeNexusToOpenFile(flob)
flob.write(', ')
self.subsets[-1].writeNexusToOpenFile(flob)
flob.write(';\n')
    def mask(self):
        if not self.nexusSets.aligNChar:
            gm = ['CharPartition.mask()']
            gm.append("The nexusSets object has no aligNChar set.")
            gm.append("Apply the nexus sets to an alignment first.")
            raise P4Error(gm)
        self.setSubsetMasks()
        m = array.array('c', self.nexusSets.aligNChar * '0')
for i in range(self.nexusSets.aligNChar):
for aSubset in self.subsets:
if aSubset.mask[i] == '1':
m[i] = '1'
return m.tostring()
|
gpl-2.0
|
mennis/dpkt
|
dpkt/ssl.py
|
16
|
19632
|
# $Id$
# Portion Copyright 2012 Google Inc. All rights reserved.
"""Secure Sockets Layer / Transport Layer Security."""
import dpkt
import ssl_ciphersuites
import struct
import binascii
import traceback
import datetime
#
# Note from April 2011: cde...@gmail.com added code that parses SSL3/TLS messages more in depth.
#
# Jul 2012: afleenor@google.com modified and extended SSL support further.
#
class SSL2(dpkt.Packet):
__hdr__ = (
('len', 'H', 0),
('msg', 's', ''),
('pad', 's', ''),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.len & 0x8000:
n = self.len = self.len & 0x7FFF
self.msg, self.data = self.data[:n], self.data[n:]
else:
n = self.len = self.len & 0x3FFF
padlen = ord(self.data[0])
self.msg = self.data[1:1+n]
self.pad = self.data[1+n:1+n+padlen]
self.data = self.data[1+n+padlen:]
# SSLv3/TLS versions
SSL3_V = 0x0300
TLS1_V = 0x0301
TLS11_V = 0x0302
TLS12_V = 0x0303
ssl3_versions_str = {
SSL3_V: 'SSL3',
TLS1_V: 'TLS 1.0',
TLS11_V: 'TLS 1.1',
TLS12_V: 'TLS 1.2'
}
SSL3_VERSION_BYTES = set(('\x03\x00', '\x03\x01', '\x03\x02', '\x03\x03'))
# Alert levels
SSL3_AD_WARNING = 1
SSL3_AD_FATAL = 2
alert_level_str = {
SSL3_AD_WARNING: 'SSL3_AD_WARNING',
SSL3_AD_FATAL: 'SSL3_AD_FATAL'
}
# SSL3 alert descriptions
SSL3_AD_CLOSE_NOTIFY = 0
SSL3_AD_UNEXPECTED_MESSAGE = 10 # fatal
SSL3_AD_BAD_RECORD_MAC = 20 # fatal
SSL3_AD_DECOMPRESSION_FAILURE = 30 # fatal
SSL3_AD_HANDSHAKE_FAILURE = 40 # fatal
SSL3_AD_NO_CERTIFICATE = 41
SSL3_AD_BAD_CERTIFICATE = 42
SSL3_AD_UNSUPPORTED_CERTIFICATE = 43
SSL3_AD_CERTIFICATE_REVOKED = 44
SSL3_AD_CERTIFICATE_EXPIRED = 45
SSL3_AD_CERTIFICATE_UNKNOWN = 46
SSL3_AD_ILLEGAL_PARAMETER = 47 # fatal
# TLS1 alert descriptions
TLS1_AD_DECRYPTION_FAILED = 21
TLS1_AD_RECORD_OVERFLOW = 22
TLS1_AD_UNKNOWN_CA = 48 # fatal
TLS1_AD_ACCESS_DENIED = 49 # fatal
TLS1_AD_DECODE_ERROR = 50 # fatal
TLS1_AD_DECRYPT_ERROR = 51
TLS1_AD_EXPORT_RESTRICTION = 60 # fatal
TLS1_AD_PROTOCOL_VERSION = 70 # fatal
TLS1_AD_INSUFFICIENT_SECURITY = 71 # fatal
TLS1_AD_INTERNAL_ERROR = 80 # fatal
TLS1_AD_USER_CANCELLED = 90
TLS1_AD_NO_RENEGOTIATION = 100
# codes 110-114 are from RFC 3546
TLS1_AD_UNSUPPORTED_EXTENSION = 110
TLS1_AD_CERTIFICATE_UNOBTAINABLE = 111
TLS1_AD_UNRECOGNIZED_NAME = 112
TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE = 113
TLS1_AD_BAD_CERTIFICATE_HASH_VALUE = 114
TLS1_AD_UNKNOWN_PSK_IDENTITY = 115 # fatal
# Mapping alert types to strings
alert_description_str = {
SSL3_AD_CLOSE_NOTIFY: 'SSL3_AD_CLOSE_NOTIFY',
SSL3_AD_UNEXPECTED_MESSAGE: 'SSL3_AD_UNEXPECTED_MESSAGE',
SSL3_AD_BAD_RECORD_MAC: 'SSL3_AD_BAD_RECORD_MAC',
SSL3_AD_DECOMPRESSION_FAILURE: 'SSL3_AD_DECOMPRESSION_FAILURE',
SSL3_AD_HANDSHAKE_FAILURE: 'SSL3_AD_HANDSHAKE_FAILURE',
SSL3_AD_NO_CERTIFICATE: 'SSL3_AD_NO_CERTIFICATE',
SSL3_AD_BAD_CERTIFICATE: 'SSL3_AD_BAD_CERTIFICATE',
SSL3_AD_UNSUPPORTED_CERTIFICATE: 'SSL3_AD_UNSUPPORTED_CERTIFICATE',
SSL3_AD_CERTIFICATE_REVOKED: 'SSL3_AD_CERTIFICATE_REVOKED',
SSL3_AD_CERTIFICATE_EXPIRED: 'SSL3_AD_CERTIFICATE_EXPIRED',
SSL3_AD_CERTIFICATE_UNKNOWN: 'SSL3_AD_CERTIFICATE_UNKNOWN',
SSL3_AD_ILLEGAL_PARAMETER: 'SSL3_AD_ILLEGAL_PARAMETER',
TLS1_AD_DECRYPTION_FAILED: 'TLS1_AD_DECRYPTION_FAILED',
TLS1_AD_RECORD_OVERFLOW: 'TLS1_AD_RECORD_OVERFLOW',
TLS1_AD_UNKNOWN_CA: 'TLS1_AD_UNKNOWN_CA',
TLS1_AD_ACCESS_DENIED: 'TLS1_AD_ACCESS_DENIED',
TLS1_AD_DECODE_ERROR: 'TLS1_AD_DECODE_ERROR',
TLS1_AD_DECRYPT_ERROR: 'TLS1_AD_DECRYPT_ERROR',
TLS1_AD_EXPORT_RESTRICTION: 'TLS1_AD_EXPORT_RESTRICTION',
TLS1_AD_PROTOCOL_VERSION: 'TLS1_AD_PROTOCOL_VERSION',
TLS1_AD_INSUFFICIENT_SECURITY: 'TLS1_AD_INSUFFICIENT_SECURITY',
TLS1_AD_INTERNAL_ERROR: 'TLS1_AD_INTERNAL_ERROR',
TLS1_AD_USER_CANCELLED: 'TLS1_AD_USER_CANCELLED',
TLS1_AD_NO_RENEGOTIATION: 'TLS1_AD_NO_RENEGOTIATION',
TLS1_AD_UNSUPPORTED_EXTENSION: 'TLS1_AD_UNSUPPORTED_EXTENSION',
TLS1_AD_CERTIFICATE_UNOBTAINABLE: 'TLS1_AD_CERTIFICATE_UNOBTAINABLE',
TLS1_AD_UNRECOGNIZED_NAME: 'TLS1_AD_UNRECOGNIZED_NAME',
TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE: 'TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE',
TLS1_AD_BAD_CERTIFICATE_HASH_VALUE: 'TLS1_AD_BAD_CERTIFICATE_HASH_VALUE',
TLS1_AD_UNKNOWN_PSK_IDENTITY: 'TLS1_AD_UNKNOWN_PSK_IDENTITY'
}
# struct format strings for parsing buffer lengths
# don't forget, you have to pad a 3-byte value with \x00
_SIZE_FORMATS = ['!B', '!H', '!I', '!I']
def parse_variable_array(buf, lenbytes):
"""
Parse an array described using the 'Type name<x..y>' syntax from the spec
Read a length at the start of buf, and returns that many bytes
after, in a tuple with the TOTAL bytes consumed (including the size). This
does not check that the array is the right length for any given datatype.
"""
# first have to figure out how to parse length
assert lenbytes <= 4 # pretty sure 4 is impossible, too
size_format = _SIZE_FORMATS[lenbytes - 1]
padding = '\x00' if lenbytes == 3 else ''
# read off the length
size = struct.unpack(size_format, padding + buf[:lenbytes])[0]
# read the actual data
data = buf[lenbytes:lenbytes + size]
# if len(data) != size: insufficient data
return data, size + lenbytes
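# Worked examples (made-up buffers, not from a real capture):
#   parse_variable_array('\x03abcXYZ', 1) == ('abc', 4)  # 1-byte length prefix
#   parse_variable_array('\x00\x02hi', 2) == ('hi', 4)   # 2-byte length prefix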
class SSL3Exception(Exception):
pass
class TLSRecord(dpkt.Packet):
"""
SSLv3 or TLSv1+ packet.
    In addition to the fields specified in the header, there are
    compressed and encrypted flags, indicating whether, in the language
    of the spec, this is a TLSPlaintext, TLSCompressed, or
    TLSCiphertext. The application has to figure out when it is
    appropriate to change these values.
"""
__hdr__ = (
('type', 'B', 0),
('version', 'H', 0),
('length', 'H', 0),
)
def __init__(self, *args, **kwargs):
# assume plaintext unless specified otherwise in arguments
self.compressed = kwargs.pop('compressed', False)
self.encrypted = kwargs.pop('encrypted', False)
# parent constructor
dpkt.Packet.__init__(self, *args, **kwargs)
# make sure length and data are consistent
self.length = len(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
header_length = self.__hdr_len__
self.data = buf[header_length:header_length+self.length]
# make sure buffer was long enough
if len(self.data) != self.length:
raise dpkt.NeedData('TLSRecord data was too short.')
# assume compressed and encrypted when it's been parsed from
# raw data
self.compressed = True
self.encrypted = True
class TLSChangeCipherSpec(dpkt.Packet):
"""
ChangeCipherSpec message is just a single byte with value 1
"""
__hdr__ = (('type', 'B', 1),)
class TLSAppData(str):
"""
As far as TLSRecord is concerned, AppData is just an opaque blob.
"""
pass
class TLSAlert(dpkt.Packet):
__hdr__ = (
('level', 'B', 1),
('description', 'B', 0),
)
class TLSHelloRequest(dpkt.Packet):
__hdr__ = tuple()
class TLSClientHello(dpkt.Packet):
__hdr__ = (
('version', 'H', 0x0301),
('random', '32s', '\x00'*32),
) # the rest is variable-length and has to be done manually
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# now session, cipher suites, extensions are in self.data
self.session_id, pointer = parse_variable_array(self.data, 1)
# print 'pointer',pointer
# handle ciphersuites
ciphersuites, parsed = parse_variable_array(self.data[pointer:], 2)
pointer += parsed
self.num_ciphersuites = len(ciphersuites) / 2
# check len(ciphersuites) % 2 == 0 ?
# compression methods
compression_methods, parsed = parse_variable_array(
self.data[pointer:], 1)
pointer += parsed
self.num_compression_methods = parsed - 1
self.compression_methods = map(ord, compression_methods)
# extensions
class TLSServerHello(dpkt.Packet):
__hdr__ = (
        ('version', 'H', 0x0301),
('random', '32s', '\x00'*32),
) # session is variable, forcing rest to be manual
def unpack(self, buf):
try:
dpkt.Packet.unpack(self, buf)
self.session_id, pointer = parse_variable_array(self.data, 1)
# single cipher suite
self.cipher_suite = struct.unpack('!H', self.data[pointer:pointer+2])[0]
pointer += 2
# single compression method
self.compression = struct.unpack('!B', self.data[pointer:pointer+1])[0]
pointer += 1
# ignore extensions for now
except struct.error:
# probably data too short
raise dpkt.NeedData
class TLSUnknownHandshake(dpkt.Packet):
__hdr__ = tuple()
TLSCertificate = TLSUnknownHandshake
TLSServerKeyExchange = TLSUnknownHandshake
TLSCertificateRequest = TLSUnknownHandshake
TLSServerHelloDone = TLSUnknownHandshake
TLSCertificateVerify = TLSUnknownHandshake
TLSClientKeyExchange = TLSUnknownHandshake
TLSFinished = TLSUnknownHandshake
# mapping of handshake type ids to their names
# and the classes that implement them
HANDSHAKE_TYPES = {
0: ('HelloRequest', TLSHelloRequest),
1: ('ClientHello', TLSClientHello),
2: ('ServerHello', TLSServerHello),
11: ('Certificate', TLSCertificate),
12: ('ServerKeyExchange', TLSServerKeyExchange),
13: ('CertificateRequest', TLSCertificateRequest),
14: ('ServerHelloDone', TLSServerHelloDone),
15: ('CertificateVerify', TLSCertificateVerify),
16: ('ClientKeyExchange', TLSClientKeyExchange),
20: ('Finished', TLSFinished),
}
class TLSHandshake(dpkt.Packet):
'''
A TLS Handshake message
    This applies to all messages encapsulated in the Record layer, but is
    especially important for handshakes and app data: a message may be
    spread across a number of TLSRecords, and a single record may contain
    more than one message. You have to reassemble the contents of the
    TLSRecords yourself.
'''
# struct.unpack can't handle the 3-byte int, so we parse it as bytes
# (and store it as bytes so dpkt doesn't get confused), and turn it into
# an int in a user-facing property
__hdr__ = (
('type', 'B', 0),
('length_bytes', '3s', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
        # NB: a record may carry more than one message of self.type;
        # only the first one is parsed here
embedded_type = HANDSHAKE_TYPES.get(self.type, None)
if embedded_type is None:
raise SSL3Exception('Unknown or invalid handshake type %d' %
self.type)
# only take the right number of bytes
self.data = self.data[:self.length]
if len(self.data) != self.length:
raise dpkt.NeedData
# get class out of embedded_type tuple
self.data = embedded_type[1](self.data)
@property
def length(self):
return struct.unpack('!I', '\x00' + self.length_bytes)[0]
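# Illustration of TLSHandshake.length (made-up bytes): for the header
# '\x01\x00\x00\x05' the 3-byte length field is '\x00\x00\x05', so
#   struct.unpack('!I', '\x00' + '\x00\x00\x05')[0] == 5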
RECORD_TYPES = {
20: TLSChangeCipherSpec,
21: TLSAlert,
22: TLSHandshake,
23: TLSAppData,
}
class SSLFactory(object):
def __new__(cls, buf):
v = buf[1:3]
if v in [ '\x03\x00', '\x03\x01', '\x03\x02' ]:
return SSL3(buf)
# SSL2 has no characteristic header or magic bytes, so we just assume
# that the msg is an SSL2 msg if it is not detected as SSL3+
return SSL2(buf)
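# e.g. SSLFactory('\x16\x03\x01\x00\x05hello') sees version bytes '\x03\x01'
# and returns an SSL3 instance; any other version bytes fall back to SSL2
# (illustrative buffer, not a valid record payload)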
def TLSMultiFactory(buf):
'''
Attempt to parse one or more TLSRecord's out of buf
Args:
buf: string containing SSL/TLS messages. May have an incomplete record
on the end
Returns:
[TLSRecord]
int, total bytes consumed, != len(buf) if an incomplete record was left at
the end.
Raises SSL3Exception.
'''
i, n = 0, len(buf)
msgs = []
while i < n:
v = buf[i+1:i+3]
if v in SSL3_VERSION_BYTES:
try:
msg = TLSRecord(buf[i:])
msgs.append(msg)
except dpkt.NeedData:
break
else:
raise SSL3Exception('Bad TLS version in buf: %r' % buf[i:i+5])
i += len(msg)
return msgs, i
import unittest
_hexdecode = binascii.a2b_hex
class TLSRecordTest(unittest.TestCase):
"""
Test basic TLSRecord functionality
    For this test, the contents of the record don't matter, since we're not
    parsing the next layer.
"""
def setUp(self):
# add some extra data, to make sure length is parsed correctly
self.p = TLSRecord('\x17\x03\x01\x00\x08abcdefghzzzzzzzzzzz')
def testContentType(self):
self.assertEqual(self.p.type, 23)
def testVersion(self):
self.assertEqual(self.p.version, 0x0301)
def testLength(self):
self.assertEqual(self.p.length, 8)
def testData(self):
self.assertEqual(self.p.data, 'abcdefgh')
def testInitialFlags(self):
self.assertTrue(self.p.compressed)
self.assertTrue(self.p.encrypted)
def testRepack(self):
p2 = TLSRecord(type=23, version=0x0301, data='abcdefgh')
self.assertEqual(p2.type, 23)
self.assertEqual(p2.version, 0x0301)
self.assertEqual(p2.length, 8)
self.assertEqual(p2.data, 'abcdefgh')
self.assertEqual(p2.pack(), self.p.pack())
def testTotalLength(self):
# that len(p) includes header
self.assertEqual(len(self.p), 13)
def testRaisesNeedDataWhenBufIsShort(self):
self.assertRaises(
dpkt.NeedData,
TLSRecord,
'\x16\x03\x01\x00\x10abc')
class TLSChangeCipherSpecTest(unittest.TestCase):
"It's just a byte. This will be quick, I promise"
def setUp(self):
self.p = TLSChangeCipherSpec('\x01')
def testParses(self):
self.assertEqual(self.p.type, 1)
def testTotalLength(self):
self.assertEqual(len(self.p), 1)
class TLSAppDataTest(unittest.TestCase):
"AppData is basically just a string"
def testValue(self):
d = TLSAppData('abcdefgh')
self.assertEqual(d, 'abcdefgh')
class TLSHandshakeTest(unittest.TestCase):
def setUp(self):
self.h = TLSHandshake('\x00\x00\x00\x01\xff')
def testCreatedInsideMessage(self):
self.assertTrue(isinstance(self.h.data, TLSHelloRequest))
def testLength(self):
self.assertEqual(self.h.length, 0x01)
def testRaisesNeedData(self):
self.assertRaises(dpkt.NeedData, TLSHandshake, '\x00\x00\x01\x01')
class ClientHelloTest(unittest.TestCase):
'This data is extracted from and verified by Wireshark'
def setUp(self):
self.data = _hexdecode(
"01000199" # handshake header
"0301" # version
"5008220ce5e0e78b6891afe204498c9363feffbe03235a2d9e05b7d990eb708d" # rand
"2009bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed1" # session id
# cipher suites
"005400ffc00ac0140088008700390038c00fc00500840035c007c009c011c0130045004400330032c00cc00ec002c0040096004100050004002fc008c01200160013c00dc003feff000ac006c010c00bc00100020001"
"0100" # compresssion methods
# extensions
"00fc0000000e000c0000096c6f63616c686f7374000a00080006001700180019000b00020100002300d0a50b2e9f618a9ea9bf493ef49b421835cd2f6b05bbe1179d8edf70d58c33d656e8696d36d7e7e0b9d3ecc0e4de339552fa06c64c0fcb550a334bc43944e2739ca342d15a9ebbe981ac87a0d38160507d47af09bdc16c5f0ee4cdceea551539382333226048a026d3a90a0535f4a64236467db8fee22b041af986ad0f253bc369137cd8d8cd061925461d7f4d7895ca9a4181ab554dad50360ac31860e971483877c9335ac1300c5e78f3e56f3b8e0fc16358fcaceefd5c8d8aaae7b35be116f8832856ca61144fcdd95e071b94d0cf7233740000"
"FFFFFFFFFFFFFFFF") # random garbage
self.p = TLSHandshake(self.data)
def testClientHelloConstructed(self):
'Make sure the correct class was constructed'
#print self.p
self.assertTrue(isinstance(self.p.data, TLSClientHello))
# def testClientDateCorrect(self):
# self.assertEqual(self.p.random_unixtime, 1342710284)
def testClientRandomCorrect(self):
self.assertEqual(self.p.data.random,
_hexdecode('5008220ce5e0e78b6891afe204498c9363feffbe03235a2d9e05b7d990eb708d'))
def testCipherSuiteLength(self):
# we won't bother testing the identity of each cipher suite in the list.
self.assertEqual(self.p.data.num_ciphersuites, 42)
#self.assertEqual(len(self.p.ciphersuites), 42)
def testSessionId(self):
self.assertEqual(self.p.data.session_id,
_hexdecode('09bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed1'))
def testCompressionMethods(self):
self.assertEqual(self.p.data.num_compression_methods, 1)
def testTotalLength(self):
self.assertEqual(len(self.p), 413)
class ServerHelloTest(unittest.TestCase):
'Again, from Wireshark'
def setUp(self):
self.data = _hexdecode('0200004d03015008220c8ec43c5462315a7c99f5d5b6bff009ad285b51dc18485f352e9fdecd2009bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed10002000005ff01000100')
self.p = TLSHandshake(self.data)
def testConstructed(self):
self.assertTrue(isinstance(self.p.data, TLSServerHello))
# def testDateCorrect(self):
# self.assertEqual(self.p.random_unixtime, 1342710284)
def testRandomCorrect(self):
self.assertEqual(self.p.data.random,
_hexdecode('5008220c8ec43c5462315a7c99f5d5b6bff009ad285b51dc18485f352e9fdecd'))
def testCipherSuite(self):
self.assertEqual(
ssl_ciphersuites.BY_CODE[self.p.data.cipher_suite].name,
'TLS_RSA_WITH_NULL_SHA')
def testTotalLength(self):
self.assertEqual(len(self.p), 81)
class TLSMultiFactoryTest(unittest.TestCase):
"Made up test data"
def setUp(self):
self.data = _hexdecode('1703010010' # header 1
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' # data 1
'1703010010' # header 2
'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB' # data 2
'1703010010' # header 3
'CCCCCCCC') # data 3 (incomplete)
self.msgs, self.bytes_parsed = TLSMultiFactory(self.data)
def testNumMessages(self):
# only complete messages should be parsed, incomplete ones left
# in buffer
self.assertEqual(len(self.msgs), 2)
def testBytesParsed(self):
self.assertEqual(self.bytes_parsed, (5 + 16) * 2)
def testFirstMsgData(self):
self.assertEqual(self.msgs[0].data, _hexdecode('AA' * 16))
def testSecondMsgData(self):
self.assertEqual(self.msgs[1].data, _hexdecode('BB' * 16))
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
NikolaYolov/invenio_backup
|
modules/websubmit/lib/functions/CaseEDS.py
|
35
|
4620
|
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""This is the CaseEDS module. Contains the CaseEDS WebSubmit function.
"""
__revision__ = "$Id$"
import os
from invenio.websubmit_config import \
InvenioWebSubmitFunctionStop, \
InvenioWebSubmitFunctionError
def CaseEDS(parameters, curdir, form, user_info=None):
"""
This function compares the content of a file to different values and
directly goes to a different step in the action according to the value.
    This function may be used when the treatment to be done after a
    submission depends on a field entered by the user. Typically it
    is used in an approval interface: if the referee approves, one
    step is executed; if the referee rejects, another one is. More
    specifically, the function gets the value from the file named
    [casevariable] and compares it with the values stored in
    [casevalues]. If a value matches, the function directly goes to
    the corresponding step stored in [casesteps]. If no value
    matches, it goes to step [casedefault].
    @param parameters: (dictionary) of parameters (relating to the given
     doctype/action) that are to be passed to the function:
          + casevariable: This parameter contains the name of the
                          file from which the function will read the
                          chosen value.
                          Eg: "decision"
          + casevalues:   Contains the list of recognized values to
                          match with the chosen value. Should be a
                          comma-separated list of words.
                          Eg: "approve,reject"
          + casesteps:    Contains the list of steps corresponding to
                          the values matched in [casevalues]. It should
                          be a comma-separated list of numbers.
                          Eg: "2,3"
                          In this example, if the value stored in the
                          file named "decision" is "approve", then the
                          function launches step 2 of this action. If it
                          is "reject", then step 3 is launched.
          + casedefault:  Contains the step number to go to by default if
                          no match is found.
                          Eg: "4"
                          In this example, if the value stored in the
                          file named "decision" is neither "approve" nor
                          "reject", then step 4 is launched.
    @return: (string) - empty string
"""
## Get the values of the parameters passed to this function via the
## parameters array:
casevariable = parameters['casevariable']
casevalue = parameters['casevalues']
casestep = parameters['casesteps']
casedefault = parameters['casedefault']
casevalues = casevalue.split(",")
casesteps = casestep.split(",")
cases = {}
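    # Python 2's map(None, a, b) zips the two lists, padding the shorter
    # one with None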
for a, b in map(None, casevalues, casesteps):
cases[a] = b
nextstep = ""
if not os.path.exists("%s/%s" % (curdir, casevariable)):
nextstep = casedefault
else:
fp = open("%s/%s" % (curdir, casevariable), "r")
value = fp.read()
fp.close()
if cases.has_key(value):
nextstep = cases[value]
else:
nextstep = casedefault
if nextstep != "":
t = "<b>Please wait...</b>"
t = """
<SCRIPT LANGUAGE="JavaScript1.1">
document.forms[0].action="/submit";
document.forms[0].step.value=%s;
user_must_confirm_before_leaving_page = false;
document.forms[0].submit();
</SCRIPT>""" % nextstep
raise InvenioWebSubmitFunctionStop(t)
else:
raise InvenioWebSubmitFunctionError("Case function: Could not " \
"determine next action step")
return ""
|
gpl-2.0
|
simbha/mAngE-Gin
|
lib/django/contrib/formtools/tests/tests.py
|
32
|
7417
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import os
import unittest
import warnings
from django import http
from django.contrib.formtools import preview, utils
from django.test import TestCase, override_settings
from django.utils._os import upath
from django.contrib.formtools.tests.forms import (
HashTestBlankForm, HashTestForm, TestForm,
)
success_string = "Done was called!"
success_string_encoded = success_string.encode()
class TestFormPreview(preview.FormPreview):
def get_context(self, request, form):
context = super(TestFormPreview, self).get_context(request, form)
context.update({'custom_context': True})
return context
def get_initial(self, request):
return {'field1': 'Works!'}
def done(self, request, cleaned_data):
return http.HttpResponse(success_string)
@override_settings(
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
ROOT_URLCONF='django.contrib.formtools.tests.urls',
)
class PreviewTests(TestCase):
def setUp(self):
super(PreviewTests, self).setUp()
# Create a FormPreview instance to share between tests
self.preview = preview.FormPreview(TestForm)
input_template = '<input type="hidden" name="%s" value="%s" />'
self.input = input_template % (self.preview.unused_name('stage'), "%d")
self.test_data = {'field1': 'foo', 'field1_': 'asdf'}
def test_unused_name(self):
"""
        Verifies name mangling to get a unique field name.
"""
self.assertEqual(self.preview.unused_name('field1'), 'field1__')
def test_form_get(self):
"""
Test contrib.formtools.preview form retrieval.
Use the client library to see if we can successfully retrieve
the form (mostly testing the setup ROOT_URLCONF
process). Verify that an additional hidden input field
is created to manage the stage.
"""
response = self.client.get('/preview/')
stage = self.input % 1
self.assertContains(response, stage, 1)
self.assertEqual(response.context['custom_context'], True)
self.assertEqual(response.context['form'].initial, {'field1': 'Works!'})
def test_form_preview(self):
"""
Test contrib.formtools.preview form preview rendering.
Use the client library to POST to the form to see if a preview
is returned. If we do get a form back check that the hidden
value is correctly managing the state of the form.
"""
# Pass strings for form submittal and add stage variable to
# show we previously saw first stage of the form.
self.test_data.update({'stage': 1, 'date1': datetime.date(2006, 10, 25)})
response = self.client.post('/preview/', self.test_data)
# Check to confirm stage is set to 2 in output form.
stage = self.input % 2
self.assertContains(response, stage, 1)
def test_form_submit(self):
"""
Test contrib.formtools.preview form submittal.
Use the client library to POST to the form with stage set to 3
to see if our forms done() method is called. Check first
without the security hash, verify failure, retry with security
hash and verify success.
"""
# Pass strings for form submittal and add stage variable to
# show we previously saw first stage of the form.
self.test_data.update({'stage': 2, 'date1': datetime.date(2006, 10, 25)})
response = self.client.post('/preview/', self.test_data)
self.assertNotEqual(response.content, success_string_encoded)
hash = self.preview.security_hash(None, TestForm(self.test_data))
self.test_data.update({'hash': hash})
response = self.client.post('/preview/', self.test_data)
self.assertEqual(response.content, success_string_encoded)
def test_bool_submit(self):
"""
Test contrib.formtools.preview form submittal when form contains:
BooleanField(required=False)
Ticket: #6209 - When an unchecked BooleanField is previewed, the preview
form's hash would be computed with no value for ``bool1``. However, when
the preview form is rendered, the unchecked hidden BooleanField would be
rendered with the string value 'False'. So when the preview form is
resubmitted, the hash would be computed with the value 'False' for
``bool1``. We need to make sure the hashes are the same in both cases.
"""
self.test_data.update({'stage': 2})
hash = self.preview.security_hash(None, TestForm(self.test_data))
self.test_data.update({'hash': hash, 'bool1': 'False'})
with warnings.catch_warnings(record=True):
response = self.client.post('/preview/', self.test_data)
self.assertEqual(response.content, success_string_encoded)
def test_form_submit_good_hash(self):
"""
Test contrib.formtools.preview form submittal, using a correct
hash
"""
# Pass strings for form submittal and add stage variable to
# show we previously saw first stage of the form.
self.test_data.update({'stage': 2})
response = self.client.post('/preview/', self.test_data)
self.assertNotEqual(response.content, success_string_encoded)
hash = utils.form_hmac(TestForm(self.test_data))
self.test_data.update({'hash': hash})
response = self.client.post('/preview/', self.test_data)
self.assertEqual(response.content, success_string_encoded)
def test_form_submit_bad_hash(self):
"""
Test contrib.formtools.preview form submittal does not proceed
if the hash is incorrect.
"""
# Pass strings for form submittal and add stage variable to
# show we previously saw first stage of the form.
self.test_data.update({'stage': 2})
response = self.client.post('/preview/', self.test_data)
self.assertEqual(response.status_code, 200)
self.assertNotEqual(response.content, success_string_encoded)
hash = utils.form_hmac(TestForm(self.test_data)) + "bad"
self.test_data.update({'hash': hash})
        response = self.client.post('/preview/', self.test_data)
self.assertNotEqual(response.content, success_string_encoded)
class FormHmacTests(unittest.TestCase):
def test_textfield_hash(self):
"""
Regression test for #10034: the hash generation function should ignore
leading/trailing whitespace so as to be friendly to broken browsers that
submit it (usually in textareas).
"""
f1 = HashTestForm({'name': 'joe', 'bio': 'Speaking español.'})
f2 = HashTestForm({'name': ' joe', 'bio': 'Speaking español. '})
hash1 = utils.form_hmac(f1)
hash2 = utils.form_hmac(f2)
self.assertEqual(hash1, hash2)
def test_empty_permitted(self):
"""
Regression test for #10643: the security hash should allow forms with
empty_permitted = True, or forms where data has not changed.
"""
f1 = HashTestBlankForm({})
f2 = HashTestForm({}, empty_permitted=True)
hash1 = utils.form_hmac(f1)
hash2 = utils.form_hmac(f2)
self.assertEqual(hash1, hash2)
|
mit
|
nanditav/15712-TensorFlow
|
tensorflow/python/saved_model/signature_constants.py
|
10
|
1977
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Signature constants for SavedModel save and restore operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
################################################################################
# Classification API constants.
# Classification inputs.
CLASSIFY_INPUTS = "inputs"
# Classification method name used in a SignatureDef.
CLASSIFY_METHOD_NAME = "tensorflow/serving/classify"
# Classification classes output.
CLASSIFY_OUTPUT_CLASSES = "classes"
# Classification scores output.
CLASSIFY_OUTPUT_SCORES = "scores"
################################################################################
# Prediction API constants.
# Predict inputs.
PREDICT_INPUTS = "inputs"
# Prediction method name used in a SignatureDef.
PREDICT_METHOD_NAME = "tensorflow/serving/predict"
# Predict outputs.
PREDICT_OUTPUTS = "outputs"
################################################################################
# Regression API constants.
# Regression inputs.
REGRESS_INPUTS = "inputs"
# Regression method name used in a SignatureDef.
REGRESS_METHOD_NAME = "tensorflow/serving/regress"
# Regression outputs.
REGRESS_OUTPUTS = "outputs"
################################################################################
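# For illustration only: these keys are meant to be used when building a
# SignatureDef, e.g. (assuming the companion TF 1.x helper
# signature_def_utils.build_signature_def; tensor_info_x/y stand for
# hypothetical TensorInfo protos):
#   build_signature_def(
#       inputs={PREDICT_INPUTS: tensor_info_x},
#       outputs={PREDICT_OUTPUTS: tensor_info_y},
#       method_name=PREDICT_METHOD_NAME)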
|
apache-2.0
|
dkubiak789/odoo
|
addons/sale_stock/sale_stock.py
|
21
|
26561
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare
from openerp.osv import fields, osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
import pytz
from openerp import SUPERUSER_ID
class sale_order(osv.osv):
_inherit = "sale.order"
def _get_default_warehouse(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
warehouse_ids = self.pool.get('stock.warehouse').search(cr, uid, [('company_id', '=', company_id)], context=context)
if not warehouse_ids:
return False
return warehouse_ids[0]
def _get_shipped(self, cr, uid, ids, name, args, context=None):
res = {}
for sale in self.browse(cr, uid, ids, context=context):
group = sale.procurement_group_id
if group:
res[sale.id] = all([proc.state in ['cancel', 'done'] for proc in group.procurement_ids])
else:
res[sale.id] = False
return res
def _get_orders(self, cr, uid, ids, context=None):
res = set()
for move in self.browse(cr, uid, ids, context=context):
if move.procurement_id and move.procurement_id.sale_line_id:
res.add(move.procurement_id.sale_line_id.order_id.id)
return list(res)
def _get_orders_procurements(self, cr, uid, ids, context=None):
res = set()
for proc in self.pool.get('procurement.order').browse(cr, uid, ids, context=context):
if proc.state =='done' and proc.sale_line_id:
res.add(proc.sale_line_id.order_id.id)
return list(res)
def _get_picking_ids(self, cr, uid, ids, name, args, context=None):
res = {}
for sale in self.browse(cr, uid, ids, context=context):
if not sale.procurement_group_id:
res[sale.id] = []
continue
res[sale.id] = self.pool.get('stock.picking').search(cr, uid, [('group_id', '=', sale.procurement_group_id.id)], context=context)
return res
def _prepare_order_line_procurement(self, cr, uid, order, line, group_id=False, context=None):
vals = super(sale_order, self)._prepare_order_line_procurement(cr, uid, order, line, group_id=group_id, context=context)
location_id = order.partner_shipping_id.property_stock_customer.id
vals['location_id'] = location_id
routes = line.route_id and [(4, line.route_id.id)] or []
vals['route_ids'] = routes
vals['warehouse_id'] = order.warehouse_id and order.warehouse_id.id or False
vals['partner_dest_id'] = order.partner_shipping_id.id
return vals
_columns = {
'incoterm': fields.many2one('stock.incoterms', 'Incoterm', help="International Commercial Terms are a series of predefined commercial terms used in international transactions."),
'picking_policy': fields.selection([('direct', 'Deliver each product when available'), ('one', 'Deliver all products at once')],
'Shipping Policy', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""Pick 'Deliver each product when available' if you allow partial delivery."""),
'order_policy': fields.selection([
('manual', 'On Demand'),
('picking', 'On Delivery Order'),
('prepaid', 'Before Delivery'),
], 'Create Invoice', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""On demand: A draft invoice can be created from the sales order when needed. \nOn delivery order: A draft invoice can be created from the delivery order when the products have been delivered. \nBefore delivery: A draft invoice is created from the sales order and must be paid before the products can be delivered."""),
'shipped': fields.function(_get_shipped, string='Delivered', type='boolean', store={
'procurement.order': (_get_orders_procurements, ['state'], 10)
}),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True),
'picking_ids': fields.function(_get_picking_ids, method=True, type='one2many', relation='stock.picking', string='Picking associated to this sale'),
}
_defaults = {
'warehouse_id': _get_default_warehouse,
'picking_policy': 'direct',
'order_policy': 'manual',
}
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
val = {}
if warehouse_id:
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
if warehouse.company_id:
val['company_id'] = warehouse.company_id.id
return {'value': val}
def action_view_delivery(self, cr, uid, ids, context=None):
'''
This function returns an action that display existing delivery orders
of given sales order ids. It can either be a in a list or in a form
view, if there is only one delivery order to show.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree_all')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
#compute the number of delivery orders to display
pick_ids = []
for so in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in so.picking_ids]
#choose the view_mode accordingly
if len(pick_ids) > 1:
result['domain'] = "[('id','in',[" + ','.join(map(str, pick_ids)) + "])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = pick_ids and pick_ids[0] or False
return result
def action_invoice_create(self, cr, uid, ids, grouped=False, states=['confirmed', 'done', 'exception'], date_invoice = False, context=None):
move_obj = self.pool.get("stock.move")
res = super(sale_order,self).action_invoice_create(cr, uid, ids, grouped=grouped, states=states, date_invoice = date_invoice, context=context)
for order in self.browse(cr, uid, ids, context=context):
if order.order_policy == 'picking':
for picking in order.picking_ids:
move_obj.write(cr, uid, [x.id for x in picking.move_lines], {'invoice_state': 'invoiced'}, context=context)
return res
def action_wait(self, cr, uid, ids, context=None):
res = super(sale_order, self).action_wait(cr, uid, ids, context=context)
for o in self.browse(cr, uid, ids):
noprod = self.test_no_product(cr, uid, o, context)
if noprod and o.order_policy=='picking':
self.write(cr, uid, [o.id], {'order_policy': 'manual'}, context=context)
return res
def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
date_planned = super(sale_order, self)._get_date_planned(cr, uid, order, line, start_date, context=context)
date_planned = (date_planned - timedelta(days=order.company_id.security_lead)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return date_planned
def _prepare_procurement_group(self, cr, uid, order, context=None):
        res = super(sale_order, self)._prepare_procurement_group(cr, uid, order, context=context)
res.update({'move_type': order.picking_policy})
return res
def action_ship_end(self, cr, uid, ids, context=None):
super(sale_order, self).action_ship_end(cr, uid, ids, context=context)
for order in self.browse(cr, uid, ids, context=context):
val = {'shipped': True}
if order.state == 'shipping_except':
val['state'] = 'progress'
if (order.order_policy == 'manual'):
for line in order.order_line:
if (not line.invoiced) and (line.state not in ('cancel', 'draft')):
val['state'] = 'manual'
break
res = self.write(cr, uid, [order.id], val)
return True
def has_stockable_products(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.state == 'cancel':
continue
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
class product_product(osv.osv):
_inherit = 'product.product'
def need_procurement(self, cr, uid, ids, context=None):
#when sale/product is installed alone, there is no need to create procurements, but with sale_stock
#we must create a procurement for each product that is not a service.
for product in self.browse(cr, uid, ids, context=context):
if product.type != 'service':
return True
return super(product_product, self).need_procurement(cr, uid, ids, context=context)
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
def _number_packages(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
try:
res[line.id] = int((line.product_uom_qty+line.product_packaging.qty-0.0001) / line.product_packaging.qty)
            except Exception:
res[line.id] = 1
return res
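    # e.g. product_uom_qty = 10 with a packaging qty of 4 gives
    # int((10 + 4 - 0.0001) / 4) == 3, i.e. a ceiling division
    # (illustrative numbers)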
_columns = {
'product_packaging': fields.many2one('product.packaging', 'Packaging'),
'number_packages': fields.function(_number_packages, type='integer', string='Number Packages'),
'route_id': fields.many2one('stock.location.route', 'Route', domain=[('sale_selectable', '=', True)]),
'product_tmpl_id': fields.related('product_id', 'product_tmpl_id', type='many2one', relation='product.template', string='Product Template'),
}
_defaults = {
'product_packaging': False,
}
def product_packaging_change(self, cr, uid, ids, pricelist, product, qty=0, uom=False,
partner_id=False, packaging=False, flag=False, context=None):
if not product:
return {'value': {'product_packaging': False}}
product_obj = self.pool.get('product.product')
product_uom_obj = self.pool.get('product.uom')
pack_obj = self.pool.get('product.packaging')
warning = {}
result = {}
warning_msgs = ''
if flag:
res = self.product_id_change(cr, uid, ids, pricelist=pricelist,
product=product, qty=qty, uom=uom, partner_id=partner_id,
packaging=packaging, flag=False, context=context)
warning_msgs = res.get('warning') and res['warning'].get('message', '') or ''
products = product_obj.browse(cr, uid, product, context=context)
if not products.packaging_ids:
packaging = result['product_packaging'] = False
if packaging:
default_uom = products.uom_id and products.uom_id.id
pack = pack_obj.browse(cr, uid, packaging, context=context)
q = product_uom_obj._compute_qty(cr, uid, uom, pack.qty, default_uom)
# qty = qty - qty % q + q
            if qty and q and (qty % q) != 0:
ean = pack.ean or _('(n/a)')
qty_pack = pack.qty
type_ul = pack.ul
if not warning_msgs:
warn_msg = _("You selected a quantity of %d Units.\n"
"But it's not compatible with the selected packaging.\n"
"Here is a proposition of quantities according to the packaging:\n"
"EAN: %s Quantity: %s Type of ul: %s") % \
(qty, ean, qty_pack, type_ul.name)
warning_msgs += _("Picking Information ! : ") + warn_msg + "\n\n"
warning = {
'title': _('Configuration Error!'),
'message': warning_msgs
}
result['product_uom_qty'] = qty
return {'value': result, 'warning': warning}
def _check_routing(self, cr, uid, ids, product, warehouse_id, context=None):
""" Verify the route of the product based on the warehouse
return True if the product availibility in stock does not need to be verified
"""
is_available = False
if warehouse_id:
warehouse = self.pool['stock.warehouse'].browse(cr, uid, warehouse_id, context=context)
for product_route in product.route_ids:
if warehouse.mto_pull_id and warehouse.mto_pull_id.route_id and warehouse.mto_pull_id.route_id.id == product_route.id:
is_available = True
break
else:
try:
mto_route_id = self.pool['stock.warehouse']._get_mto_route(cr, uid, context=context)
except osv.except_osv:
# if route MTO not found in ir_model_data, we treat the product as in MTS
mto_route_id = False
if mto_route_id:
for product_route in product.route_ids:
if product_route.id == mto_route_id:
is_available = True
break
return is_available
def product_id_change_with_wh(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, warehouse_id=False, context=None):
context = context or {}
product_uom_obj = self.pool.get('product.uom')
product_obj = self.pool.get('product.product')
warning = {}
#UoM False due to hack which makes sure uom changes price, ... in product_id_change
res = self.product_id_change(cr, uid, ids, pricelist, product, qty=qty,
uom=False, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
if not product:
res['value'].update({'product_packaging': False})
return res
# set product uom in context to get virtual stock in current uom
if 'product_uom' in res.get('value', {}):
# use the uom changed by super call
context = dict(context, uom=res['value']['product_uom'])
elif uom:
# fallback on selected
context = dict(context, uom=uom)
#update of result obtained in super function
product_obj = product_obj.browse(cr, uid, product, context=context)
res['value'].update({'product_tmpl_id': product_obj.product_tmpl_id.id, 'delay': (product_obj.sale_delay or 0.0)})
# Calling product_packaging_change function after updating UoM
res_packing = self.product_packaging_change(cr, uid, ids, pricelist, product, qty, uom, partner_id, packaging, context=context)
res['value'].update(res_packing.get('value', {}))
warning_msgs = res_packing.get('warning') and res_packing['warning']['message'] or ''
if product_obj.type == 'product':
            #determine if the product needs further check for stock availability
is_available = self._check_routing(cr, uid, ids, product_obj, warehouse_id, context=context)
#check if product is available, and if not: raise a warning, but do this only for products that aren't processed in MTO
if not is_available:
uom_record = False
if uom:
uom_record = product_uom_obj.browse(cr, uid, uom, context=context)
if product_obj.uom_id.category_id.id != uom_record.category_id.id:
uom_record = False
if not uom_record:
uom_record = product_obj.uom_id
compare_qty = float_compare(product_obj.virtual_available, qty, precision_rounding=uom_record.rounding)
if compare_qty == -1:
warn_msg = _('You plan to sell %.2f %s but you only have %.2f %s available !\nThe real stock is %.2f %s. (without reservations)') % \
(qty, uom_record.name,
max(0,product_obj.virtual_available), uom_record.name,
max(0,product_obj.qty_available), uom_record.name)
warning_msgs += _("Not enough stock ! : ") + warn_msg + "\n\n"
#update of warning messages
if warning_msgs:
warning = {
'title': _('Configuration Error!'),
'message' : warning_msgs
}
res.update({'warning': warning})
return res
def button_cancel(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for procurement in lines.mapped('procurement_ids'):
for move in procurement.move_ids:
if move.state == 'done' and not move.scrapped:
raise osv.except_osv(_('Invalid Action!'), _('You cannot cancel a sale order line which is linked to a stock move already done.'))
return super(sale_order_line, self).button_cancel(cr, uid, ids, context=context)
class stock_move(osv.osv):
_inherit = 'stock.move'
def _create_invoice_line_from_vals(self, cr, uid, move, invoice_line_vals, context=None):
invoice_line_id = super(stock_move, self)._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context)
if context.get('inv_type') in ('out_invoice', 'out_refund') and move.procurement_id and move.procurement_id.sale_line_id:
sale_line = move.procurement_id.sale_line_id
self.pool.get('sale.order.line').write(cr, uid, [sale_line.id], {
'invoice_lines': [(4, invoice_line_id)]
}, context=context)
self.pool.get('sale.order').write(cr, uid, [sale_line.order_id.id], {
'invoice_ids': [(4, invoice_line_vals['invoice_id'])],
})
sale_line_obj = self.pool.get('sale.order.line')
invoice_line_obj = self.pool.get('account.invoice.line')
sale_line_ids = sale_line_obj.search(cr, uid, [('order_id', '=', move.procurement_id.sale_line_id.order_id.id), ('invoiced', '=', False), '|', ('product_id', '=', False), ('product_id.type', '=', 'service')], context=context)
if sale_line_ids:
created_lines = sale_line_obj.invoice_line_create(cr, uid, sale_line_ids, context=context)
invoice_line_obj.write(cr, uid, created_lines, {'invoice_id': invoice_line_vals['invoice_id']}, context=context)
return invoice_line_id
def _get_master_data(self, cr, uid, move, company, context=None):
if context.get('inv_type') in ('out_invoice', 'out_refund') and move.procurement_id and move.procurement_id.sale_line_id and move.procurement_id.sale_line_id.order_id.order_policy == 'picking':
sale_order = move.procurement_id.sale_line_id.order_id
return sale_order.partner_invoice_id, sale_order.user_id.id, sale_order.pricelist_id.currency_id.id
elif move.picking_id.sale_id and context.get('inv_type') in ('out_invoice', 'out_refund'):
# In case of extra move, it is better to use the same data as the original moves
sale_order = move.picking_id.sale_id
return sale_order.partner_invoice_id, sale_order.user_id.id, sale_order.pricelist_id.currency_id.id
return super(stock_move, self)._get_master_data(cr, uid, move, company, context=context)
def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):
res = super(stock_move, self)._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)
if inv_type in ('out_invoice', 'out_refund') and move.procurement_id and move.procurement_id.sale_line_id:
sale_line = move.procurement_id.sale_line_id
res['invoice_line_tax_id'] = [(6, 0, [x.id for x in sale_line.tax_id])]
res['account_analytic_id'] = sale_line.order_id.project_id and sale_line.order_id.project_id.id or False
res['discount'] = sale_line.discount
if move.product_id.id != sale_line.product_id.id:
res['price_unit'] = self.pool['product.pricelist'].price_get(
cr, uid, [sale_line.order_id.pricelist_id.id],
move.product_id.id, move.product_uom_qty or 1.0,
sale_line.order_id.partner_id, context=context)[sale_line.order_id.pricelist_id.id]
else:
res['price_unit'] = sale_line.price_unit
uos_coeff = move.product_uom_qty and move.product_uos_qty / move.product_uom_qty or 1.0
res['price_unit'] = res['price_unit'] / uos_coeff
return res
def _get_moves_taxes(self, cr, uid, moves, inv_type, context=None):
is_extra_move, extra_move_tax = super(stock_move, self)._get_moves_taxes(cr, uid, moves, inv_type, context=context)
if inv_type == 'out_invoice':
for move in moves:
if move.procurement_id and move.procurement_id.sale_line_id:
is_extra_move[move.id] = False
extra_move_tax[move.picking_id, move.product_id] = [(6, 0, [x.id for x in move.procurement_id.sale_line_id.tax_id])]
elif move.picking_id.sale_id and move.product_id.product_tmpl_id.taxes_id:
fp = move.picking_id.sale_id.fiscal_position
res = self.pool.get("account.invoice.line").product_id_change(cr, uid, [], move.product_id.id, None, partner_id=move.picking_id.partner_id.id, fposition_id=(fp and fp.id), context=context)
extra_move_tax[0, move.product_id] = [(6, 0, res['value']['invoice_line_tax_id'])]
return (is_extra_move, extra_move_tax)
def _get_taxes(self, cr, uid, move, context=None):
if move.procurement_id.sale_line_id.tax_id:
return [tax.id for tax in move.procurement_id.sale_line_id.tax_id]
return super(stock_move, self)._get_taxes(cr, uid, move, context=context)
class stock_location_route(osv.osv):
_inherit = "stock.location.route"
_columns = {
'sale_selectable': fields.boolean("Selectable on Sales Order Line")
}
class stock_picking(osv.osv):
_inherit = "stock.picking"
def _get_partner_to_invoice(self, cr, uid, picking, context=None):
""" Inherit the original function of the 'stock' module
We select the partner of the sales order as the partner of the customer invoice
"""
saleorder_ids = self.pool['sale.order'].search(cr, uid, [('procurement_group_id' ,'=', picking.group_id.id)], context=context)
saleorders = self.pool['sale.order'].browse(cr, uid, saleorder_ids, context=context)
if saleorders and saleorders[0] and saleorders[0].order_policy == 'picking':
saleorder = saleorders[0]
return saleorder.partner_invoice_id.id
return super(stock_picking, self)._get_partner_to_invoice(cr, uid, picking, context=context)
def _get_sale_id(self, cr, uid, ids, name, args, context=None):
sale_obj = self.pool.get("sale.order")
res = {}
for picking in self.browse(cr, uid, ids, context=context):
res[picking.id] = False
if picking.group_id:
sale_ids = sale_obj.search(cr, uid, [('procurement_group_id', '=', picking.group_id.id)], context=context)
if sale_ids:
res[picking.id] = sale_ids[0]
return res
_columns = {
'sale_id': fields.function(_get_sale_id, type="many2one", relation="sale.order", string="Sale Order"),
}
def _create_invoice_from_picking(self, cr, uid, picking, vals, context=None):
invoice_id = super(stock_picking, self)._create_invoice_from_picking(cr, uid, picking, vals, context=context)
return invoice_id
def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, move, context=None):
inv_vals = super(stock_picking, self)._get_invoice_vals(cr, uid, key, inv_type, journal_id, move, context=context)
sale = move.picking_id.sale_id
if sale:
inv_vals.update({
'fiscal_position': sale.fiscal_position.id,
'payment_term': sale.payment_term.id,
'user_id': sale.user_id.id,
'section_id': sale.section_id.id,
'name': sale.client_order_ref or '',
'comment': sale.note,
})
return inv_vals
|
agpl-3.0
|
AndrewGYork/tools
|
zaber.py
|
1
|
8750
|
import time
import serial
class Stage:
"""Zaber stage(s), attached through the (USB?) serial port."""
def __init__(
self,
port_name, # For example, 'COM3' on Windows
timeout=1,
verbose=True,
very_verbose=False):
"""port_name: which serial port the stage is connected to, e.g. 'COM3'
"""
self.verbose = verbose
self.very_verbose = very_verbose
try:
self.serial = serial.Serial(
port=port_name,
baudrate=9600,
bytesize=8,
parity='N',
stopbits=1,
timeout=timeout)
except serial.serialutil.SerialException:
print('Failed to open serial port for Zaber stage(s).')
print('Sometimes Windows is weird about this!')
print('Consider trying again.')
raise
if self.verbose: print("Renumbering stages:")
self.devices = self.renumber_all_devices()
self.pending_moves = [False for d in self.devices]
if self.verbose:
for d in self.devices:
print(' Axis:', d)
print(' Done renumbering.')
self.restore_settings()
self.default_speed = min([r['speed'] for r in self.get_target_speed()])
if verbose: print(" Default stage speed:", self.default_speed)
self.move_home()
def send(self, instruction):
"""Send an instruction to the Zaber stage.
'instruction' must be a list of 6 integers, 0-255 (no error
checking).
See: http://www.zaber.com/wiki/Manuals/Binary_Protocol_Manual
for a list of instructions.
"""
assert len(instruction) == 6
if self.very_verbose: print("Sending to stage:", instruction)
serial_cmd = bytes(instruction) # 0 <= int(i) < 256 for i in instruction
self.serial.write(serial_cmd)
return None
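    # Illustration (made-up values): an absolute move (command 20) of
    # device 1 to position 1000 would be sent as
    #   self.send([1, 20] + uint_to_four_bytes(1000))  # [1, 20, 232, 3, 0, 0]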
def receive(self, expected_command_ID=None):
"""Return 6 bytes from the serial port
There must be 6 bytes to receive (no error checking).
"""
response = self.serial.read(6)
if len(response) != 6:
raise UserWarning(
"Zaber stage failed to respond. Is the timeout too short?\n" +
"Is the stage plugged in?")
response = {'device_number': response[0],
'command_ID': response[1],
'data': four_bytes_to_uint(response[2:6])}
if expected_command_ID is not None:
assert response['command_ID'] == expected_command_ID
if self.very_verbose:
print("Response from stage:\n", response)
return response
def get_position(self, axis='all'):
if axis == 'all':
axis = 0
num_responses = len(self.devices)
else:
num_responses = 1
assert axis in range(len(self.devices) + 1)
self.send([axis, 60, 0, 0, 0, 0])
responses = []
for i in range(num_responses):
responses.append(self.receive(expected_command_ID=60))
axis_positions = {}
for r in responses:
axis_positions[r['device_number']] = r['data']
return axis_positions
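    # e.g. a two-axis setup might return {1: 70000, 2: 0}
    # (illustrative positions, keyed by device number)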
def move(self, distance, movetype='absolute', response=True, axis='all'):
distance = int(distance)
if self.verbose:
print("Moving axis: ", repr(axis),
" distance ", distance, " (", movetype, ")", sep='')
if axis == 'all':
axis = 0
assert self.pending_moves == [False for d in self.devices]
else:
assert axis in [d['device_number'] for d in self.devices]
assert self.pending_moves[(axis - 1)] == False
if movetype == 'absolute':
instruction = [axis, 20]
elif movetype == 'relative':
instruction = [axis, 21]
else:
raise UserWarning("Move type must be 'relative' or 'absolute'")
# Data conversion and transfer:
instruction.extend(uint_to_four_bytes(distance))
self.send(instruction)
if axis == 0:
self.pending_moves = [True for d in self.devices]
else:
self.pending_moves[axis - 1] = True
if response:
return self.finish_moving()
return None
def finish_moving(self):
response = []
for i in range(len(self.devices)):
if self.pending_moves[i]:
response.append(self.receive())
assert response[-1]['command_ID'] in (1, 20, 21)
self.pending_moves = [False for d in self.devices]
assert self.serial.inWaiting() == 0
return response
def move_home(self, response=True):
if self.verbose: print("Moving stage(s) near home...")
self.move(100)
if self.verbose: print("Moving stage(s) home.")
assert self.pending_moves == [False for d in self.devices]
self.send([0, 1, 0, 0, 0, 0])
self.pending_moves = [True for d in self.devices]
if response:
return self.finish_moving()
return None
def restore_settings(self):
if self.verbose: print("Restoring stage(s) to default settings.")
assert self.pending_moves == [False for d in self.devices]
assert self.serial.inWaiting() == 0
self.send([0, 36, 0, 0, 0, 0]) # Restore to default settings
for d in self.devices:
self.receive(expected_command_ID=36)
self.send([0, 116, 1, 0, 0, 0]) # Disable manual move tracking
for d in self.devices:
self.receive(expected_command_ID=116)
assert self.serial.inWaiting() == 0
return None
def renumber_all_devices(self):
self.serial.flushInput()
self.serial.flushOutput()
self.send([0, 2, 0, 0, 0, 0])
# We can't predict the length of the response, since we don't
# yet know how many stages there are. Just wait a healthy amount
# of time for the answer:
time.sleep(.8) # Seems to take a little over 0.5 seconds.
bytes_waiting = self.serial.inWaiting()
assert bytes_waiting % 6 == 0 # Each stage responds with 6 bytes.
num_stages = int(bytes_waiting / 6)
stages = []
for n in range(num_stages):
r = self.receive()
assert (r['device_number'] - 1) in range(num_stages)
assert r.pop('command_ID') == 2
r['device_ID'] = r.pop('data')
assert r['device_ID'] in (# List of devices we've tested; add liberally.
20053,
)
stages.append(r)
assert self.serial.inWaiting() == 0
return stages
def set_target_speed(self, speed, response=True):
min_speed = int(self.default_speed * 0.01)
max_speed = int(2*self.default_speed)
speed = int(speed)
assert min_speed <= speed < max_speed
if self.verbose: print("Setting stage speed to", speed)
inst = [0, 42]
inst.extend(uint_to_four_bytes(speed))
self.send(inst)
if response:
reply = [self.receive(expected_command_ID=42)
for d in self.devices]
return reply
def get_target_speed(self):
inst = [0, 53, 42, 0, 0, 0]
self.send(inst)
reply = []
for d in self.devices:
reply.append(self.receive())
assert reply[-1].pop('command_ID') == 42
reply[-1]['speed'] = reply[-1].pop('data')
return reply
def close(self):
self.move_home()
self.serial.close()
def four_bytes_to_uint(x):
assert len(x) == 4
return int.from_bytes(x, byteorder='little')
def uint_to_four_bytes(x):
assert 0 <= x < 4294967296
return [x >> i & 0xff for i in (0, 8, 16, 24)]
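# Round-trip sanity check of the two helpers (illustrative value):
#   uint_to_four_bytes(70000) == [112, 17, 1, 0]    # 70000 == 0x00011170
#   four_bytes_to_uint(bytes([112, 17, 1, 0])) == 70000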
if __name__ == '__main__':
my_stage = Stage(port_name='COM3', verbose=True, very_verbose=False)
try:
my_stage.move(0, movetype='absolute', axis='all')
for i in range(len(my_stage.devices)):
my_stage.move(70000, movetype='absolute', axis=i+1)
print("Stage postion:", my_stage.get_position())
my_stage.move(0, movetype='absolute', axis=i+1)
print("Stage postion:", my_stage.get_position())
my_stage.set_target_speed(my_stage.default_speed * 1.3)
my_stage.move(70000, movetype='absolute', axis='all')
print("Stage postion:", my_stage.get_position())
my_stage.move(0, movetype='absolute', axis='all')
print("Stage postion:", my_stage.get_position())
my_stage.set_target_speed(my_stage.default_speed)
finally:
my_stage.close()
|
gpl-2.0
|
alkyl1978/gnuradio
|
gr-trellis/examples/python/test_tcm.py
|
45
|
4831
|
#!/usr/bin/env python
from gnuradio import gr
from gnuradio import trellis, digital, blocks
from gnuradio import eng_notation
import math
import sys
import random
from gnuradio.trellis import fsm_utils
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
def run_test (f,Kb,bitspersymbol,K,dimensionality,constellation,N0,seed):
tb = gr.top_block ()
# TX
numpy.random.seed(-seed)
packet = numpy.random.randint(0,2,Kb) # create Kb random bits
packet[Kb-10:Kb]=0
packet[0:Kb]=0
src = blocks.vector_source_s(packet.tolist(),False)
b2s = blocks.unpacked_to_packed_ss(1,gr.GR_MSB_FIRST) # pack bits in shorts
s2fsmi = blocks.packed_to_unpacked_ss(bitspersymbol,gr.GR_MSB_FIRST) # unpack shorts to symbols compatible with the FSM input cardinality
enc = trellis.encoder_ss(f,0) # initial state = 0
mod = digital.chunks_to_symbols_sf(constellation,dimensionality)
# CHANNEL
add = blocks.add_ff()
noise = analog.noise_source_f(analog.GR_GAUSSIAN,math.sqrt(N0/2),long(seed))
# RX
va = trellis.viterbi_combined_fs(f,K,0,0,dimensionality,constellation,digital.TRELLIS_EUCLIDEAN) # Put -1 if the Initial/Final states are not set.
fsmi2s = blocks.unpacked_to_packed_ss(bitspersymbol,gr.GR_MSB_FIRST) # pack FSM input symbols to shorts
s2b = blocks.packed_to_unpacked_ss(1,gr.GR_MSB_FIRST) # unpack shorts to bits
dst = blocks.vector_sink_s();
tb.connect (src,b2s,s2fsmi,enc,mod)
tb.connect (mod,(add,0))
tb.connect (noise,(add,1))
tb.connect (add,va,fsmi2s,s2b,dst)
tb.run()
    # A bit of cheating: run the program once and print the
    # final encoder state.
    # Then put it as the last argument in the viterbi block.
#print "final state = " , enc.ST()
if len(dst.data()) != len(packet):
print "Error: not enough data:", len(dst.data()), len(packet)
ntotal=len(packet)
nwrong = sum(abs(packet-numpy.array(dst.data())));
return (ntotal,nwrong,abs(packet-numpy.array(dst.data())))
def main():
parser = OptionParser(option_class=eng_option)
parser.add_option("-f", "--fsm_file", type="string", default="fsm_files/awgn1o2_4.fsm", help="Filename containing the fsm specification, e.g. -f fsm_files/awgn1o2_4.fsm (default=fsm_files/awgn1o2_4.fsm)")
parser.add_option("-e", "--esn0", type="eng_float", default=10.0, help="Symbol energy to noise PSD level ratio in dB, e.g., -e 10.0 (default=10.0)")
parser.add_option("-r", "--repetitions", type="int", default=100, help="Number of packets to be generated for the simulation, e.g., -r 100 (default=100)")
(options, args) = parser.parse_args ()
if len(args) != 0:
parser.print_help()
raise SystemExit, 1
fname=options.fsm_file
esn0_db=float(options.esn0)
rep=int(options.repetitions)
# system parameters
f=trellis.fsm(fname) # get the FSM specification from a file
# alternatively you can specify the fsm from its generator matrix
#f=trellis.fsm(1,2,[5,7])
Kb=1024*16 # packet size in bits (make it multiple of 16 so it can be packed in a short)
bitspersymbol = int(round(math.log(f.I())/math.log(2))) # bits per FSM input symbol
K=Kb/bitspersymbol # packet size in trellis steps
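    # e.g. with the default fsm_files/awgn1o2_4.fsm (a rate-1/2 code whose
    # input cardinality f.I() is presumably 2), bitspersymbol == 1 and K == Kb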
modulation = fsm_utils.psk4 # see fsm_utlis.py for available predefined modulations
dimensionality = modulation[0]
constellation = modulation[1]
if len(constellation)/dimensionality != f.O():
sys.stderr.write ('Incompatible FSM output cardinality and modulation size.\n')
sys.exit (1)
# calculate average symbol energy
Es = 0
for i in range(len(constellation)):
Es = Es + constellation[i]**2
Es = Es / (len(constellation)/dimensionality)
N0=Es/pow(10.0,esn0_db/10.0); # calculate noise variance
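    # e.g. esn0_db = 10.0 gives N0 = Es/10; run_test() above feeds
    # sqrt(N0/2) to the Gaussian noise source as its amplitude per dimension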
tot_b=0 # total number of transmitted bits
terr_b=0 # total number of bits in error
terr_p=0 # total number of packets in error
for i in range(rep):
(b,e,pattern)=run_test(f,Kb,bitspersymbol,K,dimensionality,constellation,N0,-(666+i)) # run experiment with different seed to get different noise realizations
tot_b=tot_b+b
terr_b=terr_b+e
terr_p=terr_p+(e!=0)
if ((i+1)%100==0) : # display progress
print i+1,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_b,terr_b, '%.2e' % ((1.0*terr_b)/tot_b)
if e!=0:
print "rep=",i, e
for k in range(Kb):
if pattern[k]!=0:
print k
# estimate of the bit error rate
print rep,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_b,terr_b, '%.2e' % ((1.0*terr_b)/tot_b)
if __name__ == '__main__':
main()
|
gpl-3.0
|
adilimad1/google-blog-converters-appengine
|
src/movabletype2blogger/movabletype2blogger.py
|
30
|
2023
|
#!/usr/bin/env python
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import traceback
import StringIO
import gdata.service
import gdata.urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import mt2b
import wsgiref.handlers
__author__ = 'JJ Lueck (jlueck@gmail.com)'
# Use urlfetch instead of httplib
gdata.service.http_request_handler = gdata.urlfetch
class TransformPage(webapp.RequestHandler):
def post(self):
# All input/output will be in UTF-8
self.response.charset = 'utf8'
    # Parse the multi-part form-data out of the POST
input = self.request.get('input-file', allow_multiple=False)
# Run the blogger import processor
translator = mt2b.MovableType2Blogger()
try:
translator.Translate(StringIO.StringIO(input), self.response.out)
self.response.content_type = 'application/atom+xml'
self.response.headers['Content-Disposition'] = \
'attachment;filename=blogger-export.xml'
except:
# Just provide an error message to the user.
self.response.content_type = 'text/plain'
self.response.out.write("Error encountered during conversion.<br/><br/>")
exc = traceback.format_exc()
self.response.out.write(exc.replace('\n', '<br/>'))
application = webapp.WSGIApplication([('/mt2b/', TransformPage)],
debug=True)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
|
apache-2.0
|
jfunez/scielo-manager
|
scielomanager/journalmanager/tests/modelfactories.py
|
1
|
7014
|
# coding: utf-8
import factory
import datetime
from journalmanager import models
class ArticleFactory(factory.Factory):
FACTORY_FOR = models.Article
front = {
'default-language': 'en',
'title-group': {
'en': u'Article Title',
'pt': u'Título do Artigo',
}
}
xml_url = 'http://xml.url/'
pdf_url = 'http://pdf.url/'
images_url = 'http://img.url/'
class UserFactory(factory.Factory):
FACTORY_FOR = models.User
@classmethod
def _setup_next_sequence(cls):
try:
return cls._associated_class.objects.values_list(
'id', flat=True).order_by('-id')[0] + 1
except IndexError:
return 0
username = factory.Sequence(lambda n: "username%s" % n)
first_name = factory.Sequence(lambda n: "first_name%s" % n)
last_name = factory.Sequence(lambda n: "last_name%s" % n)
email = factory.Sequence(lambda n: "email%s@example.com" % n)
password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'
is_staff = False
is_active = True
is_superuser = False
last_login = datetime.datetime(2000, 1, 1)
date_joined = datetime.datetime(1999, 1, 1)
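# Usage sketch (assumes factory_boy's legacy declarative API used here):
# UserFactory() creates a models.User with the sequenced defaults above, and
# keyword arguments override individual fields, e.g. UserFactory(is_staff=True).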
class SubjectCategoryFactory(factory.Factory):
FACTORY_FOR = models.SubjectCategory
term = 'Acoustics'
class StudyAreaFactory(factory.Factory):
FACTORY_FOR = models.StudyArea
study_area = 'Health Sciences'
class SponsorFactory(factory.Factory):
FACTORY_FOR = models.Sponsor
name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'
address = u'Av. Professor Lineu Prestes, 338 Cidade Universitária \
Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'
email = 'fapesp@scielo.org'
complement = ''
class UseLicenseFactory(factory.Factory):
FACTORY_FOR = models.UseLicense
license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)
reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'
disclaimer = u'<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/"><img alt="Licença Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png" /></a><br />Este trabalho foi licenciado com uma Licença <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'
class CollectionFactory(factory.Factory):
FACTORY_FOR = models.Collection
url = u'http://www.scielo.br/'
name = factory.Sequence(lambda n: 'scielo%s' % n)
address_number = u'430'
country = u'Brasil'
address = u'Rua Machado Bittencourt'
email = u'fapesp@scielo.org'
name_slug = factory.Sequence(lambda n: 'scl%s' % n)
class JournalFactory(factory.Factory):
FACTORY_FOR = models.Journal
ctrl_vocabulary = u'decs'
frequency = u'Q'
scielo_issn = u'print'
print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))
init_vol = u'1'
title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'
title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'
short_title = u'ABCD.(São Paulo)'
editorial_standard = u'vancouv'
secs_code = u'6633'
init_year = u'1986'
acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))
pub_level = u'CT'
    init_num = u'1'
subject_descriptors = u"""
MEDICINA
CIRURGIA
GASTROENTEROLOGIA
GASTROENTEROLOGIA""".strip()
pub_status = u'current'
pub_status_reason = u'Motivo da mudança é...'
publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'
publisher_country = u'BR'
publisher_state = u'SP'
publication_city = u'São Paulo'
editor_address = u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'
editor_email = u'cbcd@cbcd.org.br'
creator = factory.SubFactory(UserFactory)
pub_status_changed_by = factory.SubFactory(UserFactory)
use_license = factory.SubFactory(UseLicenseFactory)
collection = factory.SubFactory(CollectionFactory)
class SectionFactory(factory.Factory):
FACTORY_FOR = models.Section
code = factory.Sequence(lambda n: 'BJCE%s' % n)
journal = factory.SubFactory(JournalFactory)
class LanguageFactory(factory.Factory):
FACTORY_FOR = models.Language
iso_code = 'pt'
name = 'portuguese'
class IssueTitleFactory(factory.Factory):
"""
``issue`` must be provided
"""
FACTORY_FOR = models.IssueTitle
language = factory.SubFactory(LanguageFactory)
title = u'Bla'
class IssueFactory(factory.Factory):
FACTORY_FOR = models.Issue
total_documents = 16
number = factory.Sequence(lambda n: '%s' % n)
volume = factory.Sequence(lambda n: '%s' % n)
is_trashed = False
publication_start_month = 9
publication_end_month = 11
publication_year = 2012
is_marked_up = False
suppl_text = '1'
journal = factory.SubFactory(JournalFactory)
@classmethod
def _prepare(cls, create, **kwargs):
section = SectionFactory()
issue = super(IssueFactory, cls)._prepare(create, **kwargs)
issue.section.add(section)
return issue
class UserProfileFactory(factory.Factory):
FACTORY_FOR = models.UserProfile
user = factory.SubFactory(UserFactory)
email = factory.Sequence(lambda n: 'email%s@example.com' % n)
class SectionTitleFactory(factory.Factory):
FACTORY_FOR = models.SectionTitle
title = u'Artigos Originais'
language = factory.SubFactory(LanguageFactory)
section = factory.SubFactory(SectionFactory)
class DataChangeEventFactory(factory.Factory):
FACTORY_FOR = models.DataChangeEvent
user = factory.SubFactory(UserFactory)
content_object = factory.SubFactory(JournalFactory)
collection = factory.SubFactory(CollectionFactory)
event_type = 'added'
class RegularPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.RegularPressRelease
issue = factory.SubFactory(IssueFactory)
doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class AheadPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.AheadPressRelease
journal = factory.SubFactory(JournalFactory)
doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class PressReleaseTranslationFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseTranslation
language = factory.SubFactory(LanguageFactory)
press_release = factory.SubFactory(RegularPressReleaseFactory)
title = u'Yeah, this issue is amazing!'
content = u'Want to read more about...'
class PressReleaseArticleFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseArticle
press_release = factory.SubFactory(RegularPressReleaseFactory)
article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)
|
bsd-2-clause
|
rbharvs/mnd-learning
|
supervised.py
|
1
|
8636
|
import sys
import parsetags
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn import svm
from sklearn.decomposition import PCA as PCA
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import nltk
from nltk.stem import LancasterStemmer
import re
def get_top_tags(segment_tags, n):
tag_freqs = {}
for tag_list in segment_tags.values():
for tag in tag_list:
if tag not in tag_freqs:
tag_freqs[tag] = 0
tag_freqs[tag] += 1
return ['NULL'] + sorted(tag_freqs.keys(), key=lambda x: tag_freqs[x])[-n:]
def get_common_words(n=100):
try:
file_content = open(sys.argv[3]).read()
common_words = nltk.word_tokenize(file_content)
except IndexError:
return None
return set(common_words[:n])
def get_named_entities():
try:
file_content = open(sys.argv[2]).read()
named_entities = nltk.word_tokenize(file_content)
except IndexError:
return None
return set(named_entities)
def filter_segments(segment_tags, ntags):
filtered_segtags = {}
for segment in segment_tags:
# np.random.shuffle(segment_tags[segment])
for tag in segment_tags[segment]:
if tag not in ntags: continue
filtered_segtags[segment] = ntags.index(tag)
if segment not in filtered_segtags:
filtered_segtags[segment] = 0
return filtered_segtags
def increase_num_segments(segment_tags, n, length=1000):
new_segment_tags = {}
segments = sorted(segment_tags.keys(), key=len)
lengths = np.array([len(seg) for seg in segments])
dist = lengths/np.sum(lengths)
random_segments = np.random.choice(segments, size=n, p=dist)
for segment in random_segments:
new_segment = segment
if len(new_segment) > length:
index = np.random.randint(0, len(new_segment)-length)
new_segment = new_segment[index:index+length]
new_segment_tags[new_segment] = segment_tags[segment]
return new_segment_tags
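# Note: segments are sampled with probability proportional to their length,
# so longer source texts contribute proportionally more excerpts; each
# excerpt keeps the tag list of the segment it was cut from.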
def named_entity_reduction(segment_tags, named_entities, common_words):
    punctuation = [',', '.', "'", '?', ';', ':', '!', '(', ')', '`', '--',
                   '\xe2', '\x80', '\x94', '\x99']
new_segments = []
segments = list(segment_tags.keys())
for segment in segments:
new_segment = ''
tokens = nltk.word_tokenize(segment)
for token in tokens:
if token in punctuation: continue
if token.lower() in common_words: continue
if token not in named_entities: continue
new_segment += token + ' '
new_segments.append(new_segment)
new_segment_tags = {}
for i in range(len(segments)):
new_segment_tags[new_segments[i]] = segment_tags[segments[i]]
return new_segment_tags
def stemming_reduction(segment_tags):
    punctuation = [',', '.', "'", '?', ';', ':', '!', '(', ')', '`', '--',
                   '\xe2', '\x80', '\x94', '\x99']
new_segments = []
stemmer = LancasterStemmer()
segments = list(segment_tags.keys())
for segment in segments:
new_segment = ''
segment = re.sub(r'[^\x00-\x7f]',r'', segment)
tokens = nltk.word_tokenize(segment)
for token in tokens:
if token in punctuation: continue
try:
new_segment += stemmer.stem(token)+' '
except UnicodeDecodeError:
new_segment += ''
new_segments.append(new_segment)
stemmed_segment_tags = {}
for i in range(len(segments)):
stemmed_segment_tags[new_segments[i]] = segment_tags[segments[i]]
return stemmed_segment_tags
def separate_segments(segment_tags, k):
train = {}
    # Iterate over a snapshot of the keys: popping entries while iterating
    # over the live dict view raises RuntimeError in Python 3.
    for segment in list(segment_tags.keys()):
        if np.random.random() < k:
            train[segment] = segment_tags.pop(segment)
return train, segment_tags
def bag_of_words(segment_tags, tfidf=False):
#create matrix of word frequencies
segments = list(segment_tags.keys())
vec = CountVectorizer()
word_freqs = vec.fit_transform(segments).toarray()
if tfidf:
tfidf_transformer = TfidfTransformer()
word_freqs = tfidf_transformer.fit_transform(word_freqs)
labels = np.empty(shape=len(segments))
for i in range(len(segments)):
labels[i] = segment_tags[segments[i]]
return word_freqs, labels, segments
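# Usage sketch (hypothetical inputs): given a dict mapping segment text to an
# integer label, e.g. {'one short text': 1, 'another text': 0}, bag_of_words
# returns the document-term count matrix X, the aligned label vector y, and
# the segments in matrix-row order; tfidf=True rescales counts by TF-IDF.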
def entity_bow(segment_tags, named_entities, common_words):
    punctuation = [',', '.', "'", '?', ';', ':', '!', '(', ')', '`', '--',
                   '\xe2', '\x80', '\x94', '\x99']
new_segments = []
segments = list(segment_tags.keys())
for segment in segments:
new_segment = ''
tokens = nltk.word_tokenize(segment)
for token in tokens:
if token in punctuation: continue
if token.lower() in common_words: continue
if token not in named_entities: continue
new_segment += token + ' '
new_segments.append(new_segment)
vec = CountVectorizer()
word_freqs = vec.fit_transform(new_segments).toarray()
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(word_freqs)
print(word_freqs.shape, X_train_tfidf.shape)
labels = np.empty(shape=len(segments))
for i in range(len(segments)):
labels[i] = segment_tags[segments[i]]
return X_train_tfidf, labels, segments
def pca_plot(Xtrain, ytrain):
#binary classification case
X_reduced = Xtrain
pca = PCA(3)
X_pca = pca.fit_transform(X_reduced)
ax = plt.axes(projection='3d')
for i in range(X_pca.shape[0]):
if ytrain[i] == 1:
ax.scatter(X_pca[i, 0], X_pca[i, 2], X_pca[i, 1], 'o', color='blue')
else:
ax.scatter(X_pca[i, 0], X_pca[i, 2], X_pca[i,1], 'x', color='red')
plt.show()
def randomize_order(X, y, segments):
shuffled_segments = []
indices = np.arange(len(segments))
np.random.shuffle(indices)
X, y = X[indices], y[indices]
for i in indices:
shuffled_segments.append(segments[i])
    return X, y, shuffled_segments
def naive_bayes(segment_tags, k=0.5, normalize=False):
X, y, segments = randomize_order(*bag_of_words(segment_tags, tfidf=normalize))
num_examples = len(segments)
Xtest, ytest = X[int(k*num_examples):, :], y[int(k*num_examples):]
Xtrain, ytrain = X[:int(k*num_examples), :], y[:int(k*num_examples)]
nb_classifier = MultinomialNB().fit(Xtrain, ytrain)
nb_predicted_tags = nb_classifier.predict(Xtest)
nb_success_rate = np.mean(nb_predicted_tags == ytest)
return 1-nb_success_rate
def support_vector(segment_tags, k=0.5, normalize=False):
X, y, segments = randomize_order(*bag_of_words(segment_tags, tfidf=normalize))
num_examples = len(segments)
Xtest, ytest = X[int(k*num_examples):, :], y[int(k*num_examples):]
Xtrain, ytrain = X[:int(k*num_examples), :], y[:int(k*num_examples)]
svm_classifier = svm.SVC()
svm_classifier.fit(Xtrain, ytrain)
svm_predicted_tags = svm_classifier.predict(Xtest)
svm_success_rate = np.mean(svm_predicted_tags == ytest)
return 1-svm_success_rate
if __name__ == "__main__":
orig_segment_tags = parsetags.parse_tags(sys.argv[1])
common_words = get_common_words()
named_entities = get_named_entities()
for sample_size in ['BIG']:
if sample_size == 'BIG':
segment_tags = increase_num_segments(orig_segment_tags, 3000, length=1000)
orig_segment_tags = segment_tags
for text_features in ['REGULAR', 'STEMMED', 'NAMED']:
if text_features == 'STEMMED':
segment_tags = stemming_reduction(orig_segment_tags)
if text_features == 'NAMED':
segment_tags = named_entity_reduction(orig_segment_tags, named_entities, common_words)
for freq_feature in ['COUNT', 'TFIDF']:
# ntags = get_top_tags(segment_tags, 7)
print(sample_size, text_features, freq_feature)
ntags = ['ETA', 'EHDRH', 'AFR']
filtered_segtags = filter_segments(segment_tags, ntags)
with open('Results/' + sample_size + '_' + text_features + '_' + freq_feature + '.txt', 'w') as f:
for i in range(100):
                        f.write(str(naive_bayes(filtered_segtags, normalize=(freq_feature == 'TFIDF'))) + '\n')
# segment_tags = parsetags.parse_tags(sys.argv[1])
# big_segment_tags = increase_num_segments(segment_tags, 3000, length=1000)
# ntags = get_top_tags(segment_tags, 7)
# for
# # ntags = ['NULL', 'ETA', 'EHDRH', 'AFR']
# common_words = get_common_words()
# named_entities = get_named_entities()
# filtered_segtags = filter_segments(segment_tags, ntags)
# #entity_bow(filtered_segtags, named_entities, common_words)
# naive_bayes(filtered_segtags, named_entities, common_words, features=entity_bow)
# naive_bayes(filtered_segtags)
# support_vector(filtered_segtags)
# predicted_tags = [ntags[int(np.round(nb_predicted_tags[i]))] for i in range(len(svm_predicted_tags))]
# count = 0
# print(ntags)
# for i in range(len(predicted_tags)):
# if predicted_tags[i] == 'NULL':
# if all(tag not in segment_tags[shuffled_segments[i]] for tag in ntags):
# count += 1
# else:
# if predicted_tags[i] in segment_tags[shuffled_segments[i]]:
# count += 1
# print(count/len(predicted_tags))
|
mit
|
scottkmaxwell/modified
|
setup.py
|
2
|
1319
|
import os
import modified
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_description():
f = open(os.path.abspath(os.path.join(os.path.dirname(__file__), 'README.md')), 'r')
try:
return f.read()
finally:
f.close()
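# "modified" ships as a single module, so the distribution below uses
# py_modules rather than packages; metadata such as the version is pulled
# from the module itself to avoid duplication.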
setup(
name="modified",
version=modified.__version__,
description="Python file modification tracker",
long_description=get_description(),
author="Scott Maxwell",
author_email="scott@codecobblers.com",
url=modified.__project_url__,
license="MIT",
py_modules=["modified"],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
|
mit
|
liorvh/raspberry_pwn
|
src/pentest/metagoofil/hachoir_parser/common/msdos.py
|
95
|
1455
|
"""
MS-DOS structures.
Documentation:
- File attributes:
http://www.cs.colorado.edu/~main/cs1300/include/ddk/winddk.h
"""
from hachoir_core.field import StaticFieldSet
from hachoir_core.field import Bit, NullBits
_FIELDS = (
(Bit, "read_only"),
(Bit, "hidden"),
(Bit, "system"),
(NullBits, "reserved[]", 1),
(Bit, "directory"),
(Bit, "archive"),
(Bit, "device"),
(Bit, "normal"),
(Bit, "temporary"),
(Bit, "sparse_file"),
(Bit, "reparse_file"),
(Bit, "compressed"),
(Bit, "offline"),
(Bit, "dont_index_content"),
(Bit, "encrypted"),
)
class MSDOSFileAttr16(StaticFieldSet):
"""
MSDOS 16-bit file attributes
"""
format = _FIELDS + ((NullBits, "reserved[]", 1),)
_text_keys = (
# Sort attributes by importance
"directory", "read_only", "compressed",
"hidden", "system",
"normal", "device",
"temporary", "archive")
def createValue(self):
mode = []
for name in self._text_keys:
if self[name].value:
if 4 <= len(mode):
mode.append("...")
break
else:
mode.append(name)
if mode:
return ", ".join(mode)
else:
return "(none)"
class MSDOSFileAttr32(MSDOSFileAttr16):
"""
MSDOS 32-bit file attributes
"""
format = _FIELDS + ((NullBits, "reserved[]", 17),)
|
gpl-3.0
|
adityasharad/storm
|
dev-tools/travis/save-logs.py
|
37
|
1806
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import subprocess
from datetime import datetime, timedelta
def main(file, cmd):
print cmd, "writing to", file
out = open(file, "w")
count = 0
process = subprocess.Popen(cmd,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
start = datetime.now()
nextPrint = datetime.now() + timedelta(seconds=1)
# wait for the process to terminate
pout = process.stdout
line = pout.readline()
while line:
count = count + 1
if datetime.now() > nextPrint:
diff = datetime.now() - start
sys.stdout.write("\r%d seconds %d log lines"%(diff.seconds, count))
sys.stdout.flush()
nextPrint = datetime.now() + timedelta(seconds=10)
out.write(line)
line = pout.readline()
out.close()
errcode = process.wait()
diff = datetime.now() - start
sys.stdout.write("\r%d seconds %d log lines"%(diff.seconds, count))
print
print cmd, "done", errcode
return errcode
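# Example invocation (hypothetical command): save-logs.py mvn-test.log mvn -B test
# streams the command's combined output to mvn-test.log while printing an
# in-place progress line to the console.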
if __name__ == "__main__":
    if len(sys.argv) < 3:
        print "Usage: %s <output-file> <command> [args...]" % sys.argv[0]
sys.exit(1)
sys.exit(main(sys.argv[1], sys.argv[2:]))
|
apache-2.0
|
jamiefolsom/xblock-sdk
|
script/startnew.py
|
6
|
1834
|
#!/usr/bin/env python
"""
Use cookiecutter to create a new XBlock project.
"""
import os
import re
import textwrap
from cookiecutter.main import cookiecutter
EXPLANATION = """\
This script will create a new XBlock project.
You will be prompted for two pieces of information:
* Short name: a single word, all lower-case, for directory and file names.
For a hologram 3-D XBlock, you might choose "holo3d".
* Class name: a valid Python class name. It's best if this ends with "XBlock",
so for our hologram XBlock, you might choose "Hologram3dXBlock".
Once you specify those two words, a directory will be created in the current
directory containing the new project.
If you don't want to create the project here, or you enter a name incorrectly,
just type Ctrl-C to stop this script. If you don't want the resulting project,
just delete the directory it created.
"""
def main():
print EXPLANATION
# Get the values.
try:
while True:
short_name = raw_input("Short name: ")
if re.match(r"^[a-z][a-z0-9_]+$", short_name):
break
print "The short name must be a valid Python identifier, all lower-case."
while True:
class_name = raw_input("Class name: ")
if re.match(r"[A-Z][a-zA-Z0-9]+XBlock$", class_name):
break
print "The class name must be a valid Python class name, ending with XBlock."
except KeyboardInterrupt:
print "\n** Cancelled **"
return
# Find the prototype.
proto_dir = os.path.abspath(os.path.join(__file__, "../../prototype"))
cookiecutter(
proto_dir,
no_input=True,
extra_context={
'short_name': short_name,
'class_name': class_name,
},
)
if __name__ == "__main__":
main()
|
agpl-3.0
|
XXMrHyde/android_external_chromium_org
|
build/mac/change_mach_o_flags.py
|
232
|
10318
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Usage: change_mach_o_flags.py [--executable-heap] [--no-pie] <executablepath>
Arranges for the executable at |executable_path| to have its data (heap)
pages protected to prevent execution on Mac OS X 10.7 ("Lion"), and to have
the PIE (position independent executable) bit set to enable ASLR (address
space layout randomization). With --executable-heap or --no-pie, the
respective bits are cleared instead of set, making the heap executable or
disabling PIE/ASLR.
This script is able to operate on thin (single-architecture) Mach-O files
and fat (universal, multi-architecture) files. When operating on fat files,
it will set or clear the bits for each architecture contained therein.
NON-EXECUTABLE HEAP
Traditionally in Mac OS X, 32-bit processes did not have data pages set to
prohibit execution. Although user programs could call mprotect and
mach_vm_protect to deny execution of code in data pages, the kernel would
silently ignore such requests without updating the page tables, and the
hardware would happily execute code on such pages. 64-bit processes were
always given proper hardware protection of data pages. This behavior was
controllable on a system-wide level via the vm.allow_data_exec sysctl, which
is set by default to 1. The bit with value 1 (set by default) allows code
execution on data pages for 32-bit processes, and the bit with value 2
(clear by default) does the same for 64-bit processes.
In Mac OS X 10.7, executables can "opt in" to having hardware protection
against code execution on data pages applied. This is done by setting a new
bit in the |flags| field of an executable's |mach_header|. When
MH_NO_HEAP_EXECUTION is set, proper protections will be applied, regardless
of the setting of vm.allow_data_exec. See xnu-1699.22.73/osfmk/vm/vm_map.c
override_nx and xnu-1699.22.73/bsd/kern/mach_loader.c load_machfile.
The Apple toolchain has been revised to set the MH_NO_HEAP_EXECUTION when
producing executables, provided that -allow_heap_execute is not specified
at link time. Only linkers shipping with Xcode 4.0 and later (ld64-123.2 and
later) have this ability. See ld64-123.2.1/src/ld/Options.cpp
Options::reconfigureDefaults() and
ld64-123.2.1/src/ld/HeaderAndLoadCommands.hpp
HeaderAndLoadCommandsAtom<A>::flags().
This script sets the MH_NO_HEAP_EXECUTION bit on Mach-O executables. It is
intended for use with executables produced by a linker that predates Apple's
modifications to set this bit itself. It is also useful for setting this bit
for non-i386 executables, including x86_64 executables. Apple's linker only
sets it for 32-bit i386 executables, presumably under the assumption that
the value of vm.allow_data_exec is set in stone. However, if someone were to
change vm.allow_data_exec to 2 or 3, 64-bit x86_64 executables would run
without hardware protection against code execution on data pages. This
script can set the bit for x86_64 executables, guaranteeing that they run
with appropriate protection even when vm.allow_data_exec has been tampered
with.
POSITION-INDEPENDENT EXECUTABLES/ADDRESS SPACE LAYOUT RANDOMIZATION
This script sets or clears the MH_PIE bit in an executable's Mach-O header,
enabling or disabling position independence on Mac OS X 10.5 and later.
Processes running position-independent executables have varying levels of
ASLR protection depending on the OS release. The main executable's load
address, shared library load addresess, and the heap and stack base
addresses may be randomized. Position-independent executables are produced
by supplying the -pie flag to the linker (or defeated by supplying -no_pie).
Executables linked with a deployment target of 10.7 or higher have PIE on
by default.
This script is never strictly needed during the build to enable PIE, as all
linkers used are recent enough to support -pie. However, it's used to
disable the PIE bit as needed on already-linked executables.
"""
import optparse
import os
import struct
import sys
# <mach-o/fat.h>
FAT_MAGIC = 0xcafebabe
FAT_CIGAM = 0xbebafeca
# <mach-o/loader.h>
MH_MAGIC = 0xfeedface
MH_CIGAM = 0xcefaedfe
MH_MAGIC_64 = 0xfeedfacf
MH_CIGAM_64 = 0xcffaedfe
MH_EXECUTE = 0x2
MH_PIE = 0x00200000
MH_NO_HEAP_EXECUTION = 0x01000000
class MachOError(Exception):
"""A class for exceptions thrown by this module."""
pass
def CheckedSeek(file, offset):
"""Seeks the file-like object at |file| to offset |offset| and raises a
MachOError if anything funny happens."""
file.seek(offset, os.SEEK_SET)
new_offset = file.tell()
if new_offset != offset:
raise MachOError, \
'seek: expected offset %d, observed %d' % (offset, new_offset)
def CheckedRead(file, count):
"""Reads |count| bytes from the file-like |file| object, raising a
MachOError if any other number of bytes is read."""
bytes = file.read(count)
if len(bytes) != count:
raise MachOError, \
'read: expected length %d, observed %d' % (count, len(bytes))
return bytes
def ReadUInt32(file, endian):
"""Reads an unsinged 32-bit integer from the file-like |file| object,
treating it as having endianness specified by |endian| (per the |struct|
module), and returns it as a number. Raises a MachOError if the proper
length of data can't be read from |file|."""
bytes = CheckedRead(file, 4)
(uint32,) = struct.unpack(endian + 'I', bytes)
return uint32
def ReadMachHeader(file, endian):
"""Reads an entire |mach_header| structure (<mach-o/loader.h>) from the
file-like |file| object, treating it as having endianness specified by
|endian| (per the |struct| module), and returns a 7-tuple of its members
as numbers. Raises a MachOError if the proper length of data can't be read
from |file|."""
bytes = CheckedRead(file, 28)
magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = \
struct.unpack(endian + '7I', bytes)
return magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags
def ReadFatArch(file):
"""Reads an entire |fat_arch| structure (<mach-o/fat.h>) from the file-like
  |file| object, treating it as big-endian (fat structures are always stored
  big-endian), and returns a 5-tuple of its members as numbers.
Raises a MachOError if the proper length of data can't be read from
|file|."""
bytes = CheckedRead(file, 20)
cputype, cpusubtype, offset, size, align = struct.unpack('>5I', bytes)
return cputype, cpusubtype, offset, size, align
def WriteUInt32(file, uint32, endian):
"""Writes |uint32| as an unsinged 32-bit integer to the file-like |file|
object, treating it as having endianness specified by |endian| (per the
|struct| module)."""
bytes = struct.pack(endian + 'I', uint32)
assert len(bytes) == 4
file.write(bytes)
def HandleMachOFile(file, options, offset=0):
"""Seeks the file-like |file| object to |offset|, reads its |mach_header|,
and rewrites the header's |flags| field if appropriate. The header's
endianness is detected. Both 32-bit and 64-bit Mach-O headers are supported
(mach_header and mach_header_64). Raises MachOError if used on a header that
does not have a known magic number or is not of type MH_EXECUTE. The
MH_PIE and MH_NO_HEAP_EXECUTION bits are set or cleared in the |flags| field
according to |options| and written to |file| if any changes need to be made.
If already set or clear as specified by |options|, nothing is written."""
CheckedSeek(file, offset)
magic = ReadUInt32(file, '<')
if magic == MH_MAGIC or magic == MH_MAGIC_64:
endian = '<'
elif magic == MH_CIGAM or magic == MH_CIGAM_64:
endian = '>'
else:
raise MachOError, \
'Mach-O file at offset %d has illusion of magic' % offset
CheckedSeek(file, offset)
magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = \
ReadMachHeader(file, endian)
assert magic == MH_MAGIC or magic == MH_MAGIC_64
if filetype != MH_EXECUTE:
raise MachOError, \
'Mach-O file at offset %d is type 0x%x, expected MH_EXECUTE' % \
(offset, filetype)
original_flags = flags
if options.no_heap_execution:
flags |= MH_NO_HEAP_EXECUTION
else:
flags &= ~MH_NO_HEAP_EXECUTION
if options.pie:
flags |= MH_PIE
else:
flags &= ~MH_PIE
if flags != original_flags:
CheckedSeek(file, offset + 24)
WriteUInt32(file, flags, endian)
def HandleFatFile(file, options, fat_offset=0):
"""Seeks the file-like |file| object to |offset| and loops over its
|fat_header| entries, calling HandleMachOFile for each."""
CheckedSeek(file, fat_offset)
magic = ReadUInt32(file, '>')
assert magic == FAT_MAGIC
nfat_arch = ReadUInt32(file, '>')
for index in xrange(0, nfat_arch):
cputype, cpusubtype, offset, size, align = ReadFatArch(file)
assert size >= 28
# HandleMachOFile will seek around. Come back here after calling it, in
# case it sought.
fat_arch_offset = file.tell()
HandleMachOFile(file, options, offset)
CheckedSeek(file, fat_arch_offset)
def main(me, args):
parser = optparse.OptionParser('%prog [options] <executable_path>')
parser.add_option('--executable-heap', action='store_false',
dest='no_heap_execution', default=True,
help='Clear the MH_NO_HEAP_EXECUTION bit')
parser.add_option('--no-pie', action='store_false',
dest='pie', default=True,
help='Clear the MH_PIE bit')
(options, loose_args) = parser.parse_args(args)
if len(loose_args) != 1:
parser.print_usage()
return 1
executable_path = loose_args[0]
executable_file = open(executable_path, 'rb+')
magic = ReadUInt32(executable_file, '<')
if magic == FAT_CIGAM:
# Check FAT_CIGAM and not FAT_MAGIC because the read was little-endian.
HandleFatFile(executable_file, options)
elif magic == MH_MAGIC or magic == MH_CIGAM or \
magic == MH_MAGIC_64 or magic == MH_CIGAM_64:
HandleMachOFile(executable_file, options)
else:
    raise MachOError, '%s is not a Mach-O or fat file' % executable_path
executable_file.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[0], sys.argv[1:]))
|
bsd-3-clause
|
medspx/QGIS
|
python/plugins/processing/algs/gdal/GdalAlgorithmProvider.py
|
2
|
6534
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
GdalAlgorithmProvider.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (QgsApplication,
QgsProcessingProvider)
from processing.core.ProcessingConfig import ProcessingConfig, Setting
from .GdalUtils import GdalUtils
from .AssignProjection import AssignProjection
from .aspect import aspect
from .buildvrt import buildvrt
from .ClipRasterByExtent import ClipRasterByExtent
from .ClipRasterByMask import ClipRasterByMask
from .ColorRelief import ColorRelief
from .contour import contour
from .fillnodata import fillnodata
from .gdalinfo import gdalinfo
from .gdal2tiles import gdal2tiles
from .gdal2xyz import gdal2xyz
from .gdaladdo import gdaladdo
from .gdaltindex import gdaltindex
from .GridAverage import GridAverage
from .GridDataMetrics import GridDataMetrics
from .GridInverseDistance import GridInverseDistance
from .GridInverseDistanceNearestNeighbor import GridInverseDistanceNearestNeighbor
from .GridLinear import GridLinear
from .GridNearestNeighbor import GridNearestNeighbor
from .hillshade import hillshade
from .merge import merge
from .nearblack import nearblack
from .pct2rgb import pct2rgb
from .polygonize import polygonize
from .proximity import proximity
from .retile import retile
from .rgb2pct import rgb2pct
from .roughness import roughness
from .sieve import sieve
from .slope import slope
from .translate import translate
from .tpi import tpi
from .tri import tri
from .warp import warp
# from .rasterize import rasterize
# from .extractprojection import ExtractProjection
# from .gdalcalc import gdalcalc
# from .rasterize_over import rasterize_over
from .Buffer import Buffer
from .ClipVectorByExtent import ClipVectorByExtent
from .ClipVectorByMask import ClipVectorByMask
from .Dissolve import Dissolve
from .ExecuteSql import ExecuteSql
from .OffsetCurve import OffsetCurve
from .ogr2ogr import ogr2ogr
from .ogrinfo import ogrinfo
from .OgrToPostGis import OgrToPostGis
from .ogr2ogrtopostgislist import Ogr2OgrToPostGisList
from .OneSideBuffer import OneSideBuffer
from .PointsAlongLines import PointsAlongLines
# from .ogr2ogrtabletopostgislist import Ogr2OgrTableToPostGisList
pluginPath = os.path.normpath(os.path.join(
os.path.split(os.path.dirname(__file__))[0], os.pardir))
class GdalAlgorithmProvider(QgsProcessingProvider):
def __init__(self):
super().__init__()
self.algs = []
def load(self):
ProcessingConfig.settingIcons[self.name()] = self.icon()
ProcessingConfig.addSetting(Setting(self.name(), 'ACTIVATE_GDAL',
self.tr('Activate'), True))
ProcessingConfig.addSetting(Setting(
self.name(),
GdalUtils.GDAL_HELP_PATH,
self.tr('Location of GDAL docs'),
GdalUtils.gdalHelpPath()))
ProcessingConfig.readSettings()
self.refreshAlgorithms()
return True
def unload(self):
ProcessingConfig.removeSetting('ACTIVATE_GDAL')
ProcessingConfig.removeSetting(GdalUtils.GDAL_HELP_PATH)
def isActive(self):
return ProcessingConfig.getSetting('ACTIVATE_GDAL')
def setActive(self, active):
ProcessingConfig.setSettingValue('ACTIVATE_GDAL', active)
def name(self):
return 'GDAL'
def longName(self):
version = GdalUtils.readableVersion()
return 'GDAL ({})'.format(version)
def id(self):
return 'gdal'
def icon(self):
return QgsApplication.getThemeIcon("/providerGdal.svg")
def svgIconPath(self):
return QgsApplication.iconPath("providerGdal.svg")
def loadAlgorithms(self):
self.algs = [
AssignProjection(),
aspect(),
buildvrt(),
ClipRasterByExtent(),
ClipRasterByMask(),
ColorRelief(),
contour(),
fillnodata(),
gdalinfo(),
gdal2tiles(),
gdal2xyz(),
gdaladdo(),
gdaltindex(),
GridAverage(),
GridDataMetrics(),
GridInverseDistance(),
GridInverseDistanceNearestNeighbor(),
GridLinear(),
GridNearestNeighbor(),
hillshade(),
merge(),
nearblack(),
pct2rgb(),
polygonize(),
proximity(),
retile(),
rgb2pct(),
roughness(),
sieve(),
slope(),
translate(),
tpi(),
tri(),
warp(),
# rasterize(),
# ExtractProjection(),
# gdalcalc(),
# rasterize_over(),
# ----- OGR tools -----
Buffer(),
ClipVectorByExtent(),
ClipVectorByMask(),
Dissolve(),
ExecuteSql(),
OffsetCurve(),
ogr2ogr(),
ogrinfo(),
OgrToPostGis(),
Ogr2OgrToPostGisList(),
OneSideBuffer(),
PointsAlongLines(),
# Ogr2OgrTableToPostGisList(),
]
for a in self.algs:
self.addAlgorithm(a)
def supportedOutputRasterLayerExtensions(self):
return GdalUtils.getSupportedRasterExtensions()
def tr(self, string, context=''):
if context == '':
context = 'GdalAlgorithmProvider'
return QCoreApplication.translate(context, string)
|
gpl-2.0
|
kenwang815/KodiPlugins
|
script.module.youtube.dl/lib/youtube_dl/extractor/criterion.py
|
38
|
1279
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class CriterionIE(InfoExtractor):
_VALID_URL = r'https?://www\.criterion\.com/films/(?P<id>[0-9]+)-.+'
_TEST = {
'url': 'http://www.criterion.com/films/184-le-samourai',
'md5': 'bc51beba55685509883a9a7830919ec3',
'info_dict': {
'id': '184',
'ext': 'mp4',
'title': 'Le Samouraï',
'description': 'md5:a2b4b116326558149bef81f76dcbb93f',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
final_url = self._search_regex(
r'so.addVariable\("videoURL", "(.+?)"\)\;', webpage, 'video url')
title = self._og_search_title(webpage)
description = self._html_search_meta('description', webpage)
thumbnail = self._search_regex(
r'so.addVariable\("thumbnailURL", "(.+?)"\)\;',
webpage, 'thumbnail url')
return {
'id': video_id,
'url': final_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
}
|
gpl-2.0
|
davgibbs/django
|
django/core/files/locks.py
|
725
|
3516
|
"""
Portable file locking utilities.
Based partially on an example by Jonathan Feignberg in the Python
Cookbook [1] (licensed under the Python Software License) and a ctypes port by
Anatoly Techtonik for Roundup [2] (license [3]).
[1] http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203
[2] http://sourceforge.net/p/roundup/code/ci/default/tree/roundup/backends/portalocker.py
[3] http://sourceforge.net/p/roundup/code/ci/default/tree/COPYING.txt
Example Usage::
>>> from django.core.files import locks
>>> with open('./file', 'wb') as f:
... locks.lock(f, locks.LOCK_EX)
... f.write('Django')
"""
import os
__all__ = ('LOCK_EX', 'LOCK_SH', 'LOCK_NB', 'lock', 'unlock')
def _fd(f):
"""Get a filedescriptor from something which could be a file or an fd."""
return f.fileno() if hasattr(f, 'fileno') else f
if os.name == 'nt':
import msvcrt
from ctypes import (sizeof, c_ulong, c_void_p, c_int64,
Structure, Union, POINTER, windll, byref)
from ctypes.wintypes import BOOL, DWORD, HANDLE
LOCK_SH = 0 # the default
LOCK_NB = 0x1 # LOCKFILE_FAIL_IMMEDIATELY
LOCK_EX = 0x2 # LOCKFILE_EXCLUSIVE_LOCK
# --- Adapted from the pyserial project ---
# detect size of ULONG_PTR
if sizeof(c_ulong) != sizeof(c_void_p):
ULONG_PTR = c_int64
else:
ULONG_PTR = c_ulong
PVOID = c_void_p
# --- Union inside Structure by stackoverflow:3480240 ---
class _OFFSET(Structure):
_fields_ = [
('Offset', DWORD),
('OffsetHigh', DWORD)]
class _OFFSET_UNION(Union):
_anonymous_ = ['_offset']
_fields_ = [
('_offset', _OFFSET),
('Pointer', PVOID)]
class OVERLAPPED(Structure):
_anonymous_ = ['_offset_union']
_fields_ = [
('Internal', ULONG_PTR),
('InternalHigh', ULONG_PTR),
('_offset_union', _OFFSET_UNION),
('hEvent', HANDLE)]
LPOVERLAPPED = POINTER(OVERLAPPED)
# --- Define function prototypes for extra safety ---
LockFileEx = windll.kernel32.LockFileEx
LockFileEx.restype = BOOL
LockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, DWORD, LPOVERLAPPED]
UnlockFileEx = windll.kernel32.UnlockFileEx
UnlockFileEx.restype = BOOL
UnlockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, LPOVERLAPPED]
def lock(f, flags):
hfile = msvcrt.get_osfhandle(_fd(f))
overlapped = OVERLAPPED()
ret = LockFileEx(hfile, flags, 0, 0, 0xFFFF0000, byref(overlapped))
return bool(ret)
def unlock(f):
hfile = msvcrt.get_osfhandle(_fd(f))
overlapped = OVERLAPPED()
ret = UnlockFileEx(hfile, 0, 0, 0xFFFF0000, byref(overlapped))
return bool(ret)
else:
try:
import fcntl
LOCK_SH = fcntl.LOCK_SH # shared lock
LOCK_NB = fcntl.LOCK_NB # non-blocking
LOCK_EX = fcntl.LOCK_EX
except (ImportError, AttributeError):
# File locking is not supported.
LOCK_EX = LOCK_SH = LOCK_NB = 0
# Dummy functions that don't do anything.
def lock(f, flags):
# File is not locked
return False
def unlock(f):
# File is unlocked
return True
else:
        def lock(f, flags):
            # fcntl.flock() returns None on success and raises IOError on
            # failure, so comparing its return value with 0 always yields
            # False; report the outcome via exception handling instead.
            try:
                fcntl.flock(_fd(f), flags)
                return True
            except IOError:
                return False
        def unlock(f):
            try:
                fcntl.flock(_fd(f), fcntl.LOCK_UN)
                return True
            except IOError:
                return False
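# Non-blocking usage sketch (assumes the fcntl branch above): with LOCK_NB,
# lock() reports failure instead of blocking when the lock is already held:
#   with open('./file', 'wb') as f:
#       if locks.lock(f, locks.LOCK_EX | locks.LOCK_NB):
#           f.write('Django')
#           locks.unlock(f)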
|
bsd-3-clause
|
apeyser/nest-simulator
|
pynest/examples/precise_spiking.py
|
17
|
4794
|
# -*- coding: utf-8 -*-
#
# precise_spiking.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Comparing precise and grid-based neuron models
----------------------------------------------
In traditional time-driven simulations, spikes are constrained to the
time grid at a user-defined resolution. The precise spiking models
overcome this by handling spikes in continuous time [1, 2].
The precise spiking neuron models in NEST include: iaf_psc_exp_ps,
iaf_psc_alpha_canon, iaf_psc_alpha_presc, and iaf_psc_delta_canon.
More detailed information about the precise spiking models can be
found here:
http://www.nest-simulator.org/simulations-with-precise-spike-times/
This example compares the conventional grid-constrained model and the
precise version for an integrate-and-fire neuron model with exponential
post-synaptic currents [2].
References:
[1] Morrison A, Straube S, Plesser HE, Diesmann M (2007) Exact
subthreshold integration with continuous spike times in
discrete-time neural network simulations. Neural
Comput. 19(1):47-79. doi: 10.1162/neco.2007.19.1.47
[2] Hanuschkin A, Kunkel S, Helias M, Morrison A and Diesmann M (2010)
A general and efficient method for incorporating precise spike
times in globally time-driven simulations. Front. Neuroinform.
4:113. doi:10.3389/fninf.2010.00113
'''
'''
First, we import all necessary modules for simulation, analysis and
plotting.
'''
import nest
import pylab
'''
Second, we assign the simulation parameters to variables.
'''
simtime = 100.0 # ms
stim_current = 700.0 # pA
resolutions = [0.1, 0.5, 1.0] # ms
'''
Now, we simulate the two versions of the neuron models (i.e.
discrete-time: `iaf_psc_exp`; precise: `iaf_psc_exp_ps`) for each of
the defined resolutions. The neurons use their default parameters and
we stimulate them by injecting a current using a `dc_generator`
device. The membrane potential is recorded by a `voltmeter`, the
spikes are recorded by a `spike_detector`, whose property
'precise_times' is set to True. The data is stored in a dictionary for
later use.
'''
data = {}
for h in resolutions:
data[h] = {}
for model in ["iaf_psc_exp", "iaf_psc_exp_ps"]:
nest.ResetKernel()
nest.SetKernelStatus({'resolution': h})
neuron = nest.Create(model)
voltmeter = nest.Create("voltmeter", params={"interval": h})
dc = nest.Create("dc_generator", params={"amplitude": stim_current})
sd = nest.Create("spike_detector", params={"precise_times": True})
nest.Connect(voltmeter, neuron)
nest.Connect(dc, neuron)
nest.Connect(neuron, sd)
nest.Simulate(simtime)
vm_status = nest.GetStatus(voltmeter, 'events')[0]
sd_status = nest.GetStatus(sd, 'events')[0]
data[h][model] = {"vm_times": vm_status['times'],
"vm_values": vm_status['V_m'],
"spikes": sd_status['times'],
"V_th": nest.GetStatus(neuron, 'V_th')[0]}
'''
After simulation, we plot the results from the simulation. The figure
illustrates the membrane potential excursion of the two models due to
injected current simulated for 100 ms for a different timestep in each
panel. The blue line is the voltage trace of the discrete-time neuron,
the red line is that of the precise spiking version of the same model.
Please note that the temporal differences between the traces in the
different panels are caused by the different resolutions used.
'''
colors = ["#3465a4", "#cc0000"]
for v, h in enumerate(sorted(data)):
plot = pylab.subplot(len(data), 1, v + 1)
plot.set_title("Resolution: {0} ms".format(h))
for i, model in enumerate(data[h]):
times = data[h][model]["vm_times"]
potentials = data[h][model]["vm_values"]
spikes = data[h][model]["spikes"]
spikes_y = [data[h][model]["V_th"]] * len(spikes)
plot.plot(times, potentials, "-", c=colors[i], ms=5, lw=2, label=model)
plot.plot(spikes, spikes_y, ".", c=colors[i], ms=5, lw=2)
if v == 2:
plot.legend(loc=4)
else:
plot.set_xticklabels('')
|
gpl-2.0
|
jorisvandenbossche/numpy
|
tools/swig/test/testArray.py
|
121
|
12933
|
#! /usr/bin/env python
from __future__ import division, absolute_import, print_function
# System imports
from distutils.util import get_platform
import os
import sys
import unittest
# Import NumPy
import numpy as np
major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
if major == 0:
BadListError = TypeError
else:
BadListError = ValueError
import Array
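# "Array" is the SWIG-generated extension module built from the accompanying
# interface file; the test cases below exercise its typemaps for 1-D, 2-D and
# complex arrays.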
######################################################################
class Array1TestCase(unittest.TestCase):
def setUp(self):
self.length = 5
self.array1 = Array.Array1(self.length)
def testConstructor0(self):
"Test Array1 default constructor"
a = Array.Array1()
self.failUnless(isinstance(a, Array.Array1))
self.failUnless(len(a) == 0)
def testConstructor1(self):
"Test Array1 length constructor"
self.failUnless(isinstance(self.array1, Array.Array1))
def testConstructor2(self):
"Test Array1 array constructor"
na = np.arange(self.length)
aa = Array.Array1(na)
self.failUnless(isinstance(aa, Array.Array1))
def testConstructor3(self):
"Test Array1 copy constructor"
for i in range(self.array1.length()): self.array1[i] = i
arrayCopy = Array.Array1(self.array1)
self.failUnless(arrayCopy == self.array1)
def testConstructorBad(self):
"Test Array1 length constructor, negative"
self.assertRaises(ValueError, Array.Array1, -4)
def testLength(self):
"Test Array1 length method"
self.failUnless(self.array1.length() == self.length)
def testLen(self):
"Test Array1 __len__ method"
self.failUnless(len(self.array1) == self.length)
def testResize0(self):
"Test Array1 resize method, length"
newLen = 2 * self.length
self.array1.resize(newLen)
self.failUnless(len(self.array1) == newLen)
def testResize1(self):
"Test Array1 resize method, array"
a = np.zeros((2*self.length,), dtype='l')
self.array1.resize(a)
self.failUnless(len(self.array1) == a.size)
def testResizeBad(self):
"Test Array1 resize method, negative length"
self.assertRaises(ValueError, self.array1.resize, -5)
def testSetGet(self):
"Test Array1 __setitem__, __getitem__ methods"
n = self.length
for i in range(n):
self.array1[i] = i*i
for i in range(n):
self.failUnless(self.array1[i] == i*i)
def testSetBad1(self):
"Test Array1 __setitem__ method, negative index"
self.assertRaises(IndexError, self.array1.__setitem__, -1, 0)
def testSetBad2(self):
"Test Array1 __setitem__ method, out-of-range index"
self.assertRaises(IndexError, self.array1.__setitem__, self.length+1, 0)
def testGetBad1(self):
"Test Array1 __getitem__ method, negative index"
self.assertRaises(IndexError, self.array1.__getitem__, -1)
def testGetBad2(self):
"Test Array1 __getitem__ method, out-of-range index"
self.assertRaises(IndexError, self.array1.__getitem__, self.length+1)
def testAsString(self):
"Test Array1 asString method"
for i in range(self.array1.length()): self.array1[i] = i+1
self.failUnless(self.array1.asString() == "[ 1, 2, 3, 4, 5 ]")
def testStr(self):
"Test Array1 __str__ method"
for i in range(self.array1.length()): self.array1[i] = i-2
self.failUnless(str(self.array1) == "[ -2, -1, 0, 1, 2 ]")
def testView(self):
"Test Array1 view method"
for i in range(self.array1.length()): self.array1[i] = i+1
a = self.array1.view()
self.failUnless(isinstance(a, np.ndarray))
self.failUnless(len(a) == self.length)
self.failUnless((a == [1, 2, 3, 4, 5]).all())
######################################################################
class Array2TestCase(unittest.TestCase):
def setUp(self):
self.nrows = 5
self.ncols = 4
self.array2 = Array.Array2(self.nrows, self.ncols)
def testConstructor0(self):
"Test Array2 default constructor"
a = Array.Array2()
self.failUnless(isinstance(a, Array.Array2))
self.failUnless(len(a) == 0)
def testConstructor1(self):
"Test Array2 nrows, ncols constructor"
self.failUnless(isinstance(self.array2, Array.Array2))
def testConstructor2(self):
"Test Array2 array constructor"
na = np.zeros((3, 4), dtype="l")
aa = Array.Array2(na)
self.failUnless(isinstance(aa, Array.Array2))
def testConstructor3(self):
"Test Array2 copy constructor"
for i in range(self.nrows):
for j in range(self.ncols):
self.array2[i][j] = i * j
arrayCopy = Array.Array2(self.array2)
self.failUnless(arrayCopy == self.array2)
def testConstructorBad1(self):
"Test Array2 nrows, ncols constructor, negative nrows"
self.assertRaises(ValueError, Array.Array2, -4, 4)
def testConstructorBad2(self):
"Test Array2 nrows, ncols constructor, negative ncols"
self.assertRaises(ValueError, Array.Array2, 4, -4)
def testNrows(self):
"Test Array2 nrows method"
self.failUnless(self.array2.nrows() == self.nrows)
def testNcols(self):
"Test Array2 ncols method"
self.failUnless(self.array2.ncols() == self.ncols)
def testLen(self):
"Test Array2 __len__ method"
self.failUnless(len(self.array2) == self.nrows*self.ncols)
def testResize0(self):
"Test Array2 resize method, size"
newRows = 2 * self.nrows
newCols = 2 * self.ncols
self.array2.resize(newRows, newCols)
self.failUnless(len(self.array2) == newRows * newCols)
def testResize1(self):
"Test Array2 resize method, array"
a = np.zeros((2*self.nrows, 2*self.ncols), dtype='l')
self.array2.resize(a)
self.failUnless(len(self.array2) == a.size)
def testResizeBad1(self):
"Test Array2 resize method, negative nrows"
self.assertRaises(ValueError, self.array2.resize, -5, 5)
def testResizeBad2(self):
"Test Array2 resize method, negative ncols"
self.assertRaises(ValueError, self.array2.resize, 5, -5)
def testSetGet1(self):
"Test Array2 __setitem__, __getitem__ methods"
m = self.nrows
n = self.ncols
array1 = [ ]
a = np.arange(n, dtype="l")
for i in range(m):
array1.append(Array.Array1(i*a))
for i in range(m):
self.array2[i] = array1[i]
for i in range(m):
self.failUnless(self.array2[i] == array1[i])
def testSetGet2(self):
"Test Array2 chained __setitem__, __getitem__ methods"
m = self.nrows
n = self.ncols
for i in range(m):
for j in range(n):
self.array2[i][j] = i*j
for i in range(m):
for j in range(n):
self.failUnless(self.array2[i][j] == i*j)
def testSetBad1(self):
"Test Array2 __setitem__ method, negative index"
a = Array.Array1(self.ncols)
self.assertRaises(IndexError, self.array2.__setitem__, -1, a)
def testSetBad2(self):
"Test Array2 __setitem__ method, out-of-range index"
a = Array.Array1(self.ncols)
self.assertRaises(IndexError, self.array2.__setitem__, self.nrows+1, a)
def testGetBad1(self):
"Test Array2 __getitem__ method, negative index"
self.assertRaises(IndexError, self.array2.__getitem__, -1)
def testGetBad2(self):
"Test Array2 __getitem__ method, out-of-range index"
self.assertRaises(IndexError, self.array2.__getitem__, self.nrows+1)
def testAsString(self):
"Test Array2 asString method"
result = """\
[ [ 0, 1, 2, 3 ],
[ 1, 2, 3, 4 ],
[ 2, 3, 4, 5 ],
[ 3, 4, 5, 6 ],
[ 4, 5, 6, 7 ] ]
"""
for i in range(self.nrows):
for j in range(self.ncols):
self.array2[i][j] = i+j
self.failUnless(self.array2.asString() == result)
def testStr(self):
"Test Array2 __str__ method"
result = """\
[ [ 0, -1, -2, -3 ],
[ 1, 0, -1, -2 ],
[ 2, 1, 0, -1 ],
[ 3, 2, 1, 0 ],
[ 4, 3, 2, 1 ] ]
"""
for i in range(self.nrows):
for j in range(self.ncols):
self.array2[i][j] = i-j
self.failUnless(str(self.array2) == result)
def testView(self):
"Test Array2 view method"
a = self.array2.view()
self.failUnless(isinstance(a, np.ndarray))
self.failUnless(len(a) == self.nrows)
######################################################################
class ArrayZTestCase(unittest.TestCase):
def setUp(self):
self.length = 5
self.array3 = Array.ArrayZ(self.length)
def testConstructor0(self):
"Test ArrayZ default constructor"
a = Array.ArrayZ()
self.failUnless(isinstance(a, Array.ArrayZ))
self.failUnless(len(a) == 0)
def testConstructor1(self):
"Test ArrayZ length constructor"
self.failUnless(isinstance(self.array3, Array.ArrayZ))
def testConstructor2(self):
"Test ArrayZ array constructor"
na = np.arange(self.length, dtype=np.complex128)
aa = Array.ArrayZ(na)
self.failUnless(isinstance(aa, Array.ArrayZ))
def testConstructor3(self):
"Test ArrayZ copy constructor"
for i in range(self.array3.length()): self.array3[i] = complex(i,-i)
arrayCopy = Array.ArrayZ(self.array3)
self.failUnless(arrayCopy == self.array3)
def testConstructorBad(self):
"Test ArrayZ length constructor, negative"
self.assertRaises(ValueError, Array.ArrayZ, -4)
def testLength(self):
"Test ArrayZ length method"
self.failUnless(self.array3.length() == self.length)
def testLen(self):
"Test ArrayZ __len__ method"
self.failUnless(len(self.array3) == self.length)
def testResize0(self):
"Test ArrayZ resize method, length"
newLen = 2 * self.length
self.array3.resize(newLen)
self.failUnless(len(self.array3) == newLen)
def testResize1(self):
"Test ArrayZ resize method, array"
a = np.zeros((2*self.length,), dtype=np.complex128)
self.array3.resize(a)
self.failUnless(len(self.array3) == a.size)
def testResizeBad(self):
"Test ArrayZ resize method, negative length"
self.assertRaises(ValueError, self.array3.resize, -5)
def testSetGet(self):
"Test ArrayZ __setitem__, __getitem__ methods"
n = self.length
for i in range(n):
self.array3[i] = i*i
for i in range(n):
self.failUnless(self.array3[i] == i*i)
def testSetBad1(self):
"Test ArrayZ __setitem__ method, negative index"
self.assertRaises(IndexError, self.array3.__setitem__, -1, 0)
def testSetBad2(self):
"Test ArrayZ __setitem__ method, out-of-range index"
self.assertRaises(IndexError, self.array3.__setitem__, self.length+1, 0)
def testGetBad1(self):
"Test ArrayZ __getitem__ method, negative index"
self.assertRaises(IndexError, self.array3.__getitem__, -1)
def testGetBad2(self):
"Test ArrayZ __getitem__ method, out-of-range index"
self.assertRaises(IndexError, self.array3.__getitem__, self.length+1)
def testAsString(self):
"Test ArrayZ asString method"
for i in range(self.array3.length()): self.array3[i] = complex(i+1,-i-1)
self.failUnless(self.array3.asString() == "[ (1,-1), (2,-2), (3,-3), (4,-4), (5,-5) ]")
def testStr(self):
"Test ArrayZ __str__ method"
for i in range(self.array3.length()): self.array3[i] = complex(i-2,(i-2)*2)
self.failUnless(str(self.array3) == "[ (-2,-4), (-1,-2), (0,0), (1,2), (2,4) ]")
def testView(self):
"Test ArrayZ view method"
for i in range(self.array3.length()): self.array3[i] = complex(i+1,i+2)
a = self.array3.view()
self.failUnless(isinstance(a, np.ndarray))
self.failUnless(len(a) == self.length)
self.failUnless((a == [1+2j, 2+3j, 3+4j, 4+5j, 5+6j]).all())
######################################################################
if __name__ == "__main__":
# Build the test suite
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Array1TestCase))
suite.addTest(unittest.makeSuite(Array2TestCase))
suite.addTest(unittest.makeSuite(ArrayZTestCase))
# Execute the test suite
print("Testing Classes of Module Array")
print("NumPy version", np.__version__)
print()
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(bool(result.errors + result.failures))
|
bsd-3-clause
|
sburnett/seattle
|
seattlegeni/website/html/tests/ut_html_test_get_resources.py
|
1
|
5863
|
"""
<Program>
test_get_resources.py
<Started>
8/30/2009
<Author>
Jason Chen
jchen@cs.washington.edu
<Purpose>
Tests that the get_resources view function
handles normal operation and exceptions correctly.
<Notes>
See test_register.py for an explanation of our usage of the Django test client.
"""
#pragma out
# We import the testlib FIRST, as the test db settings
# need to be set before we import anything else.
from seattlegeni.tests import testlib
from seattlegeni.common.exceptions import *
from seattlegeni.website.control import interface
from seattlegeni.website.control import models
from django.contrib.auth.models import User as DjangoUser
# The django test client emulates a webclient, and returns what django
# considers the final rendered result (in HTML). This allows to test purely the view
# functions.
from django.test.client import Client
# Declare our mock functions
def mock_get_logged_in_user(request):
geniuser = models.GeniUser(username='tester', password='password', email='test@test.com',
affiliation='test affil', user_pubkey='user_pubkey',
user_privkey='user_privkey', donor_pubkey='donor_pubkey',
usable_vessel_port='12345', free_vessel_credits=10)
return geniuser
def mock_acquire_vessels(geniuser, vesselcount, vesseltype):
return ['test1', 'test2']
def mock_acquire_vessels_throws_UnableToAcquireResourcesError(geniuser, vesselcount, vesseltype):
raise UnableToAcquireResourcesError
def mock_acquire_vessels_throws_InsufficientUserResourcesError(geniuser, vesselcount, vesseltype):
raise InsufficientUserResourcesError
c = Client()
good_data = {'num':5, 'env':'rand'}
def main():
# Setup test environment
testlib.setup_test_environment()
testlib.setup_test_db()
try:
login_test_user()
test_normal()
test_interface_throws_UnableToAcquireResourcesError()
test_interface_throws_InsufficientUserResourcesError()
test_blank_POST_data()
test_invalid_POST_data_invalid_num()
test_invalid_POST_data_invalid_env()
print "All tests passed."
finally:
testlib.teardown_test_db()
testlib.teardown_test_environment()
def test_normal():
"""
<Purpose>
Test normal behavior
"""
interface.acquire_vessels = mock_acquire_vessels
response = c.post('/html/get_resources', good_data, follow=True)
assert(response.status_code == 200)
assert(response.template[0].name == 'control/myvessels.html')
def test_interface_throws_UnableToAcquireResourcesError():
"""
<Purpose>
Test behavior if interface throws UnableToAcquireResourcesError.
<Note>
Checking only the existance of "action_summary" in the response
is sufficient in seeing whether the view caught the error, since
error messages are shown in that html element. (Of course, so are
normal messages, but we are clearly having the interface throw an
exception, so normal messages don't even exist in this context).
"""
interface.acquire_vessels = mock_acquire_vessels_throws_UnableToAcquireResourcesError
response = c.post('/html/get_resources', good_data, follow=True)
assert(response.status_code == 200)
assert(response.template[0].name == 'control/myvessels.html')
assert("Unable to acquire vessels at this time" in response.content)
def test_interface_throws_InsufficientUserResourcesError():
"""
<Purpose>
Test behavior if interface throws InsufficientUserResourcesError.
"""
interface.acquire_vessels = mock_acquire_vessels_throws_InsufficientUserResourcesError
response = c.post('/html/get_resources', good_data, follow=True)
assert(response.status_code == 200)
assert(response.template[0].name == 'control/myvessels.html')
assert("Unable to acquire" in response.content)
assert("vessel credit" in response.content)
def test_blank_POST_data():
"""
<Purpose>
Test behavior if we submit blank POST data.
"""
interface.acquire_vessels = mock_acquire_vessels
response = c.post('/html/get_resources', {}, follow=True)
assert(response.status_code == 200)
assert(response.template[0].name == 'control/myvessels.html')
assert("This field is required" in response.content)
def test_invalid_POST_data_invalid_num():
"""
<Purpose>
Test behavior if we submit POST data with an invalid 'num' field.
"""
interface.acquire_vessels = mock_acquire_vessels
test_data = {'num':-5, 'env':'rand'}
response = c.post('/html/get_resources', test_data, follow=True)
assert(response.status_code == 200)
assert(response.template[0].name == 'control/myvessels.html')
assert("Select a valid choice" in response.content)
def test_invalid_POST_data_invalid_env():
"""
<Purpose>
Test behavior if we submit POST data with an invalid 'env' field.
"""
interface.acquire_vessels = mock_acquire_vessels
test_data = {'num':5, 'env':'notvalid'}
response = c.post('/html/get_resources', test_data, follow=True)
assert(response.status_code == 200)
assert(response.template[0].name == 'control/myvessels.html')
assert("Select a valid choice" in response.content)
# Creates a test user in the test db, and uses the test client to 'login',
# so all views that expect @login_required will now pass the login check.
def login_test_user():
# uses the mock get_logged_in_user function that represents a logged in user
interface.get_logged_in_user = mock_get_logged_in_user
user = DjangoUser.objects.create_user('tester', 'test@test.com', 'testpassword')
user.save()
c.login(username='tester', password='testpassword')
if __name__=="__main__":
main()
|
mit
|
trondeau/gnuradio
|
gr-digital/python/digital/qa_pn_correlator_cc.py
|
57
|
1657
|
#!/usr/bin/env python
#
# Copyright 2007,2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, digital, blocks
class test_pn_correlator_cc(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_000_make(self):
c = digital.pn_correlator_cc(10)
def test_001_correlate(self):
degree = 10
length = 2**degree-1
src = digital.glfsr_source_f(degree)
head = blocks.head(gr.sizeof_float, length*length)
f2c = blocks.float_to_complex()
corr = digital.pn_correlator_cc(degree)
dst = blocks.vector_sink_c()
self.tb.connect(src, head, f2c, corr, dst)
self.tb.run()
data = dst.data()
self.assertEqual(data[-1], (1.0+0j))
if __name__ == '__main__':
gr_unittest.run(test_pn_correlator_cc, "test_pn_correlator_cc.xml")
|
gpl-3.0
|
huobaowangxi/scikit-learn
|
sklearn/decomposition/dict_learning.py
|
83
|
44062
|
""" Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
    Each row of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
    code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, precompute=gram,
max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
                         % algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None:
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
return _sparse_encode(X, dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init, max_iter=max_iter)
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram, cov[:, this_slice], algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
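# Editor's illustrative sketch (not part of scikit-learn): a minimal call to
# sparse_encode against a random, row-normalized dictionary. All shapes,
# parameters and the seed below are assumptions chosen for the example.
def _demo_sparse_encode():
    rng = np.random.RandomState(0)
    dictionary = rng.randn(8, 10)                      # (n_components, n_features)
    dictionary /= np.sqrt((dictionary ** 2).sum(axis=1))[:, np.newaxis]
    X = rng.randn(5, 10)                               # (n_samples, n_features)
    code = sparse_encode(X, dictionary, algorithm='omp', n_nonzero_coefs=3)
    assert code.shape == (5, 8)                        # (n_samples, n_components)
    return code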
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
        Pseudo-random number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
    n_features = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
            dictionary[:, k] = random_state.randn(n_features)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
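# Editor's illustrative sketch (not part of scikit-learn): one in-place
# dictionary update against random data; afterwards every atom has unit norm.
# The shapes and the seed are assumptions.
def _demo_update_dict():
    rng = np.random.RandomState(0)
    Y = rng.randn(6, 10)                               # (n_features, n_samples)
    code = rng.randn(4, 10)                            # (n_components, n_samples)
    dictionary = np.array(rng.randn(6, 4), order='F')  # (n_features, n_components)
    dictionary, R = _update_dict(dictionary, Y, code, return_r2=True,
                                 random_state=rng)
    assert np.allclose((dictionary ** 2).sum(axis=0), 1.0)
    return R                                           # residual sum of squares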
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
        Pseudo-random number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
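# Editor's illustrative sketch (not part of scikit-learn): factorize a small
# random matrix with dict_learning. Shapes, alpha and the seed are assumptions.
def _demo_dict_learning():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 12)                              # (n_samples, n_features)
    code, dictionary, errors = dict_learning(X, n_components=5, alpha=1.,
                                             max_iter=20, random_state=0)
    assert code.shape == (20, 5) and dictionary.shape == (5, 12)
    return errors[-1]                                  # final value of the cost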
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
dictionary = np.ascontiguousarray(dictionary.T)
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
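# Editor's illustrative sketch (not part of scikit-learn): run the online
# solver for a few mini-batches. Shapes, batch size and the seed are
# assumptions.
def _demo_dict_learning_online():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 8)                               # (n_samples, n_features)
    code, dictionary = dict_learning_online(X, n_components=4, alpha=1.,
                                            n_iter=10, batch_size=5,
                                            random_state=0)
    assert code.shape == (30, 4) and dictionary.shape == (4, 8)
    return code, dictionary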
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
The dictionary atoms used for sparse coding. Lines are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
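# Editor's illustrative sketch (not part of scikit-learn): sparse-code data
# against a fixed random dictionary with SparseCoder. Shapes, the algorithm
# settings and the seed are assumptions.
def _demo_sparse_coder():
    rng = np.random.RandomState(0)
    dictionary = rng.randn(6, 9)                       # (n_components, n_features)
    dictionary /= np.sqrt((dictionary ** 2).sum(axis=1))[:, np.newaxis]
    coder = SparseCoder(dictionary, transform_algorithm='lasso_lars',
                        transform_alpha=0.5)
    code = coder.transform(rng.randn(4, 9))
    assert code.shape == (4, 6)                        # (n_samples, n_components)
    return code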
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
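# Editor's illustrative sketch (not part of scikit-learn): fit the batch
# estimator on random data, then encode two samples. All parameters are
# assumptions.
def _demo_dictionary_learning():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 10)
    dico = DictionaryLearning(n_components=5, alpha=1., max_iter=10,
                              random_state=0).fit(X)
    assert dico.components_.shape == (5, 10)
    return dico.transform(X[:2])                       # (2, 5) sparse codes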
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
The number of iteration on data batches that has been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
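# Editor's illustrative sketch (not part of scikit-learn): incremental
# fitting over two chunks of random data via partial_fit. All parameters are
# assumptions.
def _demo_minibatch_partial_fit():
    rng = np.random.RandomState(0)
    dico = MiniBatchDictionaryLearning(n_components=4, alpha=1., n_iter=5,
                                       random_state=0)
    for chunk in np.array_split(rng.randn(40, 8), 2):
        dico.partial_fit(chunk)
    assert dico.components_.shape == (4, 8)
    return dico.components_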
|
bsd-3-clause
|
daGrevis/squirrel
|
ware.py
|
1
|
2640
|
import inspect
class MiddlewareDuplicationError(Exception):
def __init__(self, middleware_name, middleware_names):
message = ("Middleware `{}` was already found in `{}` middlewares!"
.format(middleware_name, middleware_names))
super().__init__(message)
class MiddlewareMissingError(Exception):
def __init__(self, middleware_name, middleware_names):
message = ("Middleware `{}` wasn't found between `{}` middlewares!"
.format(middleware_name, middleware_names))
super().__init__(message)
class MiddlewareOrderError(Exception):
def __init__(self, middleware_name,
names_for_before_middlewares, names_for_after_middlewares):
message = ("Middleware `{}` can't be added before `{}` middlewares"
" and after `{}` middlewares!"
.format(middleware_name,
names_for_before_middlewares,
names_for_after_middlewares))
super().__init__(message)
class MiddlewareArgumentsError(Exception):
def __init__(self, middleware_name):
message = ("Middleware `{}` has wrong count of arguments!"
.format(middleware_name))
super().__init__(message)
class Ware(object):
    def __init__(self, middlewares=None):
        # Avoid a shared mutable default and actually honor the argument.
        self.middlewares = list(middlewares) if middlewares else []
def get_names_for_middlewares(self):
return [name for name, _ in self.middlewares]
def add(self, middleware_name, middleware_callable):
if len((inspect.getfullargspec(middleware_callable)).args) != 1:
raise MiddlewareArgumentsError(middleware_name)
names_for_middlewares = self.get_names_for_middlewares()
if middleware_name in names_for_middlewares:
raise MiddlewareDuplicationError(middleware_name,
names_for_middlewares)
(self.middlewares).append((middleware_name, middleware_callable, ))
def remove(self, middleware_name):
names_for_middlewares = self.get_names_for_middlewares()
if middleware_name not in names_for_middlewares:
raise MiddlewareMissingError(middleware_name,
names_for_middlewares)
for i, (name, _) in enumerate(self.middlewares):
if name == middleware_name:
(self.middlewares).pop(i)
break
    def run(self, initial_context=None):
        context = initial_context if initial_context is not None else {}
for _, middleware_callable in self.middlewares:
context = middleware_callable(context)
return context
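# Editor's illustrative sketch (not part of the original module): chaining
# two middlewares; the middleware names and context keys below are made up
# for the example.
def _demo_ware():
    def greet(context):
        context['greeting'] = 'hello, ' + context['user']
        return context
    def shout(context):
        context['greeting'] = context['greeting'].upper()
        return context
    ware = Ware()
    ware.add('greet', greet)
    ware.add('shout', shout)
    return ware.run({'user': 'squirrel'})  # {'user': 'squirrel', 'greeting': 'HELLO, SQUIRREL'}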
|
mit
|
sam-tsai/django
|
django/core/management/commands/runserver.py
|
203
|
7383
|
from __future__ import unicode_literals
import errno
import os
import re
import socket
import sys
from datetime import datetime
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import get_internal_wsgi_application, run
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.exceptions import MigrationSchemaMissing
from django.db.migrations.executor import MigrationExecutor
from django.utils import autoreload, six
from django.utils.encoding import force_text, get_system_encoding
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
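# Editor's illustrative sketch (not part of Django): how naiveip_re splits an
# optional address from the mandatory port. The sample inputs are assumptions.
def _demo_naiveip_re():
    m = naiveip_re.match('127.0.0.1:8000')
    assert (m.group('addr'), m.group('port')) == ('127.0.0.1', '8000')
    m = naiveip_re.match('8000')           # bare port: no address captured
    assert (m.group('addr'), m.group('port')) == (None, '8000')
    m = naiveip_re.match('[::1]:8000')     # bracketed IPv6 address
    assert m.group('ipv6') == '[::1]'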
class Command(BaseCommand):
help = "Starts a lightweight Web server for development."
# Validation is called explicitly each time the server is reloaded.
requires_system_checks = False
leave_locale_alone = True
default_port = '8000'
def add_arguments(self, parser):
parser.add_argument('addrport', nargs='?',
help='Optional port number, or ipaddr:port')
parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use an IPv6 address.')
parser.add_argument('--nothreading', action='store_false', dest='use_threading', default=True,
help='Tells Django to NOT use threading.')
parser.add_argument('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.')
def execute(self, *args, **options):
if options.get('no_color'):
# We rely on the environment because it's currently the only
# way to reach WSGIRequestHandler. This seems an acceptable
# compromise considering `runserver` runs indefinitely.
os.environ[str("DJANGO_COLORS")] = str("nocolor")
super(Command, self).execute(*args, **options)
def get_handler(self, *args, **options):
"""
Returns the default WSGI handler for the runner.
"""
return get_internal_wsgi_application()
def handle(self, *args, **options):
from django.conf import settings
if not settings.DEBUG and not settings.ALLOWED_HOSTS:
raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.')
self.use_ipv6 = options.get('use_ipv6')
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError('Your Python does not support IPv6.')
self._raw_ipv6 = False
if not options.get('addrport'):
self.addr = ''
self.port = self.default_port
else:
m = re.match(naiveip_re, options['addrport'])
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % options['addrport'])
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." % self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
if not self.addr:
self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
self._raw_ipv6 = bool(self.use_ipv6)
self.run(**options)
def run(self, **options):
"""
Runs the server, using the autoreloader if needed
"""
use_reloader = options.get('use_reloader')
if use_reloader:
autoreload.main(self.inner_run, None, options)
else:
self.inner_run(None, **options)
def inner_run(self, *args, **options):
# If an exception was silenced in ManagementUtility.execute in order
# to be raised in the child process, raise it now.
autoreload.raise_last_exception()
threading = options.get('use_threading')
shutdown_message = options.get('shutdown_message', '')
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
self.stdout.write("Performing system checks...\n\n")
self.check(display_num_errors=True)
self.check_migrations()
now = datetime.now().strftime('%B %d, %Y - %X')
if six.PY2:
now = now.decode(get_system_encoding())
self.stdout.write(now)
self.stdout.write((
"Django version %(version)s, using settings %(settings)r\n"
"Starting development server at http://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"version": self.get_version(),
"settings": settings.SETTINGS_MODULE,
"addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
"port": self.port,
"quit_command": quit_command,
})
try:
handler = self.get_handler(*args, **options)
run(self.addr, int(self.port), handler,
ipv6=self.use_ipv6, threading=threading)
except socket.error as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
}
try:
error_text = ERRORS[e.errno]
except KeyError:
error_text = force_text(e)
self.stderr.write("Error: %s" % error_text)
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
self.stdout.write(shutdown_message)
sys.exit(0)
def check_migrations(self):
"""
Checks to see if the set of migrations on disk matches the
migrations in the database. Prints a warning if they don't match.
"""
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
except MigrationSchemaMissing:
self.stdout.write(self.style.NOTICE(
"\nNot checking migrations as it is not possible to access/create the django_migrations table."
))
return
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
self.stdout.write(self.style.NOTICE(
"\nYou have unapplied migrations; your app may not work properly until they are applied."
))
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
# Kept for backward compatibility
BaseRunserverCommand = Command
|
bsd-3-clause
|
nadavitay/linux-3.14.1
|
arch/ia64/scripts/unwcheck.py
|
13143
|
1714
|
#!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
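# Editor's illustrative sketch (not part of the kernel script): what the two
# patterns extract from "readelf -u" output. The sample lines are assumptions.
def _demo_patterns():
    m = start_pattern.match("<my_func>: [0x4000-0x4040]")
    name = m.group(1)                            # 'my_func'
    start, end = long(m.group(2), 16), long(m.group(3), 16)
    slots = 3 * (end - start) / 16               # 12 instruction slots
    r = rlen_pattern.match("    body region: rlen=12")
    return name, slots, long(r.group(1))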
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
|
gpl-2.0
|
ejekt/rigging-system
|
Modules/System/groupSelected.py
|
1
|
9019
|
import maya.cmds as mc
from functools import partial
import os
import System.utils as utils
class GroupSelected:
def __init__(self):
self.objectsToGroup = []
def showUI(self):
# build the grouping GUI
self.findSelectionToGroup()
if len(self.objectsToGroup) == 0:
return
self.dUiElements = {}
if mc.window('groupSelected_UI_window', exists=True):
mc.deleteUI('groupSelected_UI_window')
windowWidth = 300
windowHeight = 150
self.dUiElements['window'] = mc.window('groupSelected_UI_window',
w=windowWidth,
h=windowHeight,
t='Blueprint UI',
sizeable=False,)
self.dUiElements['topLevelColumn'] = mc.columnLayout(adj=True, columnAlign='center', rs=3)
self.dUiElements['groupName_rowColumn'] = mc.rowColumnLayout(nc=2, columnAttach=[1,'right',0], columnWidth=[(1,80), (2,windowWidth-90)])
mc.text(label='Group Name :')
self.dUiElements['groupName'] = mc.textField(text='group')
mc.setParent(self.dUiElements['topLevelColumn'])
self.dUiElements['createAt_rowColumn'] = mc.rowColumnLayout(nc=3, columnAttach=(1,'right',0), columnWidth=[(1,80),(2,windowWidth-170),(3,80)])
# row 1
mc.text(label='Position at :')
mc.text(label='')
mc.text(label='')
# row 2
mc.text(label='')
self.dUiElements['createAtBtn_lastSelected'] = mc.button(l='Last Selected', c=self.createAtLastSelected)
mc.text(label='')
# row 3
mc.text(label='')
self.dUiElements['createAveragePosBtn_lastSelected'] = mc.button(l='Average Position', c=self.createAtAveragePosition)
mc.text(label='')
mc.setParent(self.dUiElements['topLevelColumn'])
mc.separator()
# final row of buttons
columnWidth = (windowWidth/2) - 5
self.dUiElements['buttonRowLayout'] = mc.rowLayout(nc=2,
columnAttach=[(1,'both',10),(2,'both',10)],
columnWidth=[(1,columnWidth),(2,columnWidth)],
columnAlign=[(1,'center'),(2,'center')])
self.dUiElements['acceptBtn'] = mc.button(l='Accept', c=self.acceptWindow)
self.dUiElements['cancelBtn'] = mc.button(l='Cancel', c=self.cancelWindow)
mc.showWindow(self.dUiElements['window'])
self.createTempGroupRepresentation()
self.createAtLastSelected()
mc.select(self.tempGrpTransform, r=True)
mc.setToolTo('moveSuperContext')
def findSelectionToGroup(self):
# filters selection to only contain module transform controls
selectedObjects = mc.ls(sl=True, transforms=True)
self.objectsToGroup = []
for obj in selectedObjects:
valid = False
if obj.find('module_transform') != -1:
splitString = obj.rsplit('module_transform')
if splitString[1] == '':
valid = True
if valid == False and obj.find('Group__') == 0:
valid = True
if valid == True:
self.objectsToGroup.append(obj)
def createTempGroupRepresentation(self):
controlGrpFile = os.environ['RIGGING_TOOL_ROOT'] + '/ControlObjects/Blueprint/controlGroup_control.ma'
mc.file(controlGrpFile, i=True)
self.tempGrpTransform = mc.rename('controlGroup_control', 'Group__tempGroupTransform__')
mc.connectAttr(self.tempGrpTransform+'.sy', self.tempGrpTransform+'.sx')
mc.connectAttr(self.tempGrpTransform+'.sy', self.tempGrpTransform+'.sz')
for attr in ['sx','sz','v']:
mc.setAttr(self.tempGrpTransform+'.'+attr, l=True, k=False)
mc.aliasAttr('globalScale', self.tempGrpTransform+'.sy')
def createAtLastSelected(self, *args):
controlPos = mc.xform(self.objectsToGroup[-1], q=True, ws=True, t=True)
mc.xform(self.tempGrpTransform, ws=True, absolute=True, t=controlPos)
def createAtAveragePosition(self, *args):
controlPos = [0.0,0.0,0.0]
for obj in self.objectsToGroup:
objPos = mc.xform(obj, q=True, ws=True, absolute=True, t=True)
controlPos[0] += objPos[0]
controlPos[1] += objPos[1]
controlPos[2] += objPos[2]
numberOfObjects = len(self.objectsToGroup)
controlPos[0] /= numberOfObjects
controlPos[1] /= numberOfObjects
controlPos[2] /= numberOfObjects
mc.xform(self.tempGrpTransform, ws=True, absolute=True, t=controlPos)
def cancelWindow(self, *args):
mc.deleteUI(self.dUiElements['window'])
mc.delete(self.tempGrpTransform)
def acceptWindow(self, *args):
groupName = mc.textField(self.dUiElements['groupName'], q=True, text=True)
if self.createGroup(groupName) != None:
mc.deleteUI(self.dUiElements['window'])
def createGroup(self, sGroupName):
# check that group of that name doesn't exist yet
fullGroupName = 'Group__' + sGroupName
if mc.objExists(fullGroupName):
mc.confirmDialog(title='Name Conflict', m='Group \''+sGroupName+'\' already exists', button='Accept', db='Accept')
return None
# rename the tempGroup to the user specified name
groupTransform = mc.rename(self.tempGrpTransform, fullGroupName)
groupContainer = 'group_container'
if not mc.objExists(groupContainer):
mc.container(n=groupContainer)
containers = [groupContainer]
for obj in self.objectsToGroup:
if obj.find('Group__') == 0:
continue
objNamespace = utils.stripLeadingNamespace(obj)[0]
containers.append(objNamespace+':module_container')
for c in containers:
mc.lockNode(c, lock=False, lockUnpublished=False)
if len(self.objectsToGroup) != 0:
tempGroup = mc.group(self.objectsToGroup, absolute=True)
groupParent = mc.listRelatives(tempGroup, parent=True)
if groupParent:
mc.parent(groupTransform, groupParent[0], absolute=True)
mc.parent(self.objectsToGroup, groupTransform, absolute=True)
mc.delete(tempGroup)
self.addGroupToContainer(groupTransform)
for c in containers:
mc.lockNode(c, lock=True, lockUnpublished=True)
mc.setToolTo('moveSuperContext')
mc.select(groupTransform, r=True)
return groupTransform
def addGroupToContainer(self, sGroup):
groupContainer = 'group_container'
utils.addNodeToContainer(groupContainer, sGroup, includeShapes=True)
groupName = sGroup.rpartition('Group__')[2]
mc.container(groupContainer, e=True, publishAndBind=[sGroup+'.t', groupName+'_T'])
mc.container(groupContainer, e=True, publishAndBind=[sGroup+'.r', groupName+'_R'])
mc.container(groupContainer, e=True, publishAndBind=[sGroup+'.globalScale', groupName+'_globalScale'])
def createGroupAtSpecified(self, sName, sTargetGroup, sParent):
self.createTempGroupRepresentation()
pCon = mc.parentConstraint(sTargetGroup, self.tempGrpTransform , mo=False)[0]
mc.delete(pCon)
scale = mc.getAttr(sTargetGroup+'.globalScale')
mc.setAttr(self.tempGrpTransform +'.globalScale', scale)
if sParent:
mc.parent(self.tempGrpTransform , sParent, absolute=True)
newGroup = self.createGroup(sName)
return newGroup
###-------------------------------------------------------------------------------------------
### UNGROUPED SELECTED CLASS
class UngroupSelected:
def __init__(self):
selectedObjects = mc.ls(sl=True, transforms=True)
filteredGroups = []
for obj in selectedObjects:
if obj.find('Group__') == 0:
filteredGroups.append(obj)
# no group selected, just exit
if len(filteredGroups) == 0:
return
groupContainer = 'group_container'
# find any modules nested under the selected group
modules = []
for group in filteredGroups:
modules.extend(self.findChildModules(group))
# gather all module containers
moduleContainers = [groupContainer]
for module in modules:
moduleContainer = module + ':module_container'
moduleContainers.append(moduleContainer)
# unlock each container
for container in moduleContainers:
mc.lockNode(container, l=False, lockUnpublished=False)
# ungroup
for group in filteredGroups:
numChildren = len(mc.listRelatives(group, children=True))
if numChildren > 1:
mc.ungroup(group, absolute=True)
for attr in ['t','r','globalScale']:
mc.container(groupContainer, e=True, unbindAndUnpublish=group+'.'+attr)
parentGroup = mc.listRelatives(group, parent=True)
mc.delete(group)
# for the case that a group is left empty
if parentGroup != None:
parentGroup = parentGroup[0]
children = mc.listRelatives(parentGroup, children=True)
children = mc.ls(children, transforms=True)
if len(children) == 0:
mc.select(parentGroup, r=True)
UngroupSelected()
# lock the container
for container in moduleContainers:
if mc.objExists(container):
mc.lockNode(container, l=True, lockUnpublished=True)
def findChildModules(self, sGroup):
modules = []
children = mc.listRelatives(sGroup, children = True)
if children != None:
for child in children:
moduleNamespaceInfo = utils.stripLeadingNamespace(child)
if moduleNamespaceInfo:
modules.append(moduleNamespaceInfo[0])
elif child.find('Group__') != -1:
modules.extend(self.findChildModules(child))
return modules
|
mit
|
Hillshum/gPodder-tagging
|
src/gpodder/gtkui/desktop/podcastdirectory.py
|
2
|
6986
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2010 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import gtk
import pango
import urllib
import threading
import gpodder
_ = gpodder.gettext
from gpodder import util
from gpodder import opml
from gpodder import youtube
from gpodder.gtkui.opml import OpmlListModel
from gpodder.gtkui.interface.common import BuilderWidget
class gPodderPodcastDirectory(BuilderWidget):
finger_friendly_widgets = ['btnDownloadOpml', 'btnCancel', 'btnOK', 'treeviewChannelChooser']
def new(self):
if hasattr(self, 'custom_title'):
self.gPodderPodcastDirectory.set_title(self.custom_title)
if hasattr(self, 'hide_url_entry'):
self.hboxOpmlUrlEntry.hide_all()
new_parent = self.notebookChannelAdder.get_parent()
new_parent.remove(self.notebookChannelAdder)
self.vboxOpmlImport.reparent(new_parent)
if not hasattr(self, 'add_urls_callback'):
self.add_urls_callback = None
self.setup_treeview(self.treeviewChannelChooser)
self.setup_treeview(self.treeviewTopPodcastsChooser)
self.setup_treeview(self.treeviewYouTubeChooser)
self.notebookChannelAdder.connect('switch-page', lambda a, b, c: self.on_change_tab(c))
def setup_treeview(self, tv):
togglecell = gtk.CellRendererToggle()
togglecell.set_property( 'activatable', True)
togglecell.connect( 'toggled', self.callback_edited)
togglecolumn = gtk.TreeViewColumn( '', togglecell, active=OpmlListModel.C_SELECTED)
titlecell = gtk.CellRendererText()
titlecell.set_property('ellipsize', pango.ELLIPSIZE_END)
titlecolumn = gtk.TreeViewColumn(_('Podcast'), titlecell, markup=OpmlListModel.C_DESCRIPTION_MARKUP)
for itemcolumn in (togglecolumn, titlecolumn):
tv.append_column(itemcolumn)
def callback_edited( self, cell, path):
model = self.get_treeview().get_model()
model[path][OpmlListModel.C_SELECTED] = not model[path][OpmlListModel.C_SELECTED]
self.btnOK.set_sensitive(bool(len(self.get_selected_channels())))
def get_selected_channels(self, tab=None):
channels = []
model = self.get_treeview(tab).get_model()
if model is not None:
for row in model:
if row[OpmlListModel.C_SELECTED]:
channels.append(row[OpmlListModel.C_URL])
return channels
def on_change_tab(self, tab):
self.btnOK.set_sensitive( bool(len(self.get_selected_channels(tab))))
def thread_finished(self, model, tab=0):
if tab == 1:
tv = self.treeviewTopPodcastsChooser
elif tab == 2:
tv = self.treeviewYouTubeChooser
self.entryYoutubeSearch.set_sensitive(True)
self.btnSearchYouTube.set_sensitive(True)
self.btnOK.set_sensitive(False)
else:
tv = self.treeviewChannelChooser
self.btnDownloadOpml.set_sensitive(True)
self.entryURL.set_sensitive(True)
tv.set_model(model)
tv.set_sensitive(True)
def thread_func(self, tab=0):
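# tab indices mirror the notebook pages: 0 = OPML URL import,
# 1 = top podcasts list, 2 = YouTube channel search.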
if tab == 1:
model = OpmlListModel(opml.Importer(self._config.toplist_url))
if len(model) == 0:
self.notification(_('The specified URL does not provide any valid OPML podcast items.'), _('No feeds found'))
elif tab == 2:
model = OpmlListModel(youtube.find_youtube_channels(self.entryYoutubeSearch.get_text()))
if len(model) == 0:
self.notification(_('There are no YouTube channels that would match this query.'), _('No channels found'))
else:
url = self.entryURL.get_text()
model = OpmlListModel(opml.Importer(url))
if len(model) == 0:
self.notification(_('The specified URL does not provide any valid OPML podcast items.'), _('No feeds found'))
util.idle_add(self.thread_finished, model, tab)
def download_opml_file(self, url):
self.entryURL.set_text(url)
self.btnDownloadOpml.set_sensitive(False)
self.entryURL.set_sensitive(False)
self.btnOK.set_sensitive(False)
self.treeviewChannelChooser.set_sensitive(False)
threading.Thread(target=self.thread_func).start()
threading.Thread(target=lambda: self.thread_func(1)).start()
def select_all( self, value ):
enabled = False
model = self.get_treeview().get_model()
if model is not None:
for row in model:
row[OpmlListModel.C_SELECTED] = value
if value:
enabled = True
self.btnOK.set_sensitive(enabled)
def on_gPodderPodcastDirectory_destroy(self, widget, *args):
pass
def on_btnDownloadOpml_clicked(self, widget, *args):
self.download_opml_file(self.entryURL.get_text())
def on_btnSearchYouTube_clicked(self, widget, *args):
self.entryYoutubeSearch.set_sensitive(False)
self.treeviewYouTubeChooser.set_sensitive(False)
self.btnSearchYouTube.set_sensitive(False)
threading.Thread(target = lambda: self.thread_func(2)).start()
def on_btnSelectAll_clicked(self, widget, *args):
self.select_all(True)
def on_btnSelectNone_clicked(self, widget, *args):
self.select_all(False)
def on_btnOK_clicked(self, widget, *args):
channel_urls = self.get_selected_channels()
self.gPodderPodcastDirectory.destroy()
# add channels that have been selected
if self.add_urls_callback is not None:
self.add_urls_callback(channel_urls)
def on_btnCancel_clicked(self, widget, *args):
self.gPodderPodcastDirectory.destroy()
def on_entryYoutubeSearch_key_press_event(self, widget, event):
if event.keyval == gtk.keysyms.Return:
self.on_btnSearchYouTube_clicked(widget)
def get_treeview(self, tab=None):
if tab is None:
tab = self.notebookChannelAdder.get_current_page()
if tab == 0:
return self.treeviewChannelChooser
elif tab == 1:
return self.treeviewTopPodcastsChooser
else:
return self.treeviewYouTubeChooser
|
gpl-3.0
|
tersmitten/ansible
|
contrib/inventory/libvirt_lxc.py
|
196
|
1357
|
#!/usr/bin/env python
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen, PIPE
import sys
import json
result = {}
result['all'] = {}
pipe = Popen(['virsh', '-q', '-c', 'lxc:///', 'list', '--name', '--all'], stdout=PIPE, universal_newlines=True)
result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()]
result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'libvirt_lxc'
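# Illustrative --list output (assuming, for illustration, two containers
# named c1 and c2 are defined in libvirt):
# {"all": {"hosts": ["c1", "c2"], "vars": {"ansible_connection": "libvirt_lxc"}}}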
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print(json.dumps({'ansible_connection': 'libvirt_lxc'}))
else:
sys.stderr.write("Need an argument, either --list or --host <host>\n")
|
gpl-3.0
|
ai-se/parGALE
|
algorithms/serial/gale/gale.py
|
1
|
6295
|
from __future__ import print_function, division
import sys, os
sys.path.append(os.path.abspath("."))
from utils.lib import *
from algorithms.serial.algorithm import Algorithm
from where import Node, sqrt
__author__ = 'panzer'
def default_settings():
"""
Default settings for GALE
:return: default settings
"""
return O(
pop_size = 100,
gens = 50,
allowDomination = True,
gamma = 0.15
)
class GALE(Algorithm):
count = 0
unsatisfied = 0
"""
.. [Krall2015] Krall, Menzies et al., "
GALE: Geometric Active Learning for Search-Based Software Engineering"
Check References folder for the paper
"""
def __init__(self, problem, **settings):
"""
Initialize GALE algorithm
:param problem: Instance of the problem
:param gens: Max number of generations
"""
Algorithm.__init__(self, GALE.__name__, problem)
self.select = self._select
self.evolve = self._evolve
self.recombine = self._recombine
self.settings = default_settings().update(**settings)
def run(self, init_pop=None):
if init_pop is None:
init_pop = self.problem.populate(self.settings.pop_size)
population = Node.format(init_pop)
best_solutions = []
gen = 0
while gen < self.settings.gens:
say(".")
total_evals = 0
# SELECTION
selectees, evals = self.select(population)
solutions, evals = self.get_best(selectees)
best_solutions += solutions
total_evals += evals
# EVOLUTION
selectees, evals = self.evolve(selectees)
total_evals += evals
population, evals = self.recombine(selectees, self.settings.pop_size)
total_evals += evals
gen += 1
print("")
return best_solutions
def get_best(self, non_dom_leaves):
"""
Return the best row from all the
non dominated leaves
:param non_dom_leaves:
:return:
"""
bests = []
evals = 0
for leaf in non_dom_leaves:
east = leaf._pop[0]
west = leaf._pop[-1]
if not east.evaluated:
east.evaluate(self.problem)
evals += 1
if not west.evaluated:
west.evaluate(self.problem)
evals += 1
weights = self.problem.directional_weights()
weighted_west = [c*w for c,w in zip(west.objectives, weights)]
weighted_east = [c*w for c,w in zip(east.objectives, weights)]
objs = self.problem.objectives
west_loss = Algorithm.dominates_continuous(weighted_west,
weighted_east,
mins=[o.low for o in objs],
maxs=[o.high for o in objs])
east_loss = Algorithm.dominates_continuous(weighted_east,
weighted_west,
mins=[o.low for o in objs],
maxs=[o.high for o in objs])
if east_loss < west_loss:
bests.append(east)
else:
bests.append(west)
return bests, evals
def _select(self, pop):
node = Node(self.problem, pop, self.settings.pop_size).divide(sqrt(pop))
non_dom_leafs = node.nonpruned_leaves()
all_leafs = node.leaves()
# Counting number of evals
evals = 0
for leaf in all_leafs:
for row in leaf._pop:
if row.evaluated:
evals+=1
return non_dom_leafs, evals
def _evolve(self, selected):
evals = 0
GAMMA = self.settings.gamma
for leaf in selected:
#Poles
east = leaf._pop[0]
west = leaf._pop[-1]
# Evaluate poles if required
if not east.evaluated:
east.evaluate(self.problem)
evals += 1
if not west.evaluated:
west.evaluate(self.problem)
evals += 1
weights = self.problem.directional_weights()
weighted_west = [c*w for c,w in zip(west.objectives, weights)]
weighted_east = [c*w for c,w in zip(east.objectives, weights)]
objs = self.problem.objectives
west_loss = Algorithm.dominates_continuous(weighted_west,
weighted_east,
mins=[o.low for o in objs],
maxs=[o.high for o in objs])
east_loss = Algorithm.dominates_continuous(weighted_east,
weighted_west,
mins=[o.low for o in objs],
maxs=[o.high for o in objs])
# Determine better Pole
if east_loss < west_loss:
south_pole,north_pole = east,west
else:
south_pole,north_pole = west,east
# Magnitude of the mutations
g = abs(south_pole.x - north_pole.x)
for row in leaf._pop:
clone = row.clone()
clone_x = row.x
for dec_index in range(len(self.problem.decisions)):
# A few naming shorthands
me = row.decisions[dec_index]
good = south_pole.decisions[dec_index]
bad = north_pole.decisions[dec_index]
dec = self.problem.decisions[dec_index]
if me > good: d = -1
elif me < good: d = +1
else : d = 0
# Mutating towards the better solution
row.decisions[dec_index] = min(dec.high, max(dec.low, me + me * g * d))
# Project the mutant
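# This is the Fastmap-style cosine-rule projection: with distances a, b
# from the row to the two poles and pole separation c, the position
# along the pole axis is x = (a^2 + c^2 - b^2) / (2c); the 0.00001 in
# the denominator below guards against division by zero when the poles
# coincide.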
a = row.dist(self.problem, north_pole, is_obj=False)
b = row.dist(self.problem, south_pole, is_obj=False)
x = (a**2 + row.c**2 - b**2) / (2*row.c+0.00001)
row.x = x
GALE.count += 1
if abs(x - clone_x) > (g * GAMMA) or not self.problem.check_constraints(row.decisions):
GALE.unsatisfied += 1
row.decisions = clone.decisions
row.x = clone_x
pop = []
for leaf in selected:
for row in leaf._pop:
if row.evaluated:
row.evaluate(self.problem) # Re-evaluating
pop.append(row)
return pop, evals
def _recombine(self, mutants, total_size):
remaining = total_size - len(mutants)
pop = []
for _ in range(remaining):
pop.append(self.problem.generate())
return mutants + Node.format(pop), 0
def _test():
from problems.feature_models.webportal import WebPortal
import time
o = WebPortal()
gale = GALE(o)
start = time.time()
gale.run()
print(time.time() - start)
print(GALE.count, GALE.unsatisfied)
if __name__ == "__main__":
_test()
|
unlicense
|
alxgu/ansible
|
lib/ansible/modules/cloud/ovirt/ovirt_template_facts.py
|
55
|
3558
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_template_facts
short_description: Retrieve facts about one or more oVirt/RHV templates
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV templates."
notes:
- "This module creates a new top-level C(ovirt_templates) fact, which
contains a list of templates."
options:
pattern:
description:
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search template X from datacenter Y use following pattern:
name=X and datacenter=Y"
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all templates which names start with C(centos) and
# belongs to data center C(west):
- ovirt_template_facts:
pattern: name=centos* and datacenter=west
- debug:
var: ovirt_templates
'''
RETURN = '''
ovirt_templates:
description: "List of dictionaries describing the templates. Template attributes are mapped to dictionary keys,
all template attributes can be found at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
pattern=dict(default='', required=False),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
templates_service = connection.system_service().templates_service()
templates = templates_service.list(search=module.params['pattern'])
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_templates=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in templates
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
|
gpl-3.0
|
Serag8/Bachelor
|
google_appengine/lib/django-1.4/django/template/loaders/eggs.py
|
103
|
1038
|
# Wrapper for loading templates from eggs via pkg_resources.resource_string.
try:
from pkg_resources import resource_string
except ImportError:
resource_string = None
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.conf import settings
class Loader(BaseLoader):
is_usable = resource_string is not None
def load_template_source(self, template_name, template_dirs=None):
"""
Loads templates from Python eggs via pkg_resource.resource_string.
For every installed app, it tries to get the resource (app, template_name).
"""
if resource_string is not None:
pkg_name = 'templates/' + template_name
for app in settings.INSTALLED_APPS:
try:
return (resource_string(app, pkg_name).decode(settings.FILE_CHARSET), 'egg:%s:%s' % (app, pkg_name))
except:
pass
raise TemplateDoesNotExist(template_name)
_loader = Loader()
|
mit
|
yksalun/hugula
|
Client/tools/site-packages/pyExcelerator/Bitmap.py
|
25
|
12183
|
#!/usr/bin/env python
# -*- coding: windows-1251 -*-
# Copyright (C) 2005 Roman V. Kiseliov
# Portions are Copyright (c) 2004 Evgeny Filatov <fufff@users.sourceforge.net>
# Portions are Copyright (c) 2002-2004 John McNamara (Perl Spreadsheet::WriteExcel)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. All advertising materials mentioning features or use of this
# software must display the following acknowledgment:
# "This product includes software developed by
# Roman V. Kiseliov <roman@kiseliov.ru>."
#
# 4. Redistributions of any form whatsoever must retain the following
# acknowledgment:
# "This product includes software developed by
# Roman V. Kiseliov <roman@kiseliov.ru>."
#
# THIS SOFTWARE IS PROVIDED BY Roman V. Kiseliov ``AS IS'' AND ANY
# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Roman V. Kiseliov OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
__rev_id__ = """$Id: Bitmap.py,v 1.4 2005/07/20 07:24:11 rvk Exp $"""
from BIFFRecords import BiffRecord
from struct import *
def _size_col(sheet, col):
return sheet.col_width(col)
def _size_row(sheet, row):
return sheet.row_height(row)
def _position_image(sheet, row_start, col_start, x1, y1, width, height):
"""Calculate the vertices that define the position of the image as required by
the OBJ record.
+------------+------------+
| A | B |
+-----+------------+------------+
| |(x1,y1) | |
| 1 |(A1)._______|______ |
| | | | |
| | | | |
+-----+----| BITMAP |-----+
| | | | |
| 2 | |______________. |
| | | (B2)|
| | | (x2,y2)|
+---- +------------+------------+
Example of a bitmap that covers some of the area from cell A1 to cell B2.
Based on the width and height of the bitmap we need to calculate 8 vars:
col_start, row_start, col_end, row_end, x1, y1, x2, y2.
The width and height of the cells are also variable and have to be taken into
account.
The values of col_start and row_start are passed in from the calling
function. The values of col_end and row_end are calculated by subtracting
the width and height of the bitmap from the width and height of the
underlying cells.
The vertices are expressed as a percentage of the underlying cell width as
follows (rhs values are in pixels):
x1 = X / W *1024
y1 = Y / H *256
x2 = (X-1) / W *1024
y2 = (Y-1) / H *256
Where: X is distance from the left side of the underlying cell
Y is distance from the top of the underlying cell
W is the width of the cell
H is the height of the cell
Note: the SDK incorrectly states that the height should be expressed as a
percentage of 1024.
col_start - Col containing upper left corner of object
row_start - Row containing top left corner of object
x1 - Distance to left side of object
y1 - Distance to top of object
width - Width of image frame
height - Height of image frame
"""
# Adjust start column for offsets that are greater than the col width
while x1 >= _size_col(sheet, col_start):
x1 -= _size_col(sheet, col_start)
col_start += 1
# Adjust start row for offsets that are greater than the row height
while y1 >= _size_row(sheet, row_start):
y1 -= _size_row(sheet, row_start)
row_start += 1
# Initialise end cell to the same as the start cell
row_end = row_start # Row containing bottom right corner of object
col_end = col_start # Col containing lower right corner of object
width = width + x1 - 1
height = height + y1 - 1
# Subtract the underlying cell widths to find the end cell of the image
while (width >= _size_col(sheet, col_end)):
width -= _size_col(sheet, col_end)
col_end += 1
# Subtract the underlying cell heights to find the end cell of the image
while (height >= _size_row(sheet, row_end)):
height -= _size_row(sheet, row_end)
row_end += 1
# Bitmap isn't allowed to start or finish in a hidden cell, i.e. a cell
# with zero height or width.
if ((_size_col(sheet, col_start) == 0) or (_size_col(sheet, col_end) == 0)
or (_size_row(sheet, row_start) == 0) or (_size_row(sheet, row_end) == 0)):
return
# Convert the pixel values to the percentage value expected by Excel
x1 = float(x1) / _size_col(sheet, col_start) * 1024
y1 = float(y1) / _size_row(sheet, row_start) * 256
# Distance to right side of object
x2 = float(width) / _size_col(sheet, col_end) * 1024
# Distance to bottom of object
y2 = float(height) / _size_row(sheet, row_end) * 256
return (col_start, x1, row_start, y1, col_end, x2, row_end, y2)
class ObjBmpRecord(BiffRecord):
_REC_ID = 0x005D # Record identifier
def __init__(self, row, col, sheet, im_data_bmp, x, y, scale_x, scale_y):
# Scale the frame of the image.
width = im_data_bmp.width * scale_x
height = im_data_bmp.height * scale_y
# Calculate the vertices of the image and write the OBJ record
col_start, x1, row_start, y1, col_end, x2, row_end, y2 = _position_image(sheet, row, col, x, y, width, height)
"""Store the OBJ record that precedes an IMDATA record. This could be generalise
to support other Excel objects.
"""
cObj = 0x0001 # Count of objects in file (set to 1)
OT = 0x0008 # Object type. 8 = Picture
id = 0x0001 # Object ID
grbit = 0x0614 # Option flags
colL = col_start # Col containing upper left corner of object
dxL = x1 # Distance from left side of cell
rwT = row_start # Row containing top left corner of object
dyT = y1 # Distance from top of cell
colR = col_end # Col containing lower right corner of object
dxR = x2 # Distance from right of cell
rwB = row_end # Row containing bottom right corner of object
dyB = y2 # Distance from bottom of cell
cbMacro = 0x0000 # Length of FMLA structure
Reserved1 = 0x0000 # Reserved
Reserved2 = 0x0000 # Reserved
icvBack = 0x09 # Background colour
icvFore = 0x09 # Foreground colour
fls = 0x00 # Fill pattern
fAuto = 0x00 # Automatic fill
icv = 0x08 # Line colour
lns = 0xff # Line style
lnw = 0x01 # Line weight
fAutoB = 0x00 # Automatic border
frs = 0x0000 # Frame style
cf = 0x0009 # Image format, 9 = bitmap
Reserved3 = 0x0000 # Reserved
cbPictFmla = 0x0000 # Length of FMLA structure
Reserved4 = 0x0000 # Reserved
grbit2 = 0x0001 # Option flags
Reserved5 = 0x0000 # Reserved
data = pack("<L", cObj)
data += pack("<H", OT)
data += pack("<H", id)
data += pack("<H", grbit)
data += pack("<H", colL)
data += pack("<H", dxL)
data += pack("<H", rwT)
data += pack("<H", dyT)
data += pack("<H", colR)
data += pack("<H", dxR)
data += pack("<H", rwB)
data += pack("<H", dyB)
data += pack("<H", cbMacro)
data += pack("<L", Reserved1)
data += pack("<H", Reserved2)
data += pack("<B", icvBack)
data += pack("<B", icvFore)
data += pack("<B", fls)
data += pack("<B", fAuto)
data += pack("<B", icv)
data += pack("<B", lns)
data += pack("<B", lnw)
data += pack("<B", fAutoB)
data += pack("<H", frs)
data += pack("<L", cf)
data += pack("<H", Reserved3)
data += pack("<H", cbPictFmla)
data += pack("<H", Reserved4)
data += pack("<H", grbit2)
data += pack("<L", Reserved5)
self._rec_data = data
def _process_bitmap(bitmap):
"""Convert a 24 bit bitmap into the modified internal format used by Windows.
This is described in BITMAPCOREHEADER and BITMAPCOREINFO structures in the
MSDN library.
"""
# Open file and binmode the data in case the platform needs it.
fh = file(bitmap, "rb")
try:
# Slurp the file into a string.
data = fh.read()
finally:
fh.close()
# Check that the file is big enough to be a bitmap.
if len(data) <= 0x36:
raise Exception("bitmap doesn't contain enough data.")
# The first 2 bytes are used to identify the bitmap.
if (data[:2] != "BM"):
raise Exception("bitmap doesn't appear to to be a valid bitmap image.")
# Remove bitmap data: ID.
data = data[2:]
# Read and remove the bitmap size. This is more reliable than reading
# the data size at offset 0x22.
#
size = unpack("<L", data[:4])[0]
size -= 0x36 # Subtract size of bitmap header.
size += 0x0C # Add size of BIFF header.
data = data[4:]
# Remove bitmap data: reserved, offset, header length.
data = data[12:]
# Read and remove the bitmap width and height. Verify the sizes.
width, height = unpack("<LL", data[:8])
data = data[8:]
if (width > 0xFFFF):
raise Exception("bitmap: largest image width supported is 65k.")
if (height > 0xFFFF):
raise Exception("bitmap: largest image height supported is 65k.")
# Read and remove the bitmap planes and bpp data. Verify them.
planes, bitcount = unpack("<HH", data[:4])
data = data[4:]
if (bitcount != 24):
raise Exception("bitmap isn't a 24bit true color bitmap.")
if (planes != 1):
raise Exception("bitmap: only 1 plane supported in bitmap image.")
# Read and remove the bitmap compression. Verify compression.
compression = unpack("<L", data[:4])[0]
data = data[4:]
if (compression != 0):
raise Exception("bitmap: compression not supported in bitmap image.")
# Remove bitmap data: data size, hres, vres, colours, imp. colours.
data = data[20:]
# Add the BITMAPCOREHEADER data
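# The header packed below is the 12-byte little-endian BITMAPCOREHEADER:
#   <L bcSize = 0x0C (header length), <H bcWidth, <H bcHeight,
#   <H bcPlanes = 1, <H bcBitCount = 0x18 (24 bits per pixel)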
header = pack("<LHHHH", 0x000c, width, height, 0x01, 0x18)
data = header + data
return (width, height, size, data)
class ImDataBmpRecord(BiffRecord):
_REC_ID = 0x007F
def __init__(self, filename):
"""Insert a 24bit bitmap image in a worksheet. The main record required is
IMDATA but it must be preceded by an OBJ record to define its position.
"""
BiffRecord.__init__(self)
self.width, self.height, self.size, data = _process_bitmap(filename)
# Write the IMDATA record to store the bitmap data
cf = 0x09
env = 0x01
lcb = self.size
self._rec_data = pack("<HHL", cf, env, lcb) + data
|
mit
|
nuxeh/keystone
|
keystone/tests/unit/test_ldap_tls_livetest.py
|
5
|
4398
|
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ldap
import ldap.modlist
from oslo_config import cfg
from keystone import exception
from keystone import identity
from keystone.tests import unit as tests
from keystone.tests.unit import test_ldap_livetest
CONF = cfg.CONF
def create_object(dn, attrs):
conn = ldap.initialize(CONF.ldap.url)
conn.simple_bind_s(CONF.ldap.user, CONF.ldap.password)
ldif = ldap.modlist.addModlist(attrs)
conn.add_s(dn, ldif)
conn.unbind_s()
class LiveTLSLDAPIdentity(test_ldap_livetest.LiveLDAPIdentity):
def _ldap_skip_live(self):
self.skip_if_env_not_set('ENABLE_TLS_LDAP_LIVE_TEST')
def config_files(self):
config_files = super(LiveTLSLDAPIdentity, self).config_files()
config_files.append(tests.dirs.tests_conf('backend_tls_liveldap.conf'))
return config_files
def config_overrides(self):
super(LiveTLSLDAPIdentity, self).config_overrides()
self.config_fixture.config(
group='identity',
driver='keystone.identity.backends.ldap.Identity')
def test_tls_certfile_demand_option(self):
self.config_fixture.config(group='ldap',
use_tls=True,
tls_cacertdir=None,
tls_req_cert='demand')
self.identity_api = identity.backends.ldap.Identity()
user = {'name': 'fake1',
'password': 'fakepass1',
'tenants': ['bar']}
user = self.identity_api.create_user(user)
user_ref = self.identity_api.get_user(user['id'])
self.assertEqual(user['id'], user_ref['id'])
user['password'] = 'fakepass2'
self.identity_api.update_user(user['id'], user)
self.identity_api.delete_user(user['id'])
self.assertRaises(exception.UserNotFound, self.identity_api.get_user,
user['id'])
def test_tls_certdir_demand_option(self):
self.config_fixture.config(group='ldap',
use_tls=True,
tls_cacertdir=None,
tls_req_cert='demand')
self.identity_api = identity.backends.ldap.Identity()
user = {'id': 'fake1',
'name': 'fake1',
'password': 'fakepass1',
'tenants': ['bar']}
self.identity_api.create_user('fake1', user)
user_ref = self.identity_api.get_user('fake1')
self.assertEqual('fake1', user_ref['id'])
user['password'] = 'fakepass2'
self.identity_api.update_user('fake1', user)
self.identity_api.delete_user('fake1')
self.assertRaises(exception.UserNotFound, self.identity_api.get_user,
'fake1')
def test_tls_bad_certfile(self):
self.config_fixture.config(
group='ldap',
use_tls=True,
tls_req_cert='demand',
tls_cacertfile='/etc/keystone/ssl/certs/mythicalcert.pem',
tls_cacertdir=None)
self.identity_api = identity.backends.ldap.Identity()
user = {'name': 'fake1',
'password': 'fakepass1',
'tenants': ['bar']}
self.assertRaises(IOError, self.identity_api.create_user, user)
def test_tls_bad_certdir(self):
self.config_fixture.config(
group='ldap',
use_tls=True,
tls_cacertfile=None,
tls_req_cert='demand',
tls_cacertdir='/etc/keystone/ssl/mythicalcertdir')
self.identity_api = identity.backends.ldap.Identity()
user = {'name': 'fake1',
'password': 'fakepass1',
'tenants': ['bar']}
self.assertRaises(IOError, self.identity_api.create_user, user)
|
apache-2.0
|
vmax-feihu/hue
|
desktop/core/ext-py/Django-1.6.10/extras/csrf_migration_helper.py
|
59
|
12998
|
#!/usr/bin/env python
# This script aims to help developers locate forms and view code that needs to
# use the new CSRF protection in Django 1.2. It tries to find all the code that
# may need the steps described in the CSRF documentation. It does not modify
# any code directly, it merely attempts to locate it. Developers should be
# aware of its limitations, described below.
#
# For each template that contains at least one POST form, the following info is printed:
#
# <Absolute path to template>
# AKA: <Aliases (relative to template directory/directories that contain it)>
# POST forms: <Number of POST forms>
# With token: <Number of POST forms with the CSRF token already added>
# Without token:
# <File name and line number of form without token>
#
# Searching for:
# <Template names that need to be searched for in view code
# (includes templates that 'include' current template)>
#
# Found:
# <File name and line number of any view code found>
#
# The format used allows this script to be used in Emacs grep mode:
# M-x grep
# Run grep (like this): /path/to/my/virtualenv/python /path/to/django/src/extras/csrf_migration_helper.py --settings=mysettings /path/to/my/srcs
# Limitations
# ===========
#
# - All templates must be stored on disk in '.html' or '.htm' files.
# (extensions configurable below)
#
# - All Python code must be stored on disk in '.py' files. (extensions
# configurable below)
#
# - All templates must be accessible from TEMPLATE_DIRS or from the 'templates/'
# directory in apps specified in INSTALLED_APPS. Non-file based template
# loaders are out of the picture, because there is no way to ask them to
# return all templates.
#
# - It's impossible to programmatically determine which forms should and should
# not have the token added. The developer must decide when to do this,
# ensuring that the token is only added to internally targeted forms.
#
# - It's impossible to programmatically work out when a template is used. The
# attempts to trace back to view functions are guesses, and could easily fail
# in the following ways:
#
# * If the 'include' template tag is used with a variable
# i.e. {% include tname %} where tname is a variable containing the actual
# template name, rather than {% include "my_template.html" %}.
#
# * If the template name has been built up by view code instead of as a simple
# string. For example, generic views and the admin both do this. (These
# apps are both contrib and both use RequestContext already, as it happens).
#
# * If the 'ssl' tag (or any template tag other than 'include') is used to
# include the template in another template.
#
# - All templates belonging to apps referenced in INSTALLED_APPS will be
# searched, which may include third party apps or Django contrib. In some
# cases, this will be a good thing, because even if the templates of these
# apps have been fixed by someone else, your own view code may reference the
# same template and may need to be updated.
#
# You may, however, wish to comment out some entries in INSTALLED_APPS or
# TEMPLATE_DIRS before running this script.
# Improvements to this script are welcome!
# Configuration
# =============
TEMPLATE_EXTENSIONS = [
".html",
".htm",
]
PYTHON_SOURCE_EXTENSIONS = [
".py",
]
TEMPLATE_ENCODING = "UTF-8"
PYTHON_ENCODING = "UTF-8"
# Method
# ======
# Find templates:
# - template dirs
# - installed apps
#
# Search for POST forms
# - Work out what the name of the template is, as it would appear in an
# 'include' or get_template() call. This can be done by comparing template
# filename to all template dirs. Some templates can have more than one
# 'name' e.g. if a directory and one of its child directories are both in
# TEMPLATE_DIRS. This is actually a common hack used for
# overriding-and-extending admin templates.
#
# For each POST form,
# - see if it already contains '{% csrf_token %}' immediately after <form>
# - work back to the view function(s):
# - First, see if the form is included in any other templates, then
# recursively compile a list of affected templates.
# - Find any code function that references that template. This is just a
# brute force text search that can easily return false positives
# and fail to find real instances.
import os
import sys
import re
from optparse import OptionParser
USAGE = """
This tool helps to locate forms that need CSRF tokens added and the
corresponding view code. This processing is NOT fool proof, and you should read
the help contained in the script itself. Also, this script may need configuring
(by editing the script) before use.
Usage:
python csrf_migration_helper.py [--settings=path.to.your.settings] /path/to/python/code [more paths...]
Paths can be specified as relative paths.
With no arguments, this help is printed.
"""
_POST_FORM_RE = \
re.compile(r'(<form\W[^>]*\bmethod\s*=\s*(\'|"|)POST(\'|"|)\b[^>]*>)', re.IGNORECASE)
_FORM_CLOSE_RE = re.compile(r'</form\s*>')
_TOKEN_RE = re.compile('\{% csrf_token')
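# Illustrative behaviour of the patterns above (not executed here):
# _POST_FORM_RE.search('<form action="/x" method="POST">') matches the
# opening tag (case-insensitively, with or without quotes around POST),
# _TOKEN_RE.search("{% csrf_token %}") matches the token tag, and
# _FORM_CLOSE_RE.search('</form >') matches the closing tag.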
def get_template_dirs():
"""
Returns a set of all directories that contain project templates.
"""
from django.conf import settings
dirs = set()
if ('django.template.loaders.filesystem.load_template_source' in settings.TEMPLATE_LOADERS
or 'django.template.loaders.filesystem.Loader' in settings.TEMPLATE_LOADERS):
dirs.update(map(unicode, settings.TEMPLATE_DIRS))
if ('django.template.loaders.app_directories.load_template_source' in settings.TEMPLATE_LOADERS
or 'django.template.loaders.app_directories.Loader' in settings.TEMPLATE_LOADERS):
from django.template.loaders.app_directories import app_template_dirs
dirs.update(app_template_dirs)
return dirs
def make_template_info(filename, root_dirs):
"""
Creates a Template object for a filename, calculating the possible
relative_filenames from the supplied filename and root template directories
"""
return Template(filename,
[filename[len(d)+1:] for d in root_dirs if filename.startswith(d)])
class Template(object):
def __init__(self, absolute_filename, relative_filenames):
self.absolute_filename, self.relative_filenames = absolute_filename, relative_filenames
def content(self):
try:
return self._content
except AttributeError:
with open(self.absolute_filename) as fd:
try:
content = fd.read().decode(TEMPLATE_ENCODING)
except UnicodeDecodeError as e:
message = '%s in %s' % (
e[4], self.absolute_filename.encode('UTF-8', 'ignore'))
raise UnicodeDecodeError(*(e.args[:4] + (message,)))
self._content = content
return content
content = property(content)
def post_form_info(self):
"""
Get information about any POST forms in the template.
Returns [(linenumber, csrf_token added)]
"""
forms = {}
form_line = 0
for ln, line in enumerate(self.content.split("\n")):
if not form_line and _POST_FORM_RE.search(line):
# record the form with no CSRF token yet
form_line = ln + 1
forms[form_line] = False
if form_line and _TOKEN_RE.search(line):
# found the CSRF token
forms[form_line] = True
form_line = 0
if form_line and _FORM_CLOSE_RE.search(line):
# no token found by form closing tag
form_line = 0
return forms.items()
def includes_template(self, t):
"""
Returns true if this template includes template 't' (via {% include %})
"""
for r in t.relative_filenames:
if re.search(r'\{%\s*include\s+(\'|")' + re.escape(r) + r'(\1)\s*%\}', self.content):
return True
return False
def related_templates(self):
"""
Returns all templates that include this one, recursively. (starting
with this one)
"""
try:
return self._related_templates
except AttributeError:
pass
retval = set([self])
for t in self.all_templates:
if t.includes_template(self):
# If two templates mutually include each other, directly or
# indirectly, we have a problem here...
retval = retval.union(t.related_templates())
self._related_templates = retval
return retval
def __repr__(self):
return repr(self.absolute_filename)
def __eq__(self, other):
return self.absolute_filename == other.absolute_filename
def __hash__(self):
return hash(self.absolute_filename)
def get_templates(dirs):
"""
Returns all files in dirs that have template extensions, as Template
objects.
"""
templates = set()
for root in dirs:
for (dirpath, dirnames, filenames) in os.walk(root):
for f in filenames:
if len([True for e in TEMPLATE_EXTENSIONS if f.endswith(e)]) > 0:
t = make_template_info(os.path.join(dirpath, f), dirs)
# templates need to be able to search others:
t.all_templates = templates
templates.add(t)
return templates
def get_python_code(paths):
"""
Returns all Python code, as a list of tuples, each one being:
(filename, list of lines)
"""
retval = []
for p in paths:
if not os.path.isdir(p):
raise Exception("'%s' is not a directory." % p)
for (dirpath, dirnames, filenames) in os.walk(p):
for f in filenames:
if len([True for e in PYTHON_SOURCE_EXTENSIONS if f.endswith(e)]) > 0:
fn = os.path.join(dirpath, f)
with open(fn) as fd:
content = [l.decode(PYTHON_ENCODING) for l in fd.readlines()]
retval.append((fn, content))
return retval
def search_python_list(python_code, template_names):
"""
Searches python code for a list of template names.
Returns a list of tuples, each one being:
(filename, line number)
"""
retval = set()
for tn in template_names:
retval.update(search_python(python_code, tn))
return sorted(retval)
def search_python(python_code, template_name):
"""
Searches Python code for a template name.
Returns a list of tuples, each one being:
(filename, line number)
"""
retval = []
for fn, content in python_code:
for ln, line in enumerate(content):
if ((u'"%s"' % template_name) in line) or \
((u"'%s'" % template_name) in line):
retval.append((fn, ln + 1))
return retval
def main(pythonpaths):
template_dirs = get_template_dirs()
templates = get_templates(template_dirs)
python_code = get_python_code(pythonpaths)
for t in templates:
# Logic
form_matches = t.post_form_info()
num_post_forms = len(form_matches)
form_lines_without_token = [ln for (ln, has_token) in form_matches if not has_token]
if num_post_forms == 0:
continue
to_search = [rf for rt in t.related_templates() for rf in rt.relative_filenames]
found = search_python_list(python_code, to_search)
# Display:
print(t.absolute_filename)
for r in t.relative_filenames:
print(" AKA %s" % r)
print(" POST forms: %s" % num_post_forms)
print(" With token: %s" % (num_post_forms - len(form_lines_without_token)))
if form_lines_without_token:
print(" Without token:")
for ln in form_lines_without_token:
print("%s:%d:" % (t.absolute_filename, ln))
print('')
print(" Searching for:")
for r in to_search:
print(" " + r)
print('')
print(" Found:")
if len(found) == 0:
print(" Nothing")
else:
for fn, ln in found:
print("%s:%d:" % (fn, ln))
print('')
print("----")
parser = OptionParser(usage=USAGE)
parser.add_option("", "--settings", action="store", dest="settings", help="Dotted path to settings file")
if __name__ == '__main__':
options, args = parser.parse_args()
if len(args) == 0:
parser.print_help()
sys.exit(1)
settings = getattr(options, 'settings', None)
if settings is None:
if os.environ.get("DJANGO_SETTINGS_MODULE", None) is None:
print("You need to set DJANGO_SETTINGS_MODULE or use the '--settings' parameter")
sys.exit(1)
else:
os.environ["DJANGO_SETTINGS_MODULE"] = settings
main(args)
|
apache-2.0
|
davidvon/pipa-pay-server
|
site-packages/distribute-0.6.27-py2.7.egg/setuptools/tests/test_develop.py
|
64
|
2377
|
"""develop tests
"""
import sys
import os, shutil, tempfile, unittest
import site
from StringIO import StringIO
from distutils.errors import DistutilsError
from setuptools.command.develop import develop
from setuptools.command import easy_install as easy_install_pkg
from setuptools.dist import Distribution
SETUP_PY = """\
from setuptools import setup
setup(name='foo')
"""
class TestDevelopTest(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
setup = os.path.join(self.dir, 'setup.py')
f = open(setup, 'w')
f.write(SETUP_PY)
f.close()
self.old_cwd = os.getcwd()
os.chdir(self.dir)
if sys.version >= "2.6":
self.old_base = site.USER_BASE
site.USER_BASE = tempfile.mkdtemp()
self.old_site = site.USER_SITE
site.USER_SITE = tempfile.mkdtemp()
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.dir)
if sys.version >= "2.6":
shutil.rmtree(site.USER_BASE)
shutil.rmtree(site.USER_SITE)
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
def test_develop(self):
if sys.version < "2.6" or hasattr(sys, 'real_prefix'):
return
dist = Distribution()
dist.script_name = 'setup.py'
cmd = develop(dist)
cmd.user = 1
cmd.ensure_finalized()
cmd.install_dir = site.USER_SITE
cmd.user = 1
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cmd.run()
finally:
sys.stdout = old_stdout
# let's see if we got our egg link at the right place
content = os.listdir(site.USER_SITE)
content.sort()
self.assertEquals(content, ['UNKNOWN.egg-link', 'easy-install.pth'])
def test_develop_with_setup_requires(self):
wanted = ("Could not find suitable distribution for "
"Requirement.parse('I-DONT-EXIST')")
old_dir = os.getcwd()
os.chdir(self.dir)
try:
try:
dist = Distribution({'setup_requires': ['I_DONT_EXIST']})
except DistutilsError, e:
error = str(e)
if error == wanted:
pass
finally:
os.chdir(old_dir)
|
apache-2.0
|
mancoast/CPythonPyc_test
|
fail/340_test_format.py
|
84
|
17219
|
from test.support import verbose, TestFailed
import locale
import sys
import test.support as support
import unittest
maxsize = support.MAX_Py_ssize_t
# test string formatting operator (I am not sure if this is being tested
# elsewhere but, surely, some of the given cases are *not* tested because
# they crash python)
# test on unicode strings as well
def testformat(formatstr, args, output=None, limit=None, overflowok=False):
if verbose:
if output:
print("{!a} % {!a} =? {!a} ...".format(formatstr, args, output),
end=' ')
else:
print("{!a} % {!a} works? ...".format(formatstr, args), end=' ')
try:
result = formatstr % args
except OverflowError:
if not overflowok:
raise
if verbose:
print('overflow (this is fine)')
else:
if output and limit is None and result != output:
if verbose:
print('no')
raise AssertionError("%r %% %r == %r != %r" %
(formatstr, args, result, output))
# when 'limit' is specified, it determines how many characters
# must match exactly; lengths must always match.
# ex: limit=5, '12345678' matches '12345___'
# (mainly for floating point format tests for which an exact match
# can't be guaranteed due to rounding and representation errors)
elif output and limit is not None and (
len(result)!=len(output) or result[:limit]!=output[:limit]):
if verbose:
print('no')
print("%s %% %s == %s != %s" % \
(repr(formatstr), repr(args), repr(result), repr(output)))
else:
if verbose:
print('yes')
class FormatTest(unittest.TestCase):
def test_format(self):
testformat("%.1d", (1,), "1")
testformat("%.*d", (sys.maxsize,1), overflowok=True) # expect overflow
testformat("%.100d", (1,), '00000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000'
'00000001', overflowok=True)
testformat("%#.117x", (1,), '0x00000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000001',
overflowok=True)
testformat("%#.118x", (1,), '0x00000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000001',
overflowok=True)
testformat("%f", (1.0,), "1.000000")
# these are trying to test the limits of the internal magic-number-length
# formatting buffer, if that number changes then these tests are less
# effective
testformat("%#.*g", (109, -1.e+49/3.))
testformat("%#.*g", (110, -1.e+49/3.))
testformat("%#.*g", (110, -1.e+100/3.))
# test some ridiculously large precision, expect overflow
testformat('%12.*f', (123456, 1.0))
# check for internal overflow validation on length of precision
# these tests should no longer cause overflow in Python
# 2.7/3.1 and later.
testformat("%#.*g", (110, -1.e+100/3.))
testformat("%#.*G", (110, -1.e+100/3.))
testformat("%#.*f", (110, -1.e+100/3.))
testformat("%#.*F", (110, -1.e+100/3.))
# Formatting of integers. Overflow is not ok
testformat("%x", 10, "a")
testformat("%x", 100000000000, "174876e800")
testformat("%o", 10, "12")
testformat("%o", 100000000000, "1351035564000")
testformat("%d", 10, "10")
testformat("%d", 100000000000, "100000000000")
big = 123456789012345678901234567890
testformat("%d", big, "123456789012345678901234567890")
testformat("%d", -big, "-123456789012345678901234567890")
testformat("%5d", -big, "-123456789012345678901234567890")
testformat("%31d", -big, "-123456789012345678901234567890")
testformat("%32d", -big, " -123456789012345678901234567890")
testformat("%-32d", -big, "-123456789012345678901234567890 ")
testformat("%032d", -big, "-0123456789012345678901234567890")
testformat("%-032d", -big, "-123456789012345678901234567890 ")
testformat("%034d", -big, "-000123456789012345678901234567890")
testformat("%034d", big, "0000123456789012345678901234567890")
testformat("%0+34d", big, "+000123456789012345678901234567890")
testformat("%+34d", big, " +123456789012345678901234567890")
testformat("%34d", big, " 123456789012345678901234567890")
testformat("%.2d", big, "123456789012345678901234567890")
testformat("%.30d", big, "123456789012345678901234567890")
testformat("%.31d", big, "0123456789012345678901234567890")
testformat("%32.31d", big, " 0123456789012345678901234567890")
testformat("%d", float(big), "123456________________________", 6)
big = 0x1234567890abcdef12345 # 21 hex digits
testformat("%x", big, "1234567890abcdef12345")
testformat("%x", -big, "-1234567890abcdef12345")
testformat("%5x", -big, "-1234567890abcdef12345")
testformat("%22x", -big, "-1234567890abcdef12345")
testformat("%23x", -big, " -1234567890abcdef12345")
testformat("%-23x", -big, "-1234567890abcdef12345 ")
testformat("%023x", -big, "-01234567890abcdef12345")
testformat("%-023x", -big, "-1234567890abcdef12345 ")
testformat("%025x", -big, "-0001234567890abcdef12345")
testformat("%025x", big, "00001234567890abcdef12345")
testformat("%0+25x", big, "+0001234567890abcdef12345")
testformat("%+25x", big, " +1234567890abcdef12345")
testformat("%25x", big, " 1234567890abcdef12345")
testformat("%.2x", big, "1234567890abcdef12345")
testformat("%.21x", big, "1234567890abcdef12345")
testformat("%.22x", big, "01234567890abcdef12345")
testformat("%23.22x", big, " 01234567890abcdef12345")
testformat("%-23.22x", big, "01234567890abcdef12345 ")
testformat("%X", big, "1234567890ABCDEF12345")
testformat("%#X", big, "0X1234567890ABCDEF12345")
testformat("%#x", big, "0x1234567890abcdef12345")
testformat("%#x", -big, "-0x1234567890abcdef12345")
testformat("%#.23x", -big, "-0x001234567890abcdef12345")
testformat("%#+.23x", big, "+0x001234567890abcdef12345")
testformat("%# .23x", big, " 0x001234567890abcdef12345")
testformat("%#+.23X", big, "+0X001234567890ABCDEF12345")
testformat("%#-+.23X", big, "+0X001234567890ABCDEF12345")
testformat("%#-+26.23X", big, "+0X001234567890ABCDEF12345")
testformat("%#-+27.23X", big, "+0X001234567890ABCDEF12345 ")
testformat("%#+27.23X", big, " +0X001234567890ABCDEF12345")
# next one gets two leading zeroes from precision, and another from the
# 0 flag and the width
testformat("%#+027.23X", big, "+0X0001234567890ABCDEF12345")
# same, except no 0 flag
testformat("%#+27.23X", big, " +0X001234567890ABCDEF12345")
with self.assertWarns(DeprecationWarning):
testformat("%x", float(big), "123456_______________", 6)
big = 0o12345670123456701234567012345670 # 32 octal digits
testformat("%o", big, "12345670123456701234567012345670")
testformat("%o", -big, "-12345670123456701234567012345670")
testformat("%5o", -big, "-12345670123456701234567012345670")
testformat("%33o", -big, "-12345670123456701234567012345670")
testformat("%34o", -big, " -12345670123456701234567012345670")
testformat("%-34o", -big, "-12345670123456701234567012345670 ")
testformat("%034o", -big, "-012345670123456701234567012345670")
testformat("%-034o", -big, "-12345670123456701234567012345670 ")
testformat("%036o", -big, "-00012345670123456701234567012345670")
testformat("%036o", big, "000012345670123456701234567012345670")
testformat("%0+36o", big, "+00012345670123456701234567012345670")
testformat("%+36o", big, " +12345670123456701234567012345670")
testformat("%36o", big, " 12345670123456701234567012345670")
testformat("%.2o", big, "12345670123456701234567012345670")
testformat("%.32o", big, "12345670123456701234567012345670")
testformat("%.33o", big, "012345670123456701234567012345670")
testformat("%34.33o", big, " 012345670123456701234567012345670")
testformat("%-34.33o", big, "012345670123456701234567012345670 ")
testformat("%o", big, "12345670123456701234567012345670")
testformat("%#o", big, "0o12345670123456701234567012345670")
testformat("%#o", -big, "-0o12345670123456701234567012345670")
testformat("%#.34o", -big, "-0o0012345670123456701234567012345670")
testformat("%#+.34o", big, "+0o0012345670123456701234567012345670")
testformat("%# .34o", big, " 0o0012345670123456701234567012345670")
testformat("%#+.34o", big, "+0o0012345670123456701234567012345670")
testformat("%#-+.34o", big, "+0o0012345670123456701234567012345670")
testformat("%#-+37.34o", big, "+0o0012345670123456701234567012345670")
testformat("%#+37.34o", big, "+0o0012345670123456701234567012345670")
# next one gets one leading zero from precision
testformat("%.33o", big, "012345670123456701234567012345670")
# base marker shouldn't change that, since "0" is redundant
testformat("%#.33o", big, "0o012345670123456701234567012345670")
# but reduce precision, and base marker should add a zero
testformat("%#.32o", big, "0o12345670123456701234567012345670")
# one leading zero from precision, and another from "0" flag & width
testformat("%034.33o", big, "0012345670123456701234567012345670")
# base marker shouldn't change that
testformat("%0#34.33o", big, "0o012345670123456701234567012345670")
with self.assertWarns(DeprecationWarning):
testformat("%o", float(big), "123456__________________________", 6)
# Some small ints, in both int and float flavors.
testformat("%d", 42, "42")
testformat("%d", -42, "-42")
testformat("%d", 42, "42")
testformat("%d", -42, "-42")
testformat("%d", 42.0, "42")
testformat("%#x", 1, "0x1")
testformat("%#x", 1, "0x1")
testformat("%#X", 1, "0X1")
testformat("%#X", 1, "0X1")
with self.assertWarns(DeprecationWarning):
testformat("%#x", 1.0, "0x1")
testformat("%#o", 1, "0o1")
testformat("%#o", 1, "0o1")
testformat("%#o", 0, "0o0")
testformat("%#o", 0, "0o0")
testformat("%o", 0, "0")
testformat("%o", 0, "0")
testformat("%d", 0, "0")
testformat("%d", 0, "0")
testformat("%#x", 0, "0x0")
testformat("%#x", 0, "0x0")
testformat("%#X", 0, "0X0")
testformat("%#X", 0, "0X0")
testformat("%x", 0x42, "42")
testformat("%x", -0x42, "-42")
testformat("%x", 0x42, "42")
testformat("%x", -0x42, "-42")
with self.assertWarns(DeprecationWarning):
testformat("%x", float(0x42), "42")
testformat("%o", 0o42, "42")
testformat("%o", -0o42, "-42")
testformat("%o", 0o42, "42")
testformat("%o", -0o42, "-42")
with self.assertWarns(DeprecationWarning):
testformat("%o", float(0o42), "42")
testformat("%r", "\u0378", "'\\u0378'") # non printable
testformat("%a", "\u0378", "'\\u0378'") # non printable
testformat("%r", "\u0374", "'\u0374'") # printable
testformat("%a", "\u0374", "'\\u0374'") # printable
# alternate float formatting
testformat('%g', 1.1, '1.1')
testformat('%#g', 1.1, '1.10000')
# Test exception for unknown format characters
if verbose:
print('Testing exceptions')
def test_exc(formatstr, args, exception, excmsg):
try:
testformat(formatstr, args)
except exception as exc:
if str(exc) == excmsg:
if verbose:
print("yes")
else:
if verbose: print('no')
print('Unexpected ', exception, ':', repr(str(exc)))
except:
if verbose: print('no')
print('Unexpected exception')
raise
else:
raise TestFailed('did not get expected exception: %s' % excmsg)
test_exc('abc %b', 1, ValueError,
"unsupported format character 'b' (0x62) at index 5")
#test_exc(unicode('abc %\u3000','raw-unicode-escape'), 1, ValueError,
# "unsupported format character '?' (0x3000) at index 5")
test_exc('%d', '1', TypeError, "%d format: a number is required, not str")
test_exc('%g', '1', TypeError, "a float is required")
test_exc('no format', '1', TypeError,
"not all arguments converted during string formatting")
test_exc('no format', '1', TypeError,
"not all arguments converted during string formatting")
if maxsize == 2**31-1:
# crashes 2.2.1 and earlier:
try:
"%*d"%(maxsize, -127)
except MemoryError:
pass
else:
raise TestFailed('"%*d"%(maxsize, -127) should fail')
def test_non_ascii(self):
testformat("\u20ac=%f", (1.0,), "\u20ac=1.000000")
self.assertEqual(format("abc", "\u2007<5"), "abc\u2007\u2007")
self.assertEqual(format(123, "\u2007<5"), "123\u2007\u2007")
self.assertEqual(format(12.3, "\u2007<6"), "12.3\u2007\u2007")
self.assertEqual(format(0j, "\u2007<4"), "0j\u2007\u2007")
self.assertEqual(format(1+2j, "\u2007<8"), "(1+2j)\u2007\u2007")
self.assertEqual(format("abc", "\u2007>5"), "\u2007\u2007abc")
self.assertEqual(format(123, "\u2007>5"), "\u2007\u2007123")
self.assertEqual(format(12.3, "\u2007>6"), "\u2007\u200712.3")
self.assertEqual(format(1+2j, "\u2007>8"), "\u2007\u2007(1+2j)")
self.assertEqual(format(0j, "\u2007>4"), "\u2007\u20070j")
self.assertEqual(format("abc", "\u2007^5"), "\u2007abc\u2007")
self.assertEqual(format(123, "\u2007^5"), "\u2007123\u2007")
self.assertEqual(format(12.3, "\u2007^6"), "\u200712.3\u2007")
self.assertEqual(format(1+2j, "\u2007^8"), "\u2007(1+2j)\u2007")
self.assertEqual(format(0j, "\u2007^4"), "\u20070j\u2007")
def test_locale(self):
try:
oldloc = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, '')
except locale.Error as err:
self.skipTest("Cannot set locale: {}".format(err))
try:
localeconv = locale.localeconv()
sep = localeconv['thousands_sep']
point = localeconv['decimal_point']
text = format(123456789, "n")
self.assertIn(sep, text)
self.assertEqual(text.replace(sep, ''), '123456789')
text = format(1234.5, "n")
self.assertIn(sep, text)
self.assertIn(point, text)
self.assertEqual(text.replace(sep, ''), '1234' + point + '5')
finally:
locale.setlocale(locale.LC_ALL, oldloc)
@support.cpython_only
def test_optimisations(self):
text = "abcde" # 5 characters
self.assertIs("%s" % text, text)
self.assertIs("%.5s" % text, text)
self.assertIs("%.10s" % text, text)
self.assertIs("%1s" % text, text)
self.assertIs("%5s" % text, text)
self.assertIs("{0}".format(text), text)
self.assertIs("{0:s}".format(text), text)
self.assertIs("{0:.5s}".format(text), text)
self.assertIs("{0:.10s}".format(text), text)
self.assertIs("{0:1s}".format(text), text)
self.assertIs("{0:5s}".format(text), text)
self.assertIs(text % (), text)
self.assertIs(text.format(), text)
def test_precision(self):
f = 1.2
self.assertEqual(format(f, ".0f"), "1")
self.assertEqual(format(f, ".3f"), "1.200")
with self.assertRaises(ValueError) as cm:
format(f, ".%sf" % (sys.maxsize + 1))
c = complex(f)
self.assertEqual(format(c, ".0f"), "1+0j")
self.assertEqual(format(c, ".3f"), "1.200+0.000j")
with self.assertRaises(ValueError) as cm:
format(c, ".%sf" % (sys.maxsize + 1))
@support.cpython_only
def test_precision_c_limits(self):
from _testcapi import INT_MAX
f = 1.2
with self.assertRaises(ValueError) as cm:
format(f, ".%sf" % (INT_MAX + 1))
c = complex(f)
with self.assertRaises(ValueError) as cm:
format(c, ".%sf" % (INT_MAX + 1))
if __name__ == "__main__":
unittest.main()
|
gpl-3.0
|
fbradyirl/home-assistant
|
tests/components/remote/test_init.py
|
4
|
3632
|
"""The tests for the Remote component, adapted from Light Test."""
# pylint: disable=protected-access
import unittest
from homeassistant.const import (
ATTR_ENTITY_ID,
STATE_ON,
STATE_OFF,
CONF_PLATFORM,
SERVICE_TURN_ON,
SERVICE_TURN_OFF,
)
import homeassistant.components.remote as remote
from tests.common import mock_service, get_test_home_assistant
from tests.components.remote import common
TEST_PLATFORM = {remote.DOMAIN: {CONF_PLATFORM: "test"}}
SERVICE_SEND_COMMAND = "send_command"
SERVICE_LEARN_COMMAND = "learn_command"
class TestRemote(unittest.TestCase):
"""Test the remote module."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_is_on(self):
"""Test is_on."""
self.hass.states.set("remote.test", STATE_ON)
assert remote.is_on(self.hass, "remote.test")
self.hass.states.set("remote.test", STATE_OFF)
assert not remote.is_on(self.hass, "remote.test")
self.hass.states.set(remote.ENTITY_ID_ALL_REMOTES, STATE_ON)
assert remote.is_on(self.hass)
self.hass.states.set(remote.ENTITY_ID_ALL_REMOTES, STATE_OFF)
assert not remote.is_on(self.hass)
def test_turn_on(self):
"""Test turn_on."""
turn_on_calls = mock_service(self.hass, remote.DOMAIN, SERVICE_TURN_ON)
common.turn_on(self.hass, entity_id="entity_id_val")
self.hass.block_till_done()
assert len(turn_on_calls) == 1
call = turn_on_calls[-1]
assert call.domain == remote.DOMAIN
def test_turn_off(self):
"""Test turn_off."""
turn_off_calls = mock_service(self.hass, remote.DOMAIN, SERVICE_TURN_OFF)
common.turn_off(self.hass, entity_id="entity_id_val")
self.hass.block_till_done()
assert len(turn_off_calls) == 1
call = turn_off_calls[-1]
assert call.domain == remote.DOMAIN
assert call.service == SERVICE_TURN_OFF
assert call.data[ATTR_ENTITY_ID] == "entity_id_val"
def test_send_command(self):
"""Test send_command."""
send_command_calls = mock_service(
self.hass, remote.DOMAIN, SERVICE_SEND_COMMAND
)
common.send_command(
self.hass,
entity_id="entity_id_val",
device="test_device",
command=["test_command"],
num_repeats="4",
delay_secs="0.6",
)
self.hass.block_till_done()
assert len(send_command_calls) == 1
call = send_command_calls[-1]
assert call.domain == remote.DOMAIN
assert call.service == SERVICE_SEND_COMMAND
assert call.data[ATTR_ENTITY_ID] == "entity_id_val"
def test_learn_command(self):
"""Test learn_command."""
learn_command_calls = mock_service(
self.hass, remote.DOMAIN, SERVICE_LEARN_COMMAND
)
common.learn_command(
self.hass,
entity_id="entity_id_val",
device="test_device",
command=["test_command"],
alternative=True,
timeout=20,
)
self.hass.block_till_done()
assert len(learn_command_calls) == 1
call = learn_command_calls[-1]
assert call.domain == remote.DOMAIN
assert call.service == SERVICE_LEARN_COMMAND
assert call.data[ATTR_ENTITY_ID] == "entity_id_val"
|
apache-2.0
|
nolanliou/tensorflow
|
tensorflow/python/kernel_tests/zero_division_test.py
|
139
|
2389
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for integer division by zero."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import test
class ZeroDivisionTest(test.TestCase):
def testZeros(self):
with self.test_session(use_gpu=True):
for dtype in dtypes.uint8, dtypes.int16, dtypes.int32, dtypes.int64:
zero = constant_op.constant(0, dtype=dtype)
one = constant_op.constant(1, dtype=dtype)
bads = [one // zero]
if dtype in (dtypes.int32, dtypes.int64):
bads.append(one % zero)
for bad in bads:
try:
result = bad.eval()
except errors_impl.OpError as e:
# Ideally, we'd get a nice exception. In theory, this should only
# happen on CPU, but 32 bit integer GPU division is actually on
# CPU due to a placer bug.
# TODO(irving): Make stricter once the placer bug is fixed.
self.assertIn('Integer division by zero', str(e))
else:
# On the GPU, integer division by zero produces all bits set.
# But apparently on some GPUs "all bits set" for 64 bit division
# means 32 bits set, so we allow 0xffffffff as well. This isn't
# very portable, so we may need to expand this list if other GPUs
# do different things.
self.assertTrue(test.is_gpu_available())
self.assertIn(result, (-1, 0xff, 0xffffffff))
if __name__ == '__main__':
test.main()
|
apache-2.0
|
mush42/mezzanine
|
mezzanine/blog/tests.py
|
39
|
2373
|
from __future__ import unicode_literals
from unittest import skipUnless
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from django.core.urlresolvers import reverse
from mezzanine.blog.models import BlogPost
from mezzanine.conf import settings
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
from mezzanine.pages.models import Page, RichTextPage
from mezzanine.utils.tests import TestCase
class BlogTests(TestCase):
def test_blog_views(self):
"""
Basic status code test for blog views.
"""
response = self.client.get(reverse("blog_post_list"))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse("blog_post_feed", args=("rss",)))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse("blog_post_feed", args=("atom",)))
self.assertEqual(response.status_code, 200)
blog_post = BlogPost.objects.create(title="Post", user=self._user,
status=CONTENT_STATUS_PUBLISHED)
response = self.client.get(blog_post.get_absolute_url())
self.assertEqual(response.status_code, 200)
@skipUnless("mezzanine.accounts" in settings.INSTALLED_APPS and
"mezzanine.pages" in settings.INSTALLED_APPS,
"accounts and pages apps required")
def test_login_protected_blog(self):
"""
Test the blog is login protected if its page has login_required
set to True.
"""
slug = settings.BLOG_SLUG or "/"
RichTextPage.objects.create(title="blog", slug=slug,
login_required=True)
response = self.client.get(reverse("blog_post_list"), follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(len(response.redirect_chain) > 0)
redirect_path = urlparse(response.redirect_chain[0][0]).path
self.assertEqual(redirect_path, settings.LOGIN_URL)
def test_blog_post_list_can_use_any_page_type(self):
"""Test that the blog post list can use any Page type."""
slug = settings.BLOG_SLUG or "/"
Page.objects.create(title="blog", slug=slug)
response = self.client.get(reverse("blog_post_list"))
self.assertEqual(response.status_code, 200)
|
bsd-2-clause
|
intake/filesystem_spec
|
fsspec/implementations/tests/test_ftp.py
|
1
|
3434
|
import os
import subprocess
import sys
import time
import pytest
import fsspec
from fsspec import open_files
from fsspec.implementations.ftp import FTPFileSystem
here = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def ftp():
pytest.importorskip("pyftpdlib")
P = subprocess.Popen(
[sys.executable, "-m", "pyftpdlib", "-d", here],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
)
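# pyftpdlib's command-line server serves the given directory on port 2121 by
# default; the sleep below gives the child process a moment to start listening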
try:
time.sleep(1)
yield "localhost", 2121
finally:
P.terminate()
P.wait()
def test_basic(ftp):
host, port = ftp
fs = FTPFileSystem(host, port)
assert fs.ls("/", detail=False) == sorted(os.listdir(here))
out = fs.cat("/" + os.path.basename(__file__))
assert out == open(__file__, "rb").read()
def test_not_cached(ftp):
host, port = ftp
fs = FTPFileSystem(host, port)
fs2 = FTPFileSystem(host, port)
assert fs is not fs2
@pytest.mark.parametrize("cache_type", ["bytes", "mmap"])
def test_complex(ftp_writable, cache_type):
from fsspec.core import BytesCache
host, port, user, pw = ftp_writable
files = open_files(
"ftp:///ou*",
host=host,
port=port,
username=user,
password=pw,
block_size=10000,
cache_type=cache_type,
)
assert len(files) == 1
with files[0] as fo:
assert fo.read(10) == b"hellohello"
if isinstance(fo.cache, BytesCache):
assert len(fo.cache.cache) == 10010
assert fo.read(2) == b"he"
assert fo.tell() == 12
def test_write_small(ftp_writable):
host, port, user, pw = ftp_writable
fs = FTPFileSystem(host, port, user, pw)
with fs.open("/out2", "wb") as f:
f.write(b"oi")
assert fs.cat("/out2") == b"oi"
def test_with_url(ftp_writable):
host, port, user, pw = ftp_writable
fo = fsspec.open("ftp://{}:{}@{}:{}/out".format(user, pw, host, port), "wb")
with fo as f:
f.write(b"hello")
fo = fsspec.open("ftp://{}:{}@{}:{}/out".format(user, pw, host, port), "rb")
with fo as f:
assert f.read() == b"hello"
@pytest.mark.parametrize("cache_type", ["bytes", "mmap"])
def test_write_big(ftp_writable, cache_type):
host, port, user, pw = ftp_writable
fs = FTPFileSystem(host, port, user, pw, block_size=1000, cache_type=cache_type)
fn = "/bigger"
with fs.open(fn, "wb") as f:
f.write(b"o" * 500)
assert not fs.exists(fn)
f.write(b"o" * 1000)
fs.invalidate_cache()
assert fs.exists(fn)
f.write(b"o" * 200)
f.flush()
assert fs.info(fn)["size"] == 1700
assert fs.cat(fn) == b"o" * 1700
def test_transaction(ftp_writable):
host, port, user, pw = ftp_writable
fs = FTPFileSystem(host, port, user, pw)
fs.mkdir("/tmp")
fn = "/tr"
with fs.transaction:
with fs.open(fn, "wb") as f:
f.write(b"not")
assert not fs.exists(fn)
assert fs.exists(fn)
assert fs.cat(fn) == b"not"
fs.rm(fn)
assert not fs.exists(fn)
def test_transaction_with_cache(ftp_writable):
host, port, user, pw = ftp_writable
fs = FTPFileSystem(host, port, user, pw)
fs.mkdir("/tmp")
fs.mkdir("/tmp/dir")
assert "dir" in fs.ls("/tmp", detail=False)
with fs.transaction:
fs.rmdir("/tmp/dir")
assert "dir" not in fs.ls("/tmp", detail=False)
assert not fs.exists("/tmp/dir")
|
bsd-3-clause
|
endlessm/chromium-browser
|
tools/metrics/histograms/update_use_counter_css.py
|
10
|
2761
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Scans the Chromium source of UseCounter, formats the Feature enum for
histograms.xml and merges it. This script can also generate a Python code
snippet to put in uma.py of Chromium Dashboard. Make sure that you review the
output for correctness.
"""
import optparse
import os
import re
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import path_util
import update_histogram_enum
import update_use_counter_feature_enum
USE_COUNTER_MOJOM_PATH = 'third_party/blink/public/mojom/use_counter/'\
'css_property_id.mojom'
def EnumToCssProperty(enum_name):
"""Converts a camel cased enum name to the lower case CSS property."""
# The first group also matches uppercase letters so that a capital followed
# by another capital, as in "ZIndex", still converts to "z-index".
# Special-case TotalPagesMeasured for backward compatibility.
if enum_name == "TotalPagesMeasured":
return "Total Pages Measured"
return re.sub(r'([a-zA-Z])([A-Z])', r'\1-\2', enum_name).lower()
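# Illustrative conversions: EnumToCssProperty("FontWeight") -> "font-weight",
# EnumToCssProperty("ZIndex") -> "z-index".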
def ReadCssProperties(filename):
# Read the file as a list of lines
with open(path_util.GetInputFile(filename)) as f:
content = f.readlines()
# Looking for a single line like "kFontWeight = 10,"
ENUM_REGEX = re.compile(r"""k(\w+)\s*= # capture the enum name
\s*(\d+),? # capture the id
""", re.VERBOSE)
properties = {}
for line in content:
enum_match = ENUM_REGEX.search(line)
if enum_match:
enum_name = enum_match.group(1)
property_id = int(enum_match.group(2))
# Properties with id = 0 are invalid. Skip them.
if property_id == 0:
continue
properties[property_id] = EnumToCssProperty(enum_name)
return properties
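# For a line such as "kFontWeight = 10," the returned dict would map
# 10 -> "font-weight" (illustrative).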
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('--for-dashboard', action='store_true', dest='dashboard',
default=False,
help='Print enum definition formatted for use in uma.py of '
'Chromium dashboard developed at '
'https://github.com/GoogleChrome/chromium-dashboard')
options, args = parser.parse_args()
if options.dashboard:
enum_dict = ReadCssProperties(USE_COUNTER_MOJOM_PATH)
update_use_counter_feature_enum.PrintEnumForDashboard(enum_dict)
else:
update_histogram_enum.UpdateHistogramFromDict(
'MappedCSSProperties', ReadCssProperties(USE_COUNTER_MOJOM_PATH),
USE_COUNTER_MOJOM_PATH, os.path.basename(__file__))
|
bsd-3-clause
|
bdh1011/wau
|
venv/lib/python2.7/site-packages/celery/events/dumper.py
|
8
|
3285
|
# -*- coding: utf-8 -*-
"""
celery.events.dumper
~~~~~~~~~~~~~~~~~~~~
This is a simple program that dumps events to the console
as they happen. Think of it as a `tcpdump` for Celery events.
"""
from __future__ import absolute_import, print_function
import sys
from datetime import datetime
from celery.app import app_or_default
from celery.utils.functional import LRUCache
from celery.utils.timeutils import humanize_seconds
__all__ = ['Dumper', 'evdump']
TASK_NAMES = LRUCache(limit=0xFFF)
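# cache up to 0xFFF (4095) recently seen task names, keyed by task uuid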
HUMAN_TYPES = {'worker-offline': 'shutdown',
'worker-online': 'started',
'worker-heartbeat': 'heartbeat'}
CONNECTION_ERROR = """\
-> Cannot connect to %s: %s.
Trying again %s
"""
def humanize_type(type):
try:
return HUMAN_TYPES[type.lower()]
except KeyError:
return type.lower().replace('-', ' ')
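# e.g. humanize_type('worker-online') -> 'started', while an unmapped type
# such as 'task-succeeded' becomes 'task succeeded'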
class Dumper(object):
def __init__(self, out=sys.stdout):
self.out = out
def say(self, msg):
print(msg, file=self.out)
# need to flush so that output can be piped.
try:
self.out.flush()
except AttributeError:
pass
def on_event(self, ev):
timestamp = datetime.utcfromtimestamp(ev.pop('timestamp'))
type = ev.pop('type').lower()
hostname = ev.pop('hostname')
if type.startswith('task-'):
uuid = ev.pop('uuid')
if type in ('task-received', 'task-sent'):
task = TASK_NAMES[uuid] = '{0}({1}) args={2} kwargs={3}' \
.format(ev.pop('name'), uuid,
ev.pop('args'),
ev.pop('kwargs'))
else:
task = TASK_NAMES.get(uuid, '')
return self.format_task_event(hostname, timestamp,
type, task, ev)
fields = ', '.join(
'{0}={1}'.format(key, ev[key]) for key in sorted(ev)
)
sep = fields and ':' or ''
self.say('{0} [{1}] {2}{3} {4}'.format(
hostname, timestamp, humanize_type(type), sep, fields),
)
def format_task_event(self, hostname, timestamp, type, task, event):
fields = ', '.join(
'{0}={1}'.format(key, event[key]) for key in sorted(event)
)
sep = fields and ':' or ''
self.say('{0} [{1}] {2}{3} {4} {5}'.format(
hostname, timestamp, humanize_type(type), sep, task, fields),
)
def evdump(app=None, out=sys.stdout):
app = app_or_default(app)
dumper = Dumper(out=out)
dumper.say('-> evdump: starting capture...')
conn = app.connection().clone()
def _error_handler(exc, interval):
dumper.say(CONNECTION_ERROR % (
conn.as_uri(), exc, humanize_seconds(interval, 'in', ' ')
))
while 1:
try:
conn.ensure_connection(_error_handler)
recv = app.events.Receiver(conn, handlers={'*': dumper.on_event})
recv.capture()
except (KeyboardInterrupt, SystemExit):
return conn and conn.close()
except conn.connection_errors + conn.channel_errors:
dumper.say('-> Connection lost, attempting reconnect')
if __name__ == '__main__': # pragma: no cover
evdump()
|
mit
|
LBenzahia/cltk
|
cltk/stop/old_english/stops.py
|
2
|
5395
|
"""This stopword list is adapted from the
Introduction to Old English website at ``https://lrc.la.utexas.edu/eieol/engol``.
"""
__author__ = ['Sourav Singh <ssouravsingh12@gmail.com>']
__license__ = 'GPL License.'
STOPS_LIST = ['and',
'on',
'þonne',
'wið',
'to',
'þæt',
'þe',
'ne',
'ic',
'me',
'heo',
'him',
'he',
'swa',
'þis',
'mid',
'þu',
'ofer',
'his',
'þriwa',
'seo',
'hit',
'se',
'þas',
'cweð',
'þæs',
'in',
'sy',
'ða',
'ðy',
'ær',
'ðonne',
'næfre',
'þone',
'ge',
'ðas',
'þære',
'þam',
'is',
'of',
'gif',
'þæm',
'nu',
'under',
'wiþ',
'geond',
'æfter',
'ðis',
'do',
'hwæt',
'her',
'þurh',
'þus',
'lytel',
'æt',
'ðin',
'willian',
'cume',
'þeos',
'þara',
'are',
'cuman',
'com',
'ænig',
'þon',
'for',
'us',
'ac',
'bot',
'ende',
'wæs',
'wǣre',
'wes',
'wǣron',
'wǣren',
'wesað',
'ic',
'wit',
'wē',
'mīn',
'uncer',
'ūser',
'ūre',
'mē',
'unc',
'ūs',
'mec',
'uncit',
'ūsic',
'ðū',
'git',
'gē',
'ðīn',
'incer',
'ēower',
'ēowre',
'ðē',
'inc',
'ēow',
'ðec',
'incit',
'ēowic',
'hē',
'hēo',
'hīe',
'hit',
'hyt',
'hī',
'hȳ',
'hire',
'hira',
'heora',
'hiera',
'heom',
'hine',
'nǣr',
'nǣfre',
'nǣnig',
'nolde',
'noldon',
'be',
'beforan',
'betweox',
'for',
'from',
'fram',
'mid',
'tō',
'geond',
'oð',
'þurh',
'ofer',
'under',
'bēo',
'bist',
'biþ',
'bēoþ',
'bēon',
'ēom',
'sīe',
'eart',
'sī',
'is',
'sēo',
'sindon',
'sint',
'nēom',
'neart',
'nis',
'sīo',
'ðæt',
'tæt',
'ðæs',
'ðǣre',
'ðǣm',
'ðām',
'ðone',
'ðā',
'ðȳ',
'ðē',
'ðon',
'ðāra',
'ðǣra',
'ðes',
'ðēos',
'ðisse',
'ðeosse',
'ðises',
'ðisses',
'ðisum',
'ðissum',
'ðisne',
'ðās',
'ðīs',
'ðȳs',
'ðissa',
'ðeossa',
'ðeosum',
'ðeossum',
'twēgen',
'twā',
'tū',
'twēgra',
'twǣm',
'þrīe',
'þrēo',
'þrēora',
'þrīm',
'endlefan',
'twelf',
'twēntig',
'þrēotīene',
'þrītig',
'fēower',
'fēowertīene',
'fēowertig',
'fīf',
'fīftīene',
'fīftig',
'siex',
'siextīene',
'siextig',
'seofon',
'seofontīene',
'seofontig',
'eahta',
'eahtatīene',
'eahtatig',
'nigon',
'nigontīene',
'nigontig',
'tīen',
'hund',
'gā',
'gǣst',
'gǣð',
'gāð',
'gān',
'gānde',
'gangende',
'gegān',
'ēode',
'ēodest',
'ēodon',
'ēoden']
|
mit
|
0111001101111010/open-health-inspection-api
|
venv/lib/python2.7/site-packages/pip/_vendor/distlib/markers.py
|
1261
|
6282
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Parser for the environment markers micro-language defined in PEP 345."""
import ast
import os
import sys
import platform
from .compat import python_implementation, string_types
from .util import in_venv
__all__ = ['interpret']
class Evaluator(object):
"""
A limited evaluator for Python expressions.
"""
operators = {
'eq': lambda x, y: x == y,
'gt': lambda x, y: x > y,
'gte': lambda x, y: x >= y,
'in': lambda x, y: x in y,
'lt': lambda x, y: x < y,
'lte': lambda x, y: x <= y,
'not': lambda x: not x,
'noteq': lambda x, y: x != y,
'notin': lambda x, y: x not in y,
}
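# do_compare lowercases the AST op class name to pick an operator here,
# e.g. ast.Eq -> 'eq', ast.GtE -> 'gte', ast.NotIn -> 'notin'.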
allowed_values = {
'sys_platform': sys.platform,
'python_version': '%s.%s' % sys.version_info[:2],
# parsing sys.version is not reliable, but there is no other
# way to get e.g. 2.7.2+, and the PEP is defined with sys.version
'python_full_version': sys.version.split(' ', 1)[0],
'os_name': os.name,
'platform_in_venv': str(in_venv()),
'platform_release': platform.release(),
'platform_version': platform.version(),
'platform_machine': platform.machine(),
'platform_python_implementation': python_implementation(),
}
def __init__(self, context=None):
"""
Initialise an instance.
:param context: If specified, names are looked up in this mapping.
"""
self.context = context or {}
self.source = None
def get_fragment(self, offset):
"""
Get the part of the source which is causing a problem.
"""
fragment_len = 10
s = '%r' % (self.source[offset:offset + fragment_len])
if offset + fragment_len < len(self.source):
s += '...'
return s
def get_handler(self, node_type):
"""
Get a handler for the specified AST node type.
"""
return getattr(self, 'do_%s' % node_type, None)
def evaluate(self, node, filename=None):
"""
Evaluate a source string or node, using ``filename`` when
displaying errors.
"""
if isinstance(node, string_types):
self.source = node
kwargs = {'mode': 'eval'}
if filename:
kwargs['filename'] = filename
try:
node = ast.parse(node, **kwargs)
except SyntaxError as e:
s = self.get_fragment(e.offset)
raise SyntaxError('syntax error %s' % s)
node_type = node.__class__.__name__.lower()
handler = self.get_handler(node_type)
if handler is None:
if self.source is None:
s = '(source not available)'
else:
s = self.get_fragment(node.col_offset)
raise SyntaxError("don't know how to evaluate %r %s" % (
node_type, s))
return handler(node)
def get_attr_key(self, node):
assert isinstance(node, ast.Attribute), 'attribute node expected'
return '%s.%s' % (node.value.id, node.attr)
def do_attribute(self, node):
if not isinstance(node.value, ast.Name):
valid = False
else:
key = self.get_attr_key(node)
valid = key in self.context or key in self.allowed_values
if not valid:
raise SyntaxError('invalid expression: %s' % key)
if key in self.context:
result = self.context[key]
else:
result = self.allowed_values[key]
return result
def do_boolop(self, node):
result = self.evaluate(node.values[0])
is_or = node.op.__class__ is ast.Or
is_and = node.op.__class__ is ast.And
assert is_or or is_and
if (is_and and result) or (is_or and not result):
for n in node.values[1:]:
result = self.evaluate(n)
if (is_or and result) or (is_and and not result):
break
return result
def do_compare(self, node):
def sanity_check(lhsnode, rhsnode):
valid = True
if isinstance(lhsnode, ast.Str) and isinstance(rhsnode, ast.Str):
valid = False
#elif (isinstance(lhsnode, ast.Attribute)
# and isinstance(rhsnode, ast.Attribute)):
# klhs = self.get_attr_key(lhsnode)
# krhs = self.get_attr_key(rhsnode)
# valid = klhs != krhs
if not valid:
s = self.get_fragment(node.col_offset)
raise SyntaxError('Invalid comparison: %s' % s)
lhsnode = node.left
lhs = self.evaluate(lhsnode)
result = True
for op, rhsnode in zip(node.ops, node.comparators):
sanity_check(lhsnode, rhsnode)
op = op.__class__.__name__.lower()
if op not in self.operators:
raise SyntaxError('unsupported operation: %r' % op)
rhs = self.evaluate(rhsnode)
result = self.operators[op](lhs, rhs)
if not result:
break
lhs = rhs
lhsnode = rhsnode
return result
def do_expression(self, node):
return self.evaluate(node.body)
def do_name(self, node):
valid = False
if node.id in self.context:
valid = True
result = self.context[node.id]
elif node.id in self.allowed_values:
valid = True
result = self.allowed_values[node.id]
if not valid:
raise SyntaxError('invalid expression: %s' % node.id)
return result
def do_str(self, node):
return node.s
def interpret(marker, execution_context=None):
"""
Interpret a marker and return a result depending on environment.
:param marker: The marker to interpret.
:type marker: str
:param execution_context: The context used for name lookup.
:type execution_context: mapping
"""
return Evaluator(execution_context).evaluate(marker.strip())
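# Minimal usage sketch (illustrative; results depend on the running interpreter):
#   interpret('sys_platform == "win32"')             # True only on Windows
#   interpret("os_name == 'posix'")                  # True on POSIX systems
#   interpret("extra == 'test'", {'extra': 'test'})  # name resolved via execution_context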
|
gpl-2.0
|
FNCS/ns-3.24
|
src/topology-read/bindings/modulegen__gcc_LP64.py
|
28
|
172229
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.topology_read', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit', import_from_module='ns.core')
## topology-reader-helper.h (module 'topology-read'): ns3::TopologyReaderHelper [class]
module.add_class('TopologyReaderHelper')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## topology-reader.h (module 'topology-read'): ns3::TopologyReader [class]
module.add_class('TopologyReader', parent=root_module['ns3::Object'])
## topology-reader.h (module 'topology-read'): ns3::TopologyReader::Link [class]
module.add_class('Link', outer_class=root_module['ns3::TopologyReader'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## inet-topology-reader.h (module 'topology-read'): ns3::InetTopologyReader [class]
module.add_class('InetTopologyReader', parent=root_module['ns3::TopologyReader'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
## node.h (module 'network'): ns3::Node [class]
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
## orbis-topology-reader.h (module 'topology-read'): ns3::OrbisTopologyReader [class]
module.add_class('OrbisTopologyReader', parent=root_module['ns3::TopologyReader'])
## rocketfuel-topology-reader.h (module 'topology-read'): ns3::RocketfuelTopologyReader [class]
module.add_class('RocketfuelTopologyReader', parent=root_module['ns3::TopologyReader'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_container('std::map< std::string, std::string >', ('std::string', 'std::string'), container_type=u'map')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
## Register a nested module for the namespace TracedValueCallback
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_types_ns3_TracedValueCallback(module):
root_module = module.get_root()
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *', u'ns3::TracedValueCallback::Time')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) **', u'ns3::TracedValueCallback::Time*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *&', u'ns3::TracedValueCallback::Time&')
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3TopologyReaderHelper_methods(root_module, root_module['ns3::TopologyReaderHelper'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TopologyReader_methods(root_module, root_module['ns3::TopologyReader'])
register_Ns3TopologyReaderLink_methods(root_module, root_module['ns3::TopologyReader::Link'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3InetTopologyReader_methods(root_module, root_module['ns3::InetTopologyReader'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3Node_methods(root_module, root_module['ns3::Node'])
register_Ns3OrbisTopologyReader_methods(root_module, root_module['ns3::OrbisTopologyReader'])
register_Ns3RocketfuelTopologyReader_methods(root_module, root_module['ns3::RocketfuelTopologyReader'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
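# Both GetHash32/GetHash64 overloads are registered above; pybindgen resolves
# the overload at call time from the argument types. Illustrative use of the
# resulting binding (assuming the modular 'ns.core' import layout):
#
#   import ns.core
#   h = ns.core.Hasher()
#   v = h.GetHash32("hello")   # matches the std::string overload
#   h.clear()                  # reset state before an unrelated hash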
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
return
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
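# Illustrative use of the Ipv4Address/Ipv4Mask bindings registered above
# (assuming the modular 'ns.network' import layout):
#
#   import ns.network
#   addr = ns.network.Ipv4Address("10.1.1.1")
#   mask = ns.network.Ipv4Mask("255.255.255.0")
#   net = addr.CombineMask(mask)                    # 10.1.1.0
#   bcast = addr.GetSubnetDirectedBroadcast(mask)   # 10.1.1.255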
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
cls.add_method('IsAllHostsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
cls.add_method('IsDocumentation',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
return
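# Illustrative use of the Ipv6Address autoconfiguration helpers registered
# above (a sketch assuming 'ns.network' also exposes Mac48Address with a
# string constructor):
#
#   import ns.network
#   mac = ns.network.Mac48Address("00:00:00:00:00:01")
#   lla = ns.network.Ipv6Address.MakeAutoconfiguredLinkLocalAddress(mac)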
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
return
def register_Ns3NodeContainer_methods(root_module, cls):
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
cls.add_constructor([])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
cls.add_constructor([param('std::string', 'nodeName')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NodeContainer', 'other')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'nodeName')])
## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
[],
is_const=True)
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n')])
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n'), param('uint32_t', 'systemId')])
## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
[],
is_const=True)
## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Node >',
[param('uint32_t', 'i')],
is_const=True)
## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
cls.add_method('GetGlobal',
'ns3::NodeContainer',
[],
is_static=True)
## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
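# Illustrative use of the NodeContainer bindings registered above (assuming
# the modular 'ns.network' import layout):
#
#   import ns.network
#   nodes = ns.network.NodeContainer()
#   nodes.Create(3)
#   first = nodes.Get(0)   # an ns3::Ptr<ns3::Node>
#   count = nodes.GetN()   # 3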
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
def register_Ns3TimeWithUnit_methods(root_module, cls):
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
return
def register_Ns3TopologyReaderHelper_methods(root_module, cls):
## topology-reader-helper.h (module 'topology-read'): ns3::TopologyReaderHelper::TopologyReaderHelper(ns3::TopologyReaderHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TopologyReaderHelper const &', 'arg0')])
## topology-reader-helper.h (module 'topology-read'): ns3::TopologyReaderHelper::TopologyReaderHelper() [constructor]
cls.add_constructor([])
## topology-reader-helper.h (module 'topology-read'): ns3::Ptr<ns3::TopologyReader> ns3::TopologyReaderHelper::GetTopologyReader() [member function]
cls.add_method('GetTopologyReader',
'ns3::Ptr< ns3::TopologyReader >',
[])
## topology-reader-helper.h (module 'topology-read'): void ns3::TopologyReaderHelper::SetFileName(std::string const fileName) [member function]
cls.add_method('SetFileName',
'void',
[param('std::string const', 'fileName')])
## topology-reader-helper.h (module 'topology-read'): void ns3::TopologyReaderHelper::SetFileType(std::string const fileType) [member function]
cls.add_method('SetFileType',
'void',
[param('std::string const', 'fileType')])
return
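# Illustrative use of the TopologyReaderHelper bindings registered above; the
# file name is a placeholder, and the file type selects among the readers
# ns-3 ships ("Inet", "Orbis" or "Rocketfuel"). Assumes the modular
# 'ns.topology_read' import layout:
#
#   import ns.topology_read
#   helper = ns.topology_read.TopologyReaderHelper()
#   helper.SetFileName("topology.txt")
#   helper.SetFileType("Inet")
#   reader = helper.GetTopologyReader()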
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
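# Illustrative use of the TypeId lookup methods registered above (assuming
# the modular 'ns.core' import layout):
#
#   import ns.core
#   tid = ns.core.TypeId.LookupByName("ns3::Object")
#   name = tid.GetName()      # "ns3::Object"
#   n = tid.GetAttributeN()   # number of registered attributes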
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor]
cls.add_constructor([param('long double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t', 'v')],
is_static=True)
## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable]
cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
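# Illustrative use of the Object aggregation API registered above; 'a' and
# 'b' stand in for any two ns3::Object subclass instances already created by
# the caller:
#
#   a.AggregateObject(b)
#   it = a.GetAggregateIterator()
#   while it.HasNext():
#       print(it.Next().GetInstanceTypeId().GetName())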
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
cls.add_method('As',
'ns3::TimeWithUnit',
[param('ns3::Time::Unit const', 'unit')],
is_const=True)
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
cls.add_method('GetDays',
'double',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
cls.add_method('GetHours',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
cls.add_method('GetMinutes',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
cls.add_method('GetYears',
'double',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
cls.add_method('Max',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
cls.add_method('Min',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
cls.add_method('StaticInit',
'bool',
[],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
return
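# Illustrative sketch, not part of the generated output: once these bindings
# are compiled, the ns3::Time methods registered above become callable from
# Python. The `ns.core` module path below is an assumption based on the usual
# ns-3 binding layout, not something this file defines.
#
#   import ns.core
#   t = ns.core.Time("5ms")          # std::string constructor registered above
#   t.GetMilliSeconds()              # -> 5, via GetMilliSeconds()
#   t.IsStrictlyPositive()           # -> True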
def register_Ns3TopologyReader_methods(root_module, cls):
## topology-reader.h (module 'topology-read'): ns3::TopologyReader::TopologyReader() [constructor]
cls.add_constructor([])
## topology-reader.h (module 'topology-read'): void ns3::TopologyReader::AddLink(ns3::TopologyReader::Link link) [member function]
cls.add_method('AddLink',
'void',
[param('ns3::TopologyReader::Link', 'link')])
## topology-reader.h (module 'topology-read'): std::string ns3::TopologyReader::GetFileName() const [member function]
cls.add_method('GetFileName',
'std::string',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): static ns3::TypeId ns3::TopologyReader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## topology-reader.h (module 'topology-read'): std::_List_const_iterator<ns3::TopologyReader::Link> ns3::TopologyReader::LinksBegin() const [member function]
cls.add_method('LinksBegin',
'std::_List_const_iterator< ns3::TopologyReader::Link >',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): bool ns3::TopologyReader::LinksEmpty() const [member function]
cls.add_method('LinksEmpty',
'bool',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): std::_List_const_iterator<ns3::TopologyReader::Link> ns3::TopologyReader::LinksEnd() const [member function]
cls.add_method('LinksEnd',
'std::_List_const_iterator< ns3::TopologyReader::Link >',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): int ns3::TopologyReader::LinksSize() const [member function]
cls.add_method('LinksSize',
'int',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): ns3::NodeContainer ns3::TopologyReader::Read() [member function]
cls.add_method('Read',
'ns3::NodeContainer',
[],
is_pure_virtual=True, is_virtual=True)
## topology-reader.h (module 'topology-read'): void ns3::TopologyReader::SetFileName(std::string const & fileName) [member function]
cls.add_method('SetFileName',
'void',
[param('std::string const &', 'fileName')])
return
def register_Ns3TopologyReaderLink_methods(root_module, cls):
## topology-reader.h (module 'topology-read'): ns3::TopologyReader::Link::Link(ns3::TopologyReader::Link const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TopologyReader::Link const &', 'arg0')])
## topology-reader.h (module 'topology-read'): ns3::TopologyReader::Link::Link(ns3::Ptr<ns3::Node> fromPtr, std::string const & fromName, ns3::Ptr<ns3::Node> toPtr, std::string const & toName) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'fromPtr'), param('std::string const &', 'fromName'), param('ns3::Ptr< ns3::Node >', 'toPtr'), param('std::string const &', 'toName')])
## topology-reader.h (module 'topology-read'): std::_Rb_tree_const_iterator<std::pair<const std::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::basic_string<char, std::char_traits<char>, std::allocator<char> > > > ns3::TopologyReader::Link::AttributesBegin() const [member function]
cls.add_method('AttributesBegin',
'std::_Rb_tree_const_iterator< std::pair< std::basic_string< char, std::char_traits< char >, std::allocator< char > > const, std::basic_string< char, std::char_traits< char >, std::allocator< char > > > >',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): std::_Rb_tree_const_iterator<std::pair<const std::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::basic_string<char, std::char_traits<char>, std::allocator<char> > > > ns3::TopologyReader::Link::AttributesEnd() const [member function]
cls.add_method('AttributesEnd',
'std::_Rb_tree_const_iterator< std::pair< std::basic_string< char, std::char_traits< char >, std::allocator< char > > const, std::basic_string< char, std::char_traits< char >, std::allocator< char > > > >',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): std::string ns3::TopologyReader::Link::GetAttribute(std::string const & name) const [member function]
cls.add_method('GetAttribute',
'std::string',
[param('std::string const &', 'name')],
is_const=True)
## topology-reader.h (module 'topology-read'): bool ns3::TopologyReader::Link::GetAttributeFailSafe(std::string const & name, std::string & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string const &', 'name'), param('std::string &', 'value')],
is_const=True)
## topology-reader.h (module 'topology-read'): ns3::Ptr<ns3::Node> ns3::TopologyReader::Link::GetFromNode() const [member function]
cls.add_method('GetFromNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): std::string ns3::TopologyReader::Link::GetFromNodeName() const [member function]
cls.add_method('GetFromNodeName',
'std::string',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): ns3::Ptr<ns3::Node> ns3::TopologyReader::Link::GetToNode() const [member function]
cls.add_method('GetToNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): std::string ns3::TopologyReader::Link::GetToNodeName() const [member function]
cls.add_method('GetToNodeName',
'std::string',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): void ns3::TopologyReader::Link::SetAttribute(std::string const & name, std::string const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string const &', 'name'), param('std::string const &', 'value')])
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
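# Editor's note (not generated output): AttributeAccessor, AttributeChecker
# and AttributeValue registered above form ns-3's attribute triad. The
# concrete value/checker pairs that follow (CallbackValue, Ipv4AddressValue,
# TimeValue, TypeIdValue, AddressValue, ...) override the pure-virtual
# Copy/SerializeToString/DeserializeFromString interface declared here.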
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3InetTopologyReader_methods(root_module, cls):
## inet-topology-reader.h (module 'topology-read'): static ns3::TypeId ns3::InetTopologyReader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## inet-topology-reader.h (module 'topology-read'): ns3::InetTopologyReader::InetTopologyReader() [constructor]
cls.add_constructor([])
## inet-topology-reader.h (module 'topology-read'): ns3::NodeContainer ns3::InetTopologyReader::Read() [member function]
cls.add_method('Read',
'ns3::NodeContainer',
[],
is_virtual=True)
return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Address const &', 'value')])
return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Mask const &', 'value')])
return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Address const &', 'value')])
return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Prefix const &', 'value')])
return
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
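# Editor's note: apart from the static GetTypeId, every NetDevice member above
# is registered with is_pure_virtual=True, so this binding exposes NetDevice
# as an abstract interface; no concrete device class is registered in this
# topology-read module.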
def register_Ns3Node_methods(root_module, cls):
## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Node const &', 'arg0')])
## node.h (module 'network'): ns3::Node::Node() [constructor]
cls.add_constructor([])
## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
cls.add_constructor([param('uint32_t', 'systemId')])
## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
cls.add_method('AddApplication',
'uint32_t',
[param('ns3::Ptr< ns3::Application >', 'application')])
## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddDevice',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
cls.add_method('ChecksumEnabled',
'bool',
[],
is_static=True)
## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
cls.add_method('GetApplication',
'ns3::Ptr< ns3::Application >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
cls.add_method('GetNApplications',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('RegisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
cls.add_method('RegisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('UnregisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
cls.add_method('UnregisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3OrbisTopologyReader_methods(root_module, cls):
## orbis-topology-reader.h (module 'topology-read'): static ns3::TypeId ns3::OrbisTopologyReader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## orbis-topology-reader.h (module 'topology-read'): ns3::OrbisTopologyReader::OrbisTopologyReader() [constructor]
cls.add_constructor([])
## orbis-topology-reader.h (module 'topology-read'): ns3::NodeContainer ns3::OrbisTopologyReader::Read() [member function]
cls.add_method('Read',
'ns3::NodeContainer',
[],
is_virtual=True)
return
def register_Ns3RocketfuelTopologyReader_methods(root_module, cls):
## rocketfuel-topology-reader.h (module 'topology-read'): static ns3::TypeId ns3::RocketfuelTopologyReader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## rocketfuel-topology-reader.h (module 'topology-read'): ns3::RocketfuelTopologyReader::RocketfuelTopologyReader() [constructor]
cls.add_constructor([])
## rocketfuel-topology-reader.h (module 'topology-read'): ns3::NodeContainer ns3::RocketfuelTopologyReader::Read() [member function]
cls.add_method('Read',
'ns3::NodeContainer',
[],
is_virtual=True)
return
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
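# Usage sketch (assumption, reusing the hypothetical ns.core layout from the
# Time note above): TimeValue wraps a Time for the attribute system.
#
#   v = ns.core.TimeValue(ns.core.Time("1ms"))
#   v.Get().GetMilliSeconds()        # -> 1, via Get() and GetMilliSeconds()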
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def register_functions_ns3_TracedValueCallback(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
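# Usage sketch (an assumption, not part of the generated file): pybindgen
# module scripts like this one are normally driven by the ns-3 build system,
# but running the script directly makes main() write the generated C++
# binding code to stdout, e.g.:
#
#     python modulegen__gcc_ILP32.py > bindings.cc
#
# The file names above are illustrative only.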
|
gpl-2.0
|
bratsche/Neutron-Drive
|
google_appengine/lib/django_1_2/django/contrib/localflavor/za/forms.py
|
273
|
1908
|
"""
South Africa-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField
from django.utils.checksums import luhn
from django.utils.translation import gettext as _
import re
from datetime import date
id_re = re.compile(r'^(?P<yy>\d\d)(?P<mm>\d\d)(?P<dd>\d\d)(?P<mid>\d{4})(?P<end>\d{3})')
class ZAIDField(Field):
"""A form field for South African ID numbers -- the checksum is validated
    using the Luhn checksum, and a simplistic (read: not entirely accurate)
    check is applied to the birthdate
"""
default_error_messages = {
'invalid': _(u'Enter a valid South African ID number'),
}
def clean(self, value):
super(ZAIDField, self).clean(value)
if value in EMPTY_VALUES:
return u''
# strip spaces and dashes
value = value.strip().replace(' ', '').replace('-', '')
match = re.match(id_re, value)
if not match:
raise ValidationError(self.error_messages['invalid'])
g = match.groupdict()
try:
            # The year 2000 is conveniently a leap year.
            # This algorithm will break in xx00 years that aren't leap years.
# There is no way to guess the century of a ZA ID number
d = date(int(g['yy']) + 2000, int(g['mm']), int(g['dd']))
except ValueError:
raise ValidationError(self.error_messages['invalid'])
if not luhn(value):
raise ValidationError(self.error_messages['invalid'])
return value
class ZAPostCodeField(RegexField):
default_error_messages = {
'invalid': _(u'Enter a valid South African postal code'),
}
def __init__(self, *args, **kwargs):
super(ZAPostCodeField, self).__init__(r'^\d{4}$',
max_length=None, min_length=None, *args, **kwargs)
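# Usage sketch (not part of the original module; assumes configured Django
# settings). ZAIDField strips spaces/dashes, then validates both the embedded
# birthdate and the Luhn check digit:
#
#     field = ZAIDField()
#     field.clean(u'0002290001003')    # -> u'0002290001003'
#     field.clean(u'000229 0001 003')  # spaces stripped -> u'0002290001003'
#     field.clean(u'0001010001000')    # raises ValidationError (bad checksum)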
|
bsd-3-clause
|
google/importlab
|
importlab/import_finder.py
|
1
|
5424
|
# NOTE: Do not add any dependencies to this file - it needs to be run in a
# subprocess by a python version that might not have any installed packages,
# including importlab itself.
from __future__ import print_function
import ast
import json
import os
import sys
# Pytype doesn't recognize the `major` attribute:
# https://github.com/google/pytype/issues/127.
if sys.version_info[0] >= 3:
# Note that `import importlib` does not work: accessing `importlib.util`
# will give an attribute error. This is hard to reproduce in a unit test but
# can be seen by installing importlab in a Python 3 environment and running
# `importlab --tree --trim` on a file that imports one of:
# * jsonschema (`pip install jsonschema`)
    # * pytype (`pip install pytype`)
# * dotenv (`pip install python-dotenv`)
# * IPython (`pip install ipython`)
# A correct output will look like:
# Reading 1 files
# Source tree:
# + foo.py
# :: jsonschema/__init__.py
# An incorrect output will be missing the line with the import.
import importlib.util
else:
import imp
class ImportFinder(ast.NodeVisitor):
"""Walk an AST collecting import statements."""
def __init__(self):
# tuples of (name, alias, is_from, is_star)
self.imports = []
def visit_Import(self, node):
for alias in node.names:
self.imports.append((alias.name, alias.asname, False, False))
def visit_ImportFrom(self, node):
module_name = '.'*node.level + (node.module or '')
for alias in node.names:
if alias.name == '*':
self.imports.append((module_name, alias.asname, True, True))
else:
if not module_name.endswith('.'):
module_name = module_name + '.'
name = module_name + alias.name
asname = alias.asname or alias.name
self.imports.append((name, asname, True, False))
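# Illustrative sketch (not part of the original file): a small helper showing
# the (name, alias, is_from, is_star) tuples ImportFinder collects. The input
# snippet is hypothetical.
def _demo_import_finder():
    finder = ImportFinder()
    finder.visit(ast.parse('import os.path as p\nfrom json import loads'))
    # finder.imports == [('os.path', 'p', False, False),
    #                    ('json.loads', 'loads', True, False)]
    return finder.imports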
def _find_package(parts):
"""Helper function for _resolve_import_versioned."""
for i in range(len(parts), 0, -1):
prefix = '.'.join(parts[0:i])
if prefix in sys.modules:
return i, sys.modules[prefix]
return 0, None
def is_builtin(name):
return name in sys.builtin_module_names or name.startswith("__future__")
# Pytype doesn't recognize the `major` attribute:
# https://github.com/google/pytype/issues/127.
if sys.version_info[0] < 3:
def _resolve_import_versioned(name):
"""Python 2 helper function for resolve_import."""
parts = name.split('.')
i, mod = _find_package(parts)
        if mod:
            # Keep `path` as a single directory string so the
            # `imp.find_module(part, [path])` call below always receives a
            # list of strings.
            if hasattr(mod, '__file__'):
                path = os.path.dirname(mod.__file__)
            elif hasattr(mod, '__path__'):
                path = mod.__path__[0]
            else:
                path = None
else:
path = None
for part in parts[i:]:
try:
if path:
spec = imp.find_module(part, [path])
else:
spec = imp.find_module(part)
except ImportError:
return None
path = spec[1]
return path
else:
def _resolve_import_versioned(name):
"""Python 3 helper function for resolve_import."""
try:
spec = importlib.util.find_spec(name)
return spec and spec.origin
except Exception:
# find_spec may re-raise an arbitrary exception encountered while
# inspecting a module. Since we aren't able to get the file path in
# this case, we consider the import unresolved.
return None
def _resolve_import(name):
"""Helper function for resolve_import."""
if name in sys.modules:
return getattr(sys.modules[name], '__file__', name + '.so')
return _resolve_import_versioned(name)
def resolve_import(name, is_from, is_star):
"""Use python to resolve an import.
Args:
      name: The fully qualified module name.
      is_from: Whether the name comes from a `from ... import` statement.
      is_star: Whether the import is a `from ... import *`.
Returns:
The path to the module source file or None.
"""
# Don't try to resolve relative imports or builtins here; they will be
# handled by resolve.Resolver
if name.startswith('.') or is_builtin(name):
return None
ret = _resolve_import(name)
if ret is None and is_from and not is_star:
package, _ = name.rsplit('.', 1)
ret = _resolve_import(package)
return ret
def get_imports(filename):
"""Get all the imports in a file.
Each import is a tuple of:
(name, alias, is_from, is_star, source_file)
"""
with open(filename, "rb") as f:
src = f.read()
finder = ImportFinder()
finder.visit(ast.parse(src, filename=filename))
imports = []
for i in finder.imports:
name, _, is_from, is_star = i
imports.append(i + (resolve_import(name, is_from, is_star),))
return imports
def print_imports(filename):
"""Print imports in csv format to stdout."""
print(json.dumps(get_imports(filename)))
def read_imports(imports_str):
"""Print imports in csv format to stdout."""
return json.loads(imports_str)
if __name__ == "__main__":
# This is used to parse a file with a different python version, launching a
# subprocess and communicating with it via reading stdout.
filename = sys.argv[1]
print_imports(filename)
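# Sketch of that cross-version protocol from the host side (based on the
# comment above; the interpreter and file names are hypothetical):
#
#     out = subprocess.check_output(['python2', 'import_finder.py', 'foo.py'])
#     imports = read_imports(out.decode('utf-8'))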
|
apache-2.0
|
shakalaca/ASUS_ZenFone_A450CG
|
external/chromium_org/third_party/mesa/src/src/gallium/drivers/svga/svgadump/svga_dump.py
|
50
|
11879
|
#!/usr/bin/env python
'''
Generates dumper for the SVGA 3D command stream using pygccxml.
Jose Fonseca <jfonseca@vmware.com>
'''
copyright = '''
/**********************************************************
* Copyright 2009 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
'''
import os
import sys
from pygccxml import parser
from pygccxml import declarations
from pygccxml.declarations import algorithm
from pygccxml.declarations import decl_visitor
from pygccxml.declarations import type_traits
from pygccxml.declarations import type_visitor
enums = True
class decl_dumper_t(decl_visitor.decl_visitor_t):
def __init__(self, instance = '', decl = None):
decl_visitor.decl_visitor_t.__init__(self)
self._instance = instance
self.decl = decl
def clone(self):
return decl_dumper_t(self._instance, self.decl)
def visit_class(self):
class_ = self.decl
assert self.decl.class_type in ('struct', 'union')
for variable in class_.variables():
if variable.name != '':
#print 'variable = %r' % variable.name
dump_type(self._instance + '.' + variable.name, variable.type)
def visit_enumeration(self):
if enums:
print ' switch(%s) {' % ("(*cmd)" + self._instance,)
for name, value in self.decl.values:
print ' case %s:' % (name,)
print ' _debug_printf("\\t\\t%s = %s\\n");' % (self._instance, name)
print ' break;'
print ' default:'
print ' _debug_printf("\\t\\t%s = %%i\\n", %s);' % (self._instance, "(*cmd)" + self._instance)
print ' break;'
print ' }'
else:
print ' _debug_printf("\\t\\t%s = %%i\\n", %s);' % (self._instance, "(*cmd)" + self._instance)
def dump_decl(instance, decl):
dumper = decl_dumper_t(instance, decl)
algorithm.apply_visitor(dumper, decl)
class type_dumper_t(type_visitor.type_visitor_t):
def __init__(self, instance, type_):
type_visitor.type_visitor_t.__init__(self)
self.instance = instance
self.type = type_
def clone(self):
return type_dumper_t(self.instance, self.type)
def visit_char(self):
self.print_instance('%i')
def visit_unsigned_char(self):
self.print_instance('%u')
def visit_signed_char(self):
self.print_instance('%i')
def visit_wchar(self):
self.print_instance('%i')
def visit_short_int(self):
self.print_instance('%i')
def visit_short_unsigned_int(self):
self.print_instance('%u')
def visit_bool(self):
self.print_instance('%i')
def visit_int(self):
self.print_instance('%i')
def visit_unsigned_int(self):
self.print_instance('%u')
def visit_long_int(self):
self.print_instance('%li')
def visit_long_unsigned_int(self):
self.print_instance('%lu')
def visit_long_long_int(self):
self.print_instance('%lli')
def visit_long_long_unsigned_int(self):
self.print_instance('%llu')
def visit_float(self):
self.print_instance('%f')
def visit_double(self):
self.print_instance('%f')
def visit_array(self):
for i in range(type_traits.array_size(self.type)):
dump_type(self.instance + '[%i]' % i, type_traits.base_type(self.type))
def visit_pointer(self):
self.print_instance('%p')
def visit_declarated(self):
#print 'decl = %r' % self.type.decl_string
decl = type_traits.remove_declarated(self.type)
dump_decl(self.instance, decl)
def print_instance(self, format):
print ' _debug_printf("\\t\\t%s = %s\\n", %s);' % (self.instance, format, "(*cmd)" + self.instance)
def dump_type(instance, type_):
type_ = type_traits.remove_alias(type_)
visitor = type_dumper_t(instance, type_)
algorithm.apply_visitor(visitor, type_)
def dump_struct(decls, class_):
print 'static void'
print 'dump_%s(const %s *cmd)' % (class_.name, class_.name)
print '{'
dump_decl('', class_)
print '}'
print ''
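# Illustrative note (not part of the original script): for a hypothetical
# struct with a single uint32 member named "width", dump_struct() emits a C
# helper along these lines:
#
#    static void
#    dump_SVGA3dFoo(const SVGA3dFoo *cmd)
#    {
#       _debug_printf("\t\t.width = %u\n", (*cmd).width);
#    }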
cmds = [
('SVGA_3D_CMD_SURFACE_DEFINE', 'SVGA3dCmdDefineSurface', (), 'SVGA3dSize'),
('SVGA_3D_CMD_SURFACE_DESTROY', 'SVGA3dCmdDestroySurface', (), None),
('SVGA_3D_CMD_SURFACE_COPY', 'SVGA3dCmdSurfaceCopy', (), 'SVGA3dCopyBox'),
('SVGA_3D_CMD_SURFACE_STRETCHBLT', 'SVGA3dCmdSurfaceStretchBlt', (), None),
('SVGA_3D_CMD_SURFACE_DMA', 'SVGA3dCmdSurfaceDMA', (), 'SVGA3dCopyBox'),
('SVGA_3D_CMD_CONTEXT_DEFINE', 'SVGA3dCmdDefineContext', (), None),
('SVGA_3D_CMD_CONTEXT_DESTROY', 'SVGA3dCmdDestroyContext', (), None),
('SVGA_3D_CMD_SETTRANSFORM', 'SVGA3dCmdSetTransform', (), None),
('SVGA_3D_CMD_SETZRANGE', 'SVGA3dCmdSetZRange', (), None),
('SVGA_3D_CMD_SETRENDERSTATE', 'SVGA3dCmdSetRenderState', (), 'SVGA3dRenderState'),
('SVGA_3D_CMD_SETRENDERTARGET', 'SVGA3dCmdSetRenderTarget', (), None),
('SVGA_3D_CMD_SETTEXTURESTATE', 'SVGA3dCmdSetTextureState', (), 'SVGA3dTextureState'),
('SVGA_3D_CMD_SETMATERIAL', 'SVGA3dCmdSetMaterial', (), None),
('SVGA_3D_CMD_SETLIGHTDATA', 'SVGA3dCmdSetLightData', (), None),
('SVGA_3D_CMD_SETLIGHTENABLED', 'SVGA3dCmdSetLightEnabled', (), None),
('SVGA_3D_CMD_SETVIEWPORT', 'SVGA3dCmdSetViewport', (), None),
('SVGA_3D_CMD_SETCLIPPLANE', 'SVGA3dCmdSetClipPlane', (), None),
('SVGA_3D_CMD_CLEAR', 'SVGA3dCmdClear', (), 'SVGA3dRect'),
('SVGA_3D_CMD_PRESENT', 'SVGA3dCmdPresent', (), 'SVGA3dCopyRect'),
('SVGA_3D_CMD_SHADER_DEFINE', 'SVGA3dCmdDefineShader', (), None),
('SVGA_3D_CMD_SHADER_DESTROY', 'SVGA3dCmdDestroyShader', (), None),
('SVGA_3D_CMD_SET_SHADER', 'SVGA3dCmdSetShader', (), None),
('SVGA_3D_CMD_SET_SHADER_CONST', 'SVGA3dCmdSetShaderConst', (), None),
('SVGA_3D_CMD_DRAW_PRIMITIVES', 'SVGA3dCmdDrawPrimitives', (('SVGA3dVertexDecl', 'numVertexDecls'), ('SVGA3dPrimitiveRange', 'numRanges')), 'SVGA3dVertexDivisor'),
('SVGA_3D_CMD_SETSCISSORRECT', 'SVGA3dCmdSetScissorRect', (), None),
('SVGA_3D_CMD_BEGIN_QUERY', 'SVGA3dCmdBeginQuery', (), None),
('SVGA_3D_CMD_END_QUERY', 'SVGA3dCmdEndQuery', (), None),
('SVGA_3D_CMD_WAIT_FOR_QUERY', 'SVGA3dCmdWaitForQuery', (), None),
#('SVGA_3D_CMD_PRESENT_READBACK', None, (), None),
('SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN', 'SVGA3dCmdBlitSurfaceToScreen', (), 'SVGASignedRect'),
]
def dump_cmds():
print r'''
void
svga_dump_command(uint32_t cmd_id, const void *data, uint32_t size)
{
const uint8_t *body = (const uint8_t *)data;
const uint8_t *next = body + size;
'''
print ' switch(cmd_id) {'
indexes = 'ijklmn'
for id, header, body, footer in cmds:
print ' case %s:' % id
print ' _debug_printf("\\t%s\\n");' % id
print ' {'
print ' const %s *cmd = (const %s *)body;' % (header, header)
if len(body):
print ' unsigned ' + ', '.join(indexes[:len(body)]) + ';'
print ' dump_%s(cmd);' % header
print ' body = (const uint8_t *)&cmd[1];'
for i in range(len(body)):
struct, count = body[i]
idx = indexes[i]
print ' for(%s = 0; %s < cmd->%s; ++%s) {' % (idx, idx, count, idx)
print ' dump_%s((const %s *)body);' % (struct, struct)
print ' body += sizeof(%s);' % struct
print ' }'
if footer is not None:
print ' while(body + sizeof(%s) <= next) {' % footer
print ' dump_%s((const %s *)body);' % (footer, footer)
print ' body += sizeof(%s);' % footer
print ' }'
if id == 'SVGA_3D_CMD_SHADER_DEFINE':
print ' svga_shader_dump((const uint32_t *)body,'
print ' (unsigned)(next - body)/sizeof(uint32_t),'
print ' FALSE);'
print ' body = next;'
print ' }'
print ' break;'
print ' default:'
print ' _debug_printf("\\t0x%08x\\n", cmd_id);'
print ' break;'
print ' }'
print r'''
while(body + sizeof(uint32_t) <= next) {
_debug_printf("\t\t0x%08x\n", *(const uint32_t *)body);
body += sizeof(uint32_t);
}
while(body + sizeof(uint32_t) <= next)
_debug_printf("\t\t0x%02x\n", *body++);
}
'''
print r'''
void
svga_dump_commands(const void *commands, uint32_t size)
{
const uint8_t *next = commands;
const uint8_t *last = next + size;
assert(size % sizeof(uint32_t) == 0);
while(next < last) {
const uint32_t cmd_id = *(const uint32_t *)next;
if(SVGA_3D_CMD_BASE <= cmd_id && cmd_id < SVGA_3D_CMD_MAX) {
const SVGA3dCmdHeader *header = (const SVGA3dCmdHeader *)next;
const uint8_t *body = (const uint8_t *)&header[1];
next = body + header->size;
if(next > last)
break;
svga_dump_command(cmd_id, body, header->size);
}
else if(cmd_id == SVGA_CMD_FENCE) {
_debug_printf("\tSVGA_CMD_FENCE\n");
_debug_printf("\t\t0x%08x\n", ((const uint32_t *)next)[1]);
next += 2*sizeof(uint32_t);
}
else {
_debug_printf("\t0x%08x\n", cmd_id);
next += sizeof(uint32_t);
}
}
}
'''
def main():
print copyright.strip()
print
print '/**'
print ' * @file'
print ' * Dump SVGA commands.'
print ' *'
print ' * Generated automatically from svga3d_reg.h by svga_dump.py.'
print ' */'
print
print '#include "svga_types.h"'
print '#include "svga_shader_dump.h"'
print '#include "svga3d_reg.h"'
print
print '#include "util/u_debug.h"'
print '#include "svga_dump.h"'
print
config = parser.config_t(
include_paths = ['../../../include', '../include'],
compiler = 'gcc',
)
headers = [
'svga_types.h',
'svga3d_reg.h',
]
decls = parser.parse(headers, config, parser.COMPILATION_MODE.ALL_AT_ONCE)
global_ns = declarations.get_global_namespace(decls)
names = set()
for id, header, body, footer in cmds:
names.add(header)
for struct, count in body:
names.add(struct)
if footer is not None:
names.add(footer)
for class_ in global_ns.classes(lambda decl: decl.name in names):
dump_struct(decls, class_)
dump_cmds()
if __name__ == '__main__':
main()
|
gpl-2.0
|
automl/paramsklearn
|
tests/test_classification.py
|
1
|
31256
|
import os
import resource
import sys
import traceback
import unittest
import mock
import numpy as np
import sklearn.datasets
import sklearn.decomposition
import sklearn.cross_validation
import sklearn.ensemble
import sklearn.svm
import sklearn.metrics  # used by test_default_configuration's accuracy check
from sklearn.utils.testing import assert_array_almost_equal
from HPOlibConfigSpace.configuration_space import ConfigurationSpace, \
Configuration
from HPOlibConfigSpace.hyperparameters import CategoricalHyperparameter
from ParamSklearn.classification import ParamSklearnClassifier
from ParamSklearn.components.base import ParamSklearnClassificationAlgorithm
from ParamSklearn.components.base import ParamSklearnPreprocessingAlgorithm
import ParamSklearn.components.classification as classification_components
import ParamSklearn.components.feature_preprocessing as preprocessing_components
from ParamSklearn.util import get_dataset
from ParamSklearn.constants import *
class TestParamSklearnClassifier(unittest.TestCase):
def test_io_dict(self):
classifiers = classification_components._classifiers
for c in classifiers:
if classifiers[c] == classification_components.ClassifierChoice:
continue
props = classifiers[c].get_properties()
self.assertIn('input', props)
self.assertIn('output', props)
inp = props['input']
output = props['output']
self.assertIsInstance(inp, tuple)
self.assertIsInstance(output, tuple)
for i in inp:
self.assertIn(i, (SPARSE, DENSE, SIGNED_DATA, UNSIGNED_DATA))
self.assertEqual(output, (PREDICTIONS,))
self.assertIn('handles_regression', props)
self.assertFalse(props['handles_regression'])
self.assertIn('handles_classification', props)
self.assertIn('handles_multiclass', props)
self.assertIn('handles_multilabel', props)
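        # Illustrative sketch (not from the original tests) of the properties
        # contract asserted above; concrete values vary per component:
        #
        #     {'input': (DENSE, SPARSE, UNSIGNED_DATA),
        #      'output': (PREDICTIONS,),
        #      'handles_regression': False,
        #      'handles_classification': True,
        #      ...}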
def test_find_classifiers(self):
classifiers = classification_components._classifiers
self.assertGreaterEqual(len(classifiers), 2)
for key in classifiers:
if hasattr(classifiers[key], 'get_components'):
continue
self.assertIn(ParamSklearnClassificationAlgorithm,
classifiers[key].__bases__)
def test_find_preprocessors(self):
preprocessors = preprocessing_components._preprocessors
self.assertGreaterEqual(len(preprocessors), 1)
for key in preprocessors:
if hasattr(preprocessors[key], 'get_components'):
continue
self.assertIn(ParamSklearnPreprocessingAlgorithm,
preprocessors[key].__bases__)
def test_default_configuration(self):
for i in range(2):
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
default = cs.get_default_configuration()
X_train, Y_train, X_test, Y_test = get_dataset(dataset='iris')
auto = ParamSklearnClassifier(default)
auto = auto.fit(X_train, Y_train)
predictions = auto.predict(X_test)
self.assertAlmostEqual(0.9599999999999995,
sklearn.metrics.accuracy_score(predictions, Y_test))
scores = auto.predict_proba(X_test)
def test_repr(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
default = cs.get_default_configuration()
representation = repr(ParamSklearnClassifier(default))
cls = eval(representation)
self.assertIsInstance(cls, ParamSklearnClassifier)
def test_multilabel(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
dataset_properties = {'multilabel': True}
cs = ParamSklearnClassifier.get_hyperparameter_search_space(dataset_properties=dataset_properties)
print(cs)
cs.seed(5)
for i in range(50):
X, Y = sklearn.datasets.\
make_multilabel_classification(n_samples=150,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=True,
sparse=False,
return_indicator=True,
return_distributions=False,
random_state=1)
X_train = X[:100, :]
Y_train = Y[:100, :]
X_test = X[101:, :]
Y_test = Y[101:, ]
config = cs.sample_configuration()
config._populate_values()
if 'classifier:passive_aggressive:n_iter' in config:
config._values['classifier:passive_aggressive:n_iter'] = 5
if 'classifier:sgd:n_iter' in config:
config._values['classifier:sgd:n_iter'] = 5
cls = ParamSklearnClassifier(config, random_state=1)
print(config)
try:
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
predictions = cls.predict(X_test)
self.assertIsInstance(predictions, np.ndarray)
predicted_probabilities = cls.predict_proba(X_test_)
[self.assertIsInstance(i, np.ndarray) for i in predicted_probabilities]
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except MemoryError as e:
continue
def test_configurations(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
print(cs)
cs.seed(1)
for i in range(10):
config = cs.sample_configuration()
config._populate_values()
if config['classifier:passive_aggressive:n_iter'] is not None:
config._values['classifier:passive_aggressive:n_iter'] = 5
if config['classifier:sgd:n_iter'] is not None:
config._values['classifier:sgd:n_iter'] = 5
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
cls = ParamSklearnClassifier(config, random_state=1)
print(config)
try:
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
predictions = cls.predict(X_test)
self.assertIsInstance(predictions, np.ndarray)
                predicted_probabilities = cls.predict_proba(X_test_)
                self.assertIsInstance(predicted_probabilities, np.ndarray)
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except MemoryError as e:
continue
def test_configurations_signed_data(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'signed': True})
print(cs)
for i in range(10):
config = cs.sample_configuration()
config._populate_values()
if config['classifier:passive_aggressive:n_iter'] is not None:
config._values['classifier:passive_aggressive:n_iter'] = 5
if config['classifier:sgd:n_iter'] is not None:
config._values['classifier:sgd:n_iter'] = 5
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
cls = ParamSklearnClassifier(config, random_state=1)
print(config)
try:
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
predictions = cls.predict(X_test)
self.assertIsInstance(predictions, np.ndarray)
                predicted_probabilities = cls.predict_proba(X_test_)
                self.assertIsInstance(predicted_probabilities, np.ndarray)
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except MemoryError as e:
continue
def test_configurations_sparse(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
print(cs)
for i in range(10):
config = cs.sample_configuration()
config._populate_values()
if config['classifier:passive_aggressive:n_iter'] is not None:
config._values['classifier:passive_aggressive:n_iter'] = 5
if config['classifier:sgd:n_iter'] is not None:
config._values['classifier:sgd:n_iter'] = 5
print(config)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=True)
cls = ParamSklearnClassifier(config, random_state=1)
try:
cls.fit(X_train, Y_train)
predictions = cls.predict(X_test)
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0]:
continue
else:
print(config)
traceback.print_tb(sys.exc_info()[2])
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
raise e
def test_configurations_categorical_data(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
print(cs)
for i in range(10):
config = cs.sample_configuration()
config._populate_values()
if config['classifier:passive_aggressive:n_iter'] is not None:
config._values['classifier:passive_aggressive:n_iter'] = 5
if config['classifier:sgd:n_iter'] is not None:
config._values['classifier:sgd:n_iter'] = 5
print(config)
categorical = [True, True, True, False, False, True, True, True,
False, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, False,
False, False, True, True, True]
this_directory = os.path.dirname(__file__)
X = np.loadtxt(os.path.join(this_directory, "components",
"data_preprocessing", "dataset.pkl"))
y = X[:, -1].copy()
X = X[:,:-1]
X_train, X_test, Y_train, Y_test = \
sklearn.cross_validation.train_test_split(X, y)
cls = ParamSklearnClassifier(config, random_state=1,)
try:
cls.fit(X_train, Y_train,
init_params={'one_hot_encoding:categorical_features': categorical})
predictions = cls.predict(X_test)
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0]:
continue
else:
print(config)
traceback.print_tb(sys.exc_info()[2])
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
raise e
def test_get_hyperparameter_search_space(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
self.assertIsInstance(cs, ConfigurationSpace)
conditions = cs.get_conditions()
self.assertEqual(len(cs.get_hyperparameter(
'rescaling:__choice__').choices), 4)
self.assertEqual(len(cs.get_hyperparameter(
'classifier:__choice__').choices), 16)
self.assertEqual(len(cs.get_hyperparameter(
'preprocessor:__choice__').choices), 14)
hyperparameters = cs.get_hyperparameters()
self.assertEqual(145, len(hyperparameters))
        #for hp in sorted([str(h) for h in hyperparameters]):
        #    print hp
        # The always-active hyperparameters (classifier, preprocessor,
        # imputation strategy, rescaling strategy, etc.) carry no conditions,
        # hence the offset of 6 below.
self.assertEqual(len(hyperparameters) - 6, len(conditions))
def test_get_hyperparameter_search_space_include_exclude_models(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
include={'classifier': ['libsvm_svc']})
self.assertEqual(cs.get_hyperparameter('classifier:__choice__'),
CategoricalHyperparameter('classifier:__choice__', ['libsvm_svc']))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
exclude={'classifier': ['libsvm_svc']})
self.assertNotIn('libsvm_svc', str(cs))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
include={'preprocessor': ['select_percentile_classification']})
self.assertEqual(cs.get_hyperparameter('preprocessor:__choice__'),
CategoricalHyperparameter('preprocessor:__choice__',
['select_percentile_classification']))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
exclude={'preprocessor': ['select_percentile_classification']})
self.assertNotIn('select_percentile_classification', str(cs))
def test_get_hyperparameter_search_space_preprocessor_contradicts_default_classifier(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
include={'preprocessor': ['densifier']},
dataset_properties={'sparse': True})
self.assertEqual(cs.get_hyperparameter('classifier:__choice__').default,
'qda')
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
include={'preprocessor': ['nystroem_sampler']})
self.assertEqual(cs.get_hyperparameter('classifier:__choice__').default,
'sgd')
def test_get_hyperparameter_search_space_only_forbidden_combinations(self):
self.assertRaisesRegexp(AssertionError, "No valid pipeline found.",
ParamSklearnClassifier.get_hyperparameter_search_space,
include={'classifier': ['multinomial_nb'],
'preprocessor': ['pca']},
dataset_properties={'sparse':True})
        # It must also be caught that no classifier that can handle sparse
        # data is located behind the densifier.
self.assertRaisesRegexp(ValueError, "Cannot find a legal default "
"configuration.",
ParamSklearnClassifier.get_hyperparameter_search_space,
include={'classifier': ['liblinear_svc'],
'preprocessor': ['densifier']},
dataset_properties={'sparse': True})
@unittest.skip("Wait until HPOlibConfigSpace is fixed.")
def test_get_hyperparameter_search_space_dataset_properties(self):
cs_mc = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'multiclass': True})
self.assertNotIn('bernoulli_nb', str(cs_mc))
cs_ml = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'multilabel': True})
self.assertNotIn('k_nearest_neighbors', str(cs_ml))
self.assertNotIn('liblinear', str(cs_ml))
self.assertNotIn('libsvm_svc', str(cs_ml))
self.assertNotIn('sgd', str(cs_ml))
cs_sp = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
self.assertIn('extra_trees', str(cs_sp))
self.assertIn('gradient_boosting', str(cs_sp))
self.assertIn('random_forest', str(cs_sp))
cs_mc_ml = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'multilabel': True, 'multiclass': True})
self.assertEqual(cs_ml, cs_mc_ml)
def test_predict_batched(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
default = cs.get_default_configuration()
cls = ParamSklearnClassifier(default)
# Multiclass
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_)
cls.pipeline_ = cls_predict
prediction = cls.predict(X_test, batch_size=20)
self.assertEqual((1647,), prediction.shape)
self.assertEqual(83, cls_predict.predict.call_count)
assert_array_almost_equal(prediction_, prediction)
# Multilabel
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
Y_train = np.array([(y, 26 - y) for y in Y_train])
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_)
cls.pipeline_ = cls_predict
prediction = cls.predict(X_test, batch_size=20)
self.assertEqual((1647, 2), prediction.shape)
self.assertEqual(83, cls_predict.predict.call_count)
assert_array_almost_equal(prediction_, prediction)
def test_predict_batched_sparse(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
config = Configuration(cs,
values={"balancing:strategy": "none",
"classifier:__choice__": "random_forest",
"imputation:strategy": "mean",
"one_hot_encoding:minimum_fraction": 0.01,
"one_hot_encoding:use_minimum_fraction": "True",
"preprocessor:__choice__": "no_preprocessing",
'classifier:random_forest:bootstrap': 'True',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:min_samples_split': 2,
'classifier:random_forest:min_samples_leaf': 2,
'classifier:random_forest:max_features': 0.5,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:n_estimators': 100,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
"rescaling:__choice__": "min/max"})
cls = ParamSklearnClassifier(config)
# Multiclass
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=True)
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_)
cls.pipeline_ = cls_predict
prediction = cls.predict(X_test, batch_size=20)
self.assertEqual((1647,), prediction.shape)
self.assertEqual(83, cls_predict.predict.call_count)
assert_array_almost_equal(prediction_, prediction)
# Multilabel
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=True)
Y_train = np.array([(y, 26 - y) for y in Y_train])
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_)
cls.pipeline_ = cls_predict
prediction = cls.predict(X_test, batch_size=20)
self.assertEqual((1647, 2), prediction.shape)
self.assertEqual(83, cls_predict.predict.call_count)
assert_array_almost_equal(prediction_, prediction)
def test_predict_proba_batched(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
default = cs.get_default_configuration()
# Multiclass
cls = ParamSklearnClassifier(default)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict_proba(X_test_)
# The object behind the last step in the pipeline
cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
cls.pipeline_.steps[-1] = ("estimator", cls_predict)
prediction = cls.predict_proba(X_test, batch_size=20)
self.assertEqual((1647, 10), prediction.shape)
self.assertEqual(84, cls_predict.predict_proba.call_count)
assert_array_almost_equal(prediction_, prediction)
# Multilabel
cls = ParamSklearnClassifier(default)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
Y_train = np.array([(y, 26 - y) for y in Y_train])
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict_proba(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
cls.pipeline_.steps[-1] = ("estimator", cls_predict)
prediction = cls.predict_proba(X_test, batch_size=20)
self.assertIsInstance(prediction, list)
self.assertEqual(2, len(prediction))
self.assertEqual((1647, 10), prediction[0].shape)
self.assertEqual((1647, 10), prediction[1].shape)
self.assertEqual(84, cls_predict.predict_proba.call_count)
assert_array_almost_equal(prediction_, prediction)
def test_predict_proba_batched_sparse(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
config = Configuration(cs,
values={"balancing:strategy": "none",
"classifier:__choice__": "random_forest",
"imputation:strategy": "mean",
"one_hot_encoding:minimum_fraction": 0.01,
"one_hot_encoding:use_minimum_fraction": 'True',
"preprocessor:__choice__": "no_preprocessing",
'classifier:random_forest:bootstrap': 'True',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:min_samples_split': 2,
'classifier:random_forest:min_samples_leaf': 2,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:max_features': 0.5,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:n_estimators': 100,
"rescaling:__choice__": "min/max"})
# Multiclass
cls = ParamSklearnClassifier(config)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=True)
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict_proba(X_test_)
# The object behind the last step in the pipeline
cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
cls.pipeline_.steps[-1] = ("estimator", cls_predict)
prediction = cls.predict_proba(X_test, batch_size=20)
self.assertEqual((1647, 10), prediction.shape)
self.assertEqual(84, cls_predict.predict_proba.call_count)
assert_array_almost_equal(prediction_, prediction)
# Multilabel
cls = ParamSklearnClassifier(config)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=True)
Y_train = np.array([(y, 26 - y) for y in Y_train])
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict_proba(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
cls.pipeline_.steps[-1] = ("estimator", cls_predict)
prediction = cls.predict_proba(X_test, batch_size=20)
self.assertIsInstance(prediction, list)
self.assertEqual(2, len(prediction))
self.assertEqual((1647, 10), prediction[0].shape)
self.assertEqual((1647, 10), prediction[1].shape)
self.assertEqual(84, cls_predict.predict_proba.call_count)
assert_array_almost_equal(prediction_, prediction)
@unittest.skip("test_check_random_state Not yet Implemented")
def test_check_random_state(self):
raise NotImplementedError()
@unittest.skip("test_validate_input_X Not yet Implemented")
def test_validate_input_X(self):
raise NotImplementedError()
@unittest.skip("test_validate_input_Y Not yet Implemented")
def test_validate_input_Y(self):
raise NotImplementedError()
def test_set_params(self):
pass
def test_get_params(self):
pass
|
bsd-3-clause
|
Drvanon/Game
|
venv/lib/python3.3/site-packages/sqlalchemy/sql/visitors.py
|
1
|
10003
|
# sql/visitors.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Visitor/traversal interface and library functions.
SQLAlchemy schema and expression constructs rely on a Python-centric
version of the classic "visitor" pattern as the primary way in which
they apply functionality. The most common use of this pattern
is statement compilation, where individual expression classes match
up to rendering methods that produce a string result. Beyond this,
the visitor system is also used to inspect expressions for various
information and patterns, as well as for usage in
some kinds of expression transformation. Other kinds of transformation
use a non-visitor traversal system.
For many examples of how the visit system is used, see the
sqlalchemy.sql.util and the sqlalchemy.sql.compiler modules.
For an introduction to clause adaptation, see
http://techspot.zzzeek.org/2008/01/23/expression-transformations/
"""
from collections import deque
from .. import util
import operator
from .. import exc
__all__ = ['VisitableType', 'Visitable', 'ClauseVisitor',
'CloningVisitor', 'ReplacingCloningVisitor', 'iterate',
'iterate_depthfirst', 'traverse_using', 'traverse',
'cloned_traverse', 'replacement_traverse']
class VisitableType(type):
"""Metaclass which assigns a `_compiler_dispatch` method to classes
having a `__visit_name__` attribute.
The _compiler_dispatch attribute becomes an instance method which
looks approximately like the following::
        def _compiler_dispatch(self, visitor, **kw):
'''Look for an attribute named "visit_" + self.__visit_name__
on the visitor, and call it with the same kw params.'''
visit_attr = 'visit_%s' % self.__visit_name__
return getattr(visitor, visit_attr)(self, **kw)
Classes having no __visit_name__ attribute will remain unaffected.
"""
def __init__(cls, clsname, bases, clsdict):
if cls.__name__ == 'Visitable' or not hasattr(cls, '__visit_name__'):
super(VisitableType, cls).__init__(clsname, bases, clsdict)
return
_generate_dispatch(cls)
super(VisitableType, cls).__init__(clsname, bases, clsdict)
def _generate_dispatch(cls):
"""Return an optimized visit dispatch function for the cls
for use by the compiler.
"""
if '__visit_name__' in cls.__dict__:
visit_name = cls.__visit_name__
if isinstance(visit_name, str):
            # There is an optimization opportunity here because the
            # string name of the class's __visit_name__ is known at
# this early stage (import time) so it can be pre-constructed.
getter = operator.attrgetter("visit_%s" % visit_name)
def _compiler_dispatch(self, visitor, **kw):
try:
meth = getter(visitor)
except AttributeError:
raise exc.UnsupportedCompilationError(visitor, cls)
else:
return meth(self, **kw)
else:
# The optimization opportunity is lost for this case because the
# __visit_name__ is not yet a string. As a result, the visit
# string has to be recalculated with each compilation.
def _compiler_dispatch(self, visitor, **kw):
visit_attr = 'visit_%s' % self.__visit_name__
try:
meth = getattr(visitor, visit_attr)
except AttributeError:
raise exc.UnsupportedCompilationError(visitor, cls)
else:
return meth(self, **kw)
_compiler_dispatch.__doc__ = \
"""Look for an attribute named "visit_" + self.__visit_name__
on the visitor, and call it with the same kw params.
"""
cls._compiler_dispatch = _compiler_dispatch
class Visitable(object, metaclass=VisitableType):
"""Base class for visitable objects, applies the
``VisitableType`` metaclass.
"""
class ClauseVisitor(object):
"""Base class for visitor objects which can traverse using
the traverse() function.
"""
__traverse_options__ = {}
def traverse_single(self, obj, **kw):
for v in self._visitor_iterator:
meth = getattr(v, "visit_%s" % obj.__visit_name__, None)
if meth:
return meth(obj, **kw)
def iterate(self, obj):
"""traverse the given expression structure, returning an iterator
of all elements.
"""
return iterate(obj, self.__traverse_options__)
def traverse(self, obj):
"""traverse and visit the given expression structure."""
return traverse(obj, self.__traverse_options__, self._visitor_dict)
@util.memoized_property
def _visitor_dict(self):
visitors = {}
for name in dir(self):
if name.startswith('visit_'):
visitors[name[6:]] = getattr(self, name)
return visitors
@property
def _visitor_iterator(self):
"""iterate through this visitor and each 'chained' visitor."""
v = self
while v:
yield v
v = getattr(v, '_next', None)
def chain(self, visitor):
"""'chain' an additional ClauseVisitor onto this ClauseVisitor.
the chained visitor will receive all visit events after this one.
"""
tail = list(self._visitor_iterator)[-1]
tail._next = visitor
return self
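    # Usage sketch (not in the original source; the visitor classes are
    # hypothetical): chaining lets several visitors observe one traversal.
    #
    #     chained = CollectTables().chain(CollectColumns())
    #     chained.traverse(statement)  # both visitors' visit_* methods fire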
class CloningVisitor(ClauseVisitor):
"""Base class for visitor objects which can traverse using
the cloned_traverse() function.
"""
def copy_and_process(self, list_):
"""Apply cloned traversal to the given list of elements, and return
the new list.
"""
return [self.traverse(x) for x in list_]
def traverse(self, obj):
"""traverse and visit the given expression structure."""
return cloned_traverse(
obj, self.__traverse_options__, self._visitor_dict)
class ReplacingCloningVisitor(CloningVisitor):
"""Base class for visitor objects which can traverse using
the replacement_traverse() function.
"""
def replace(self, elem):
"""receive pre-copied elements during a cloning traversal.
If the method returns a new element, the element is used
instead of creating a simple copy of the element. Traversal
will halt on the newly returned element if it is re-encountered.
"""
return None
def traverse(self, obj):
"""traverse and visit the given expression structure."""
def replace(elem):
for v in self._visitor_iterator:
e = v.replace(elem)
if e is not None:
return e
return replacement_traverse(obj, self.__traverse_options__, replace)
def iterate(obj, opts):
"""traverse the given expression structure, returning an iterator.
traversal is configured to be breadth-first.
"""
stack = deque([obj])
while stack:
t = stack.popleft()
yield t
for c in t.get_children(**opts):
stack.append(c)
def iterate_depthfirst(obj, opts):
"""traverse the given expression structure, returning an iterator.
traversal is configured to be depth-first.
"""
stack = deque([obj])
traversal = deque()
while stack:
t = stack.pop()
traversal.appendleft(t)
for c in t.get_children(**opts):
stack.append(c)
return iter(traversal)
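# Minimal sketch (not part of the original module) contrasting the two
# iterators. _Node is a stand-in exposing the get_children() protocol the
# iterators rely on; real callers pass SQL expression constructs.
class _Node(object):
    def __init__(self, name, *children):
        self.name = name
        self._children = children

    def get_children(self, **kw):
        return self._children

def _demo_iterate():
    root = _Node('root', _Node('a'), _Node('b'))
    # Breadth-first yields parents before children ...
    assert [n.name for n in iterate(root, {})] == ['root', 'a', 'b']
    # ... while the depth-first variant yields children before parents.
    assert [n.name for n in iterate_depthfirst(root, {})] == ['a', 'b', 'root']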
def traverse_using(iterator, obj, visitors):
"""visit the given expression structure using the given iterator of
objects.
"""
for target in iterator:
meth = visitors.get(target.__visit_name__, None)
if meth:
meth(target)
return obj
def traverse(obj, opts, visitors):
"""traverse and visit the given expression structure using the default
iterator.
"""
return traverse_using(iterate(obj, opts), obj, visitors)
def traverse_depthfirst(obj, opts, visitors):
"""traverse and visit the given expression structure using the
depth-first iterator.
"""
return traverse_using(iterate_depthfirst(obj, opts), obj, visitors)
def cloned_traverse(obj, opts, visitors):
"""clone the given expression structure, allowing
modifications by visitors."""
cloned = util.column_dict()
stop_on = util.column_set(opts.get('stop_on', []))
def clone(elem):
if elem in stop_on:
return elem
else:
if id(elem) not in cloned:
cloned[id(elem)] = newelem = elem._clone()
newelem._copy_internals(clone=clone)
meth = visitors.get(newelem.__visit_name__, None)
if meth:
meth(newelem)
return cloned[id(elem)]
if obj is not None:
obj = clone(obj)
return obj
def replacement_traverse(obj, opts, replace):
"""clone the given expression structure, allowing element
replacement by a given replacement function."""
cloned = util.column_dict()
stop_on = util.column_set([id(x) for x in opts.get('stop_on', [])])
def clone(elem, **kw):
if id(elem) in stop_on or \
'no_replacement_traverse' in elem._annotations:
return elem
else:
newelem = replace(elem)
if newelem is not None:
stop_on.add(id(newelem))
return newelem
else:
if elem not in cloned:
cloned[elem] = newelem = elem._clone()
newelem._copy_internals(clone=clone, **kw)
return cloned[elem]
if obj is not None:
obj = clone(obj, **opts)
return obj
|
apache-2.0
|
rynomster/django
|
django/db/backends/sqlite3/base.py
|
4
|
18967
|
"""
SQLite3 backend for django.
Works with either the pysqlite2 module or the sqlite3 module in the
standard library.
"""
from __future__ import unicode_literals
import datetime
import decimal
import re
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.validation import BaseDatabaseValidation
from django.utils import six, timezone
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.safestring import SafeBytes
try:
import pytz
except ImportError:
pytz = None
try:
try:
from pysqlite2 import dbapi2 as Database
except ImportError:
from sqlite3 import dbapi2 as Database
except ImportError as exc:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading either pysqlite2 or sqlite3 modules (tried in that order): %s" % exc)
# Some of these import sqlite3, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
def adapt_datetime_warn_on_aware_datetime(value):
# Remove this function and rely on the default adapter in Django 2.0.
if settings.USE_TZ and timezone.is_aware(value):
warnings.warn(
"The SQLite database adapter received an aware datetime (%s), "
"probably from cursor.execute(). Update your code to pass a "
"naive datetime in the database connection's time zone (UTC by "
"default).", RemovedInDjango20Warning)
# This doesn't account for the database connection's timezone,
# which isn't known. (That's why this adapter is deprecated.)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return value.isoformat(str(" "))
def decoder(conv_func):
""" The Python sqlite3 interface returns always byte strings.
This function converts the received value to a regular string before
passing it to the receiver function.
"""
return lambda s: conv_func(s.decode('utf-8'))
Database.register_converter(str("bool"), decoder(lambda s: s == '1'))
Database.register_converter(str("time"), decoder(parse_time))
Database.register_converter(str("date"), decoder(parse_date))
Database.register_converter(str("datetime"), decoder(parse_datetime))
Database.register_converter(str("timestamp"), decoder(parse_datetime))
Database.register_converter(str("TIMESTAMP"), decoder(parse_datetime))
Database.register_converter(str("decimal"), decoder(backend_utils.typecast_decimal))
Database.register_adapter(datetime.datetime, adapt_datetime_warn_on_aware_datetime)
Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal)
if six.PY2:
Database.register_adapter(str, lambda s: s.decode('utf-8'))
Database.register_adapter(SafeBytes, lambda s: s.decode('utf-8'))
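# Illustrative note (not part of the original file): because connections are
# opened with detect_types=PARSE_DECLTYPES | PARSE_COLNAMES (see
# get_connection_params below), a column declared as "date" is routed through
# decoder(parse_date), e.g. b'2013-05-01' -> datetime.date(2013, 5, 1).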
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
# SQLite doesn't actually support most of these types, but it "does the right
# thing" given more verbose field definitions, so leave them as is so that
# schema inspection is more useful.
data_types = {
'AutoField': 'integer',
'BigAutoField': 'integer',
'BinaryField': 'BLOB',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'decimal',
'DurationField': 'bigint',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'real',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer unsigned',
'PositiveSmallIntegerField': 'smallint unsigned',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'char(32)',
}
data_types_suffix = {
'AutoField': 'AUTOINCREMENT',
'BigAutoField': 'AUTOINCREMENT',
}
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'",
'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
'startswith': r"LIKE {} || '%%' ESCAPE '\'",
'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'",
'endswith': r"LIKE '%%' || {} ESCAPE '\'",
'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
}
kwargs.update(settings_dict['OPTIONS'])
# Always allow the underlying SQLite connection to be shareable
# between multiple threads. The safe-guarding will be handled at a
# higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
# property. This is necessary as the shareability is disabled by
# default in pysqlite and it cannot be changed once a connection is
# opened.
if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
warnings.warn(
'The `check_same_thread` option was provided and set to '
'True. It will be overridden with False. Use the '
'`DatabaseWrapper.allow_thread_sharing` property instead '
'for controlling thread shareability.',
RuntimeWarning
)
kwargs.update({'check_same_thread': False})
if self.features.can_share_in_memory_db:
kwargs.update({'uri': True})
return kwargs
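# Rough shape of the result (illustrative; assumes a file-based database
# named 'db.sqlite3' and empty OPTIONS):
#     {'database': 'db.sqlite3',
#      'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
#      'check_same_thread': False}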
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.create_function("django_date_extract", 2, _sqlite_date_extract)
conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date)
conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
conn.create_function("django_time_extract", 2, _sqlite_time_extract)
conn.create_function("django_time_diff", 2, _sqlite_time_diff)
conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
conn.create_function("regexp", 2, _sqlite_regexp)
conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
conn.create_function("django_power", 2, _sqlite_power)
return conn
def init_connection_state(self):
pass
def create_cursor(self):
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
self.validate_thread_sharing()
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if not self.is_in_memory_db(self.settings_dict['NAME']):
BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
# Two conditions are required here:
# - A sufficiently recent version of SQLite to support savepoints,
# - Being in a transaction, which can only happen inside 'atomic'.
# When 'isolation_level' is not None, sqlite3 commits before each
# savepoint; it's a bug. When it is None, savepoints don't make sense
# because autocommit is enabled. The only exception is inside 'atomic'
# blocks. To work around that bug, on SQLite, 'atomic' starts a
# transaction explicitly rather than simply disabling autocommit.
return self.features.uses_savepoints and self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
# sqlite3's internal default is ''. It's different from None.
# See Modules/_sqlite/connection.c.
level = ''
# 'isolation_level' is a misleading API.
# SQLite always runs at the SERIALIZABLE isolation level.
with self.wrap_database_errors:
self.connection.isolation_level = level
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
Raises an IntegrityError on the first invalid foreign key reference
encountered (if any) and provides detailed information about the
invalid reference in the error message.
Backends can override this method if they can more directly apply
constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE").
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute("""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
% (primary_key_column_name, column_name, table_name, referenced_table_name,
column_name, referenced_column_name, column_name, referenced_column_name))
for bad_row in cursor.fetchall():
raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (table_name, bad_row[0], table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
def is_usable(self):
return True
def _start_transaction_under_autocommit(self):
"""
Start a transaction explicitly in autocommit mode.
Staying in autocommit mode works around a bug of sqlite3 that breaks
savepoints when autocommit is disabled.
"""
self.cursor().execute("BEGIN")
def is_in_memory_db(self, name):
return name == ":memory:" or "mode=memory" in force_text(name)
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=None):
if params is None:
return Database.Cursor.execute(self, query)
query = self.convert_query(query)
return Database.Cursor.execute(self, query, params)
def executemany(self, query, param_list):
query = self.convert_query(query)
return Database.Cursor.executemany(self, query, param_list)
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
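# Quick illustration (behavior inferred from the regex above): "%s"
# placeholders become "?", while "%%" collapses to a literal "%".
#     FORMAT_QMARK_REGEX.sub('?', 'x = %s AND y = %s').replace('%%', '%')
#     # -> 'x = ? AND y = ?'
#     FORMAT_QMARK_REGEX.sub('?', "name LIKE '%%s'").replace('%%', '%')
#     # -> "name LIKE '%s'"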
def _sqlite_date_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
def _sqlite_datetime_parse(dt, tzname):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
return dt
def _sqlite_datetime_cast_date(dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
return dt.date().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'hour':
return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
elif lookup_type == 'minute':
return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
elif lookup_type == 'second':
return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def _sqlite_time_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
return getattr(dt, lookup_type)
def _sqlite_format_dtdelta(conn, lhs, rhs):
"""
LHS and RHS can be either:
- An integer number of microseconds
- A string representing a timedelta object
- A string representing a datetime
"""
try:
if isinstance(lhs, six.integer_types):
lhs = str(decimal.Decimal(lhs) / decimal.Decimal(1000000))
real_lhs = parse_duration(lhs)
if real_lhs is None:
real_lhs = backend_utils.typecast_timestamp(lhs)
if isinstance(rhs, six.integer_types):
rhs = str(decimal.Decimal(rhs) / decimal.Decimal(1000000))
real_rhs = parse_duration(rhs)
if real_rhs is None:
real_rhs = backend_utils.typecast_timestamp(rhs)
if conn.strip() == '+':
out = real_lhs + real_rhs
else:
out = real_lhs - real_rhs
except (ValueError, TypeError):
return None
# typecast_timestamp returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
return str(out)
def _sqlite_time_diff(lhs, rhs):
left = backend_utils.typecast_time(lhs)
right = backend_utils.typecast_time(rhs)
return (
(left.hour * 60 * 60 * 1000000) +
(left.minute * 60 * 1000000) +
(left.second * 1000000) +
(left.microsecond) -
(right.hour * 60 * 60 * 1000000) -
(right.minute * 60 * 1000000) -
(right.second * 1000000) -
(right.microsecond)
)
def _sqlite_timestamp_diff(lhs, rhs):
left = backend_utils.typecast_timestamp(lhs)
right = backend_utils.typecast_timestamp(rhs)
return (left - right).total_seconds() * 1000000
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, force_text(re_string))) if re_string is not None else False
def _sqlite_power(x, y):
return x ** y
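# Minimal standalone sketch (stdlib sqlite3 only, not part of Django): this
# is how user-defined SQL functions like the ones above become callable from
# SQL, mirroring get_new_connection():
#     import sqlite3
#     conn = sqlite3.connect(':memory:')
#     conn.create_function('django_power', 2, _sqlite_power)
#     conn.execute('SELECT django_power(2, 10)').fetchone()  # -> (1024,)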
|
bsd-3-clause
|
znoland3/zachdemo
|
venvdir/lib/python3.4/site-packages/setuptools/msvc9_support.py
|
429
|
2187
|
try:
import distutils.msvc9compiler
except ImportError:
pass
unpatched = dict()
def patch_for_specialized_compiler():
"""
Patch functions in distutils.msvc9compiler to use the standalone compiler
build for Python (Windows only). Fall back to original behavior when the
standalone compiler is not available.
"""
if 'distutils' not in globals():
# The module isn't available to be patched
return
if unpatched:
# Already patched
return
unpatched.update(vars(distutils.msvc9compiler))
distutils.msvc9compiler.find_vcvarsall = find_vcvarsall
distutils.msvc9compiler.query_vcvarsall = query_vcvarsall
def find_vcvarsall(version):
Reg = distutils.msvc9compiler.Reg
VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
key = VC_BASE % ('', version)
try:
# Per-user installs register the compiler path here
productdir = Reg.get_value(key, "installdir")
except KeyError:
try:
# All-user installs on a 64-bit system register here
key = VC_BASE % ('Wow6432Node\\', version)
productdir = Reg.get_value(key, "installdir")
except KeyError:
productdir = None
if productdir:
import os
vcvarsall = os.path.join(productdir, "vcvarsall.bat")
if os.path.isfile(vcvarsall):
return vcvarsall
return unpatched['find_vcvarsall'](version)
def query_vcvarsall(version, *args, **kwargs):
try:
return unpatched['query_vcvarsall'](version, *args, **kwargs)
except distutils.errors.DistutilsPlatformError as exc:
if exc and "vcvarsall.bat" in exc.args[0]:
message = 'Microsoft Visual C++ %0.1f is required (%s).' % (version, exc.args[0])
if int(version) == 9:
# This redirection link is maintained by Microsoft.
# Contact vspython@microsoft.com if it needs updating.
raise distutils.errors.DistutilsPlatformError(
message + ' Get it from http://aka.ms/vcpython27'
)
raise distutils.errors.DistutilsPlatformError(message)
raise
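# Usage sketch (assumes Windows with distutils importable): call
# patch_for_specialized_compiler() once before building extensions. The
# originals stay reachable via the `unpatched` dict, so behavior falls back
# to stock distutils when the standalone compiler is not installed:
#     patch_for_specialized_compiler()
#     vcvarsall = distutils.msvc9compiler.find_vcvarsall(9.0)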
|
mit
|
edlabh/SickRage
|
lib/tvdb_api/tvdb_api.py
|
12
|
35715
|
#!/usr/bin/env python2
# encoding:utf-8
# author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:unlicense (http://unlicense.org/)
from functools import wraps
import traceback
__author__ = "dbr/Ben"
__version__ = "1.9"
import os
import re
import time
import getpass
import StringIO
import tempfile
import warnings
import logging
import zipfile
import datetime as dt
import requests
import xmltodict
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
import xml.etree.ElementTree as ElementTree
try:
import gzip
except ImportError:
gzip = None
from dateutil.parser import parse
from cachecontrol import CacheControl, caches
from tvdb_ui import BaseUI, ConsoleUI
from tvdb_exceptions import (tvdb_error, tvdb_userabort, tvdb_shownotfound, tvdb_showincomplete,
tvdb_seasonnotfound, tvdb_episodenotfound, tvdb_attributenotfound)
def log():
return logging.getLogger("tvdb_api")
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param ExceptionToCheck: the exception to check. may be a tuple of
exceptions to check
:type ExceptionToCheck: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay: initial delay between retries in seconds
:type delay: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
:param logger: logger to use. If None, print
:type logger: logging.Logger instance
"""
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck, e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
if logger:
logger.warning(msg)
else:
print msg
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry # true decorator
return deco_retry
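# Illustrative use of @retry (the decorated function is hypothetical):
#     @retry(tvdb_error, tries=4, delay=3, backoff=2)
#     def fetch_series():
#         ...  # raises tvdb_error on transient failures
# With tries=4 this makes up to three calls inside the loop, sleeping 3s,
# 6s and 12s after failures, then one final attempt that propagates errors.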
class ShowContainer(dict):
"""Simple dict that holds a series of Show instances
"""
def __init__(self):
self._stack = []
self._lastgc = time.time()
def __setitem__(self, key, value):
self._stack.append(key)
# keep only the 100 most recent results
if time.time() - self._lastgc > 20:
for o in self._stack[:-100]:
del self[o]
self._stack = self._stack[-100:]
self._lastgc = time.time()
super(ShowContainer, self).__setitem__(key, value)
class Show(dict):
"""Holds a dict of seasons, and show data.
"""
def __init__(self):
dict.__init__(self)
self.data = {}
def __repr__(self):
return "<Show %s (containing %s seasons)>" % (
self.data.get(u'seriesname', 'instance'),
len(self)
)
def __getattr__(self, key):
if key in self:
# Key is an episode, return it
return self[key]
if key in self.data:
# Non-numeric request is for show-data
return self.data[key]
raise AttributeError
def __getitem__(self, key):
if key in self:
# Key is an episode, return it
return dict.__getitem__(self, key)
if key in self.data:
# Non-numeric request is for show-data
return dict.__getitem__(self.data, key)
# Data wasn't found, raise appropriate error
if isinstance(key, int) or key.isdigit():
# Episode number x was not found
raise tvdb_seasonnotfound("Could not find season %s" % (repr(key)))
else:
# If it's not numeric, it must be an attribute name, which
# doesn't exist, so attribute error.
raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))
def airedOn(self, date):
ret = self.search(str(date), 'firstaired')
if len(ret) == 0:
raise tvdb_episodenotfound("Could not find any episodes that aired on %s" % date)
return ret
def search(self, term=None, key=None):
"""
Search all episodes in show. Can search all data, or a specific key (for
example, episodename)
Always returns an array (can be empty). First index contains the first
match, and so on.
Each array index is an Episode() instance, so doing
search_results[0]['episodename'] will retrieve the episode name of the
first match.
Search terms are converted to lower case (unicode) strings.
# Examples
These examples assume t is an instance of Tvdb():
>>> t = Tvdb()
>>>
To search for all episodes of Scrubs with a bit of data
containing "my first day":
>>> t['Scrubs'].search("my first day")
[<Episode 01x01 - My First Day>]
>>>
Search for "My Name Is Earl" episode named "Faked His Own Death":
>>> t['My Name Is Earl'].search('Faked His Own Death', key = 'episodename')
[<Episode 01x04 - Faked His Own Death>]
>>>
To search Scrubs for all episodes with "mentor" in the episode name:
>>> t['scrubs'].search('mentor', key = 'episodename')
[<Episode 01x02 - My Mentor>, <Episode 03x15 - My Tormented Mentor>]
>>>
# Using search results
>>> results = t['Scrubs'].search("my first")
>>> print results[0]['episodename']
My First Day
>>> for x in results: print x['episodename']
My First Day
My First Step
My First Kill
>>>
"""
results = []
for cur_season in self.values():
searchresult = cur_season.search(term=term, key=key)
if len(searchresult) != 0:
results.extend(searchresult)
return results
class Season(dict):
def __init__(self, show=None):
"""The show attribute points to the parent show
"""
self.show = show
def __repr__(self):
return "<Season instance (containing %s episodes)>" % (
len(self.keys())
)
def __getattr__(self, episode_number):
if episode_number in self:
return self[episode_number]
raise AttributeError
def __getitem__(self, episode_number):
if episode_number not in self:
raise tvdb_episodenotfound("Could not find episode %s" % (repr(episode_number)))
else:
return dict.__getitem__(self, episode_number)
def search(self, term=None, key=None):
"""Search all episodes in season, returns a list of matching Episode
instances.
>>> t = Tvdb()
>>> t['scrubs'][1].search('first day')
[<Episode 01x01 - My First Day>]
>>>
See Show.search documentation for further information on search
"""
results = []
for ep in self.values():
searchresult = ep.search(term=term, key=key)
if searchresult is not None:
results.append(
searchresult
)
return results
class Episode(dict):
def __init__(self, season=None):
"""The season attribute points to the parent season
"""
self.season = season
def __repr__(self):
seasno = int(self.get(u'seasonnumber', 0))
epno = int(self.get(u'episodenumber', 0))
epname = self.get(u'episodename')
if epname is not None:
return "<Episode %02dx%02d - %s>" % (seasno, epno, epname)
else:
return "<Episode %02dx%02d>" % (seasno, epno)
def __getattr__(self, key):
if key in self:
return self[key]
raise AttributeError
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))
def search(self, term=None, key=None):
"""Search episode data for term, if it matches, return the Episode (self).
The key parameter can be used to limit the search to a specific element,
for example, episodename.
This is primarily for use by Show.search and Season.search. See
Show.search for further information on search
Simple example:
>>> e = Episode()
>>> e['episodename'] = "An Example"
>>> e.search("examp")
<Episode 00x00 - An Example>
>>>
Limiting by key:
>>> e.search("examp", key = "episodename")
<Episode 00x00 - An Example>
>>>
"""
if term is None:
raise TypeError("must supply string to search for (contents)")
term = unicode(term).lower()
for cur_key, cur_value in self.items():
cur_key, cur_value = unicode(cur_key).lower(), unicode(cur_value).lower()
if key is not None and cur_key != key:
# Do not search this key
continue
if cur_value.find(unicode(term).lower()) > -1:
return self
class Actors(list):
"""Holds all Actor instances for a show
"""
pass
class Actor(dict):
"""Represents a single actor. Should contain..
id,
image,
name,
role,
sortorder
"""
def __repr__(self):
return "<Actor \"%s\">" % (self.get("name"))
class Tvdb:
"""Create easy-to-use interface to name of season/episode name
>>> t = Tvdb()
>>> t['Scrubs'][1][24]['episodename']
u'My Last Day'
"""
def __init__(self,
interactive=False,
select_first=False,
debug=False,
cache=True,
banners=False,
actors=False,
custom_ui=None,
language=None,
search_all_languages=False,
apikey=None,
forceConnect=False,
useZip=False,
dvdorder=False,
proxy=None):
"""interactive (True/False):
When True, the built-in console UI is used to select the correct show.
When False, the first search result is used.
select_first (True/False):
Automatically selects the first series search result (rather
than showing the user a list of more than one series).
Is overridden by interactive = False, or specifying a custom_ui
debug (True/False) DEPRECATED:
Replaced with proper use of logging module. To show debug messages:
>>> import logging
>>> logging.basicConfig(level = logging.DEBUG)
cache (True/False/str/unicode/urllib2 opener):
Retrieved XML is persisted to disk. If True, it is stored in a
tvdb_api folder under your system's TEMP_DIR; if set to a
str/unicode instance, that is used as the cache
location. If False, caching is disabled. Can also be passed
an arbitrary Python object, which is used as a urllib2
opener, which should be created by urllib2.build_opener
banners (True/False):
Retrieves the banners for a show. These are accessed
via the _banners key of a Show(), for example:
>>> Tvdb(banners=True)['scrubs']['_banners'].keys()
['fanart', 'poster', 'series', 'season']
actors (True/False):
Retrieves a list of the actors for a show. These are accessed
via the _actors key of a Show(), for example:
>>> t = Tvdb(actors=True)
>>> t['scrubs']['_actors'][0]['name']
u'Zach Braff'
custom_ui (tvdb_ui.BaseUI subclass):
A callable subclass of tvdb_ui.BaseUI (overrides interactive option)
language (2 character language abbreviation):
The language of the returned data. Is also the language search
uses. Default is "en" (English). For full list, run..
>>> Tvdb().config['valid_languages'] #doctest: +ELLIPSIS
['da', 'fi', 'nl', ...]
search_all_languages (True/False):
By default, Tvdb will only search in the language specified using
the language option. When this is True, it will search for the
show in any language
apikey (str/unicode):
Override the default thetvdb.com API key. By default it will use
tvdb_api's own key (fine for small scripts), but you can use your
own key if desired - this is recommended if you are embedding
tvdb_api in a larger application.
See http://thetvdb.com/?tab=apiregister to get your own key
forceConnect (bool):
If true it will always try to connect to theTVDB.com even if we
recently timed out. By default it will wait one minute before
trying again, and any requests within that one minute window will
return an exception immediately.
useZip (bool):
Download the zip archive where possible, instead of the xml.
This is only used when all episodes are pulled,
and only the main-language xml is used; the actor and banner xml are lost.
"""
self.shows = ShowContainer() # Holds all Show classes
self.corrections = {} # Holds show-name to show_id mapping
self.config = {}
if apikey is not None:
self.config['apikey'] = apikey
else:
self.config['apikey'] = "0629B785CE550C8D" # tvdb_api's API key
self.config['debug_enabled'] = debug # show debugging messages
self.config['custom_ui'] = custom_ui
self.config['interactive'] = interactive # prompt for correct series?
self.config['select_first'] = select_first
self.config['search_all_languages'] = search_all_languages
self.config['useZip'] = useZip
self.config['dvdorder'] = dvdorder
self.config['proxy'] = proxy
if cache is True:
self.config['cache_enabled'] = True
self.config['cache_location'] = self._getTempDir()
elif cache is False:
self.config['cache_enabled'] = False
elif isinstance(cache, basestring):
self.config['cache_enabled'] = True
self.config['cache_location'] = cache
else:
raise ValueError("Invalid value for Cache %r (type was %s)" % (cache, type(cache)))
self.config['session'] = requests.Session()
self.config['banners_enabled'] = banners
self.config['actors_enabled'] = actors
if self.config['debug_enabled']:
warnings.warn("The debug argument to tvdb_api.__init__ will be removed in the next version. "
"To enable debug messages, use the following code before importing: "
"import logging; logging.basicConfig(level=logging.DEBUG)")
logging.basicConfig(level=logging.DEBUG)
# List of languages from http://thetvdb.com/api/0629B785CE550C8D/languages.xml
# Hard-coded here as it is relatively static, and saves another HTTP request, as
# recommended on http://thetvdb.com/wiki/index.php/API:languages.xml
self.config['valid_languages'] = [
"da", "fi", "nl", "de", "it", "es", "fr", "pl", "hu", "el", "tr",
"ru", "he", "ja", "pt", "zh", "cs", "sl", "hr", "ko", "en", "sv", "no"
]
# thetvdb.com should be based around numeric language codes,
# but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16
# requires the language ID, thus this mapping is required (mainly
# for usage in tvdb_ui - internally tvdb_api will use the language abbreviations)
self.config['langabbv_to_id'] = {'el': 20, 'en': 7, 'zh': 27,
'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9,
'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11,
'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30}
if language is None:
self.config['language'] = 'en'
else:
if language not in self.config['valid_languages']:
raise ValueError("Invalid language %s, options are: %s" % (
language, self.config['valid_languages']
))
else:
self.config['language'] = language
# The following url_ configs are based of the
# http://thetvdb.com/wiki/index.php/Programmers_API
self.config['base_url'] = "http://thetvdb.com"
if self.config['search_all_languages']:
self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php" % self.config
self.config['params_getSeries'] = {"seriesname": "", "language": "all"}
else:
self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php" % self.config
self.config['params_getSeries'] = {"seriesname": "", "language": self.config['language']}
self.config['url_epInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.xml" % self.config
self.config['url_epInfo_zip'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.zip" % self.config
self.config['url_seriesInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/%%s.xml" % self.config
self.config['url_actorsInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/actors.xml" % self.config
self.config['url_seriesBanner'] = u"%(base_url)s/api/%(apikey)s/series/%%s/banners.xml" % self.config
self.config['url_artworkPrefix'] = u"%(base_url)s/banners/%%s" % self.config
self.config['url_updates_all'] = u"%(base_url)s/api/%(apikey)s/updates_all.zip" % self.config
self.config['url_updates_month'] = u"%(base_url)s/api/%(apikey)s/updates_month.zip" % self.config
self.config['url_updates_week'] = u"%(base_url)s/api/%(apikey)s/updates_week.zip" % self.config
self.config['url_updates_day'] = u"%(base_url)s/api/%(apikey)s/updates_day.zip" % self.config
def _getTempDir(self):
"""Returns the [system temp dir]/tvdb_api-u501 (or
tvdb_api-myuser)
"""
if hasattr(os, 'getuid'):
uid = "u%d" % (os.getuid())
else:
# For Windows
try:
uid = getpass.getuser()
except ImportError:
return os.path.join(tempfile.gettempdir(), "tvdb_api")
return os.path.join(tempfile.gettempdir(), "tvdb_api-%s" % (uid))
@retry(tvdb_error)
def _loadUrl(self, url, params=None, language=None):
try:
log().debug("Retrieving URL %s" % url)
# get response from TVDB
if self.config['cache_enabled']:
session = CacheControl(sess=self.config['session'], cache=caches.FileCache(self.config['cache_location'], use_dir_lock=True), cache_etags=False)
if self.config['proxy']:
log().debug("Using proxy for URL: %s" % url)
session.proxies = {
"http": self.config['proxy'],
"https": self.config['proxy'],
}
resp = session.get(url.strip(), params=params)
else:
resp = requests.get(url.strip(), params=params)
resp.raise_for_status()
except requests.exceptions.HTTPError, e:
raise tvdb_error("HTTP error " + str(e.errno) + " while loading URL " + str(url))
except requests.exceptions.ConnectionError, e:
raise tvdb_error("Connection error " + str(e.message) + " while loading URL " + str(url))
except requests.exceptions.Timeout, e:
raise tvdb_error("Connection timed out " + str(e.message) + " while loading URL " + str(url))
except Exception as e:
raise tvdb_error("Unknown exception while loading URL " + url + ": " + repr(e))
def process(path, key, value):
key = key.lower()
# clean up value and do type changes
if value:
try:
if key == 'firstaired' and value == "0000-00-00":
new_value = str(dt.date.fromordinal(1))
new_value = re.sub("([-]0{2}){1,}", "", new_value)
fixDate = parse(new_value, fuzzy=True).date()
value = fixDate.strftime("%Y-%m-%d")
elif key == 'firstaired':
value = parse(value, fuzzy=True).date()
value = value.strftime("%Y-%m-%d")
#if key == 'airs_time':
# value = parse(value).time()
# value = value.strftime("%I:%M %p")
except:
pass
return key, value
if 'application/zip' in resp.headers.get("Content-Type", ''):
try:
log().debug("We recived a zip file unpacking now ...")
zipdata = StringIO.StringIO()
zipdata.write(resp.content)
myzipfile = zipfile.ZipFile(zipdata)
return xmltodict.parse(myzipfile.read('%s.xml' % language), postprocessor=process)
except zipfile.BadZipfile:
raise tvdb_error("Bad zip file received from thetvdb.com, could not read it")
else:
try:
return xmltodict.parse(resp.content.decode('utf-8'), postprocessor=process)
except:
return dict([(u'data', None)])
def _getetsrc(self, url, params=None, language=None):
"""Loads a URL using caching, returns an ElementTree of the source
"""
try:
return self._loadUrl(url, params=params, language=language).values()[0]
except Exception, e:
raise tvdb_error(e)
def _setItem(self, sid, seas, ep, attrib, value):
"""Creates a new episode, creating Show(), Season() and
Episode()s as required. Called by _getShowData to populate show
Since the nice-to-use tvdb[1][24]['name'] interface
makes it impossible to do tvdb[1][24]['name'] = "name"
while still being able to check if an episode exists
(so we can raise tvdb_shownotfound), we have a slightly
less pretty method of setting items. But since the API
is supposed to be read-only, this is the best way to do it.
The problem is that calling tvdb[1][24]['episodename'] = "name"
calls __getitem__ on tvdb[1], and there is no way to check if
tvdb.__dict__ should have a key "1" before we auto-create it.
"""
if sid not in self.shows:
self.shows[sid] = Show()
if seas not in self.shows[sid]:
self.shows[sid][seas] = Season(show=self.shows[sid])
if ep not in self.shows[sid][seas]:
self.shows[sid][seas][ep] = Episode(season=self.shows[sid][seas])
self.shows[sid][seas][ep][attrib] = value
def _setShowData(self, sid, key, value):
"""Sets self.shows[sid] to a new Show instance, or sets the data
"""
if sid not in self.shows:
self.shows[sid] = Show()
self.shows[sid].data[key] = value
def _cleanData(self, data):
"""Cleans up strings returned by TheTVDB.com
Issues corrected:
- Replaces &amp; with &
- Trailing whitespace
"""
data = unicode(data).replace(u"&amp;", u"&")
data = data.strip()
return data
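# Example (illustrative):
#     self._cleanData(u'  Scrubs &amp; friends ')  # -> u'Scrubs & friends'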
def search(self, series):
"""This searches TheTVDB.com for the series name
and returns the result list
"""
series = series.encode("utf-8")
log().debug("Searching for show %s" % series)
self.config['params_getSeries']['seriesname'] = series
results = self._getetsrc(self.config['url_getSeries'], self.config['params_getSeries'])
if not results:
return
return results.values()[0]
def _getSeries(self, series):
"""This searches TheTVDB.com for the series name,
If a custom_ui UI is configured, it uses this to select the correct
series. If not, and interactive == True, ConsoleUI is used, if not
BaseUI is used to select the first result.
"""
allSeries = self.search(series)
if not allSeries:
log().debug('Series result returned zero')
raise tvdb_shownotfound("Show search returned zero results (cannot find show on TVDB)")
if not isinstance(allSeries, list):
allSeries = [allSeries]
if self.config['custom_ui'] is not None:
log().debug("Using custom UI %s" % (repr(self.config['custom_ui'])))
CustomUI = self.config['custom_ui']
ui = CustomUI(config=self.config)
else:
if not self.config['interactive']:
log().debug('Auto-selecting first search result using BaseUI')
ui = BaseUI(config=self.config)
else:
log().debug('Interactively selecting show using ConsoleUI')
ui = ConsoleUI(config=self.config)
return ui.selectSeries(allSeries)
def _parseBanners(self, sid):
"""Parses banners XML, from
http://thetvdb.com/api/[APIKEY]/series/[SERIES ID]/banners.xml
Banners are retrieved using t['show name']['_banners'], for example:
>>> t = Tvdb(banners = True)
>>> t['scrubs']['_banners'].keys()
['fanart', 'poster', 'series', 'season']
>>> t['scrubs']['_banners']['poster']['680x1000']['35308']['_bannerpath']
u'http://thetvdb.com/banners/posters/76156-2.jpg'
>>>
Any key starting with an underscore has been processed (not the raw
data from the XML)
This interface will be improved in future versions.
"""
log().debug('Getting season banners for %s' % (sid))
bannersEt = self._getetsrc(self.config['url_seriesBanner'] % (sid))
if not bannersEt:
log().debug('Banners result returned zero')
return
banners = {}
for cur_banner in bannersEt['banner'] if isinstance(bannersEt['banner'], list) else [bannersEt['banner']]:
bid = cur_banner['id']
btype = cur_banner['bannertype']
btype2 = cur_banner['bannertype2']
if btype is None or btype2 is None:
continue
if btype not in banners:
banners[btype] = {}
if btype2 not in banners[btype]:
banners[btype][btype2] = {}
if bid not in banners[btype][btype2]:
banners[btype][btype2][bid] = {}
for k, v in cur_banner.items():
if k is None or v is None:
continue
k, v = k.lower(), v.lower()
banners[btype][btype2][bid][k] = v
for k, v in banners[btype][btype2][bid].items():
if k.endswith("path"):
new_key = "_%s" % (k)
log().debug("Transforming %s to %s" % (k, new_key))
new_url = self.config['url_artworkPrefix'] % (v)
banners[btype][btype2][bid][new_key] = new_url
self._setShowData(sid, "_banners", banners)
def _parseActors(self, sid):
"""Parsers actors XML, from
http://thetvdb.com/api/[APIKEY]/series/[SERIES ID]/actors.xml
Actors are retrieved using t['show name']['_actors'], for example:
>>> t = Tvdb(actors = True)
>>> actors = t['scrubs']['_actors']
>>> type(actors)
<class 'tvdb_api.Actors'>
>>> type(actors[0])
<class 'tvdb_api.Actor'>
>>> actors[0]
<Actor "Zach Braff">
>>> sorted(actors[0].keys())
['id', 'image', 'name', 'role', 'sortorder']
>>> actors[0]['name']
u'Zach Braff'
>>> actors[0]['image']
u'http://thetvdb.com/banners/actors/43640.jpg'
Any key starting with an underscore has been processed (not the raw
data from the XML)
"""
log().debug("Getting actors for %s" % (sid))
actorsEt = self._getetsrc(self.config['url_actorsInfo'] % (sid))
if not actorsEt:
log().debug('Actors result returned zero')
return
cur_actors = Actors()
for cur_actor in actorsEt['actor'] if isinstance(actorsEt['actor'], list) else [actorsEt['actor']]:
curActor = Actor()
for k, v in cur_actor.items():
if k is None or v is None:
continue
k = k.lower()
if k == "image":
v = self.config['url_artworkPrefix'] % (v)
else:
v = self._cleanData(v)
curActor[k] = v
cur_actors.append(curActor)
self._setShowData(sid, '_actors', cur_actors)
def _getShowData(self, sid, language, getEpInfo=False):
"""Takes a series ID, gets the epInfo URL and parses the TVDB
XML file into the shows dict in layout:
shows[series_id][season_number][episode_number]
"""
if self.config['language'] is None:
log().debug('Config language is none, using show language')
if language is None:
raise tvdb_error("config['language'] was None, this should not happen")
getShowInLanguage = language
else:
log().debug(
'Configured language %s overrides show language of %s' % (
self.config['language'],
language
)
)
getShowInLanguage = self.config['language']
# Parse show information
log().debug('Getting all series data for %s' % (sid))
seriesInfoEt = self._getetsrc(
self.config['url_seriesInfo'] % (sid, getShowInLanguage)
)
if not seriesInfoEt:
log().debug('Series result returned zero')
raise tvdb_error("Series result returned zero")
# get series data
for k, v in seriesInfoEt['series'].items():
if v is not None:
if k in ['banner', 'fanart', 'poster']:
v = self.config['url_artworkPrefix'] % (v)
else:
v = self._cleanData(v)
self._setShowData(sid, k, v)
# get episode data
if getEpInfo:
# Parse banners
if self.config['banners_enabled']:
self._parseBanners(sid)
# Parse actors
if self.config['actors_enabled']:
self._parseActors(sid)
# Parse episode data
log().debug('Getting all episodes of %s' % (sid))
if self.config['useZip']:
url = self.config['url_epInfo_zip'] % (sid, language)
else:
url = self.config['url_epInfo'] % (sid, language)
epsEt = self._getetsrc(url, language=language)
if not epsEt:
log().debug('Series results incomplete')
raise tvdb_showincomplete("Show search returned incomplete results (cannot find complete show on TVDB)")
if 'episode' not in epsEt:
return False
episodes = epsEt["episode"]
if not isinstance(episodes, list):
episodes = [episodes]
for cur_ep in episodes:
if self.config['dvdorder']:
log().debug('Using DVD ordering.')
use_dvd = cur_ep['dvd_season'] is not None and cur_ep['dvd_episodenumber'] is not None
else:
use_dvd = False
if use_dvd:
seasnum, epno = cur_ep['dvd_season'], cur_ep['dvd_episodenumber']
else:
seasnum, epno = cur_ep['seasonnumber'], cur_ep['episodenumber']
if seasnum is None or epno is None:
log().warning("An episode has incomplete season/episode number (season: %r, episode: %r)" % (
seasnum, epno))
continue # Skip to next episode
# float() is because https://github.com/dbr/tvnamer/issues/95 - should probably be fixed in TVDB data
seas_no = int(float(seasnum))
ep_no = int(float(epno))
for k, v in cur_ep.items():
k = k.lower()
if v is not None:
if k == 'filename':
v = self.config['url_artworkPrefix'] % (v)
else:
v = self._cleanData(v)
self._setItem(sid, seas_no, ep_no, k, v)
return True
def _nameToSid(self, name):
"""Takes show name, returns the correct series ID (if the show has
already been grabbed), or grabs all episodes and returns
the correct SID.
"""
if name in self.corrections:
log().debug('Correcting %s to %s' % (name, self.corrections[name]))
return self.corrections[name]
else:
log().debug('Getting show %s' % (name))
selected_series = self._getSeries(name)
if isinstance(selected_series, dict):
selected_series = [selected_series]
sids = list(int(x['id']) for x in selected_series if
self._getShowData(int(x['id']), self.config['language']))
self.corrections.update(dict((x['seriesname'], int(x['id'])) for x in selected_series))
return sids
def __getitem__(self, key):
"""Handles tvdb_instance['seriesname'] calls.
The dict index should be the show id
"""
if isinstance(key, (int, long)):
# Item is integer, treat as show id
if key not in self.shows:
self._getShowData(key, self.config['language'], True)
return self.shows[key]
key = str(key).lower()
self.config['searchterm'] = key
selected_series = self._getSeries(key)
if isinstance(selected_series, dict):
selected_series = [selected_series]
[[self._setShowData(show['id'], k, v) for k, v in show.items()] for show in selected_series]
return selected_series
def __repr__(self):
return str(self.shows)
def main():
"""Simple example of using tvdb_api - it just
grabs an episode name interactively.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
tvdb_instance = Tvdb(interactive=True, cache=False)
print tvdb_instance['Lost']['seriesname']
print tvdb_instance['Lost'][1][4]['episodename']
if __name__ == '__main__':
main()
|
gpl-3.0
|
blackbliss/medity-expo-2014
|
remote-api/flask/lib/python2.7/site-packages/flask_httpauth.py
|
2
|
3984
|
"""
flask.ext.httpauth
==================
This module provides Basic and Digest HTTP authentication for Flask routes.
:copyright: (C) 2014 by Miguel Grinberg.
:license: BSD, see LICENSE for more details.
"""
from functools import wraps
from hashlib import md5
from random import Random, SystemRandom
from flask import request, make_response, session
class HTTPAuth(object):
def __init__(self):
def default_get_password(username):
return None
def default_auth_error():
return "Unauthorized Access"
self.realm = "Authentication Required"
self.get_password(default_get_password)
self.error_handler(default_auth_error)
def get_password(self, f):
self.get_password_callback = f
return f
def error_handler(self, f):
@wraps(f)
def decorated(*args, **kwargs):
res = f(*args, **kwargs)
if type(res) == str:
res = make_response(res)
res.status_code = 401
if 'WWW-Authenticate' not in res.headers.keys():
res.headers['WWW-Authenticate'] = self.authenticate_header()
return res
self.auth_error_callback = decorated
return decorated
def login_required(self, f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth:
return self.auth_error_callback()
password = self.get_password_callback(auth.username)
if not self.authenticate(auth, password):
return self.auth_error_callback()
return f(*args, **kwargs)
return decorated
def username(self):
return request.authorization.username
class HTTPBasicAuth(HTTPAuth):
def __init__(self):
super(HTTPBasicAuth, self).__init__()
self.hash_password(None)
self.verify_password(None)
def hash_password(self, f):
self.hash_password_callback = f
def verify_password(self, f):
self.verify_password_callback = f
def authenticate_header(self):
return 'Basic realm="' + self.realm + '"'
def authenticate(self, auth, stored_password):
client_password = auth.password
if self.verify_password_callback:
return self.verify_password_callback(auth.username, client_password)
if self.hash_password_callback:
try:
client_password = self.hash_password_callback(client_password)
except TypeError:
client_password = self.hash_password_callback(auth.username, client_password)
return client_password == stored_password
class HTTPDigestAuth(HTTPAuth):
def __init__(self):
super(HTTPDigestAuth, self).__init__()
self.random = SystemRandom()
try:
self.random.random()
except NotImplementedError:
self.random = Random()
def get_nonce(self):
return md5(str(self.random.random()).encode('utf-8')).hexdigest()
def authenticate_header(self):
session["auth_nonce"] = self.get_nonce()
session["auth_opaque"] = self.get_nonce()
return 'Digest realm="' + self.realm + '",nonce="' + session["auth_nonce"] + '",opaque="' + session["auth_opaque"] + '"'
def authenticate(self, auth, password):
if not auth.username or not auth.realm or not auth.uri or not auth.nonce or not auth.response or not password:
return False
if auth.nonce != session.get("auth_nonce") or auth.opaque != session.get("auth_opaque"):
return False
a1 = auth.username + ":" + auth.realm + ":" + password
ha1 = md5(a1.encode('utf-8')).hexdigest()
a2 = request.method + ":" + auth.uri
ha2 = md5(a2.encode('utf-8')).hexdigest()
a3 = ha1 + ":" + auth.nonce + ":" + ha2
response = md5(a3.encode('utf-8')).hexdigest()
return response == auth.response
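# Usage sketch (the Flask app, route and credentials are illustrative):
#     from flask import Flask
#     app = Flask(__name__)
#     auth = HTTPBasicAuth()
#     users = {'alice': 'secret'}
#     @auth.get_password
#     def get_pw(username):
#         return users.get(username)
#     @app.route('/private')
#     @auth.login_required
#     def private_page():
#         return 'Hello, %s!' % auth.username()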
|
mit
|
vrum/yaml-cpp
|
test/gmock-1.7.0/gtest/scripts/fuse_gtest_files.py
|
2577
|
8813
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')
# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')
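# Example (illustrative) of the regexes above:
#     INCLUDE_GTEST_FILE_REGEX.match('#include "gtest/gtest-spi.h"').group(1)
#     # -> 'gtest/gtest-spi.h'
#     INCLUDE_SRC_FILE_REGEX.match('#include "src/gtest.cc"').group(1)
#     # -> 'src/gtest.cc'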
# Where to find the source seed files.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
"""Verifies that the given file exists; aborts on failure.
relative_path is the file path relative to the given directory.
"""
if not os.path.isfile(os.path.join(directory, relative_path)):
print 'ERROR: Cannot find %s in directory %s.' % (relative_path,
directory)
print ('Please either specify a valid project root directory '
'or omit it on the command line.')
sys.exit(1)
def ValidateGTestRootDir(gtest_root):
"""Makes sure gtest_root points to a valid gtest root directory.
The function aborts the program on failure.
"""
VerifyFileExists(gtest_root, GTEST_H_SEED)
VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED)
def VerifyOutputFile(output_dir, relative_path):
"""Verifies that the given output file path is valid.
relative_path is relative to the output_dir directory.
"""
# Makes sure the output file either doesn't exist or can be overwritten.
output_file = os.path.join(output_dir, relative_path)
if os.path.exists(output_file):
# TODO(wan@google.com): The following user-interaction doesn't
# work with automated processes. We should provide a way for the
# Makefile to force overwriting the files.
print ('%s already exists in directory %s - overwrite it? (y/N) ' %
(relative_path, output_dir))
answer = sys.stdin.readline().strip()
if answer not in ['y', 'Y']:
print 'ABORTED.'
sys.exit(1)
# Makes sure the directory holding the output file exists; creates
# it and all its ancestors if necessary.
parent_directory = os.path.dirname(output_file)
if not os.path.isdir(parent_directory):
os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
VerifyOutputFile(output_dir, GTEST_H_OUTPUT)
VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT)
def FuseGTestH(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest.h in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
processed_files = sets.Set() # Holds all gtest headers we've processed.
def ProcessFile(gtest_header_path):
"""Processes the given gtest header file."""
# We don't process the same header twice.
if gtest_header_path in processed_files:
return
processed_files.add(gtest_header_path)
# Reads each line in the given gtest header.
for line in file(os.path.join(gtest_root, gtest_header_path), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/..."' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GTEST_H_SEED)
output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""
processed_files = sets.Set()
def ProcessFile(gtest_source_file):
"""Processes the given gtest source file."""
# We don't process the same #included file twice.
if gtest_source_file in processed_files:
return
processed_files.add(gtest_source_file)
# Reads each line in the given gtest source file.
for line in file(os.path.join(gtest_root, gtest_source_file), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
# It's '#include "gtest/gtest-spi.h"'. This file is not
# #included by "gtest/gtest.h", so we need to process it.
ProcessFile(GTEST_SPI_H_SEED)
else:
# It's '#include "gtest/foo.h"' where foo is not gtest-spi.
# We treat it as '#include "gtest/gtest.h"', as all other
# gtest headers are being fused into gtest.h and cannot be
# #included directly.
# There is no need to #include "gtest/gtest.h" more than once.
if not GTEST_H_SEED in processed_files:
processed_files.add(GTEST_H_SEED)
output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
else:
m = INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "src/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
output_file.write(line)
ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
FuseGTestAllCcToFile(gtest_root, output_file)
output_file.close()
def FuseGTest(gtest_root, output_dir):
"""Fuses gtest.h and gtest-all.cc."""
ValidateGTestRootDir(gtest_root)
ValidateOutputDir(output_dir)
FuseGTestH(gtest_root, output_dir)
FuseGTestAllCc(gtest_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gtest_files.py OUTPUT_DIR
FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
FuseGTest(sys.argv[1], sys.argv[2])
else:
print __doc__
sys.exit(1)
if __name__ == '__main__':
main()
|
mit
|
mattrobenolt/django
|
tests/redirects_tests/tests.py
|
336
|
3396
|
from django import http
from django.conf import settings
from django.contrib.redirects.middleware import RedirectFallbackMiddleware
from django.contrib.redirects.models import Redirect
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, modify_settings, override_settings
from django.utils import six
@modify_settings(MIDDLEWARE_CLASSES={'append':
'django.contrib.redirects.middleware.RedirectFallbackMiddleware'})
@override_settings(APPEND_SLASH=False, SITE_ID=1)
class RedirectTests(TestCase):
def setUp(self):
self.site = Site.objects.get(pk=settings.SITE_ID)
def test_model(self):
r1 = Redirect.objects.create(
site=self.site, old_path='/initial', new_path='/new_target')
self.assertEqual(six.text_type(r1), "/initial ---> /new_target")
def test_redirect(self):
Redirect.objects.create(
site=self.site, old_path='/initial', new_path='/new_target')
response = self.client.get('/initial')
self.assertRedirects(response,
'/new_target', status_code=301, target_status_code=404)
@override_settings(APPEND_SLASH=True)
def test_redirect_with_append_slash(self):
Redirect.objects.create(
site=self.site, old_path='/initial/', new_path='/new_target/')
response = self.client.get('/initial')
self.assertRedirects(response,
'/new_target/', status_code=301, target_status_code=404)
@override_settings(APPEND_SLASH=True)
def test_redirect_with_append_slash_and_query_string(self):
Redirect.objects.create(
site=self.site, old_path='/initial/?foo', new_path='/new_target/')
response = self.client.get('/initial?foo')
self.assertRedirects(response,
'/new_target/', status_code=301, target_status_code=404)
def test_response_gone(self):
"""When the redirect target is '', return a 410"""
Redirect.objects.create(
site=self.site, old_path='/initial', new_path='')
response = self.client.get('/initial')
self.assertEqual(response.status_code, 410)
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
def test_sites_not_installed(self):
with self.assertRaises(ImproperlyConfigured):
RedirectFallbackMiddleware()
class OverriddenRedirectFallbackMiddleware(RedirectFallbackMiddleware):
# Use HTTP responses different from the defaults
response_gone_class = http.HttpResponseForbidden
response_redirect_class = http.HttpResponseRedirect
@modify_settings(MIDDLEWARE_CLASSES={'append':
'redirects_tests.tests.OverriddenRedirectFallbackMiddleware'})
@override_settings(SITE_ID=1)
class OverriddenRedirectMiddlewareTests(TestCase):
def setUp(self):
self.site = Site.objects.get(pk=settings.SITE_ID)
def test_response_gone_class(self):
Redirect.objects.create(
site=self.site, old_path='/initial/', new_path='')
response = self.client.get('/initial/')
self.assertEqual(response.status_code, 403)
def test_response_redirect_class(self):
Redirect.objects.create(
site=self.site, old_path='/initial/', new_path='/new_target/')
response = self.client.get('/initial/')
self.assertEqual(response.status_code, 302)
|
bsd-3-clause
|
ppwwyyxx/tensorflow
|
tensorflow/python/ops/structured/structured_tensor_spec_test.py
|
3
|
9305
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StructuredTensorSpec."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.structured import structured_tensor
from tensorflow.python.ops.structured.structured_tensor import StructuredTensor
from tensorflow.python.ops.structured.structured_tensor import StructuredTensorSpec
from tensorflow.python.platform import googletest
# TypeSpecs consts for fields types.
T_3 = tensor_spec.TensorSpec([3])
T_1_2 = tensor_spec.TensorSpec([1, 2])
T_1_2_8 = tensor_spec.TensorSpec([1, 2, 8])
T_1_2_3_4 = tensor_spec.TensorSpec([1, 2, 3, 4])
T_2_3 = tensor_spec.TensorSpec([2, 3])
R_1_N = ragged_tensor.RaggedTensorSpec([1, None])
R_1_N_N = ragged_tensor.RaggedTensorSpec([1, None, None])
R_2_1_N = ragged_tensor.RaggedTensorSpec([2, 1, None])
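# Readability note (illustrative): T_2_3 is a dense Tensor of shape [2, 3],
# while R_1_N is a RaggedTensor of shape [1, None] whose row lengths may
# vary; R_2_1_N adds one more uniform dimension in front of a ragged one.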
# pylint: disable=g-long-lambda
@test_util.run_all_in_graph_and_eager_modes
class StructuredTensorSpecTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
# TODO(edloper): Add a subclass of TensorFlowTestCase that overrides
# assertAllEqual etc to work with StructuredTensors.
def assertAllEqual(self, a, b, msg=None):
if not (isinstance(a, structured_tensor.StructuredTensor) or
isinstance(b, structured_tensor.StructuredTensor)):
return super(StructuredTensorSpecTest, self).assertAllEqual(a, b, msg)
if not (isinstance(a, structured_tensor.StructuredTensor) and
isinstance(b, structured_tensor.StructuredTensor)):
# TODO(edloper) Add support for this once structured_factory_ops is added.
raise ValueError('Not supported yet')
self.assertEqual(repr(a.shape), repr(b.shape))
self.assertEqual(set(a.field_names()), set(b.field_names()))
for field in a.field_names():
self.assertAllEqual(a.field_value(field), b.field_value(field))
def assertAllTensorsEqual(self, list1, list2):
self.assertLen(list1, len(list2))
for (t1, t2) in zip(list1, list2):
self.assertAllEqual(t1, t2)
def testConstruction(self):
spec1_fields = dict(a=T_1_2_3_4)
spec1 = StructuredTensorSpec([1, 2, 3], spec1_fields)
self.assertEqual(spec1._shape, (1, 2, 3))
self.assertEqual(spec1._field_specs, spec1_fields)
spec2_fields = dict(a=T_1_2, b=T_1_2_8, c=R_1_N, d=R_1_N_N, s=spec1)
spec2 = StructuredTensorSpec([1, 2], spec2_fields)
self.assertEqual(spec2._shape, (1, 2))
self.assertEqual(spec2._field_specs, spec2_fields)
def testValueType(self):
spec1 = StructuredTensorSpec([1, 2, 3], dict(a=T_1_2))
self.assertEqual(spec1.value_type, StructuredTensor)
@parameterized.parameters([
(StructuredTensorSpec([1, 2, 3], {}),
(tensor_shape.TensorShape([1, 2, 3]), {})),
(StructuredTensorSpec([], {'a': T_1_2}),
(tensor_shape.TensorShape([]), {'a': T_1_2})),
(StructuredTensorSpec([1, 2], {'a': T_1_2, 'b': R_1_N}),
(tensor_shape.TensorShape([1, 2]), {'a': T_1_2, 'b': R_1_N})),
(StructuredTensorSpec([], {'a': T_1_2}),
(tensor_shape.TensorShape([]), {'a': T_1_2})),
]) # pyformat: disable
def testSerialize(self, spec, expected):
serialization = spec._serialize()
# TensorShape has an unconventional definition of equality, so we can't use
# assertEqual directly here. But repr() is deterministic and lossless for
# the expected values, so we can use that instead.
self.assertEqual(repr(serialization), repr(expected))
@parameterized.parameters([
(StructuredTensorSpec([1, 2, 3], {}), {}),
(StructuredTensorSpec([], {'a': T_1_2}), {'a': T_1_2}),
(StructuredTensorSpec([1, 2], {'a': T_1_2, 'b': R_1_N}),
{'a': T_1_2, 'b': R_1_N}),
(StructuredTensorSpec([], {'a': T_1_2}), {'a': T_1_2}),
]) # pyformat: disable
def testComponentSpecs(self, spec, expected):
self.assertEqual(spec._component_specs, expected)
@parameterized.parameters([
{
'shape': [],
'fields': dict(x=[[1.0, 2.0]]),
'field_specs': dict(x=T_1_2),
},
{
'shape': [1, 2, 3],
'fields': {},
'field_specs': {},
},
{
'shape': [2],
'fields': dict(
a=ragged_factory_ops.constant_value([[1.0], [2.0, 3.0]]),
b=[[4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]),
'field_specs': dict(a=R_1_N, b=T_2_3),
},
]) # pyformat: disable
def testToFromComponents(self, shape, fields, field_specs):
components = fields
struct = StructuredTensor(shape, fields)
spec = StructuredTensorSpec(shape, field_specs)
actual_components = spec._to_components(struct)
self.assertAllTensorsEqual(actual_components, components)
rt_reconstructed = spec._from_components(actual_components)
self.assertAllEqual(struct, rt_reconstructed)
@parameterized.parameters([
{
'unbatched': StructuredTensorSpec([], {}),
'batch_size': 5,
'batched': StructuredTensorSpec([5], {}),
},
{
'unbatched': StructuredTensorSpec([1, 2], {}),
'batch_size': 5,
'batched': StructuredTensorSpec([5, 1, 2], {}),
},
{
'unbatched': StructuredTensorSpec([], dict(a=T_3, b=R_1_N)),
'batch_size': 2,
'batched': StructuredTensorSpec([2], dict(a=T_2_3, b=R_2_1_N)),
}
]) # pyformat: disable
def testBatchUnbatch(self, unbatched, batch_size, batched):
self.assertEqual(unbatched._batch(batch_size), batched)
self.assertEqual(batched._unbatch(), unbatched)
@parameterized.parameters([
{
'unbatched': lambda: [
StructuredTensor([], {'a': 1, 'b': [5, 6]}),
StructuredTensor([], {'a': 2, 'b': [7, 8]})],
'batch_size': 2,
'batched': lambda: StructuredTensor([2], {
'a': [1, 2],
'b': [[5, 6], [7, 8]]}),
},
{
'unbatched': lambda: [
StructuredTensor([3], {
'a': [1, 2, 3],
'b': [[5, 6], [6, 7], [7, 8]]}),
StructuredTensor([3], {
'a': [2, 3, 4],
'b': [[2, 2], [3, 3], [4, 4]]})],
'batch_size': 2,
'batched': lambda: StructuredTensor([2, 3], {
'a': [[1, 2, 3], [2, 3, 4]],
'b': [[[5, 6], [6, 7], [7, 8]],
[[2, 2], [3, 3], [4, 4]]]}),
},
{
'unbatched': lambda: [
StructuredTensor([], {
'a': 1,
'b': StructuredTensor([], {'x': [5]})}),
StructuredTensor([], {
'a': 2,
'b': StructuredTensor([], {'x': [6]})})],
'batch_size': 2,
'batched': lambda: StructuredTensor([2], {
'a': [1, 2],
'b': StructuredTensor([2], {'x': [[5], [6]]})}),
},
]) # pyformat: disable
def testBatchUnbatchValues(self, unbatched, batch_size, batched):
batched = batched() # Deferred init because it creates tensors.
unbatched = unbatched() # Deferred init because it creates tensors.
# Test batching.
unbatched_spec = type_spec.type_spec_from_value(unbatched[0])
unbatched_tensor_lists = [unbatched_spec._to_tensor_list(st)
for st in unbatched]
batched_tensor_list = [array_ops.stack(tensors)
for tensors in zip(*unbatched_tensor_lists)]
actual_batched = unbatched_spec._batch(batch_size)._from_tensor_list(
batched_tensor_list)
self.assertAllEqual(actual_batched, batched)
# Test unbatching
batched_spec = type_spec.type_spec_from_value(batched)
batched_tensor_list = batched_spec._to_tensor_list(batched)
unbatched_tensor_lists = zip(
*[array_ops.unstack(tensor) for tensor in batched_tensor_list])
actual_unbatched = [
batched_spec._unbatch()._from_tensor_list(tensor_list)
for tensor_list in unbatched_tensor_lists]
self.assertLen(actual_unbatched, len(unbatched))
for (actual, expected) in zip(actual_unbatched, unbatched):
self.assertAllEqual(actual, expected)
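# A minimal sketch (not part of the original suite) of the round-trip the
# tests above exercise: a StructuredTensorSpec pairs a shape with per-field
# TypeSpecs, and _to_components/_from_components carry the field values.
# The field name 'a' is illustrative.
def _example_spec_roundtrip():
  spec = StructuredTensorSpec([2], {'a': tensor_spec.TensorSpec([2, 3])})
  st = StructuredTensor([2], {'a': array_ops.zeros([2, 3])})
  return spec._from_components(spec._to_components(st))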
if __name__ == '__main__':
googletest.main()
|
apache-2.0
|
tastynoodle/django
|
django/contrib/gis/feeds.py
|
5
|
5930
|
from __future__ import unicode_literals
from django.contrib.syndication.views import Feed as BaseFeed
from django.utils.feedgenerator import Atom1Feed, Rss201rev2Feed
class GeoFeedMixin(object):
"""
This mixin provides the necessary routines for SyndicationFeed subclasses
to produce simple GeoRSS or W3C Geo elements.
"""
def georss_coords(self, coords):
"""
        In GeoRSS, coordinate pairs are ordered by lat/lon and separated by
a single white space. Given a tuple of coordinates, this will return
a unicode GeoRSS representation.
"""
return ' '.join('%f %f' % (coord[1], coord[0]) for coord in coords)
def add_georss_point(self, handler, coords, w3c_geo=False):
"""
Adds a GeoRSS point with the given coords using the given handler.
        Handles the differences between simple GeoRSS and the more popular
W3C Geo specification.
"""
if w3c_geo:
lon, lat = coords[:2]
handler.addQuickElement('geo:lat', '%f' % lat)
handler.addQuickElement('geo:lon', '%f' % lon)
else:
handler.addQuickElement('georss:point', self.georss_coords((coords,)))
def add_georss_element(self, handler, item, w3c_geo=False):
"""
This routine adds a GeoRSS XML element using the given item and handler.
"""
# Getting the Geometry object.
geom = item.get('geometry', None)
        if geom is not None:
if isinstance(geom, (list, tuple)):
# Special case if a tuple/list was passed in. The tuple may be
# a point or a box
box_coords = None
if isinstance(geom[0], (list, tuple)):
# Box: ( (X0, Y0), (X1, Y1) )
if len(geom) == 2:
box_coords = geom
else:
                        raise ValueError('There should be only two sets of coordinates.')
else:
if len(geom) == 2:
# Point: (X, Y)
self.add_georss_point(handler, geom, w3c_geo=w3c_geo)
elif len(geom) == 4:
# Box: (X0, Y0, X1, Y1)
box_coords = (geom[:2], geom[2:])
else:
                        raise ValueError('There should be only 2 or 4 numeric elements.')
# If a GeoRSS box was given via tuple.
            if box_coords is not None:
                if w3c_geo:
                    raise ValueError('Cannot use simple GeoRSS box in W3C Geo feeds.')
                handler.addQuickElement('georss:box', self.georss_coords(box_coords))
else:
# Getting the lower-case geometry type.
gtype = str(geom.geom_type).lower()
if gtype == 'point':
self.add_georss_point(handler, geom.coords, w3c_geo=w3c_geo)
else:
                if w3c_geo:
                    raise ValueError('W3C Geo only supports Point geometries.')
# For formatting consistent w/the GeoRSS simple standard:
# http://georss.org/1.0#simple
if gtype in ('linestring', 'linearring'):
handler.addQuickElement('georss:line', self.georss_coords(geom.coords))
elif gtype in ('polygon',):
# Only support the exterior ring.
handler.addQuickElement('georss:polygon', self.georss_coords(geom[0].coords))
else:
raise ValueError('Geometry type "%s" not supported.' % geom.geom_type)
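# Illustrative usage sketch (not part of Django's source): the mixin expects
# (lon, lat) coordinate pairs and writes them out lat-first, as GeoRSS
# requires.
def _example_georss_coords():
    mixin = GeoFeedMixin()
    # Two (lon, lat) points -> '20.000000 10.000000 40.000000 30.000000'
    return mixin.georss_coords([(10.0, 20.0), (30.0, 40.0)])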
### SyndicationFeed subclasses ###
class GeoRSSFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super(GeoRSSFeed, self).rss_attributes()
attrs['xmlns:georss'] = 'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super(GeoRSSFeed, self).add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super(GeoRSSFeed, self).add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class GeoAtom1Feed(Atom1Feed, GeoFeedMixin):
def root_attributes(self):
attrs = super(GeoAtom1Feed, self).root_attributes()
attrs['xmlns:georss'] = 'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super(GeoAtom1Feed, self).add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super(GeoAtom1Feed, self).add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class W3CGeoFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super(W3CGeoFeed, self).rss_attributes()
attrs['xmlns:geo'] = 'http://www.w3.org/2003/01/geo/wgs84_pos#'
return attrs
def add_item_elements(self, handler, item):
super(W3CGeoFeed, self).add_item_elements(handler, item)
self.add_georss_element(handler, item, w3c_geo=True)
def add_root_elements(self, handler):
super(W3CGeoFeed, self).add_root_elements(handler)
self.add_georss_element(handler, self.feed, w3c_geo=True)
### Feed subclass ###
class Feed(BaseFeed):
"""
This is a subclass of the `Feed` from `django.contrib.syndication`.
    This allows users to define `geometry(obj)` and/or `item_geometry(item)`
    methods on their own subclasses so that geo-referenced information may
    be placed in the feed.
"""
feed_type = GeoRSSFeed
def feed_extra_kwargs(self, obj):
        return {'geometry': self.__get_dynamic_attr('geometry', obj)}
def item_extra_kwargs(self, item):
        return {'geometry': self.__get_dynamic_attr('item_geometry', item)}
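# Hypothetical sketch (not part of Django's source): a subclass only needs to
# supply `geometry`/`item_geometry`; plain tuples are accepted in addition to
# GEOS geometry objects. All names and values below are illustrative.
class ExamplePointFeed(Feed):
    title = 'Example points'
    link = '/example/'
    description = 'Illustrative geo-referenced feed.'
    def items(self):
        return [{'name': 'somewhere', 'lonlat': (10.0, 20.0)}]
    def item_title(self, item):
        return item['name']
    def item_link(self, item):
        return '/example/%s/' % item['name']
    def item_geometry(self, item):
        # A (lon, lat) pair becomes a <georss:point> element.
        return item['lonlat']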
|
bsd-3-clause
|
arbrandes/edx-platform
|
openedx/core/lib/tests/assertions/events.py
|
3
|
9890
|
"""Assertions related to event validation"""
import json
import pprint
def assert_event_matches(expected, actual, tolerate=None):
"""
Compare two event dictionaries.
Fail if any discrepancies exist, and output the list of all discrepancies. The intent is to produce clearer
error messages than "{ some massive dict } != { some other massive dict }", instead enumerating the keys that
differ. Produces period separated "paths" to keys in the output, so "context.foo" refers to the following
structure:
{
'context': {
'foo': 'bar' # this key, value pair
}
}
The other key difference between this comparison and `assertEquals` is that it supports differing levels of
tolerance for discrepancies. We don't want to litter our tests full of exact match tests because then anytime we
add a field to all events, we have to go update every single test that has a hardcoded complete event structure in
it. Instead we support making partial assertions about structure and content of the event. So if I say my expected
event looks like this:
{
'event_type': 'foo.bar',
'event': {
'user_id': 10
}
}
This method will raise an assertion error if the actual event either does not contain the above fields in their
exact locations in the hierarchy, or if it does contain them but has different values for them. Note that it will
*not* necessarily raise an assertion error if the actual event contains other fields that are not listed in the
expected event. For example, the following event would not raise an assertion error:
{
'event_type': 'foo.bar',
        'referer': 'http://example.com',
'event': {
'user_id': 10
}
}
Note that the extra "referer" field is not considered an error by default.
The `tolerate` parameter takes a set that allows you to specify varying degrees of tolerance for some common
eventing related issues. See the `EventMatchTolerates` class for more information about the various flags that are
supported here.
Example output if an error is found:
Unexpected differences found in structs:
* <path>: not found in actual
* <path>: <expected_value> != <actual_value> (expected != actual)
Expected:
    { <expected event> }
Actual:
{ <actual event> }
"<path>" is a "." separated string indicating the key that differed. In the examples above "event.user_id" would
refer to the value of the "user_id" field contained within the dictionary referred to by the "event" field in the
root dictionary.
"""
differences = get_event_differences(expected, actual, tolerate=tolerate)
if len(differences) > 0:
debug_info = [
'',
'Expected:',
block_indent(expected),
'Actual:',
block_indent(actual),
'Tolerating:',
block_indent(EventMatchTolerates.default_if_not_defined(tolerate)),
]
differences = ['* ' + d for d in differences]
message_lines = differences + debug_info
raise AssertionError('Unexpected differences found in structs:\n\n' + '\n'.join(message_lines))
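# Usage sketch (illustrative, not part of the original module): a partial
# expected event matches a richer actual event under the default tolerances,
# because extra root-level fields such as 'referer' are tolerated.
def _example_partial_match():
    assert_event_matches(
        {'event_type': 'foo.bar', 'event': {'user_id': 10}},
        {
            'event_type': 'foo.bar',
            'referer': 'http://example.com',
            'event': {'user_id': 10},
        },
    )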
class EventMatchTolerates:
"""
Represents groups of flags that specify the level of tolerance for deviation between an expected event and an actual
event.
    These are common, event-specific deviations that we don't want to handle with special-case logic throughout our
    tests.
"""
# Allow the "event" field to be a string, currently this is the case for all browser events.
STRING_PAYLOAD = 'string_payload'
# Allow unexpected fields to exist in the top level event dictionary.
ROOT_EXTRA_FIELDS = 'root_extra_fields'
# Allow unexpected fields to exist in the "context" dictionary. This is where new fields that appear in multiple
# events are most commonly added, so we frequently want to tolerate variation here.
CONTEXT_EXTRA_FIELDS = 'context_extra_fields'
# Allow unexpected fields to exist in the "event" dictionary. Typically in unit tests we don't want to allow this
# type of variance since there are typically only a small number of tests for a particular event type.
PAYLOAD_EXTRA_FIELDS = 'payload_extra_fields'
@classmethod
def default(cls):
"""A reasonable set of tolerated variations."""
# NOTE: "payload_extra_fields" is deliberately excluded from this list since we want to detect erroneously added
# fields in the payload by default.
return {
cls.STRING_PAYLOAD,
cls.ROOT_EXTRA_FIELDS,
cls.CONTEXT_EXTRA_FIELDS,
}
@classmethod
def lenient(cls):
"""Allow all known variations."""
return cls.default() | {
cls.PAYLOAD_EXTRA_FIELDS
}
@classmethod
def strict(cls):
"""Allow no variation at all."""
return frozenset()
@classmethod
def default_if_not_defined(cls, tolerates=None):
"""Use the provided tolerance or provide a default one if None was specified."""
if tolerates is None:
return cls.default()
else:
return tolerates
def assert_events_equal(expected, actual):
"""
Strict comparison of two events.
This asserts that every field in the real event exactly matches the expected event.
"""
assert_event_matches(expected, actual, tolerate=EventMatchTolerates.strict())
def get_event_differences(expected, actual, tolerate=None):
"""Given two events, gather a list of differences between them given some set of tolerated variances."""
tolerate = EventMatchTolerates.default_if_not_defined(tolerate)
# Some events store their payload in a JSON string instead of a dict. Comparing these strings can be problematic
# since the keys may be in different orders, so we parse the string here if we were expecting a dict.
if EventMatchTolerates.STRING_PAYLOAD in tolerate:
expected = parse_event_payload(expected)
actual = parse_event_payload(actual)
def should_strict_compare(path):
"""
We want to be able to vary the degree of strictness we apply depending on the testing context.
Some tests will want to assert that the entire event matches exactly, others will tolerate some variance in the
context or root fields, but not in the payload (for example).
"""
if path == [] and EventMatchTolerates.ROOT_EXTRA_FIELDS in tolerate:
return False
elif path == ['event'] and EventMatchTolerates.PAYLOAD_EXTRA_FIELDS in tolerate:
return False
elif path == ['context'] and EventMatchTolerates.CONTEXT_EXTRA_FIELDS in tolerate:
return False
else:
return True
return compare_structs(expected, actual, should_strict_compare=should_strict_compare)
def block_indent(text, spaces=4):
"""
Given a multi-line string, indent every line of it by the given number of spaces.
If `text` is not a string it is formatted using pprint.pformat.
"""
    return '\n'.join([(' ' * spaces) + line for line in pprint.pformat(text).splitlines()])
def parse_event_payload(event):
"""
    Given an event, parse the 'event' field if present (otherwise the 'data' field) as a JSON string.
Note that this may simply return the same event unchanged, or return a new copy of the event with the payload
parsed. It will never modify the event in place.
"""
payload_key = 'event' if 'event' in event else 'data'
if payload_key in event and isinstance(event[payload_key], str):
event = event.copy()
try:
event[payload_key] = json.loads(event[payload_key])
except ValueError:
pass
return event
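# Illustrative sketch: a string payload is parsed on a copy of the event; the
# original dict is never mutated.
def _example_parse_event_payload():
    raw = {'event_type': 'foo.bar', 'event': '{"user_id": 10}'}
    parsed = parse_event_payload(raw)
    return parsed['event']  # -> {'user_id': 10}; `raw` is left unchanged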
def compare_structs(expected, actual, should_strict_compare=None, path=None):
"""
Traverse two structures to ensure that the `actual` structure contains all of the elements within the `expected`
one.
    Note that this performs a "deep" comparison, descending into dictionaries, lists and other collections to ensure
that the structure matches the expectation.
If a particular value is not recognized, it is simply compared using the "!=" operator.
"""
if path is None:
path = []
differences = []
if isinstance(expected, dict) and isinstance(actual, dict):
expected_keys = frozenset(list(expected.keys()))
actual_keys = frozenset(list(actual.keys()))
for key in expected_keys - actual_keys:
differences.append(f'{_path_to_string(path + [key])}: not found in actual')
if should_strict_compare is not None and should_strict_compare(path):
for key in actual_keys - expected_keys:
differences.append(f'{_path_to_string(path + [key])}: only defined in actual')
for key in expected_keys & actual_keys:
child_differences = compare_structs(expected[key], actual[key], should_strict_compare, path + [key])
differences.extend(child_differences)
elif expected != actual:
differences.append('{path}: {a} != {b} (expected != actual)'.format(
path=_path_to_string(path),
a=repr(expected),
b=repr(actual)
))
return differences
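# Illustrative sketch: differences come back as dotted-path strings. Comparing
# {'context': {'foo': 'bar'}} with {'context': {'foo': 'baz'}} yields
# ["context.foo: 'bar' != 'baz' (expected != actual)"].
def _example_compare_structs():
    return compare_structs({'context': {'foo': 'bar'}},
                           {'context': {'foo': 'baz'}})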
def is_matching_event(expected_event, actual_event, tolerate=None):
"""Return True iff the `actual_event` matches the `expected_event` given the tolerances."""
return len(get_event_differences(expected_event, actual_event, tolerate=tolerate)) == 0
def _path_to_string(path):
"""Convert a list of path elements into a single path string."""
return '.'.join(path)
|
agpl-3.0
|
samsu/neutron
|
services/loadbalancer/drivers/driver_base.py
|
12
|
3022
|
# Copyright 2014 A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db.loadbalancer import loadbalancer_db as lb_db
from neutron.services.loadbalancer.drivers import driver_mixins
class NotImplementedManager(object):
"""Helper class to make any subclass of LBAbstractDriver explode if it
is missing any of the required object managers.
"""
def create(self, context, obj):
raise NotImplementedError()
def update(self, context, old_obj, obj):
raise NotImplementedError()
def delete(self, context, obj):
raise NotImplementedError()
class LoadBalancerBaseDriver(object):
"""LBaaSv2 object model drivers should subclass LBAbstractDriver, and
initialize the following manager classes to create, update, and delete
the various load balancer objects.
"""
load_balancer = NotImplementedManager()
listener = NotImplementedManager()
pool = NotImplementedManager()
member = NotImplementedManager()
health_monitor = NotImplementedManager()
def __init__(self, plugin):
self.plugin = plugin
class BaseLoadBalancerManager(driver_mixins.BaseRefreshMixin,
driver_mixins.BaseStatsMixin,
driver_mixins.BaseStatusUpdateMixin,
driver_mixins.BaseManagerMixin):
def __init__(self, driver):
super(BaseLoadBalancerManager, self).__init__(driver)
# TODO(dougw), use lb_db.LoadBalancer when v2 lbaas
# TODO(dougw), get rid of __init__() in StatusHelperManager, and
# the if is not None clauses; after fixing this next line,
# it can become a mandatory variable for that subclass.
self.model_class = None
class BaseListenerManager(driver_mixins.BaseManagerMixin):
pass
class BasePoolManager(driver_mixins.BaseStatusUpdateMixin,
driver_mixins.BaseManagerMixin):
def __init__(self, driver):
super(BasePoolManager, self).__init__(driver)
self.model_class = lb_db.Pool
class BaseMemberManager(driver_mixins.BaseStatusUpdateMixin,
driver_mixins.BaseManagerMixin):
def __init__(self, driver):
super(BaseMemberManager, self).__init__(driver)
self.model_class = lb_db.Member
class BaseHealthMonitorManager(
driver_mixins.BaseHealthMonitorStatusUpdateMixin,
driver_mixins.BaseManagerMixin):
pass
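# Hypothetical sketch (not part of Neutron): a concrete driver replaces the
# exploding NotImplementedManager placeholders with real manager instances.
# The no-op bodies and the assumption that BaseManagerMixin accepts the
# driver in __init__ are illustrative only.
class ExampleListenerManager(BaseListenerManager):
    def create(self, context, obj):
        pass
    def update(self, context, old_obj, obj):
        pass
    def delete(self, context, obj):
        pass
class ExampleLoadBalancerDriver(LoadBalancerBaseDriver):
    def __init__(self, plugin):
        super(ExampleLoadBalancerDriver, self).__init__(plugin)
        self.listener = ExampleListenerManager(self)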
|
apache-2.0
|
NickelMedia/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py
|
121
|
8467
|
# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for printing.py."""
import optparse
import StringIO
import sys
import time
import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system import logtesting
from webkitpy import port
from webkitpy.layout_tests.controllers import manager
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.views import printing
def get_options(args):
print_options = printing.print_options()
option_parser = optparse.OptionParser(option_list=print_options)
return option_parser.parse_args(args)
class TestUtilityFunctions(unittest.TestCase):
def test_print_options(self):
options, args = get_options([])
self.assertIsNotNone(options)
class Testprinter(unittest.TestCase):
def assertEmpty(self, stream):
self.assertFalse(stream.getvalue())
def assertNotEmpty(self, stream):
self.assertTrue(stream.getvalue())
def assertWritten(self, stream, contents):
self.assertEqual(stream.buflist, contents)
def reset(self, stream):
stream.buflist = []
stream.buf = ''
def get_printer(self, args=None):
args = args or []
printing_options = printing.print_options()
option_parser = optparse.OptionParser(option_list=printing_options)
options, args = option_parser.parse_args(args)
host = MockHost()
self._port = host.port_factory.get('test', options)
nproc = 2
regular_output = StringIO.StringIO()
printer = printing.Printer(self._port, options, regular_output)
return printer, regular_output
def get_result(self, test_name, result_type=test_expectations.PASS, run_time=0):
failures = []
if result_type == test_expectations.TIMEOUT:
failures = [test_failures.FailureTimeout()]
elif result_type == test_expectations.CRASH:
failures = [test_failures.FailureCrash()]
return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
def test_configure_and_cleanup(self):
# This test verifies that calling cleanup repeatedly and deleting
# the object is safe.
printer, err = self.get_printer()
printer.cleanup()
printer.cleanup()
printer = None
def test_print_config(self):
printer, err = self.get_printer()
        # FIXME: it's lame that I have to set these options directly.
printer._options.pixel_tests = True
printer._options.new_baseline = True
printer._options.time_out_ms = 6000
printer._options.slow_time_out_ms = 12000
printer.print_config('/tmp')
self.assertIn("Using port 'test-mac-leopard'", err.getvalue())
self.assertIn('Test configuration: <leopard, x86, release>', err.getvalue())
self.assertIn('Placing test results in /tmp', err.getvalue())
self.assertIn('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic', err.getvalue())
self.assertIn('Using Release build', err.getvalue())
self.assertIn('Pixel tests enabled', err.getvalue())
self.assertIn('Command line:', err.getvalue())
self.assertIn('Regular timeout: ', err.getvalue())
self.reset(err)
printer._options.quiet = True
printer.print_config('/tmp')
self.assertNotIn('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic', err.getvalue())
def test_print_one_line_summary(self):
printer, err = self.get_printer()
printer._print_one_line_summary(1, 1, 0)
self.assertWritten(err, ["The test ran as expected.\n", "\n"])
printer, err = self.get_printer()
printer._print_one_line_summary(1, 1, 0)
self.assertWritten(err, ["The test ran as expected.\n", "\n"])
printer, err = self.get_printer()
printer._print_one_line_summary(2, 1, 1)
self.assertWritten(err, ["\n", "1 test ran as expected, 1 didn't:\n", "\n"])
printer, err = self.get_printer()
printer._print_one_line_summary(3, 2, 1)
self.assertWritten(err, ["\n", "2 tests ran as expected, 1 didn't:\n", "\n"])
printer, err = self.get_printer()
printer._print_one_line_summary(3, 2, 0)
self.assertWritten(err, ['\n', "2 tests ran as expected (1 didn't run).\n", '\n'])
def test_test_status_line(self):
printer, _ = self.get_printer()
printer._meter.number_of_columns = lambda: 80
actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(80, len(actual))
self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associa...after-index-assertion-fail1.html passed')
printer._meter.number_of_columns = lambda: 89
actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(89, len(actual))
self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associated-...ents-after-index-assertion-fail1.html passed')
printer._meter.number_of_columns = lambda: sys.maxint
actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(90, len(actual))
self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html passed')
printer._meter.number_of_columns = lambda: 18
actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(18, len(actual))
self.assertEqual(actual, '[0/0] f...l passed')
printer._meter.number_of_columns = lambda: 10
actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(actual, '[0/0] associated-elements-after-index-assertion-fail1.html passed')
def test_details(self):
printer, err = self.get_printer(['--details'])
result = self.get_result('passes/image.html')
printer.print_started_test('passes/image.html')
printer.print_finished_test(result, expected=False, exp_str='', got_str='')
self.assertNotEmpty(err)
def test_print_found(self):
printer, err = self.get_printer()
printer.print_found(100, 10, 1, 1)
self.assertWritten(err, ["Found 100 tests; running 10, skipping 90.\n"])
self.reset(err)
printer.print_found(100, 10, 2, 3)
self.assertWritten(err, ["Found 100 tests; running 10 (6 times each: --repeat-each=2 --iterations=3), skipping 90.\n"])
|
bsd-3-clause
|
rodrigocaldas/trabalho_programacao_web
|
modules/requests/packages/chardet/big5freq.py
|
3133
|
82594
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851 / (1 - 0.74851) = 2.98
# Random Distribution Ratio = 512 / (5401 - 512) = 0.105
#
# The typical distribution ratio is about 25% of the ideal one, still much higher than RDR
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
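# Worked check of the arithmetic above (illustrative, not chardet code):
# 0.74851 / (1 - 0.74851) ~= 2.98 (ideal), 512 / (5401 - 512) ~= 0.105
# (random), and the 0.75 value above is roughly 25% of the ideal ratio.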
# Char to FreqOrder table
BIG5_TABLE_SIZE = 5376
Big5CharToFreqOrder = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512
#Everything below is of no interest for detection purpose
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
# flake8: noqa
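# ---------------------------------------------------------------------------
# Illustrative sketch (an editor's assumption, not part of the original
# module): tables like the one above map a character's index to its
# frequency order, and a chardet-style distribution analyser scores text by
# the share of characters whose order falls inside the commonly-used range
# (the first 512 entries, per the "#last 512" marker above). The constant
# and function names below are hypothetical.
FREQUENT_ORDER_LIMIT = 512


def frequent_char_ratio(orders):
    """Fraction of characters whose frequency order is below the limit."""
    scored = [o for o in orders if o >= 0]  # negative orders mean "unknown"
    if not scored:
        return 0.0
    return sum(1 for o in scored if o < FREQUENT_ORDER_LIMIT) / float(len(scored))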
|
mit
|
KaranToor/MA450
|
google-cloud-sdk/.install/.backup/lib/surface/compute/instance_groups/unmanaged/describe.py
|
3
|
1636
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""instance-groups unmanaged describe command."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import instance_groups_utils
class Describe(base_classes.ZonalDescriber):
  """Describe an instance group."""

  @staticmethod
  def Args(parser):
    base_classes.ZonalDescriber.Args(parser)

  @property
  def service(self):
    return self.compute.instanceGroups

  @property
  def resource_type(self):
    return 'instanceGroups'

  def ComputeDynamicProperties(self, args, items):
    return instance_groups_utils.ComputeInstanceGroupManagerMembership(
        compute=self.compute,
        project=self.project,
        http=self.http,
        batch_url=self.batch_url,
        items=items,
        filter_mode=instance_groups_utils.InstanceGroupFilteringMode.ALL_GROUPS)


detailed_help = {
    'brief': 'Describe an instance group',
    'DESCRIPTION': """\
        *{command}* displays detailed information about a Google Compute
        Engine instance group.
        """,
}
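# Example invocation (a sketch; the positional NAME argument and the --zone
# flag come from the shared ZonalDescriber base class, not from this file):
#
#   $ gcloud compute instance-groups unmanaged describe example-group \
#       --zone us-central1-a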
|
apache-2.0
|
tedlaz/pyted
|
tests/pyappgen/pyappgen/qtreports.py
|
1
|
3002
|
# -*- coding: utf-8 -*-
'''
Created on 2014-01-24
@author: tedlaz
'''
from PyQt4 import QtGui, Qt
class rptDlg(QtGui.QDialog):
def __init__(self,html=u'Δοκιμή',title='Document1',parent=None):
super(rptDlg, self).__init__(parent)
self.setAttribute(Qt.Qt.WA_DeleteOnClose)
self.odtName = '%s.odt' % title
self.pdfName = '%s.pdf' % title
self.setWindowTitle(title)
self.editor = QtGui.QTextEdit(self)
self.editor.setFont(QtGui.QFont('Arial',12))
self.buttonPdf = QtGui.QPushButton(u'Εξαγωγή σε pdf', self)
self.buttonPdf.clicked.connect(self.saveAsPdf)
self.buttonOdt = QtGui.QPushButton(u'Εξαγωγή σε odt', self)
self.buttonOdt.clicked.connect(self.saveAsOdt)
self.buttonPreview = QtGui.QPushButton(u'Προεπισκόπιση', self)
self.buttonPreview.clicked.connect(self.handlePreview)
layout = QtGui.QGridLayout(self)
layout.addWidget(self.editor, 0, 0, 1, 3)
layout.addWidget(self.buttonPdf, 1, 0)
layout.addWidget(self.buttonOdt, 1, 1)
layout.addWidget(self.buttonPreview, 1, 2)
self.editor.setHtml(html)
def handlePrint(self):
dialog = QtGui.QPrintDialog()
if dialog.exec_() == QtGui.QDialog.Accepted:
self.editor.document().print_(dialog.printer())
def saveAsPdf(self):
fname = '%s' % QtGui.QFileDialog.getSaveFileName(self,
u"Αποθήκευση σε μορφή pdf",
self.pdfName,
"pdf (*.pdf)")
if fname:
printer = QtGui.QPrinter()
printer.setOutputFormat(QtGui.QPrinter.PdfFormat)
printer.setOutputFileName(fname)
self.editor.document().print_(printer)
def saveAsOdt(self):
fname = '%s' % QtGui.QFileDialog.getSaveFileName(self,
u"Αποθήκευση σε μορφή Libre Office (odt)",
self.odtName,
"Libre Office (*.odt)")
if fname:
doc = QtGui.QTextDocument()
cursor = QtGui.QTextCursor(doc)
cursor.insertHtml(self.editor.toHtml())
writer = QtGui.QTextDocumentWriter()
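            # The next line assumes ODF is the second entry returned by
            # supportedDocumentFormats(); selecting the format by name
            # would be more robust.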
odf_format = writer.supportedDocumentFormats()[1]
writer.setFormat(odf_format)
writer.setFileName(fname)
writer.write(doc)
def handlePreview(self):
dialog = QtGui.QPrintPreviewDialog()
dialog.paintRequested.connect(self.editor.print_)
dialog.exec_()
if __name__ == "__main__":
import sys
import test_printHtml
app = QtGui.QApplication(sys.argv)
    window = rptDlg(test_printHtml.toHtml(), test_printHtml.reportTitle)
window.resize(640, 480)
window.show()
sys.exit(app.exec_())
|
gpl-3.0
|
thresholdsoftware/asylum
|
openerp/addons/account_budget/wizard/__init__.py
|
444
|
1196
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_budget_crossovered_report
import account_budget_analytic
import account_budget_crossovered_summary_report
import account_budget_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
southpawtech/TACTIC-DEV
|
src/bin/js_compactor.py
|
7
|
3086
|
############################################################
#
# Copyright (c) 2009, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
import os
import tacticenv
from pyasm.common import Environment
import tactic.ui.app.js_includes as js_includes
# This function gathers all javascript files listed in js_includes (as defined by js_includes.all_lists)
# and puts them all into one single javascript file.
#
# The compacting process also removes block comments and lines that begin with the line comment marker ("//").
# Also removed are any leading spaces on any line written out to the single javascript file.
#
# The generated output file is placed into tactic/src/context/spt_js and the file is named with:
#
# _compact_spt_all_r<release_number>.js
#
# ... where <release_number> is the TACTIC install's release version number with dots changed to underscores.
#
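#
# Illustration only (hypothetical input, not a file from this repo): a
# source fragment such as
#
#     // set up the panel
#     var x = 1;  /* temp */
#     var y = 2;
#
# would be written to the compacted file as
#
#     var x = 1;
#     var y = 2;
#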
def compact_javascript_files():
print " "
print "Processing javascript files to compact into single '_compact_spt_all.js' file ..."
print " "
context_path = "%s/src/context" % Environment.get_install_dir()
all_js_path = js_includes.get_compact_js_filepath()
out_fp = open( all_js_path, "w" )
for (dir, includes_list) in js_includes.all_lists:
for include in includes_list:
js_path = "%s/%s/%s" % (context_path, dir, include)
print " >> processing '%s' ..." % js_path
out_fp.write( "// %s\n\n" % js_path )
in_fp = open( js_path, "r" )
done = False
comment_flag = False
while not done:
line = in_fp.readline()
if not line:
done = True
continue
line = line.strip()
if line.startswith("//"):
continue
start_comment_idx = line.find( "/*" )
if start_comment_idx != -1:
comment_flag = True
tmp_line = line[ : start_comment_idx ].strip()
if tmp_line:
out_fp.write( "%s\n" % tmp_line )
if line.find( "*/" ) != -1:
comment_flag = False
tmp_line = line[ line.find("*/") + 2 : ].strip()
if tmp_line:
out_fp.write( "%s\n" % tmp_line )
continue
if comment_flag:
continue
line = line.strip()
if line:
out_fp.write( "%s\n" % line )
in_fp.close()
out_fp.write( "\n\n" )
out_fp.close()
print " "
print "Generated compact '%s' file." % all_js_path
print " "
print "DONE compacting javascript files into single file."
print " "
if __name__ == "__main__":
compact_javascript_files()
|
epl-1.0
|
libvirt/autotest
|
client/common_lib/magic.py
|
1
|
58623
|
#!/usr/bin/python
"""
Library used to determine a file MIME type by its magic number; it doesn't
have any external dependencies. Based on the work of Jason Petrone
(jp_py@jsnp.net), adapted to autotest.
Command Line Usage: Running as 'python magic.py file_path' will print a
mime string (or just a description) of the file present on file_path.
API Usage:
    magic.guess_type(file_path) - Returns a description of what the file on
    path 'file_path' contains. This function name was chosen due to a similar
    function in the Python standard library module 'mimetypes'.
@license: GPL v2
@copyright: Jason Petrone (jp_py@jsnp.net) 2000
@copyright: Lucas Meneghel Rodrigues (lmr@redhat.com) 2010
@see: http://www.jsnp.net/code/magic.py
"""
import logging, optparse, os, re, sys, string, struct
import logging_config, logging_manager
def _str_to_num(n):
"""
    Convert a hex, octal or plain decimal string to a number.
    @param n: Hex, octal or decimal string to be converted.
@return: Resulting decimal number.
"""
val = 0
col = long(1)
if n[:1] == 'x': n = '0' + n
if n[:2] == '0x':
# hex
n = string.lower(n[2:])
while len(n) > 0:
l = n[len(n) - 1]
val = val + string.hexdigits.index(l) * col
col = col * 16
n = n[:len(n)-1]
elif n[0] == '\\':
# octal
n = n[1:]
while len(n) > 0:
l = n[len(n) - 1]
if ord(l) < 48 or ord(l) > 57:
break
val = val + int(l) * col
col = col * 8
n = n[:len(n)-1]
else:
val = string.atol(n)
return val
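# A minimal sketch of what _str_to_num returns, derived from the code above
# (values easy to verify by hand):
#   _str_to_num('0x1a') -> 26  (hex branch)
#   _str_to_num('\\12') -> 10  (octal branch, backslash prefix)
#   _str_to_num('42')   -> 42  (falls through to string.atol)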
class MagicLoggingConfig(logging_config.LoggingConfig):
def configure_logging(self, results_dir=None, verbose=False):
super(MagicLoggingConfig, self).configure_logging(use_console=True,
verbose=verbose)
class MagicTest(object):
"""
Compile a magic database entry so it can be compared with data read from
files.
"""
def __init__(self, offset, t, op, value, msg, mask=None):
"""
Reads magic database data. Maps the list fields into class attributes.
@param offset: Offset from start of the file.
@param t: Type of the magic data.
@param op: Operation to be performed when comparing the data.
@param value: Expected value of the magic data for a given data type.
@param msg: String representing the file mimetype.
"""
if t.count('&') > 0:
mask = _str_to_num(t[t.index('&')+1:])
t = t[:t.index('&')]
if type(offset) == type('a'):
self.offset = _str_to_num(offset)
else:
self.offset = offset
self.type = t
self.msg = msg
self.subTests = []
self.op = op
self.mask = mask
self.value = value
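    # Example, using a record that appears in magic_database below:
    # MagicTest(0L, 'beshort&0xfff0', '=', 65520L, 'audio/mpeg') stores type
    # 'beshort' with mask 0xfff0, so test() ANDs the unpacked value with the
    # mask before comparing it to 65520.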
def test(self, data):
"""
        Compare data read from the file with self.value. Only the '='
        operator is actually implemented; the other operators fall through.
        @param data: Data read from the file.
        @return: None if the data does not match the expected value;
                 otherwise the matching mime type string (self.msg).
"""
if self.mask:
data = data & self.mask
if self.op == '=':
if self.value == data:
return self.msg
elif self.op == '<':
pass
elif self.op == '>':
pass
elif self.op == '&':
pass
elif self.op == '^':
pass
return None
def compare(self, data):
"""
Compare data read from the file with the expected data for this
particular mime type register.
@param data: Data read from the file.
"""
try:
if self.type == 'string':
c = ''; s = ''
for i in range(0, len(self.value)+1):
if i + self.offset > len(data) - 1: break
s = s + c
[c] = struct.unpack('c', data[self.offset + i])
data = s
elif self.type == 'short':
[data] = struct.unpack('h', data[self.offset:self.offset + 2])
elif self.type == 'leshort':
[data] = struct.unpack('<h', data[self.offset:self.offset + 2])
elif self.type == 'beshort':
[data] = struct.unpack('>H', data[self.offset:self.offset + 2])
elif self.type == 'long':
[data] = struct.unpack('l', data[self.offset:self.offset + 4])
elif self.type == 'lelong':
[data] = struct.unpack('<l', data[self.offset:self.offset + 4])
elif self.type == 'belong':
[data] = struct.unpack('>l', data[self.offset:self.offset + 4])
else:
pass
except Exception:
return None
return self.test(data)
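# Each record below is [offset, type (with optional '&mask'), operator,
# expected value, mime type or description]; the loop after the table
# compiles every record into a MagicTest instance.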
magic_database = [
[0L, 'leshort', '=', 1538L, 'application/x-alan-adventure-game'],
[0L, 'string', '=', 'TADS', 'application/x-tads-game'],
[0L, 'short', '=', 420L, 'application/x-executable-file'],
[0L, 'short', '=', 421L, 'application/x-executable-file'],
[0L, 'leshort', '=', 603L, 'application/x-executable-file'],
[0L, 'string', '=', 'Core\001', 'application/x-executable-file'],
[0L, 'string', '=', 'AMANDA: TAPESTART DATE', 'application/x-amanda-header'],
[0L, 'belong', '=', 1011L, 'application/x-executable-file'],
[0L, 'belong', '=', 999L, 'application/x-library-file'],
[0L, 'belong', '=', 435L, 'video/mpeg'],
[0L, 'belong', '=', 442L, 'video/mpeg'],
[0L, 'beshort&0xfff0', '=', 65520L, 'audio/mpeg'],
[4L, 'leshort', '=', 44817L, 'video/fli'],
[4L, 'leshort', '=', 44818L, 'video/flc'],
[0L, 'string', '=', 'MOVI', 'video/x-sgi-movie'],
[4L, 'string', '=', 'moov', 'video/quicktime'],
[4L, 'string', '=', 'mdat', 'video/quicktime'],
[0L, 'long', '=', 100554L, 'application/x-apl-workspace'],
[0L, 'string', '=', 'FiLeStArTfIlEsTaRt', 'text/x-apple-binscii'],
[0L, 'string', '=', '\012GL', 'application/data'],
[0L, 'string', '=', 'v\377', 'application/data'],
[0L, 'string', '=', 'NuFile', 'application/data'],
[0L, 'string', '=', 'N\365F\351l\345', 'application/data'],
[0L, 'belong', '=', 333312L, 'application/data'],
[0L, 'belong', '=', 333319L, 'application/data'],
[257L, 'string', '=', 'ustar\000', 'application/x-tar'],
[257L, 'string', '=', 'ustar \000', 'application/x-gtar'],
[0L, 'short', '=', 70707L, 'application/x-cpio'],
[0L, 'short', '=', 143561L, 'application/x-bcpio'],
[0L, 'string', '=', '070707', 'application/x-cpio'],
[0L, 'string', '=', '070701', 'application/x-cpio'],
[0L, 'string', '=', '070702', 'application/x-cpio'],
[0L, 'string', '=', '!<arch>\012debian', 'application/x-dpkg'],
[0L, 'string', '=', '\xed\xab\xee\xdb', 'application/x-rpm'],
[0L, 'long', '=', 177555L, 'application/x-ar'],
[0L, 'short', '=', 177555L, 'application/data'],
[0L, 'long', '=', 177545L, 'application/data'],
[0L, 'short', '=', 177545L, 'application/data'],
[0L, 'long', '=', 100554L, 'application/x-apl-workspace'],
[0L, 'string', '=', '<ar>', 'application/x-ar'],
[0L, 'string', '=', '!<arch>\012__________E', 'application/x-ar'],
[0L, 'string', '=', '-h-', 'application/data'],
[0L, 'string', '=', '!<arch>', 'application/x-ar'],
[0L, 'string', '=', '<ar>', 'application/x-ar'],
[0L, 'string', '=', '<ar>', 'application/x-ar'],
[0L, 'belong', '=', 1711210496L, 'application/x-ar'],
[0L, 'belong', '=', 1013019198L, 'application/x-ar'],
[0L, 'long', '=', 557605234L, 'application/x-ar'],
[0L, 'lelong', '=', 177555L, 'application/data'],
[0L, 'leshort', '=', 177555L, 'application/data'],
[0L, 'lelong', '=', 177545L, 'application/data'],
[0L, 'leshort', '=', 177545L, 'application/data'],
[0L, 'lelong', '=', 236525L, 'application/data'],
[0L, 'lelong', '=', 236526L, 'application/data'],
[0L, 'lelong&0x8080ffff', '=', 2074L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 2330L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 538L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 794L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 1050L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 1562L, 'application/x-arc'],
[0L, 'string', '=', '\032archive', 'application/data'],
[0L, 'leshort', '=', 60000L, 'application/x-arj'],
[0L, 'string', '=', 'HPAK', 'application/data'],
[0L, 'string', '=', '\351,\001JAM application/data', ''],
[2L, 'string', '=', '-lh0-', 'application/x-lha'],
[2L, 'string', '=', '-lh1-', 'application/x-lha'],
[2L, 'string', '=', '-lz4-', 'application/x-lha'],
[2L, 'string', '=', '-lz5-', 'application/x-lha'],
[2L, 'string', '=', '-lzs-', 'application/x-lha'],
[2L, 'string', '=', '-lh -', 'application/x-lha'],
[2L, 'string', '=', '-lhd-', 'application/x-lha'],
[2L, 'string', '=', '-lh2-', 'application/x-lha'],
[2L, 'string', '=', '-lh3-', 'application/x-lha'],
[2L, 'string', '=', '-lh4-', 'application/x-lha'],
[2L, 'string', '=', '-lh5-', 'application/x-lha'],
[0L, 'string', '=', 'Rar!', 'application/x-rar'],
[0L, 'string', '=', 'SQSH', 'application/data'],
[0L, 'string', '=', 'UC2\032', 'application/data'],
[0L, 'string', '=', 'PK\003\004', 'application/zip'],
[20L, 'lelong', '=', 4257523676L, 'application/x-zoo'],
[10L, 'string', '=', '# This is a shell archive', 'application/x-shar'],
[0L, 'string', '=', '*STA', 'application/data'],
[0L, 'string', '=', '2278', 'application/data'],
[0L, 'beshort', '=', 560L, 'application/x-executable-file'],
[0L, 'beshort', '=', 561L, 'application/x-executable-file'],
[0L, 'string', '=', '\000\004\036\212\200', 'application/core'],
[0L, 'string', '=', '.snd', 'audio/basic'],
[0L, 'lelong', '=', 6583086L, 'audio/basic'],
[0L, 'string', '=', 'MThd', 'audio/midi'],
[0L, 'string', '=', 'CTMF', 'audio/x-cmf'],
[0L, 'string', '=', 'SBI', 'audio/x-sbi'],
[0L, 'string', '=', 'Creative Voice File', 'audio/x-voc'],
[0L, 'belong', '=', 1314148939L, 'audio/x-multitrack'],
[0L, 'string', '=', 'RIFF', 'audio/x-wav'],
[0L, 'string', '=', 'EMOD', 'audio/x-emod'],
[0L, 'belong', '=', 779248125L, 'audio/x-pn-realaudio'],
[0L, 'string', '=', 'MTM', 'audio/x-multitrack'],
[0L, 'string', '=', 'if', 'audio/x-669-mod'],
[0L, 'string', '=', 'FAR', 'audio/mod'],
[0L, 'string', '=', 'MAS_U', 'audio/x-multimate-mod'],
[44L, 'string', '=', 'SCRM', 'audio/x-st3-mod'],
[0L, 'string', '=', 'GF1PATCH110\000ID#000002\000', 'audio/x-gus-patch'],
[0L, 'string', '=', 'GF1PATCH100\000ID#000002\000', 'audio/x-gus-patch'],
[0L, 'string', '=', 'JN', 'audio/x-669-mod'],
[0L, 'string', '=', 'UN05', 'audio/x-mikmod-uni'],
[0L, 'string', '=', 'Extended Module:', 'audio/x-ft2-mod'],
[21L, 'string', '=', '!SCREAM!', 'audio/x-st2-mod'],
[1080L, 'string', '=', 'M.K.', 'audio/x-protracker-mod'],
[1080L, 'string', '=', 'M!K!', 'audio/x-protracker-mod'],
[1080L, 'string', '=', 'FLT4', 'audio/x-startracker-mod'],
[1080L, 'string', '=', '4CHN', 'audio/x-fasttracker-mod'],
[1080L, 'string', '=', '6CHN', 'audio/x-fasttracker-mod'],
[1080L, 'string', '=', '8CHN', 'audio/x-fasttracker-mod'],
[1080L, 'string', '=', 'CD81', 'audio/x-oktalyzer-mod'],
[1080L, 'string', '=', 'OKTA', 'audio/x-oktalyzer-mod'],
[1080L, 'string', '=', '16CN', 'audio/x-taketracker-mod'],
[1080L, 'string', '=', '32CN', 'audio/x-taketracker-mod'],
[0L, 'string', '=', 'TOC', 'audio/x-toc'],
[0L, 'short', '=', 3401L, 'application/x-executable-file'],
[0L, 'long', '=', 406L, 'application/x-executable-file'],
[0L, 'short', '=', 406L, 'application/x-executable-file'],
[0L, 'short', '=', 3001L, 'application/x-executable-file'],
[0L, 'lelong', '=', 314L, 'application/x-executable-file'],
[0L, 'string', '=', '//', 'text/cpp'],
[0L, 'string', '=', '\\\\1cw\\', 'application/data'],
[0L, 'string', '=', '\\\\1cw', 'application/data'],
[0L, 'belong&0xffffff00', '=', 2231440384L, 'application/data'],
[0L, 'belong&0xffffff00', '=', 2231487232L, 'application/data'],
[0L, 'short', '=', 575L, 'application/x-executable-file'],
[0L, 'short', '=', 577L, 'application/x-executable-file'],
[4L, 'string', '=', 'pipe', 'application/data'],
[4L, 'string', '=', 'prof', 'application/data'],
[0L, 'string', '=', ': shell', 'application/data'],
[0L, 'string', '=', '#!/bin/sh', 'application/x-sh'],
[0L, 'string', '=', '#! /bin/sh', 'application/x-sh'],
[0L, 'string', '=', '#! /bin/sh', 'application/x-sh'],
[0L, 'string', '=', '#!/bin/csh', 'application/x-csh'],
[0L, 'string', '=', '#! /bin/csh', 'application/x-csh'],
[0L, 'string', '=', '#! /bin/csh', 'application/x-csh'],
[0L, 'string', '=', '#!/bin/ksh', 'application/x-ksh'],
[0L, 'string', '=', '#! /bin/ksh', 'application/x-ksh'],
[0L, 'string', '=', '#! /bin/ksh', 'application/x-ksh'],
[0L, 'string', '=', '#!/bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#!/usr/local/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /usr/local/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#!/usr/local/bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /usr/local/bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /usr/local/bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#!/usr/local/bin/zsh', 'application/x-zsh'],
[0L, 'string', '=', '#! /usr/local/bin/zsh', 'application/x-zsh'],
[0L, 'string', '=', '#! /usr/local/bin/zsh', 'application/x-zsh'],
[0L, 'string', '=', '#!/usr/local/bin/ash', 'application/x-sh'],
[0L, 'string', '=', '#! /usr/local/bin/ash', 'application/x-zsh'],
[0L, 'string', '=', '#! /usr/local/bin/ash', 'application/x-zsh'],
[0L, 'string', '=', '#!/usr/local/bin/ae', 'text/script'],
[0L, 'string', '=', '#! /usr/local/bin/ae', 'text/script'],
[0L, 'string', '=', '#! /usr/local/bin/ae', 'text/script'],
[0L, 'string', '=', '#!/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/local/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/local/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/local/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#!/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/local/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/local/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/local/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#!/bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/awk', 'application/x-awk'],
[0L, 'string', '=', 'BEGIN', 'application/x-awk'],
[0L, 'string', '=', '#!/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /bin/perl', 'application/x-perl'],
[0L, 'string', '=', 'eval "exec /bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#!/usr/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /usr/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /usr/bin/perl', 'application/x-perl'],
[0L, 'string', '=', 'eval "exec /usr/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#!/usr/local/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /usr/local/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /usr/local/bin/perl', 'application/x-perl'],
[0L, 'string', '=', 'eval "exec /usr/local/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#!/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /bin/python', 'application/x-python'],
[0L, 'string', '=', 'eval "exec /bin/python', 'application/x-python'],
[0L, 'string', '=', '#!/usr/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/bin/python', 'application/x-python'],
[0L, 'string', '=', 'eval "exec /usr/bin/python', 'application/x-python'],
[0L, 'string', '=', '#!/usr/local/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/local/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/local/bin/python', 'application/x-python'],
[0L, 'string', '=', 'eval "exec /usr/local/bin/python', 'application/x-python'],
[0L, 'string', '=', '#!/usr/bin/env python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/bin/env python', 'application/x-python'],
[0L, 'string', '=', '#!/bin/rc', 'text/script'],
[0L, 'string', '=', '#! /bin/rc', 'text/script'],
[0L, 'string', '=', '#! /bin/rc', 'text/script'],
[0L, 'string', '=', '#!/bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#!/usr/local/bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /usr/local/bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /usr/local/bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /', 'text/script'],
[0L, 'string', '=', '#! /', 'text/script'],
[0L, 'string', '=', '#!/', 'text/script'],
[0L, 'string', '=', '#! text/script', ''],
[0L, 'string', '=', '\037\235', 'application/compress'],
[0L, 'string', '=', '\037\213', 'application/x-gzip'],
[0L, 'string', '=', '\037\036', 'application/data'],
[0L, 'short', '=', 17437L, 'application/data'],
[0L, 'short', '=', 8191L, 'application/data'],
[0L, 'string', '=', '\377\037', 'application/data'],
[0L, 'short', '=', 145405L, 'application/data'],
[0L, 'string', '=', 'BZh', 'application/x-bzip2'],
[0L, 'leshort', '=', 65398L, 'application/data'],
[0L, 'leshort', '=', 65142L, 'application/data'],
[0L, 'leshort', '=', 64886L, 'application/x-lzh'],
[0L, 'string', '=', '\037\237', 'application/data'],
[0L, 'string', '=', '\037\236', 'application/data'],
[0L, 'string', '=', '\037\240', 'application/data'],
[0L, 'string', '=', 'BZ', 'application/x-bzip'],
[0L, 'string', '=', '\211LZO\000\015\012\032\012', 'application/data'],
[0L, 'belong', '=', 507L, 'application/x-object-file'],
[0L, 'belong', '=', 513L, 'application/x-executable-file'],
[0L, 'belong', '=', 515L, 'application/x-executable-file'],
[0L, 'belong', '=', 517L, 'application/x-executable-file'],
[0L, 'belong', '=', 70231L, 'application/core'],
[24L, 'belong', '=', 60011L, 'application/data'],
[24L, 'belong', '=', 60012L, 'application/data'],
[24L, 'belong', '=', 60013L, 'application/data'],
[24L, 'belong', '=', 60014L, 'application/data'],
[0L, 'belong', '=', 601L, 'application/x-object-file'],
[0L, 'belong', '=', 607L, 'application/data'],
[0L, 'belong', '=', 324508366L, 'application/x-gdbm'],
[0L, 'lelong', '=', 324508366L, 'application/x-gdbm'],
[0L, 'string', '=', 'GDBM', 'application/x-gdbm'],
[0L, 'belong', '=', 398689L, 'application/x-db'],
[0L, 'belong', '=', 340322L, 'application/x-db'],
[0L, 'string', '=', '<list>\012<protocol bbn-m', 'application/data'],
[0L, 'string', '=', 'diff text/x-patch', ''],
[0L, 'string', '=', '*** text/x-patch', ''],
[0L, 'string', '=', 'Only in text/x-patch', ''],
[0L, 'string', '=', 'Common subdirectories: text/x-patch', ''],
[0L, 'string', '=', '!<arch>\012________64E', 'application/data'],
[0L, 'leshort', '=', 387L, 'application/x-executable-file'],
[0L, 'leshort', '=', 392L, 'application/x-executable-file'],
[0L, 'leshort', '=', 399L, 'application/x-object-file'],
[0L, 'string', '=', '\377\377\177', 'application/data'],
[0L, 'string', '=', '\377\377|', 'application/data'],
[0L, 'string', '=', '\377\377~', 'application/data'],
[0L, 'string', '=', '\033c\033', 'application/data'],
[0L, 'long', '=', 4553207L, 'image/x11'],
[0L, 'string', '=', '!<PDF>!\012', 'application/x-prof'],
[0L, 'short', '=', 1281L, 'application/x-locale'],
[24L, 'belong', '=', 60012L, 'application/x-dump'],
[24L, 'belong', '=', 60011L, 'application/x-dump'],
[24L, 'lelong', '=', 60012L, 'application/x-dump'],
[24L, 'lelong', '=', 60011L, 'application/x-dump'],
[0L, 'string', '=', '\177ELF', 'application/x-executable-file'],
[0L, 'short', '=', 340L, 'application/data'],
[0L, 'short', '=', 341L, 'application/x-executable-file'],
[1080L, 'leshort', '=', 61267L, 'application/x-linux-ext2fs'],
[0L, 'string', '=', '\366\366\366\366', 'application/x-pc-floppy'],
[774L, 'beshort', '=', 55998L, 'application/data'],
[510L, 'leshort', '=', 43605L, 'application/data'],
[1040L, 'leshort', '=', 4991L, 'application/x-filesystem'],
[1040L, 'leshort', '=', 5007L, 'application/x-filesystem'],
[1040L, 'leshort', '=', 9320L, 'application/x-filesystem'],
[1040L, 'leshort', '=', 9336L, 'application/x-filesystem'],
[0L, 'string', '=', '-rom1fs-\000', 'application/x-filesystem'],
[395L, 'string', '=', 'OS/2', 'application/x-bootable'],
[0L, 'string', '=', 'FONT', 'font/x-vfont'],
[0L, 'short', '=', 436L, 'font/x-vfont'],
[0L, 'short', '=', 17001L, 'font/x-vfont'],
[0L, 'string', '=', '%!PS-AdobeFont-1.0', 'font/type1'],
[6L, 'string', '=', '%!PS-AdobeFont-1.0', 'font/type1'],
[0L, 'belong', '=', 4L, 'font/x-snf'],
[0L, 'lelong', '=', 4L, 'font/x-snf'],
[0L, 'string', '=', 'STARTFONT font/x-bdf', ''],
[0L, 'string', '=', '\001fcp', 'font/x-pcf'],
[0L, 'string', '=', 'D1.0\015', 'font/x-speedo'],
[0L, 'string', '=', 'flf', 'font/x-figlet'],
[0L, 'string', '=', 'flc', 'application/x-font'],
[0L, 'belong', '=', 335698201L, 'font/x-libgrx'],
[0L, 'belong', '=', 4282797902L, 'font/x-dos'],
[7L, 'belong', '=', 4540225L, 'font/x-dos'],
[7L, 'belong', '=', 5654852L, 'font/x-dos'],
[4098L, 'string', '=', 'DOSFONT', 'font/x-dos'],
[0L, 'string', '=', '<MakerFile', 'application/x-framemaker'],
[0L, 'string', '=', '<MIFFile', 'application/x-framemaker'],
[0L, 'string', '=', '<MakerDictionary', 'application/x-framemaker'],
[0L, 'string', '=', '<MakerScreenFont', 'font/x-framemaker'],
[0L, 'string', '=', '<MML', 'application/x-framemaker'],
[0L, 'string', '=', '<BookFile', 'application/x-framemaker'],
[0L, 'string', '=', '<Maker', 'application/x-framemaker'],
[0L, 'lelong&0377777777', '=', 41400407L, 'application/x-executable-file'],
[0L, 'lelong&0377777777', '=', 41400410L, 'application/x-executable-file'],
[0L, 'lelong&0377777777', '=', 41400413L, 'application/x-executable-file'],
[0L, 'lelong&0377777777', '=', 41400314L, 'application/x-executable-file'],
[7L, 'string', '=', '\357\020\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000', 'application/core'],
[0L, 'lelong', '=', 11421044151L, 'application/data'],
[0L, 'string', '=', 'GIMP Gradient', 'application/x-gimp-gradient'],
[0L, 'string', '=', 'gimp xcf', 'application/x-gimp-image'],
[20L, 'string', '=', 'GPAT', 'application/x-gimp-pattern'],
[20L, 'string', '=', 'GIMP', 'application/x-gimp-brush'],
[0L, 'string', '=', '\336\022\004\225', 'application/x-locale'],
[0L, 'string', '=', '\225\004\022\336', 'application/x-locale'],
[0L, 'beshort', '=', 627L, 'application/x-executable-file'],
[0L, 'beshort', '=', 624L, 'application/x-executable-file'],
[0L, 'string', '=', '\000\001\000\000\000', 'font/ttf'],
[0L, 'long', '=', 1203604016L, 'application/data'],
[0L, 'long', '=', 1702407010L, 'application/data'],
[0L, 'long', '=', 1003405017L, 'application/data'],
[0L, 'long', '=', 1602007412L, 'application/data'],
[0L, 'belong', '=', 34603270L, 'application/x-object-file'],
[0L, 'belong', '=', 34603271L, 'application/x-executable-file'],
[0L, 'belong', '=', 34603272L, 'application/x-executable-file'],
[0L, 'belong', '=', 34603275L, 'application/x-executable-file'],
[0L, 'belong', '=', 34603278L, 'application/x-library-file'],
[0L, 'belong', '=', 34603277L, 'application/x-library-file'],
[0L, 'belong', '=', 34865414L, 'application/x-object-file'],
[0L, 'belong', '=', 34865415L, 'application/x-executable-file'],
[0L, 'belong', '=', 34865416L, 'application/x-executable-file'],
[0L, 'belong', '=', 34865419L, 'application/x-executable-file'],
[0L, 'belong', '=', 34865422L, 'application/x-library-file'],
[0L, 'belong', '=', 34865421L, 'application/x-object-file'],
[0L, 'belong', '=', 34275590L, 'application/x-object-file'],
[0L, 'belong', '=', 34275591L, 'application/x-executable-file'],
[0L, 'belong', '=', 34275592L, 'application/x-executable-file'],
[0L, 'belong', '=', 34275595L, 'application/x-executable-file'],
[0L, 'belong', '=', 34275598L, 'application/x-library-file'],
[0L, 'belong', '=', 34275597L, 'application/x-library-file'],
[0L, 'belong', '=', 557605234L, 'application/x-ar'],
[0L, 'long', '=', 34078982L, 'application/x-executable-file'],
[0L, 'long', '=', 34078983L, 'application/x-executable-file'],
[0L, 'long', '=', 34078984L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341128L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341127L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341131L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341126L, 'application/x-executable-file'],
[0L, 'belong', '=', 34210056L, 'application/x-executable-file'],
[0L, 'belong', '=', 34210055L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341134L, 'application/x-library-file'],
[0L, 'belong', '=', 34341133L, 'application/x-library-file'],
[0L, 'long', '=', 65381L, 'application/x-library-file'],
[0L, 'long', '=', 34275173L, 'application/x-library-file'],
[0L, 'long', '=', 34406245L, 'application/x-library-file'],
[0L, 'long', '=', 34144101L, 'application/x-library-file'],
[0L, 'long', '=', 22552998L, 'application/core'],
[0L, 'long', '=', 1302851304L, 'font/x-hp-windows'],
[0L, 'string', '=', 'Bitmapfile', 'image/unknown'],
[0L, 'string', '=', 'IMGfile', 'CIS image/unknown'],
[0L, 'long', '=', 34341132L, 'application/x-lisp'],
[0L, 'string', '=', 'msgcat01', 'application/x-locale'],
[0L, 'string', '=', 'HPHP48-', 'HP48 binary'],
[0L, 'string', '=', '%%HP:', 'HP48 text'],
[0L, 'beshort', '=', 200L, 'hp200 (68010) BSD'],
[0L, 'beshort', '=', 300L, 'hp300 (68020+68881) BSD'],
[0L, 'beshort', '=', 537L, '370 XA sysV executable'],
[0L, 'beshort', '=', 532L, '370 XA sysV pure executable'],
[0L, 'beshort', '=', 54001L, '370 sysV pure executable'],
[0L, 'beshort', '=', 55001L, '370 XA sysV pure executable'],
[0L, 'beshort', '=', 56401L, '370 sysV executable'],
[0L, 'beshort', '=', 57401L, '370 XA sysV executable'],
[0L, 'beshort', '=', 531L, 'SVR2 executable (Amdahl-UTS)'],
[0L, 'beshort', '=', 534L, 'SVR2 pure executable (Amdahl-UTS)'],
[0L, 'beshort', '=', 530L, 'SVR2 pure executable (USS/370)'],
[0L, 'beshort', '=', 535L, 'SVR2 executable (USS/370)'],
[0L, 'beshort', '=', 479L, 'executable (RISC System/6000 V3.1) or obj module'],
[0L, 'beshort', '=', 260L, 'shared library'],
[0L, 'beshort', '=', 261L, 'ctab data'],
[0L, 'beshort', '=', 65028L, 'structured file'],
[0L, 'string', '=', '0xabcdef', 'AIX message catalog'],
[0L, 'belong', '=', 505L, 'AIX compiled message catalog'],
[0L, 'string', '=', '<aiaff>', 'archive'],
[0L, 'string', '=', 'FORM', 'IFF data'],
[0L, 'string', '=', 'P1', 'image/x-portable-bitmap'],
[0L, 'string', '=', 'P2', 'image/x-portable-graymap'],
[0L, 'string', '=', 'P3', 'image/x-portable-pixmap'],
[0L, 'string', '=', 'P4', 'image/x-portable-bitmap'],
[0L, 'string', '=', 'P5', 'image/x-portable-graymap'],
[0L, 'string', '=', 'P6', 'image/x-portable-pixmap'],
[0L, 'string', '=', 'IIN1', 'image/tiff'],
[0L, 'string', '=', 'MM\000*', 'image/tiff'],
[0L, 'string', '=', 'II*\000', 'image/tiff'],
[0L, 'string', '=', '\211PNG', 'image/x-png'],
[1L, 'string', '=', 'PNG', 'image/x-png'],
[0L, 'string', '=', 'GIF8', 'image/gif'],
[0L, 'string', '=', '\361\000@\273', 'image/x-cmu-raster'],
[0L, 'string', '=', 'id=ImageMagick', 'MIFF image data'],
[0L, 'long', '=', 1123028772L, 'Artisan image data'],
[0L, 'string', '=', '#FIG', 'FIG image text'],
[0L, 'string', '=', 'ARF_BEGARF', 'PHIGS clear text archive'],
[0L, 'string', '=', '@(#)SunPHIGS', 'SunPHIGS'],
[0L, 'string', '=', 'GKSM', 'GKS Metafile'],
[0L, 'string', '=', 'BEGMF', 'clear text Computer Graphics Metafile'],
[0L, 'beshort&0xffe0', '=', 32L, 'binary Computer Graphics Metafile'],
[0L, 'beshort', '=', 12320L, 'character Computer Graphics Metafile'],
[0L, 'string', '=', 'yz', 'MGR bitmap, modern format, 8-bit aligned'],
[0L, 'string', '=', 'zz', 'MGR bitmap, old format, 1-bit deep, 16-bit aligned'],
[0L, 'string', '=', 'xz', 'MGR bitmap, old format, 1-bit deep, 32-bit aligned'],
[0L, 'string', '=', 'yx', 'MGR bitmap, modern format, squeezed'],
[0L, 'string', '=', '%bitmap\000', 'FBM image data'],
[1L, 'string', '=', 'PC Research, Inc', 'group 3 fax data'],
[0L, 'beshort', '=', 65496L, 'image/jpeg'],
[0L, 'string', '=', 'hsi1', 'image/x-jpeg-proprietary'],
[0L, 'string', '=', 'BM', 'image/x-bmp'],
[0L, 'string', '=', 'IC', 'image/x-ico'],
[0L, 'string', '=', 'PI', 'PC pointer image data'],
[0L, 'string', '=', 'CI', 'PC color icon data'],
[0L, 'string', '=', 'CP', 'PC color pointer image data'],
[0L, 'string', '=', '/* XPM */', 'X pixmap image text'],
[0L, 'leshort', '=', 52306L, 'RLE image data,'],
[0L, 'string', '=', 'Imagefile version-', 'iff image data'],
[0L, 'belong', '=', 1504078485L, 'x/x-image-sun-raster'],
[0L, 'beshort', '=', 474L, 'x/x-image-sgi'],
[0L, 'string', '=', 'IT01', 'FIT image data'],
[0L, 'string', '=', 'IT02', 'FIT image data'],
[2048L, 'string', '=', 'PCD_IPI', 'x/x-photo-cd-pack-file'],
[0L, 'string', '=', 'PCD_OPA', 'x/x-photo-cd-overfiew-file'],
[0L, 'string', '=', 'SIMPLE =', 'FITS image data'],
[0L, 'string', '=', 'This is a BitMap file', 'Lisp Machine bit-array-file'],
[0L, 'string', '=', '!!', 'Bennet Yee\'s "face" format'],
[0L, 'beshort', '=', 4112L, 'PEX Binary Archive'],
[3000L, 'string', '=', 'Visio (TM) Drawing', '%s'],
[0L, 'leshort', '=', 502L, 'basic-16 executable'],
[0L, 'leshort', '=', 503L, 'basic-16 executable (TV)'],
[0L, 'leshort', '=', 510L, 'application/x-executable-file'],
[0L, 'leshort', '=', 511L, 'application/x-executable-file'],
[0L, 'leshort', '=', 512L, 'application/x-executable-file'],
[0L, 'leshort', '=', 522L, 'application/x-executable-file'],
[0L, 'leshort', '=', 514L, 'application/x-executable-file'],
[0L, 'string', '=', '\210OPS', 'Interleaf saved data'],
[0L, 'string', '=', '<!OPS', 'Interleaf document text'],
[4L, 'string', '=', 'pgscriptver', 'IslandWrite document'],
[13L, 'string', '=', 'DrawFile', 'IslandDraw document'],
[0L, 'leshort&0xFFFC', '=', 38400L, 'little endian ispell'],
[0L, 'beshort&0xFFFC', '=', 38400L, 'big endian ispell'],
[0L, 'belong', '=', 3405691582L, 'compiled Java class data,'],
[0L, 'beshort', '=', 44269L, 'Java serialization data'],
[0L, 'string', '=', 'KarmaRHD', 'Version Karma Data Structure Version'],
[0L, 'string', '=', 'lect', 'DEC SRC Virtual Paper Lectern file'],
[53L, 'string', '=', 'yyprevious', 'C program text (from lex)'],
[21L, 'string', '=', 'generated by flex', 'C program text (from flex)'],
[0L, 'string', '=', '%{', 'lex description text'],
[0L, 'short', '=', 32768L, 'lif file'],
[0L, 'lelong', '=', 6553863L, 'Linux/i386 impure executable (OMAGIC)'],
[0L, 'lelong', '=', 6553864L, 'Linux/i386 pure executable (NMAGIC)'],
[0L, 'lelong', '=', 6553867L, 'Linux/i386 demand-paged executable (ZMAGIC)'],
[0L, 'lelong', '=', 6553804L, 'Linux/i386 demand-paged executable (QMAGIC)'],
[0L, 'string', '=', '\007\001\000', 'Linux/i386 object file'],
[0L, 'string', '=', '\001\003\020\004', 'Linux-8086 impure executable'],
[0L, 'string', '=', '\001\003 \004', 'Linux-8086 executable'],
[0L, 'string', '=', '\243\206\001\000', 'Linux-8086 object file'],
[0L, 'string', '=', '\001\003\020\020', 'Minix-386 impure executable'],
[0L, 'string', '=', '\001\003 \020', 'Minix-386 executable'],
[0L, 'string', '=', '*nazgul*', 'Linux compiled message catalog'],
[216L, 'lelong', '=', 421L, 'Linux/i386 core file'],
[2L, 'string', '=', 'LILO', 'Linux/i386 LILO boot/chain loader'],
[0L, 'string', '=', '0.9', ''],
[0L, 'leshort', '=', 1078L, 'font/linux-psf'],
[4086L, 'string', '=', 'SWAP-SPACE', 'Linux/i386 swap file'],
[0L, 'leshort', '=', 387L, 'ECOFF alpha'],
[514L, 'string', '=', 'HdrS', 'Linux kernel'],
[0L, 'belong', '=', 3099592590L, 'Linux kernel'],
[0L, 'string', '=', 'Begin3', 'Linux Software Map entry text'],
[0L, 'string', '=', ';;', 'Lisp/Scheme program text'],
[0L, 'string', '=', '\012(', 'byte-compiled Emacs-Lisp program data'],
[0L, 'string', '=', ';ELC\023\000\000\000', 'byte-compiled Emacs-Lisp program data'],
[0L, 'string', '=', "(SYSTEM::VERSION '", 'CLISP byte-compiled Lisp program text'],
[0L, 'long', '=', 1886817234L, 'CLISP memory image data'],
[0L, 'long', '=', 3532355184L, 'CLISP memory image data, other endian'],
[0L, 'long', '=', 3725722773L, 'GNU-format message catalog data'],
[0L, 'long', '=', 2500072158L, 'GNU-format message catalog data'],
[0L, 'belong', '=', 3405691582L, 'mach-o fat file'],
[0L, 'belong', '=', 4277009102L, 'mach-o'],
[11L, 'string', '=', 'must be converted with BinHex', 'BinHex binary text'],
[0L, 'string', '=', 'SIT!', 'StuffIt Archive (data)'],
[65L, 'string', '=', 'SIT!', 'StuffIt Archive (rsrc + data)'],
[0L, 'string', '=', 'SITD', 'StuffIt Deluxe (data)'],
[65L, 'string', '=', 'SITD', 'StuffIt Deluxe (rsrc + data)'],
[0L, 'string', '=', 'Seg', 'StuffIt Deluxe Segment (data)'],
[65L, 'string', '=', 'Seg', 'StuffIt Deluxe Segment (rsrc + data)'],
[0L, 'string', '=', 'APPL', 'Macintosh Application (data)'],
[65L, 'string', '=', 'APPL', 'Macintosh Application (rsrc + data)'],
[0L, 'string', '=', 'zsys', 'Macintosh System File (data)'],
[65L, 'string', '=', 'zsys', 'Macintosh System File(rsrc + data)'],
[0L, 'string', '=', 'FNDR', 'Macintosh Finder (data)'],
[65L, 'string', '=', 'FNDR', 'Macintosh Finder(rsrc + data)'],
[0L, 'string', '=', 'libr', 'Macintosh Library (data)'],
[65L, 'string', '=', 'libr', 'Macintosh Library(rsrc + data)'],
[0L, 'string', '=', 'shlb', 'Macintosh Shared Library (data)'],
[65L, 'string', '=', 'shlb', 'Macintosh Shared Library(rsrc + data)'],
[0L, 'string', '=', 'cdev', 'Macintosh Control Panel (data)'],
[65L, 'string', '=', 'cdev', 'Macintosh Control Panel(rsrc + data)'],
[0L, 'string', '=', 'INIT', 'Macintosh Extension (data)'],
[65L, 'string', '=', 'INIT', 'Macintosh Extension(rsrc + data)'],
[0L, 'string', '=', 'FFIL', 'font/ttf'],
[65L, 'string', '=', 'FFIL', 'font/ttf'],
[0L, 'string', '=', 'LWFN', 'font/type1'],
[65L, 'string', '=', 'LWFN', 'font/type1'],
[0L, 'string', '=', 'PACT', 'Macintosh Compact Pro Archive (data)'],
[65L, 'string', '=', 'PACT', 'Macintosh Compact Pro Archive(rsrc + data)'],
[0L, 'string', '=', 'ttro', 'Macintosh TeachText File (data)'],
[65L, 'string', '=', 'ttro', 'Macintosh TeachText File(rsrc + data)'],
[0L, 'string', '=', 'TEXT', 'Macintosh TeachText File (data)'],
[65L, 'string', '=', 'TEXT', 'Macintosh TeachText File(rsrc + data)'],
[0L, 'string', '=', 'PDF', 'Macintosh PDF File (data)'],
[65L, 'string', '=', 'PDF', 'Macintosh PDF File(rsrc + data)'],
[0L, 'string', '=', '# Magic', 'magic text file for file(1) cmd'],
[0L, 'string', '=', 'Relay-Version:', 'old news text'],
[0L, 'string', '=', '#! rnews', 'batched news text'],
[0L, 'string', '=', 'N#! rnews', 'mailed, batched news text'],
[0L, 'string', '=', 'Forward to', 'mail forwarding text'],
[0L, 'string', '=', 'Pipe to', 'mail piping text'],
[0L, 'string', '=', 'Return-Path:', 'message/rfc822'],
[0L, 'string', '=', 'Path:', 'message/news'],
[0L, 'string', '=', 'Xref:', 'message/news'],
[0L, 'string', '=', 'From:', 'message/rfc822'],
[0L, 'string', '=', 'Article', 'message/news'],
[0L, 'string', '=', 'BABYL', 'message/x-gnu-rmail'],
[0L, 'string', '=', 'Received:', 'message/rfc822'],
[0L, 'string', '=', 'MIME-Version:', 'MIME entity text'],
[0L, 'string', '=', 'Content-Type: ', ''],
[0L, 'string', '=', 'Content-Type:', ''],
[0L, 'long', '=', 31415L, 'Mirage Assembler m.out executable'],
[0L, 'string', '=', '\311\304', 'ID tags data'],
[0L, 'string', '=', '\001\001\001\001', 'MMDF mailbox'],
[4L, 'string', '=', 'Research,', 'Digifax-G3-File'],
[0L, 'short', '=', 256L, 'raw G3 data, byte-padded'],
[0L, 'short', '=', 5120L, 'raw G3 data'],
[0L, 'string', '=', 'RMD1', 'raw modem data'],
[0L, 'string', '=', 'PVF1\012', 'portable voice format'],
[0L, 'string', '=', 'PVF2\012', 'portable voice format'],
[0L, 'beshort', '=', 520L, 'mc68k COFF'],
[0L, 'beshort', '=', 521L, 'mc68k executable (shared)'],
[0L, 'beshort', '=', 522L, 'mc68k executable (shared demand paged)'],
[0L, 'beshort', '=', 554L, '68K BCS executable'],
[0L, 'beshort', '=', 555L, '88K BCS executable'],
[0L, 'string', '=', 'S0', 'Motorola S-Record; binary data in text format'],
[0L, 'string', '=', '@echo off', 'MS-DOS batch file text'],
[128L, 'string', '=', 'PE\000\000', 'MS Windows PE'],
[0L, 'leshort', '=', 332L, 'MS Windows COFF Intel 80386 object file'],
[0L, 'leshort', '=', 358L, 'MS Windows COFF MIPS R4000 object file'],
[0L, 'leshort', '=', 388L, 'MS Windows COFF Alpha object file'],
[0L, 'leshort', '=', 616L, 'MS Windows COFF Motorola 68000 object file'],
[0L, 'leshort', '=', 496L, 'MS Windows COFF PowerPC object file'],
[0L, 'leshort', '=', 656L, 'MS Windows COFF PA-RISC object file'],
[0L, 'string', '=', 'MZ', 'application/x-ms-dos-executable'],
[0L, 'string', '=', 'LZ', 'MS-DOS executable (built-in)'],
[0L, 'string', '=', 'regf', 'Windows NT Registry file'],
[2080L, 'string', '=', 'Microsoft Word 6.0 Document', 'text/vnd.ms-word'],
[2080L, 'string', '=', 'Documento Microsoft Word 6', 'text/vnd.ms-word'],
[2112L, 'string', '=', 'MSWordDoc', 'text/vnd.ms-word'],
[0L, 'belong', '=', 834535424L, 'text/vnd.ms-word'],
[0L, 'string', '=', 'PO^Q`', 'text/vnd.ms-word'],
[2080L, 'string', '=', 'Microsoft Excel 5.0 Worksheet', 'application/vnd.ms-excel'],
[2114L, 'string', '=', 'Biff5', 'application/vnd.ms-excel'],
[0L, 'belong', '=', 6656L, 'Lotus 1-2-3'],
[0L, 'belong', '=', 512L, 'Lotus 1-2-3'],
[1L, 'string', '=', 'WPC', 'text/vnd.wordperfect'],
[0L, 'beshort', '=', 610L, 'Tower/XP rel 2 object'],
[0L, 'beshort', '=', 615L, 'Tower/XP rel 2 object'],
[0L, 'beshort', '=', 620L, 'Tower/XP rel 3 object'],
[0L, 'beshort', '=', 625L, 'Tower/XP rel 3 object'],
[0L, 'beshort', '=', 630L, 'Tower32/600/400 68020 object'],
[0L, 'beshort', '=', 640L, 'Tower32/800 68020'],
[0L, 'beshort', '=', 645L, 'Tower32/800 68010'],
[0L, 'lelong', '=', 407L, 'NetBSD little-endian object file'],
[0L, 'belong', '=', 407L, 'NetBSD big-endian object file'],
[0L, 'belong&0377777777', '=', 41400413L, 'NetBSD/i386 demand paged'],
[0L, 'belong&0377777777', '=', 41400410L, 'NetBSD/i386 pure'],
[0L, 'belong&0377777777', '=', 41400407L, 'NetBSD/i386'],
[0L, 'belong&0377777777', '=', 41400507L, 'NetBSD/i386 core'],
[0L, 'belong&0377777777', '=', 41600413L, 'NetBSD/m68k demand paged'],
[0L, 'belong&0377777777', '=', 41600410L, 'NetBSD/m68k pure'],
[0L, 'belong&0377777777', '=', 41600407L, 'NetBSD/m68k'],
[0L, 'belong&0377777777', '=', 41600507L, 'NetBSD/m68k core'],
[0L, 'belong&0377777777', '=', 42000413L, 'NetBSD/m68k4k demand paged'],
[0L, 'belong&0377777777', '=', 42000410L, 'NetBSD/m68k4k pure'],
[0L, 'belong&0377777777', '=', 42000407L, 'NetBSD/m68k4k'],
[0L, 'belong&0377777777', '=', 42000507L, 'NetBSD/m68k4k core'],
[0L, 'belong&0377777777', '=', 42200413L, 'NetBSD/ns32532 demand paged'],
[0L, 'belong&0377777777', '=', 42200410L, 'NetBSD/ns32532 pure'],
[0L, 'belong&0377777777', '=', 42200407L, 'NetBSD/ns32532'],
[0L, 'belong&0377777777', '=', 42200507L, 'NetBSD/ns32532 core'],
[0L, 'belong&0377777777', '=', 42400413L, 'NetBSD/sparc demand paged'],
[0L, 'belong&0377777777', '=', 42400410L, 'NetBSD/sparc pure'],
[0L, 'belong&0377777777', '=', 42400407L, 'NetBSD/sparc'],
[0L, 'belong&0377777777', '=', 42400507L, 'NetBSD/sparc core'],
[0L, 'belong&0377777777', '=', 42600413L, 'NetBSD/pmax demand paged'],
[0L, 'belong&0377777777', '=', 42600410L, 'NetBSD/pmax pure'],
[0L, 'belong&0377777777', '=', 42600407L, 'NetBSD/pmax'],
[0L, 'belong&0377777777', '=', 42600507L, 'NetBSD/pmax core'],
[0L, 'belong&0377777777', '=', 43000413L, 'NetBSD/vax demand paged'],
[0L, 'belong&0377777777', '=', 43000410L, 'NetBSD/vax pure'],
[0L, 'belong&0377777777', '=', 43000407L, 'NetBSD/vax'],
[0L, 'belong&0377777777', '=', 43000507L, 'NetBSD/vax core'],
[0L, 'lelong', '=', 459141L, 'ECOFF NetBSD/alpha binary'],
[0L, 'belong&0377777777', '=', 43200507L, 'NetBSD/alpha core'],
[0L, 'belong&0377777777', '=', 43400413L, 'NetBSD/mips demand paged'],
[0L, 'belong&0377777777', '=', 43400410L, 'NetBSD/mips pure'],
[0L, 'belong&0377777777', '=', 43400407L, 'NetBSD/mips'],
[0L, 'belong&0377777777', '=', 43400507L, 'NetBSD/mips core'],
[0L, 'belong&0377777777', '=', 43600413L, 'NetBSD/arm32 demand paged'],
[0L, 'belong&0377777777', '=', 43600410L, 'NetBSD/arm32 pure'],
[0L, 'belong&0377777777', '=', 43600407L, 'NetBSD/arm32'],
[0L, 'belong&0377777777', '=', 43600507L, 'NetBSD/arm32 core'],
[0L, 'string', '=', 'StartFontMetrics', 'font/x-sunos-news'],
[0L, 'string', '=', 'StartFont', 'font/x-sunos-news'],
[0L, 'belong', '=', 326773060L, 'font/x-sunos-news'],
[0L, 'belong', '=', 326773063L, 'font/x-sunos-news'],
[0L, 'belong', '=', 326773072L, 'font/x-sunos-news'],
[0L, 'belong', '=', 326773073L, 'font/x-sunos-news'],
[8L, 'belong', '=', 326773573L, 'font/x-sunos-news'],
[8L, 'belong', '=', 326773576L, 'font/x-sunos-news'],
[0L, 'string', '=', 'Octave-1-L', 'Octave binary data (little endian)'],
[0L, 'string', '=', 'Octave-1-B', 'Octave binary data (big endian)'],
[0L, 'string', '=', '\177OLF', 'OLF'],
[0L, 'beshort', '=', 34765L, 'OS9/6809 module:'],
[0L, 'beshort', '=', 19196L, 'OS9/68K module:'],
[0L, 'long', '=', 61374L, 'OSF/Rose object'],
[0L, 'short', '=', 565L, 'i386 COFF object'],
[0L, 'short', '=', 10775L, '"compact bitmap" format (Poskanzer)'],
[0L, 'string', '=', '%PDF-', 'PDF document'],
[0L, 'lelong', '=', 101555L, 'PDP-11 single precision APL workspace'],
[0L, 'lelong', '=', 101554L, 'PDP-11 double precision APL workspace'],
[0L, 'leshort', '=', 407L, 'PDP-11 executable'],
[0L, 'leshort', '=', 401L, 'PDP-11 UNIX/RT ldp'],
[0L, 'leshort', '=', 405L, 'PDP-11 old overlay'],
[0L, 'leshort', '=', 410L, 'PDP-11 pure executable'],
[0L, 'leshort', '=', 411L, 'PDP-11 separate I&D executable'],
[0L, 'leshort', '=', 437L, 'PDP-11 kernel overlay'],
[0L, 'beshort', '=', 39168L, 'PGP key public ring'],
[0L, 'beshort', '=', 38145L, 'PGP key security ring'],
[0L, 'beshort', '=', 38144L, 'PGP key security ring'],
[0L, 'beshort', '=', 42496L, 'PGP encrypted data'],
[0L, 'string', '=', '-----BEGIN PGP', 'PGP armored data'],
[0L, 'string', '=', '# PaCkAgE DaTaStReAm', 'pkg Datastream (SVR4)'],
[0L, 'short', '=', 601L, 'mumps avl global'],
[0L, 'short', '=', 602L, 'mumps blt global'],
[0L, 'string', '=', '%!', 'application/postscript'],
[0L, 'string', '=', '\004%!', 'application/postscript'],
[0L, 'belong', '=', 3318797254L, 'DOS EPS Binary File'],
[0L, 'string', '=', '*PPD-Adobe:', 'PPD file'],
[0L, 'string', '=', '\033%-12345X@PJL', 'HP Printer Job Language data'],
[0L, 'string', '=', '\033%-12345X@PJL', 'HP Printer Job Language data'],
[0L, 'string', '=', '\033E\033', 'image/x-pcl-hp'],
[0L, 'string', '=', '@document(', 'Imagen printer'],
[0L, 'string', '=', 'Rast', 'RST-format raster font data'],
[0L, 'belong&0xff00ffff', '=', 1442840576L, 'ps database'],
[0L, 'long', '=', 1351614727L, 'Pyramid 90x family executable'],
[0L, 'long', '=', 1351614728L, 'Pyramid 90x family pure executable'],
[0L, 'long', '=', 1351614731L, 'Pyramid 90x family demand paged pure executable'],
[0L, 'beshort', '=', 60843L, ''],
[0L, 'string', '=', '{\\\\rtf', 'Rich Text Format data,'],
[38L, 'string', '=', 'Spreadsheet', 'sc spreadsheet file'],
[8L, 'string', '=', '\001s SCCS', 'archive data'],
[0L, 'byte', '=', 46L, 'Sendmail frozen configuration'],
[0L, 'short', '=', 10012L, 'Sendmail frozen configuration'],
[0L, 'lelong', '=', 234L, 'BALANCE NS32000 .o'],
[0L, 'lelong', '=', 4330L, 'BALANCE NS32000 executable (0 @ 0)'],
[0L, 'lelong', '=', 8426L, 'BALANCE NS32000 executable (invalid @ 0)'],
[0L, 'lelong', '=', 12522L, 'BALANCE NS32000 standalone executable'],
[0L, 'leshort', '=', 4843L, 'SYMMETRY i386 .o'],
[0L, 'leshort', '=', 8939L, 'SYMMETRY i386 executable (0 @ 0)'],
[0L, 'leshort', '=', 13035L, 'SYMMETRY i386 executable (invalid @ 0)'],
[0L, 'leshort', '=', 17131L, 'SYMMETRY i386 standalone executable'],
[0L, 'string', '=', 'kbd!map', 'kbd map file'],
[0L, 'belong', '=', 407L, 'old SGI 68020 executable'],
[0L, 'belong', '=', 410L, 'old SGI 68020 pure executable'],
[0L, 'beshort', '=', 34661L, 'disk quotas file'],
[0L, 'beshort', '=', 1286L, 'IRIS Showcase file'],
[0L, 'beshort', '=', 550L, 'IRIS Showcase template'],
[0L, 'belong', '=', 1396917837L, 'IRIS Showcase file'],
[0L, 'belong', '=', 1413695053L, 'IRIS Showcase template'],
[0L, 'belong', '=', 3735927486L, 'IRIX Parallel Arena'],
[0L, 'beshort', '=', 352L, 'MIPSEB COFF executable'],
[0L, 'beshort', '=', 354L, 'MIPSEL COFF executable'],
[0L, 'beshort', '=', 24577L, 'MIPSEB-LE COFF executable'],
[0L, 'beshort', '=', 25089L, 'MIPSEL-LE COFF executable'],
[0L, 'beshort', '=', 355L, 'MIPSEB MIPS-II COFF executable'],
[0L, 'beshort', '=', 358L, 'MIPSEL MIPS-II COFF executable'],
[0L, 'beshort', '=', 25345L, 'MIPSEB-LE MIPS-II COFF executable'],
[0L, 'beshort', '=', 26113L, 'MIPSEL-LE MIPS-II COFF executable'],
[0L, 'beshort', '=', 320L, 'MIPSEB MIPS-III COFF executable'],
[0L, 'beshort', '=', 322L, 'MIPSEL MIPS-III COFF executable'],
[0L, 'beshort', '=', 16385L, 'MIPSEB-LE MIPS-III COFF executable'],
[0L, 'beshort', '=', 16897L, 'MIPSEL-LE MIPS-III COFF executable'],
[0L, 'beshort', '=', 384L, 'MIPSEB Ucode'],
[0L, 'beshort', '=', 386L, 'MIPSEL Ucode'],
[0L, 'belong', '=', 3735924144L, 'IRIX core dump'],
[0L, 'belong', '=', 3735924032L, 'IRIX 64-bit core dump'],
[0L, 'belong', '=', 3133063355L, 'IRIX N32 core dump'],
[0L, 'string', '=', 'CrshDump', 'IRIX vmcore dump of'],
[0L, 'string', '=', 'SGIAUDIT', 'SGI Audit file'],
[0L, 'string', '=', 'WNGZWZSC', 'Wingz compiled script'],
[0L, 'string', '=', 'WNGZWZSS', 'Wingz spreadsheet'],
[0L, 'string', '=', 'WNGZWZHP', 'Wingz help file'],
[0L, 'string', '=', '\\#Inventor', 'V IRIS Inventor 1.0 file'],
[0L, 'string', '=', '\\#Inventor', 'V2 Open Inventor 2.0 file'],
[0L, 'string', '=', 'glfHeadMagic();', 'GLF_TEXT'],
[4L, 'belong', '=', 1090584576L, 'GLF_BINARY_LSB_FIRST'],
[4L, 'belong', '=', 321L, 'GLF_BINARY_MSB_FIRST'],
[0L, 'string', '=', '<!DOCTYPE HTML', 'text/html'],
[0L, 'string', '=', '<!doctype html', 'text/html'],
[0L, 'string', '=', '<HEAD', 'text/html'],
[0L, 'string', '=', '<head', 'text/html'],
[0L, 'string', '=', '<TITLE', 'text/html'],
[0L, 'string', '=', '<title', 'text/html'],
[0L, 'string', '=', '<html', 'text/html'],
[0L, 'string', '=', '<HTML', 'text/html'],
[0L, 'string', '=', '<?xml', 'application/xml'],
[0L, 'string', '=', '<!DOCTYPE', 'exported SGML document text'],
[0L, 'string', '=', '<!doctype', 'exported SGML document text'],
[0L, 'string', '=', '<!SUBDOC', 'exported SGML subdocument text'],
[0L, 'string', '=', '<!subdoc', 'exported SGML subdocument text'],
[0L, 'string', '=', '<!--', 'exported SGML document text'],
[0L, 'string', '=', 'RTSS', 'NetMon capture file'],
[0L, 'string', '=', 'TRSNIFF data \032', 'Sniffer capture file'],
[0L, 'string', '=', 'XCP\000', 'NetXRay capture file'],
[0L, 'ubelong', '=', 2712847316L, 'tcpdump capture file (big-endian)'],
[0L, 'ulelong', '=', 2712847316L, 'tcpdump capture file (little-endian)'],
[0L, 'string', '=', '<!SQ DTD>', 'Compiled SGML rules file'],
[0L, 'string', '=', '<!SQ A/E>', 'A/E SGML Document binary'],
[0L, 'string', '=', '<!SQ STS>', 'A/E SGML binary styles file'],
[0L, 'short', '=', 49374L, 'Compiled PSI (v1) data'],
[0L, 'short', '=', 49370L, 'Compiled PSI (v2) data'],
[0L, 'short', '=', 125252L, 'SoftQuad DESC or font file binary'],
[0L, 'string', '=', 'SQ BITMAP1', 'SoftQuad Raster Format text'],
[0L, 'string', '=', 'X SoftQuad', 'troff Context intermediate'],
[0L, 'belong&077777777', '=', 600413L, 'sparc demand paged'],
[0L, 'belong&077777777', '=', 600410L, 'sparc pure'],
[0L, 'belong&077777777', '=', 600407L, 'sparc'],
[0L, 'belong&077777777', '=', 400413L, 'mc68020 demand paged'],
[0L, 'belong&077777777', '=', 400410L, 'mc68020 pure'],
[0L, 'belong&077777777', '=', 400407L, 'mc68020'],
[0L, 'belong&077777777', '=', 200413L, 'mc68010 demand paged'],
[0L, 'belong&077777777', '=', 200410L, 'mc68010 pure'],
[0L, 'belong&077777777', '=', 200407L, 'mc68010'],
[0L, 'belong', '=', 407L, 'old sun-2 executable'],
[0L, 'belong', '=', 410L, 'old sun-2 pure executable'],
[0L, 'belong', '=', 413L, 'old sun-2 demand paged executable'],
[0L, 'belong', '=', 525398L, 'SunOS core file'],
[0L, 'long', '=', 4197695630L, 'SunPC 4.0 Hard Disk'],
[0L, 'string', '=', '#SUNPC_CONFIG', 'SunPC 4.0 Properties Values'],
[0L, 'string', '=', 'snoop', 'Snoop capture file'],
[36L, 'string', '=', 'acsp', 'Kodak Color Management System, ICC Profile'],
[0L, 'string', '=', '#!teapot\012xdr', 'teapot work sheet (XDR format)'],
[0L, 'string', '=', '\032\001', 'Compiled terminfo entry'],
[0L, 'short', '=', 433L, 'Curses screen image'],
[0L, 'short', '=', 434L, 'Curses screen image'],
[0L, 'string', '=', '\367\002', 'TeX DVI file'],
[0L, 'string', '=', '\367\203', 'font/x-tex'],
[0L, 'string', '=', '\367Y', 'font/x-tex'],
[0L, 'string', '=', '\367\312', 'font/x-tex'],
[0L, 'string', '=', 'This is TeX,', 'TeX transcript text'],
[0L, 'string', '=', 'This is METAFONT,', 'METAFONT transcript text'],
[2L, 'string', '=', '\000\021', 'font/x-tex-tfm'],
[2L, 'string', '=', '\000\022', 'font/x-tex-tfm'],
[0L, 'string', '=', '\\\\input\\', 'texinfo Texinfo source text'],
[0L, 'string', '=', 'This is Info file', 'GNU Info text'],
[0L, 'string', '=', '\\\\input', 'TeX document text'],
[0L, 'string', '=', '\\\\section', 'LaTeX document text'],
[0L, 'string', '=', '\\\\setlength', 'LaTeX document text'],
[0L, 'string', '=', '\\\\documentstyle', 'LaTeX document text'],
[0L, 'string', '=', '\\\\chapter', 'LaTeX document text'],
[0L, 'string', '=', '\\\\documentclass', 'LaTeX 2e document text'],
[0L, 'string', '=', '\\\\relax', 'LaTeX auxiliary file'],
[0L, 'string', '=', '\\\\contentsline', 'LaTeX table of contents'],
[0L, 'string', '=', '\\\\indexentry', 'LaTeX raw index file'],
[0L, 'string', '=', '\\\\begin{theindex}', 'LaTeX sorted index'],
[0L, 'string', '=', '\\\\glossaryentry', 'LaTeX raw glossary'],
[0L, 'string', '=', '\\\\begin{theglossary}', 'LaTeX sorted glossary'],
[0L, 'string', '=', 'This is makeindex', 'Makeindex log file'],
[0L, 'string', '=', '**TI82**', 'TI-82 Graphing Calculator'],
[0L, 'string', '=', '**TI83**', 'TI-83 Graphing Calculator'],
[0L, 'string', '=', '**TI85**', 'TI-85 Graphing Calculator'],
[0L, 'string', '=', '**TI92**', 'TI-92 Graphing Calculator'],
[0L, 'string', '=', '**TI80**', 'TI-80 Graphing Calculator File.'],
[0L, 'string', '=', '**TI81**', 'TI-81 Graphing Calculator File.'],
[0L, 'string', '=', 'TZif', 'timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\001\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\002\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\003\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\004\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\005\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\006\000', 'old timezone data'],
[0L, 'string', '=', '.\\\\"', 'troff or preprocessor input text'],
[0L, 'string', '=', '\'\\\\"', 'troff or preprocessor input text'],
[0L, 'string', '=', '\'.\\\\"', 'troff or preprocessor input text'],
[0L, 'string', '=', '\\\\"', 'troff or preprocessor input text'],
[0L, 'string', '=', 'x T', 'ditroff text'],
[0L, 'string', '=', '@\357', 'very old (C/A/T) troff output data'],
[0L, 'string', '=', 'Interpress/Xerox', 'Xerox InterPress data'],
[0L, 'short', '=', 263L, 'unknown machine executable'],
[0L, 'short', '=', 264L, 'unknown pure executable'],
[0L, 'short', '=', 265L, 'PDP-11 separate I&D'],
[0L, 'short', '=', 267L, 'unknown pure executable'],
[0L, 'long', '=', 268L, 'unknown demand paged pure executable'],
[0L, 'long', '=', 269L, 'unknown demand paged pure executable'],
[0L, 'long', '=', 270L, 'unknown readable demand paged pure executable'],
[0L, 'string', '=', 'begin uuencoded', 'or xxencoded text'],
[0L, 'string', '=', 'xbtoa Begin', "btoa'd text"],
[0L, 'string', '=', '$\012ship', "ship'd binary text"],
[0L, 'string', '=', 'Decode the following with bdeco', 'bencoded News text'],
[11L, 'string', '=', 'must be converted with BinHex', 'BinHex binary text'],
[0L, 'short', '=', 610L, 'Perkin-Elmer executable'],
[0L, 'beshort', '=', 572L, 'amd 29k coff noprebar executable'],
[0L, 'beshort', '=', 1572L, 'amd 29k coff prebar executable'],
[0L, 'beshort', '=', 160007L, 'amd 29k coff archive'],
[6L, 'beshort', '=', 407L, 'unicos (cray) executable'],
[596L, 'string', '=', 'X\337\377\377', 'Ultrix core file'],
[0L, 'string', '=', 'Joy!peffpwpc', 'header for PowerPC PEF executable'],
[0L, 'lelong', '=', 101557L, 'VAX single precision APL workspace'],
[0L, 'lelong', '=', 101556L, 'VAX double precision APL workspace'],
[0L, 'lelong', '=', 407L, 'VAX executable'],
[0L, 'lelong', '=', 410L, 'VAX pure executable'],
[0L, 'lelong', '=', 413L, 'VAX demand paged pure executable'],
[0L, 'leshort', '=', 570L, 'VAX COFF executable'],
[0L, 'leshort', '=', 575L, 'VAX COFF pure executable'],
[0L, 'string', '=', 'LBLSIZE=', 'VICAR image data'],
[43L, 'string', '=', 'SFDU_LABEL', 'VICAR label file'],
[0L, 'short', '=', 21845L, 'VISX image file'],
[0L, 'string', '=', '\260\0000\000', 'VMS VAX executable'],
[0L, 'belong', '=', 50331648L, 'VMS Alpha executable'],
[1L, 'string', '=', 'WPC', '(Corel/WP)'],
[0L, 'string', '=', 'core', 'core file (Xenix)'],
[0L, 'byte', '=', 128L, '8086 relocatable (Microsoft)'],
[0L, 'leshort', '=', 65381L, 'x.out'],
[0L, 'leshort', '=', 518L, 'Microsoft a.out'],
[0L, 'leshort', '=', 320L, 'old Microsoft 8086 x.out'],
[0L, 'lelong', '=', 518L, 'b.out'],
[0L, 'leshort', '=', 1408L, 'XENIX 8086 relocatable or 80286 small model'],
[0L, 'long', '=', 59399L, 'object file (z8000 a.out)'],
[0L, 'long', '=', 59400L, 'pure object file (z8000 a.out)'],
[0L, 'long', '=', 59401L, 'separate object file (z8000 a.out)'],
[0L, 'long', '=', 59397L, 'overlay object file (z8000 a.out)'],
[0L, 'string', '=', 'ZyXEL\002', 'ZyXEL voice data'],
]
magic_tests = []
for record in magic_database:
magic_tests.append(MagicTest(record[0], record[1], record[2], record[3],
record[4]))
def guess_type(filename):
"""
Guess the mimetype of a file based on its filename.
@param filename: File name.
@return: Mimetype string or description, when appropriate mime not
available.
"""
if not os.path.isfile(filename):
logging.debug('%s is not a file', filename)
return None
try:
with open(filename, 'rb') as magic_file:
data = magic_file.read(8192)
except Exception, e:
logging.error(str(e))
return None
for test in magic_tests:
file_type = test.compare(data)
if file_type:
return file_type
# No matching magic number in the database. is it binary or text?
for c in data:
if ord(c) >= 128:
# Non-ASCII (binary) data
return 'Data'
# ASCII, do some text tests
if data.find('The', 0, 8192) > -1:
return 'English text'
if data.find('def', 0, 8192) > -1:
return 'Python Source'
return 'ASCII text'
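# A minimal usage sketch ('notes.txt' is a hypothetical file):
#   >>> guess_type('notes.txt')
#   'ASCII text'
# guess_type() returns None when the path is not a regular file or
# cannot be read.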
if __name__ == '__main__':
parser = optparse.OptionParser("usage: %prog [options] [filenames]")
options, args = parser.parse_args()
logging_manager.configure_logging(MagicLoggingConfig(), verbose=True)
if not args:
parser.print_help()
sys.exit(1)
for arg in args:
msg = None
if os.path.isfile(arg):
msg = guess_type(arg)
if msg:
logging.info('%s: %s', arg, msg)
else:
logging.info('%s: unknown', arg)
|
gpl-2.0
|
klickagent/phantomjs
|
src/qt/qtwebkit/Tools/gdb/webkit.py
|
115
|
11357
|
# Copyright (C) 2010, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""GDB support for WebKit types.
Add this to your gdb by amending your ~/.gdbinit as follows:
python
import sys
sys.path.insert(0, "/path/to/tools/gdb/")
import webkit
"""
import gdb
import re
import struct
def guess_string_length(ptr):
"""Guess length of string pointed by ptr.
Returns a tuple of (length, an error message).
"""
# Try to guess at the length.
for i in xrange(0, 2048):
try:
if int((ptr + i).dereference()) == 0:
return i, ''
except RuntimeError:
# We indexed into inaccessible memory; give up.
return i, ' (gdb hit inaccessible memory)'
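# No NUL found within the first 2048 code units; arbitrarily report a
# length of 256 and flag the string as unterminated.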
return 256, ' (gdb found no trailing NUL)'
def ustring_to_string(ptr, length=None):
"""Convert a pointer to UTF-16 data into a Python string encoded with utf-8.
ptr and length are both gdb.Value objects.
If length is unspecified, will guess at the length."""
error_message = ''
if length is None:
length, error_message = guess_string_length(ptr)
else:
length = int(length)
char_vals = [int((ptr + i).dereference()) for i in xrange(length)]
string = struct.pack('H' * length, *char_vals).decode('utf-16', 'replace').encode('utf-8')
return string + error_message
def lstring_to_string(ptr, length=None):
"""Convert a pointer to LChar* data into a Python (non-Unicode) string.
ptr and length are both gdb.Value objects.
If length is unspecified, will guess at the length."""
error_message = ''
if length is None:
length, error_message = guess_string_length(ptr)
else:
length = int(length)
string = ''.join([chr((ptr + i).dereference()) for i in xrange(length)])
return string + error_message
class StringPrinter(object):
"Shared code between different string-printing classes"
def __init__(self, val):
self.val = val
def display_hint(self):
return 'string'
class UCharStringPrinter(StringPrinter):
"Print a UChar*; we must guess at the length"
def to_string(self):
return ustring_to_string(self.val)
class LCharStringPrinter(StringPrinter):
"Print a LChar*; we must guess at the length"
def to_string(self):
return lstring_to_string(self.val)
class WTFAtomicStringPrinter(StringPrinter):
"Print a WTF::AtomicString"
def to_string(self):
return self.val['m_string']
class WTFCStringPrinter(StringPrinter):
"Print a WTF::CString"
def to_string(self):
# The CString holds a buffer, which is a refptr to a WTF::CStringBuffer.
data = self.val['m_buffer']['m_ptr']['m_data'].cast(gdb.lookup_type('char').pointer())
length = self.val['m_buffer']['m_ptr']['m_length']
return ''.join([chr((data + i).dereference()) for i in xrange(length)])
class WTFStringImplPrinter(StringPrinter):
"Print a WTF::StringImpl"
def get_length(self):
return self.val['m_length']
def to_string(self):
if self.is_8bit():
return lstring_to_string(self.val['m_data8'], self.get_length())
return ustring_to_string(self.val['m_data16'], self.get_length())
def is_8bit(self):
return self.val['m_hashAndFlags'] & self.val['s_hashFlag8BitBuffer']
class WTFStringPrinter(StringPrinter):
"Print a WTF::String"
def stringimpl_ptr(self):
return self.val['m_impl']['m_ptr']
def get_length(self):
if not self.stringimpl_ptr():
return 0
return WTFStringImplPrinter(self.stringimpl_ptr().dereference()).get_length()
def to_string(self):
if not self.stringimpl_ptr():
return '(null)'
return self.stringimpl_ptr().dereference()
class JSCIdentifierPrinter(StringPrinter):
"Print a JSC::Identifier"
def to_string(self):
return WTFStringPrinter(self.val['m_string']).to_string()
class JSCJSStringPrinter(StringPrinter):
"Print a JSC::JSString"
def to_string(self):
if self.val['m_length'] == 0:
return ''
return WTFStringImplPrinter(self.val['m_value']).to_string()
class WebCoreKURLGooglePrivatePrinter(StringPrinter):
"Print a WebCore::KURLGooglePrivate"
def to_string(self):
return WTFCStringPrinter(self.val['m_utf8']).to_string()
class WebCoreQualifiedNamePrinter(StringPrinter):
"Print a WebCore::QualifiedName"
def __init__(self, val):
super(WebCoreQualifiedNamePrinter, self).__init__(val)
self.prefix_length = 0
self.length = 0
if self.val['m_impl']:
self.prefix_printer = WTFStringPrinter(
self.val['m_impl']['m_prefix']['m_string'])
self.local_name_printer = WTFStringPrinter(
self.val['m_impl']['m_localName']['m_string'])
self.prefix_length = self.prefix_printer.get_length()
if self.prefix_length > 0:
self.length = (self.prefix_length + 1 +
self.local_name_printer.get_length())
else:
self.length = self.local_name_printer.get_length()
def get_length(self):
return self.length
def to_string(self):
if self.get_length() == 0:
return "(null)"
else:
if self.prefix_length > 0:
return (self.prefix_printer.to_string() + ":" +
self.local_name_printer.to_string())
else:
return self.local_name_printer.to_string()
class WTFVectorPrinter:
"""Pretty Printer for a WTF::Vector.
The output of this pretty printer is similar to the output of std::vector's
pretty printer, which is bundled in gcc.
Example gdb session should look like:
(gdb) p v
$3 = WTF::Vector of length 7, capacity 16 = {7, 17, 27, 37, 47, 57, 67}
(gdb) set print elements 3
(gdb) p v
$6 = WTF::Vector of length 7, capacity 16 = {7, 17, 27...}
(gdb) set print array
(gdb) p v
$7 = WTF::Vector of length 7, capacity 16 = {
7,
17,
27
...
}
(gdb) set print elements 200
(gdb) p v
$8 = WTF::Vector of length 7, capacity 16 = {
7,
17,
27,
37,
47,
57,
67
}
"""
class Iterator:
def __init__(self, start, finish):
self.item = start
self.finish = finish
self.count = 0
def __iter__(self):
return self
def next(self):
if self.item == self.finish:
raise StopIteration
count = self.count
self.count += 1
element = self.item.dereference()
self.item += 1
return ('[%d]' % count, element)
def __init__(self, val):
self.val = val
def children(self):
start = self.val['m_buffer']
return self.Iterator(start, start + self.val['m_size'])
def to_string(self):
return ('%s of length %d, capacity %d'
% ('WTF::Vector', self.val['m_size'], self.val['m_capacity']))
def display_hint(self):
return 'array'
def add_pretty_printers():
pretty_printers = (
(re.compile("^WTF::Vector<.*>$"), WTFVectorPrinter),
(re.compile("^WTF::AtomicString$"), WTFAtomicStringPrinter),
(re.compile("^WTF::CString$"), WTFCStringPrinter),
(re.compile("^WTF::String$"), WTFStringPrinter),
(re.compile("^WTF::StringImpl$"), WTFStringImplPrinter),
(re.compile("^WebCore::KURLGooglePrivate$"), WebCoreKURLGooglePrivatePrinter),
(re.compile("^WebCore::QualifiedName$"), WebCoreQualifiedNamePrinter),
(re.compile("^JSC::Identifier$"), JSCIdentifierPrinter),
(re.compile("^JSC::JSString$"), JSCJSStringPrinter),
)
def lookup_function(val):
"""Function used to load pretty printers; will be passed to GDB."""
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
type = type.unqualified().strip_typedefs()
tag = type.tag
if tag:
for function, pretty_printer in pretty_printers:
if function.search(tag):
return pretty_printer(val)
if type.code == gdb.TYPE_CODE_PTR:
name = str(type.target().unqualified())
if name == 'UChar':
return UCharStringPrinter(val)
if name == 'LChar':
return LCharStringPrinter(val)
return None
gdb.pretty_printers.append(lookup_function)
add_pretty_printers()
class PrintPathToRootCommand(gdb.Command):
"""Command for printing WebKit Node trees.
Usage: printpathtoroot variable_name"""
def __init__(self):
super(PrintPathToRootCommand, self).__init__("printpathtoroot",
gdb.COMMAND_SUPPORT,
gdb.COMPLETE_NONE)
def invoke(self, arg, from_tty):
element_type = gdb.lookup_type('WebCore::Element')
node_type = gdb.lookup_type('WebCore::Node')
frame = gdb.selected_frame()
try:
val = gdb.Frame.read_var(frame, arg)
except:
print "No such variable, or invalid type"
return
target_type = str(val.type.target().strip_typedefs())
if target_type == str(node_type):
stack = []
while val:
stack.append([val,
val.cast(element_type.pointer()).dereference()['m_tagName']])
val = val.dereference()['m_parent']
padding = ''
while len(stack) > 0:
pair = stack.pop()
print padding, pair[1], pair[0]
padding = padding + ' '
else:
print 'Sorry: I don\'t know how to deal with %s yet.' % target_type
PrintPathToRootCommand()
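# Example gdb session (assuming a WebCore::Node* variable named 'node'
# is in scope in the selected frame):
#   (gdb) printpathtoroot node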
|
bsd-3-clause
|
Mitchkoens/sympy
|
sympy/physics/quantum/fermion.py
|
59
|
4518
|
"""Fermionic quantum operators."""
from sympy.core.compatibility import u
from sympy import Integer
from sympy.physics.quantum import Operator
from sympy.physics.quantum import HilbertSpace, Ket, Bra
from sympy.functions.special.tensor_functions import KroneckerDelta
__all__ = [
'FermionOp',
'FermionFockKet',
'FermionFockBra'
]
class FermionOp(Operator):
"""A fermionic operator that satisfies {c, Dagger(c)} == 1.
Parameters
==========
name : str
A string that labels the fermionic mode.
annihilation : bool
A bool that indicates if the fermionic operator is an annihilation
(True, default value) or creation operator (False)
Examples
========
>>> from sympy.physics.quantum import Dagger, AntiCommutator
>>> from sympy.physics.quantum.fermion import FermionOp
>>> c = FermionOp("c")
>>> AntiCommutator(c, Dagger(c)).doit()
1
"""
@property
def name(self):
return self.args[0]
@property
def is_annihilation(self):
return bool(self.args[1])
@classmethod
def default_args(self):
return ("c", True)
def __new__(cls, *args, **hints):
if len(args) not in (1, 2):
raise ValueError('1 or 2 parameters expected, got %s' % args)
if len(args) == 1:
args = (args[0], Integer(1))
if len(args) == 2:
args = (args[0], Integer(args[1]))
return Operator.__new__(cls, *args)
def _eval_commutator_FermionOp(self, other, **hints):
if 'independent' in hints and hints['independent']:
# [c, d] = 0
return Integer(0)
return None
def _eval_anticommutator_FermionOp(self, other, **hints):
if self.name == other.name:
# {a^\dagger, a} = 1
if not self.is_annihilation and other.is_annihilation:
return Integer(1)
elif 'independent' in hints and hints['independent']:
# {c, d} = 2 * c * d, because [c, d] = 0 for independent operators
return 2 * self * other
return None
def _eval_anticommutator_BosonOp(self, other, **hints):
# because fermions and bosons commute
return 2 * self * other
def _eval_commutator_BosonOp(self, other, **hints):
return Integer(0)
def _eval_adjoint(self):
return FermionOp(str(self.name), not self.is_annihilation)
def _print_contents_latex(self, printer, *args):
if self.is_annihilation:
return r'{%s}' % str(self.name)
else:
return r'{{%s}^\dag}' % str(self.name)
def _print_contents(self, printer, *args):
if self.is_annihilation:
return r'%s' % str(self.name)
else:
return r'Dagger(%s)' % str(self.name)
def _print_contents_pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
pform = printer._print(self.args[0], *args)
if self.is_annihilation:
return pform
else:
return pform**prettyForm(u('\N{DAGGER}'))
class FermionFockKet(Ket):
"""Fock state ket for a fermionic mode.
Parameters
==========
n : Number
The Fock state number.
"""
def __new__(cls, n):
if n not in [0, 1]:
raise ValueError("n must be 0 or 1")
return Ket.__new__(cls, n)
@property
def n(self):
return self.label[0]
@classmethod
def dual_class(self):
return FermionFockBra
@classmethod
def _eval_hilbert_space(cls, label):
return HilbertSpace()
def _eval_innerproduct_FermionFockBra(self, bra, **hints):
return KroneckerDelta(self.n, bra.n)
def _apply_operator_FermionOp(self, op, **options):
if op.is_annihilation:
if self.n == 1:
return FermionFockKet(0)
else:
return Integer(0)
else:
if self.n == 0:
return FermionFockKet(1)
else:
return Integer(0)
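# A minimal doctest-style sketch (qapply is sympy's helper for applying
# operators to states):
#   >>> from sympy.physics.quantum import qapply
#   >>> qapply(FermionOp("c") * FermionFockKet(1))
#   |0>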
class FermionFockBra(Bra):
"""Fock state bra for a fermionic mode.
Parameters
==========
n : Number
The Fock state number.
"""
def __new__(cls, n):
if n not in [0, 1]:
raise ValueError("n must be 0 or 1")
return Bra.__new__(cls, n)
@property
def n(self):
return self.label[0]
@classmethod
def dual_class(self):
return FermionFockKet
|
bsd-3-clause
|
olemis/sqlalchemy
|
lib/sqlalchemy/dialects/mssql/base.py
|
9
|
66501
|
# mssql/base.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql
:name: Microsoft SQL Server
Auto Increment Behavior
-----------------------
SQL Server provides so-called "auto incrementing" behavior using the
``IDENTITY`` construct, which can be placed on an integer primary key.
SQLAlchemy considers ``IDENTITY`` within its default "autoincrement" behavior,
described at :paramref:`.Column.autoincrement`; this means
that by default, the first integer primary key column in a :class:`.Table`
will be considered to be the identity column and will generate DDL as such::
from sqlalchemy import Table, MetaData, Column, Integer
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
The above example will generate DDL as:
.. sourcecode:: sql
CREATE TABLE t (
id INTEGER NOT NULL IDENTITY(1,1),
x INTEGER NULL,
PRIMARY KEY (id)
)
For the case where this default generation of ``IDENTITY`` is not desired,
specify ``autoincrement=False`` on all integer primary key columns::
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('x', Integer))
m.create_all(engine)
.. note::
An INSERT statement which refers to an explicit value for such
a column is prohibited by SQL Server; however, SQLAlchemy will detect this
and modify the ``IDENTITY_INSERT`` flag accordingly at statement execution
time. As this is not a high-performing process, care should be taken to
set the ``autoincrement`` flag appropriately for columns that will not
actually require IDENTITY behavior.
Controlling "Start" and "Increment"
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specific control over the parameters of the ``IDENTITY`` value is supported
using the :class:`.schema.Sequence` object. While this object normally
represents an explicit "sequence" for supporting backends, on SQL Server it is
re-purposed to specify behavior regarding the identity column, including
support of the "start" and "increment" values::
from sqlalchemy import Table, Integer, Sequence, Column
Table('test', metadata,
Column('id', Integer,
Sequence('blah', start=100, increment=10),
primary_key=True),
Column('name', String(20))
).create(some_engine)
would yield:
.. sourcecode:: sql
CREATE TABLE test (
id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
name VARCHAR(20) NULL
)
Note that the ``start`` and ``increment`` values for sequences are
optional and will default to 1,1.
INSERT behavior
^^^^^^^^^^^^^^^^
Handling of the ``IDENTITY`` column at INSERT time involves two key
techniques. The most common is being able to fetch the "last inserted value"
for a given ``IDENTITY`` column, a process which SQLAlchemy performs
implicitly in many cases, most importantly within the ORM.
The process for fetching this value has several variants:
* In the vast majority of cases, RETURNING is used in conjunction with INSERT
statements on SQL Server in order to get newly generated primary key values:
.. sourcecode:: sql
INSERT INTO t (x) OUTPUT inserted.id VALUES (?)
* When RETURNING is not available or has been disabled via
``implicit_returning=False``, either the ``scope_identity()`` function or
the ``@@identity`` variable is used; behavior varies by backend:
* when using PyODBC, the phrase ``; select scope_identity()`` will be
appended to the end of the INSERT statement; a second result set will be
fetched in order to receive the value. Given a table as::
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer),
implicit_returning=False)
an INSERT will look like:
.. sourcecode:: sql
INSERT INTO t (x) VALUES (?); select scope_identity()
* Other dialects such as pymssql will call upon
``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT
statement. If the flag ``use_scope_identity=False`` is passed to
:func:`.create_engine`, the statement ``SELECT @@identity AS lastrowid``
is used instead.
A table that contains an ``IDENTITY`` column will prohibit an INSERT statement
that refers to the identity column explicitly. The SQLAlchemy dialect will
detect when an INSERT construct, created using a core :func:`.insert`
construct (not a plain string SQL), refers to the identity column, and
in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert
statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
execution. Given this example::
m = MetaData()
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
engine.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2})
The above column will be created with IDENTITY, however the INSERT statement
we emit is specifying explicit values. In the echo output we can see
how SQLAlchemy handles this:
.. sourcecode:: sql
CREATE TABLE t (
id INTEGER NOT NULL IDENTITY(1,1),
x INTEGER NULL,
PRIMARY KEY (id)
)
COMMIT
SET IDENTITY_INSERT t ON
INSERT INTO t (id, x) VALUES (?, ?)
((1, 1), (2, 2))
SET IDENTITY_INSERT t OFF
COMMIT
This is an auxiliary use case suitable for testing and bulk insert scenarios.
.. _legacy_schema_rendering:
Rendering of SQL statements that include schema qualifiers
---------------------------------------------------------
When using :class:`.Table` metadata that includes a "schema" qualifier,
such as::
account_table = Table(
'account', metadata,
Column('id', Integer, primary_key=True),
Column('info', String(100)),
schema="customer_schema"
)
The SQL Server dialect has a long-standing behavior that it will attempt
to turn a schema-qualified table name into an alias, such as::
>>> eng = create_engine("mssql+pymssql://mydsn")
>>> print(account_table.select().compile(eng))
SELECT account_1.id, account_1.info
FROM customer_schema.account AS account_1
This behavior is legacy, does not function correctly for many forms
of SQL statements, and will be disabled by default in the 1.1 series
of SQLAlchemy. As of 1.0.5, the above statement will produce the following
warning::
SAWarning: legacy_schema_aliasing flag is defaulted to True;
some schema-qualified queries may not function correctly.
Consider setting this flag to False for modern SQL Server versions;
this flag will default to False in version 1.1
This warning encourages the :class:`.Engine` to be created as follows::
>>> eng = create_engine("mssql+pymssql://mydsn", legacy_schema_aliasing=False)
Where the above SELECT statement will produce::
>>> print(account_table.select().compile(eng))
SELECT customer_schema.account.id, customer_schema.account.info
FROM customer_schema.account
The warning will not emit if the ``legacy_schema_aliasing`` flag is set
to either True or False.
.. versionadded:: 1.0.5 - Added the ``legacy_schema_aliasing`` flag to disable
the SQL Server dialect's legacy behavior with schema-qualified table
names. This flag will default to False in version 1.1.
Collation Support
-----------------
Character collations are supported by the base string types,
specified by the string argument "collation"::
from sqlalchemy import VARCHAR
Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
When such a column is associated with a :class:`.Table`, the
CREATE TABLE statement for this column will yield::
login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
.. versionadded:: 0.8 Character collations are now part of the base string
types.
LIMIT/OFFSET Support
--------------------
MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is
supported directly through the ``TOP`` Transact-SQL keyword::
select.limit
will yield::
SELECT TOP n
If using SQL Server 2005 or above, LIMIT with OFFSET
support is available through the ``ROW_NUMBER OVER`` construct.
For versions below 2005, LIMIT with OFFSET usage will fail.
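As a minimal sketch (assuming the table ``t`` from the earlier examples),
a statement such as::
stmt = t.select().order_by(t.c.id).limit(10).offset(5)
compiles to the ``ROW_NUMBER() OVER`` form; the dialect requires an
ORDER BY whenever OFFSET (or a non-simple LIMIT) is used.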
Nullability
-----------
MSSQL has support for three levels of column nullability. The default
nullability allows nulls and is explicit in the CREATE TABLE
construct::
name VARCHAR(20) NULL
If ``nullable=None`` is specified then no specification is made. In
other words the database's configured default is used. This will
render::
name VARCHAR(20)
If ``nullable`` is ``True`` or ``False`` then the column will be
``NULL`` or ``NOT NULL``, respectively.
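A minimal sketch of the three settings (column names hypothetical)::
Column('a', String(20))                  # nullable defaults to True: NULL
Column('b', String(20), nullable=False)  # NOT NULL
Column('c', String(20), nullable=None)   # no specification rendered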
Date / Time Handling
--------------------
DATE and TIME are supported. Bind parameters are converted
to datetime.datetime() objects as required by most MSSQL drivers,
and results are processed from strings if needed.
The DATE and TIME types are not available for MSSQL 2005 and
previous - if a server version below 2008 is detected, DDL
for these types will be issued as DATETIME.
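For example, given a hypothetical table ``events`` with a :class:`.Date`
column ``on_date``, a plain ``datetime.date`` value may be passed and is
converted before it reaches the driver::
import datetime
engine.execute(events.insert(), {'id': 1, 'on_date': datetime.date(2015, 1, 1)})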
.. _mssql_large_type_deprecation:
Large Text/Binary Type Deprecation
----------------------------------
Per `SQL Server 2012/2014 Documentation <http://technet.microsoft.com/en-us/library/ms187993.aspx>`_,
the ``NTEXT``, ``TEXT`` and ``IMAGE`` datatypes are to be removed from SQL Server
in a future release. SQLAlchemy normally relates these types to the
:class:`.UnicodeText`, :class:`.Text` and :class:`.LargeBinary` datatypes.
In order to accommodate this change, a new flag ``deprecate_large_types``
is added to the dialect, which will be automatically set based on detection
of the server version in use, if not otherwise set by the user. The
behavior of this flag is as follows:
* When this flag is ``True``, the :class:`.UnicodeText`, :class:`.Text` and
:class:`.LargeBinary` datatypes, when used to render DDL, will render the
types ``NVARCHAR(max)``, ``VARCHAR(max)``, and ``VARBINARY(max)``,
respectively. This is a new behavior as of the addition of this flag.
* When this flag is ``False``, the :class:`.UnicodeText`, :class:`.Text` and
:class:`.LargeBinary` datatypes, when used to render DDL, will render the
types ``NTEXT``, ``TEXT``, and ``IMAGE``,
respectively. This is the long-standing behavior of these types.
* The flag begins with the value ``None``, before a database connection is
established. If the dialect is used to render DDL without the flag being
set, it is interpreted the same as ``False``.
* On first connection, the dialect detects if SQL Server version 2012 or greater
is in use; if the flag is still at ``None``, it sets it to ``True`` or
``False`` based on whether 2012 or greater is detected.
* The flag can be set to either ``True`` or ``False`` when the dialect
is created, typically via :func:`.create_engine`::
eng = create_engine("mssql+pymssql://user:pass@host/db",
deprecate_large_types=True)
* Complete control over whether the "old" or "new" types are rendered is
available in all SQLAlchemy versions by using the UPPERCASE type objects
instead: :class:`.NVARCHAR`, :class:`.VARCHAR`, :class:`.types.VARBINARY`,
:class:`.TEXT`, :class:`.mssql.NTEXT`, :class:`.mssql.IMAGE` will always remain
fixed and always output exactly that type.
.. versionadded:: 1.0.0
.. _mssql_indexes:
Clustered Index Support
-----------------------
The MSSQL dialect supports clustered indexes (and primary keys) via the
``mssql_clustered`` option. This option is available to :class:`.Index`,
:class:`.UniqueConstraint`, and :class:`.PrimaryKeyConstraint`.
To generate a clustered index::
Index("my_index", table.c.x, mssql_clustered=True)
which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
.. versionadded:: 0.8
To generate a clustered primary key use::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=True))
which will render the table, for example, as::
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY CLUSTERED (x, y))
Similarly, we can generate a clustered unique constraint using::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True),
)
.. versionadded:: 0.9.2
MSSQL-Specific Index Options
-----------------------------
In addition to clustering, the MSSQL dialect supports other special options
for :class:`.Index`.
INCLUDE
^^^^^^^
The ``mssql_include`` option renders INCLUDE(colname) for the given string
names::
Index("my_index", table.c.x, mssql_include=['y'])
would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
.. versionadded:: 0.8
Index ordering
^^^^^^^^^^^^^^
Index ordering is available via functional expressions, such as::
Index("my_index", table.c.x.desc())
would render the index as ``CREATE INDEX my_index ON table (x DESC)``
.. versionadded:: 0.8
.. seealso::
:ref:`schema_indexes_functional`
Compatibility Levels
--------------------
MSSQL supports the notion of setting compatibility levels at the
database level. This allows, for instance, to run a database that
is compatible with SQL2000 while running on a SQL2005 database
server. ``server_version_info`` will always return the database
server version information (in this case SQL2005) and not the
compatibility level information. Because of this, if running under
a backwards compatibility mode, SQLAlchemy may attempt to use T-SQL
statements that the database server is unable to parse.
Triggers
--------
SQLAlchemy by default uses OUTPUT INSERTED to get at newly
generated primary key values via IDENTITY columns or other
server side defaults. MS-SQL does not
allow the usage of OUTPUT INSERTED on tables that have triggers.
To disable the usage of OUTPUT INSERTED on a per-table basis,
specify ``implicit_returning=False`` for each :class:`.Table`
which has triggers::
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
# ...,
implicit_returning=False
)
Declarative form::
class MyClass(Base):
# ...
__table_args__ = {'implicit_returning':False}
This option can also be specified engine-wide using the
``implicit_returning=False`` argument on :func:`.create_engine`.
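For example, using the hypothetical DSN from the earlier examples::
engine = create_engine("mssql+pymssql://mydsn", implicit_returning=False)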
Enabling Snapshot Isolation
---------------------------
Though not specific to SQLAlchemy, SQL Server has a default transaction
isolation mode that locks entire tables, and causes even mildly concurrent
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support. This is accomplished via the
following ALTER DATABASE commands executed at the SQL prompt::
ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON
Background on SQL Server snapshot isolation is available at
http://msdn.microsoft.com/en-us/library/ms175095.aspx.
Known Issues
------------
* No support for more than one ``IDENTITY`` column per table
* reflection of indexes does not work with versions older than
SQL Server 2005
"""
import datetime
import operator
import re
from ... import sql, schema as sa_schema, exc, util
from ...sql import compiler, expression, util as sql_util
from ... import engine
from ...engine import reflection, default
from ... import types as sqltypes
from ...types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \
FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\
TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR
from ...util import update_wrapper
from . import information_schema as ischema
# http://sqlserverbuilds.blogspot.com/
MS_2012_VERSION = (11,)
MS_2008_VERSION = (10,)
MS_2005_VERSION = (9,)
MS_2000_VERSION = (8,)
RESERVED_WORDS = set(
['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization',
'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade',
'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce',
'collate', 'column', 'commit', 'compute', 'constraint', 'contains',
'containstable', 'continue', 'convert', 'create', 'cross', 'current',
'current_date', 'current_time', 'current_timestamp', 'current_user',
'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default',
'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double',
'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec',
'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor',
'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full',
'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity',
'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert',
'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like',
'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not',
'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource',
'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer',
'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print',
'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext',
'reconfigure', 'references', 'replication', 'restore', 'restrict',
'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount',
'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select',
'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics',
'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top',
'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union',
'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values',
'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with',
'writetext',
])
class REAL(sqltypes.REAL):
__visit_name__ = 'REAL'
def __init__(self, **kw):
# REAL is a synonym for FLOAT(24) on SQL server
kw['precision'] = 24
super(REAL, self).__init__(**kw)
class TINYINT(sqltypes.Integer):
__visit_name__ = 'TINYINT'
# MSSQL DATE/TIME types have varied behavior, sometimes returning
# strings. MSDate/TIME check for everything, and always
# filter bind parameters into datetime objects (required by pyodbc,
# not sure about other dialects).
class _MSDate(sqltypes.Date):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
_reg = re.compile(r"(\d+)-(\d+)-(\d+)")
def result_processor(self, dialect, coltype):
def process(value):
if isinstance(value, datetime.datetime):
return value.date()
elif isinstance(value, util.string_types):
return datetime.date(*[
int(x or 0)
for x in self._reg.match(value).groups()
])
else:
return value
return process
class TIME(sqltypes.TIME):
def __init__(self, precision=None, **kwargs):
self.precision = precision
super(TIME, self).__init__()
__zero_date = datetime.date(1900, 1, 1)
def bind_processor(self, dialect):
def process(value):
if isinstance(value, datetime.datetime):
value = datetime.datetime.combine(
self.__zero_date, value.time())
elif isinstance(value, datetime.time):
value = datetime.datetime.combine(self.__zero_date, value)
return value
return process
_reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?")
def result_processor(self, dialect, coltype):
def process(value):
if isinstance(value, datetime.datetime):
return value.time()
elif isinstance(value, util.string_types):
return datetime.time(*[
int(x or 0)
for x in self._reg.match(value).groups()])
else:
return value
return process
_MSTime = TIME
class _DateTimeBase(object):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
class _MSDateTime(_DateTimeBase, sqltypes.DateTime):
pass
class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = 'SMALLDATETIME'
class DATETIME2(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = 'DATETIME2'
def __init__(self, precision=None, **kw):
super(DATETIME2, self).__init__(**kw)
self.precision = precision
# TODO: is this not an Interval ?
class DATETIMEOFFSET(sqltypes.TypeEngine):
__visit_name__ = 'DATETIMEOFFSET'
def __init__(self, precision=None, **kwargs):
self.precision = precision
class _StringType(object):
"""Base for MSSQL string types."""
def __init__(self, collation=None):
super(_StringType, self).__init__(collation=collation)
class NTEXT(sqltypes.UnicodeText):
"""MSSQL NTEXT type, for variable-length unicode text up to 2^30
characters."""
__visit_name__ = 'NTEXT'
class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary):
"""The MSSQL VARBINARY type.
This type extends both :class:`.types.VARBINARY` and
:class:`.types.LargeBinary`. In "deprecate_large_types" mode,
the :class:`.types.LargeBinary` type will produce ``VARBINARY(max)``
on SQL Server.
.. versionadded:: 1.0.0
.. seealso::
:ref:`mssql_large_type_deprecation`
"""
__visit_name__ = 'VARBINARY'
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = 'IMAGE'
class BIT(sqltypes.TypeEngine):
__visit_name__ = 'BIT'
class MONEY(sqltypes.TypeEngine):
__visit_name__ = 'MONEY'
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = 'SMALLMONEY'
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class SQL_VARIANT(sqltypes.TypeEngine):
__visit_name__ = 'SQL_VARIANT'
# old names.
MSDateTime = _MSDateTime
MSDate = _MSDate
MSReal = REAL
MSTinyInteger = TINYINT
MSTime = TIME
MSSmallDateTime = SMALLDATETIME
MSDateTime2 = DATETIME2
MSDateTimeOffset = DATETIMEOFFSET
MSText = TEXT
MSNText = NTEXT
MSString = VARCHAR
MSNVarchar = NVARCHAR
MSChar = CHAR
MSNChar = NCHAR
MSBinary = BINARY
MSVarBinary = VARBINARY
MSImage = IMAGE
MSBit = BIT
MSMoney = MONEY
MSSmallMoney = SMALLMONEY
MSUniqueIdentifier = UNIQUEIDENTIFIER
MSVariant = SQL_VARIANT
ischema_names = {
'int': INTEGER,
'bigint': BIGINT,
'smallint': SMALLINT,
'tinyint': TINYINT,
'varchar': VARCHAR,
'nvarchar': NVARCHAR,
'char': CHAR,
'nchar': NCHAR,
'text': TEXT,
'ntext': NTEXT,
'decimal': DECIMAL,
'numeric': NUMERIC,
'float': FLOAT,
'datetime': DATETIME,
'datetime2': DATETIME2,
'datetimeoffset': DATETIMEOFFSET,
'date': DATE,
'time': TIME,
'smalldatetime': SMALLDATETIME,
'binary': BINARY,
'varbinary': VARBINARY,
'bit': BIT,
'real': REAL,
'image': IMAGE,
'timestamp': TIMESTAMP,
'money': MONEY,
'smallmoney': SMALLMONEY,
'uniqueidentifier': UNIQUEIDENTIFIER,
'sql_variant': SQL_VARIANT,
}
class MSTypeCompiler(compiler.GenericTypeCompiler):
def _extend(self, spec, type_, length=None):
"""Extend a string-type declaration with standard SQL
COLLATE annotations.
"""
if getattr(type_, 'collation', None):
collation = 'COLLATE %s' % type_.collation
else:
collation = None
if not length:
length = type_.length
if length:
spec = spec + "(%s)" % length
return ' '.join([c for c in (spec, collation)
if c is not None])
def visit_FLOAT(self, type_, **kw):
precision = getattr(type_, 'precision', None)
if precision is None:
return "FLOAT"
else:
return "FLOAT(%(precision)s)" % {'precision': precision}
def visit_TINYINT(self, type_, **kw):
return "TINYINT"
def visit_DATETIMEOFFSET(self, type_, **kw):
if type_.precision:
return "DATETIMEOFFSET(%s)" % type_.precision
else:
return "DATETIMEOFFSET"
def visit_TIME(self, type_, **kw):
precision = getattr(type_, 'precision', None)
if precision:
return "TIME(%s)" % precision
else:
return "TIME"
def visit_DATETIME2(self, type_, **kw):
precision = getattr(type_, 'precision', None)
if precision:
return "DATETIME2(%s)" % precision
else:
return "DATETIME2"
def visit_SMALLDATETIME(self, type_, **kw):
return "SMALLDATETIME"
def visit_unicode(self, type_, **kw):
return self.visit_NVARCHAR(type_, **kw)
def visit_text(self, type_, **kw):
if self.dialect.deprecate_large_types:
return self.visit_VARCHAR(type_, **kw)
else:
return self.visit_TEXT(type_, **kw)
def visit_unicode_text(self, type_, **kw):
if self.dialect.deprecate_large_types:
return self.visit_NVARCHAR(type_, **kw)
else:
return self.visit_NTEXT(type_, **kw)
def visit_NTEXT(self, type_, **kw):
return self._extend("NTEXT", type_)
def visit_TEXT(self, type_, **kw):
return self._extend("TEXT", type_)
def visit_VARCHAR(self, type_, **kw):
return self._extend("VARCHAR", type_, length=type_.length or 'max')
def visit_CHAR(self, type_, **kw):
return self._extend("CHAR", type_)
def visit_NCHAR(self, type_, **kw):
return self._extend("NCHAR", type_)
def visit_NVARCHAR(self, type_, **kw):
return self._extend("NVARCHAR", type_, length=type_.length or 'max')
def visit_date(self, type_, **kw):
if self.dialect.server_version_info < MS_2008_VERSION:
return self.visit_DATETIME(type_, **kw)
else:
return self.visit_DATE(type_, **kw)
def visit_time(self, type_, **kw):
if self.dialect.server_version_info < MS_2008_VERSION:
return self.visit_DATETIME(type_, **kw)
else:
return self.visit_TIME(type_, **kw)
def visit_large_binary(self, type_, **kw):
if self.dialect.deprecate_large_types:
return self.visit_VARBINARY(type_, **kw)
else:
return self.visit_IMAGE(type_, **kw)
def visit_IMAGE(self, type_, **kw):
return "IMAGE"
def visit_VARBINARY(self, type_, **kw):
return self._extend(
"VARBINARY",
type_,
length=type_.length or 'max')
def visit_boolean(self, type_, **kw):
return self.visit_BIT(type_)
def visit_BIT(self, type_, **kw):
return "BIT"
def visit_MONEY(self, type_, **kw):
return "MONEY"
def visit_SMALLMONEY(self, type_, **kw):
return 'SMALLMONEY'
def visit_UNIQUEIDENTIFIER(self, type_, **kw):
return "UNIQUEIDENTIFIER"
def visit_SQL_VARIANT(self, type_, **kw):
return 'SQL_VARIANT'
class MSExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
_select_lastrowid = False
_result_proxy = None
_lastrowid = None
def _opt_encode(self, statement):
if not self.dialect.supports_unicode_statements:
return self.dialect._encoder(statement)[0]
else:
return statement
def pre_exec(self):
"""Activate IDENTITY_INSERT if needed."""
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
if insert_has_sequence:
self._enable_identity_insert = \
seq_column.key in self.compiled_parameters[0] or \
(
self.compiled.statement.parameters and (
(
self.compiled.statement._has_multi_parameters
and
seq_column.key in
self.compiled.statement.parameters[0]
) or (
not
self.compiled.statement._has_multi_parameters
and
seq_column.key in
self.compiled.statement.parameters
)
)
)
else:
self._enable_identity_insert = False
self._select_lastrowid = insert_has_sequence and \
not self.compiled.returning and \
not self._enable_identity_insert and \
not self.executemany
if self._enable_identity_insert:
self.root_connection._cursor_execute(
self.cursor,
self._opt_encode(
"SET IDENTITY_INSERT %s ON" %
self.dialect.identifier_preparer.format_table(tbl)),
(),
self)
def post_exec(self):
"""Disable IDENTITY_INSERT if enabled."""
conn = self.root_connection
if self._select_lastrowid:
if self.dialect.use_scope_identity:
conn._cursor_execute(
self.cursor,
"SELECT scope_identity() AS lastrowid", (), self)
else:
conn._cursor_execute(self.cursor,
"SELECT @@identity AS lastrowid",
(),
self)
# fetchall() ensures the cursor is consumed without closing it
row = self.cursor.fetchall()[0]
self._lastrowid = int(row[0])
if (self.isinsert or self.isupdate or self.isdelete) and \
self.compiled.returning:
self._result_proxy = engine.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
conn._cursor_execute(
self.cursor,
self._opt_encode(
"SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer.format_table(
self.compiled.statement.table)),
(),
self)
def get_lastrowid(self):
return self._lastrowid
def handle_dbapi_exception(self, e):
if self._enable_identity_insert:
try:
self.cursor.execute(
self._opt_encode(
"SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer.format_table(
self.compiled.statement.table)))
except Exception:
pass
def get_result_proxy(self):
if self._result_proxy:
return self._result_proxy
else:
return engine.ResultProxy(self)
class MSSQLCompiler(compiler.SQLCompiler):
returning_precedes_values = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
'doy': 'dayofyear',
'dow': 'weekday',
'milliseconds': 'millisecond',
'microseconds': 'microsecond'
})
def __init__(self, *args, **kwargs):
self.tablealiases = {}
super(MSSQLCompiler, self).__init__(*args, **kwargs)
def _with_legacy_schema_aliasing(fn):
def decorate(self, *arg, **kw):
if self.dialect.legacy_schema_aliasing:
return fn(self, *arg, **kw)
else:
super_ = getattr(super(MSSQLCompiler, self), fn.__name__)
return super_(*arg, **kw)
return decorate
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_current_date_func(self, fn, **kw):
return "GETDATE()"
def visit_length_func(self, fn, **kw):
return "LEN%s" % self.function_argspec(fn, **kw)
def visit_char_length_func(self, fn, **kw):
return "LEN%s" % self.function_argspec(fn, **kw)
def visit_concat_op_binary(self, binary, operator, **kw):
return "%s + %s" % \
(self.process(binary.left, **kw),
self.process(binary.right, **kw))
def visit_true(self, expr, **kw):
return '1'
def visit_false(self, expr, **kw):
return '0'
def visit_match_op_binary(self, binary, operator, **kw):
return "CONTAINS (%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw))
def get_select_precolumns(self, select, **kw):
""" MS-SQL puts TOP, it's version of LIMIT here """
s = ""
if select._distinct:
s += "DISTINCT "
if select._simple_int_limit and not select._offset:
# ODBC drivers and possibly others
# don't support bind params in the SELECT clause on SQL Server.
# so have to use literal here.
s += "TOP %d " % select._limit
if s:
return s
else:
return compiler.SQLCompiler.get_select_precolumns(
self, select, **kw)
def get_from_hint_text(self, table, text):
return text
def get_crud_hint_text(self, table, text):
return text
def limit_clause(self, select, **kw):
# Limit in mssql is after the select keyword
return ""
def visit_select(self, select, **kwargs):
"""Look for ``LIMIT`` and OFFSET in a select statement, and if
so tries to wrap it in a subquery with ``row_number()`` criterion.
"""
if (
(
not select._simple_int_limit and
select._limit_clause is not None
) or (
select._offset_clause is not None and
not select._simple_int_offset or select._offset
)
) and not getattr(select, '_mssql_visit', None):
# to use ROW_NUMBER(), an ORDER BY is required.
if not select._order_by_clause.clauses:
raise exc.CompileError('MSSQL requires an order_by when '
'using an OFFSET or a non-simple '
'LIMIT clause')
_order_by_clauses = select._order_by_clause.clauses
limit_clause = select._limit_clause
offset_clause = select._offset_clause
kwargs['select_wraps_for'] = select
select = select._generate()
select._mssql_visit = True
select = select.column(
sql.func.ROW_NUMBER().over(order_by=_order_by_clauses)
.label("mssql_rn")).order_by(None).alias()
mssql_rn = sql.column('mssql_rn')
limitselect = sql.select([c for c in select.c if
c.key != 'mssql_rn'])
if offset_clause is not None:
limitselect.append_whereclause(mssql_rn > offset_clause)
if limit_clause is not None:
limitselect.append_whereclause(
mssql_rn <= (limit_clause + offset_clause))
else:
limitselect.append_whereclause(
mssql_rn <= (limit_clause))
return self.process(limitselect, **kwargs)
else:
return compiler.SQLCompiler.visit_select(self, select, **kwargs)
@_with_legacy_schema_aliasing
def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs):
if mssql_aliased is table or iscrud:
return super(MSSQLCompiler, self).visit_table(table, **kwargs)
# alias schema-qualified tables
alias = self._schema_aliased_table(table)
if alias is not None:
return self.process(alias, mssql_aliased=table, **kwargs)
else:
return super(MSSQLCompiler, self).visit_table(table, **kwargs)
@_with_legacy_schema_aliasing
def visit_alias(self, alias, **kw):
# translate for schema-qualified table aliases
kw['mssql_aliased'] = alias.original
return super(MSSQLCompiler, self).visit_alias(alias, **kw)
@_with_legacy_schema_aliasing
def visit_column(self, column, add_to_result_map=None, **kw):
if column.table is not None and \
(not self.isupdate and not self.isdelete) or \
self.is_subquery():
# translate for schema-qualified table aliases
t = self._schema_aliased_table(column.table)
if t is not None:
converted = expression._corresponding_column_or_error(
t, column)
if add_to_result_map is not None:
add_to_result_map(
column.name,
column.name,
(column, column.name, column.key),
column.type
)
return super(MSSQLCompiler, self).\
visit_column(converted, **kw)
return super(MSSQLCompiler, self).visit_column(
column, add_to_result_map=add_to_result_map, **kw)
def _schema_aliased_table(self, table):
if getattr(table, 'schema', None) is not None:
if self.dialect._warn_schema_aliasing and \
table.schema.lower() != 'information_schema':
util.warn(
"legacy_schema_aliasing flag is defaulted to True; "
"some schema-qualified queries may not function "
"correctly. Consider setting this flag to False for "
"modern SQL Server versions; this flag will default to "
"False in version 1.1")
if table not in self.tablealiases:
self.tablealiases[table] = table.alias()
return self.tablealiases[table]
else:
return None
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return 'DATEPART("%s", %s)' % \
(field, self.process(extract.expr, **kw))
def visit_savepoint(self, savepoint_stmt):
return "SAVE TRANSACTION %s" % \
self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return ("ROLLBACK TRANSACTION %s"
% self.preparer.format_savepoint(savepoint_stmt))
def visit_binary(self, binary, **kwargs):
"""Move bind parameters to the right-hand side of an operator, where
possible.
"""
if (
isinstance(binary.left, expression.BindParameter)
and binary.operator == operator.eq
and not isinstance(binary.right, expression.BindParameter)
):
return self.process(
expression.BinaryExpression(binary.right,
binary.left,
binary.operator),
**kwargs)
return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)
def returning_clause(self, stmt, returning_cols):
if self.isinsert or self.isupdate:
target = stmt.table.alias("inserted")
else:
target = stmt.table.alias("deleted")
adapter = sql_util.ClauseAdapter(target)
columns = [
self._label_select_column(None, adapter.traverse(c),
True, False, {})
for c in expression._select_iterables(returning_cols)
]
return 'OUTPUT ' + ', '.join(columns)
def get_cte_preamble(self, recursive):
# SQL Server finds it too inconvenient to accept
# an entirely optional, SQL standard specified,
# "RECURSIVE" word with their "WITH",
# so here we go
return "WITH"
def label_select_column(self, select, column, asfrom):
if isinstance(column, expression.Function):
return column.label(None)
else:
return super(MSSQLCompiler, self).\
label_select_column(select, column, asfrom)
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
# SQLAlchemy doesn't use
return ''
def order_by_clause(self, select, **kw):
order_by = self.process(select._order_by_clause, **kw)
# MSSQL only allows ORDER BY in subqueries if there is a LIMIT
if order_by and (not self.is_subquery() or select._limit):
return " ORDER BY " + order_by
else:
return ""
def update_from_clause(self, update_stmt,
from_table, extra_froms,
from_hints,
**kw):
"""Render the UPDATE..FROM clause specific to MSSQL.
In MSSQL, if the UPDATE statement involves an alias of the table to
be updated, then the table itself must be added to the FROM list as
well. Otherwise, it is optional. Here, we add it regardless.
"""
return "FROM " + ', '.join(
t._compiler_dispatch(self, asfrom=True,
fromhints=from_hints, **kw)
for t in [from_table] + extra_froms)
class MSSQLStrictCompiler(MSSQLCompiler):
"""A subclass of MSSQLCompiler which disables the usage of bind
parameters where not allowed natively by MS-SQL.
A dialect may use this compiler on a platform where native
binds are used.
"""
ansi_bind_rules = True
def visit_in_op_binary(self, binary, operator, **kw):
kw['literal_binds'] = True
return "%s IN %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
def visit_notin_op_binary(self, binary, operator, **kw):
kw['literal_binds'] = True
return "%s NOT IN %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
def render_literal_value(self, value, type_):
"""
For date and datetime values, convert to a string
format acceptable to MSSQL. That seems to be the
so-called ODBC canonical date format which looks
like this:
yyyy-mm-dd hh:mi:ss.mmm(24h)
For other data types, call the base class implementation.
"""
# datetime and date are both subclasses of datetime.date
if issubclass(type(value), datetime.date):
# SQL Server wants single quotes around the date string.
return "'" + str(value) + "'"
else:
return super(MSSQLStrictCompiler, self).\
render_literal_value(value, type_)
class MSDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = (
self.preparer.format_column(column) + " "
+ self.dialect.type_compiler.process(
column.type, type_expression=column)
)
if column.nullable is not None:
if not column.nullable or column.primary_key or \
isinstance(column.default, sa_schema.Sequence):
colspec += " NOT NULL"
else:
colspec += " NULL"
if column.table is None:
raise exc.CompileError(
"mssql requires Table-bound columns "
"in order to generate DDL")
# install an IDENTITY Sequence if we have either a sequence or an
# implicit IDENTITY column
if isinstance(column.default, sa_schema.Sequence):
if column.default.start == 0:
start = 0
else:
start = column.default.start or 1
colspec += " IDENTITY(%s,%s)" % (start,
column.default.increment or 1)
elif column is column.table._autoincrement_column:
colspec += " IDENTITY(1,1)"
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
return colspec
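# Example output: an Integer primary key column named 'id' that is the
# table's autoincrement column renders as
# "id INTEGER NOT NULL IDENTITY(1,1)".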
def visit_create_index(self, create, include_schema=False):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
# handle clustering option
if index.dialect_options['mssql']['clustered']:
text += "CLUSTERED "
text += "INDEX %s ON %s (%s)" \
% (
self._prepared_index_name(index,
include_schema=include_schema),
preparer.format_table(index.table),
', '.join(
self.sql_compiler.process(expr,
include_table=False,
literal_binds=True) for
expr in index.expressions)
)
# handle other included columns
if index.dialect_options['mssql']['include']:
inclusions = [index.table.c[col]
if isinstance(col, util.string_types) else col
for col in
index.dialect_options['mssql']['include']
]
text += " INCLUDE (%s)" \
% ', '.join([preparer.quote(c.name)
for c in inclusions])
return text
def visit_drop_index(self, drop):
return "\nDROP INDEX %s ON %s" % (
self._prepared_index_name(drop.element, include_schema=False),
self.preparer.format_table(drop.element.table)
)
def visit_primary_key_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % \
self.preparer.format_constraint(constraint)
text += "PRIMARY KEY "
if constraint.dialect_options['mssql']['clustered']:
text += "CLUSTERED "
text += "(%s)" % ', '.join(self.preparer.quote(c.name)
for c in constraint)
text += self.define_constraint_deferrability(constraint)
return text
def visit_unique_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % \
self.preparer.format_constraint(constraint)
text += "UNIQUE "
if constraint.dialect_options['mssql']['clustered']:
text += "CLUSTERED "
text += "(%s)" % ', '.join(self.preparer.quote(c.name)
for c in constraint)
text += self.define_constraint_deferrability(constraint)
return text
class MSIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def __init__(self, dialect):
super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[',
final_quote=']')
def _escape_identifier(self, value):
return value
def quote_schema(self, schema, force=None):
"""Prepare a quoted table and schema name."""
result = '.'.join([self.quote(x, force) for x in schema.split('.')])
return result
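# A small hedged sketch (hypothetical helper, not part of the module): with
# the bracket quotes configured above, only identifiers that actually
# require quoting gain brackets, and dotted schemas are quoted piecewise.
def _demo_bracket_quoting():
    preparer = MSIdentifierPreparer(MSDialect())
    return preparer.quote_schema("mydb.my owner")  # 'mydb.[my owner]'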
def _db_plus_owner_listing(fn):
def wrap(dialect, connection, schema=None, **kw):
dbname, owner = _owner_plus_db(dialect, schema)
return _switch_db(dbname, connection, fn, dialect, connection,
dbname, owner, schema, **kw)
return update_wrapper(wrap, fn)
def _db_plus_owner(fn):
def wrap(dialect, connection, tablename, schema=None, **kw):
dbname, owner = _owner_plus_db(dialect, schema)
return _switch_db(dbname, connection, fn, dialect, connection,
tablename, dbname, owner, schema, **kw)
return update_wrapper(wrap, fn)
def _switch_db(dbname, connection, fn, *arg, **kw):
if dbname:
current_db = connection.scalar("select db_name()")
connection.execute("use %s" % dbname)
try:
return fn(*arg, **kw)
finally:
if dbname:
connection.execute("use %s" % current_db)
def _owner_plus_db(dialect, schema):
if not schema:
return None, dialect.default_schema_name
elif "." in schema:
return schema.split(".", 1)
else:
return None, schema
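# A hedged sketch (hypothetical helper, not part of the module) of the
# schema-splitting convention the decorators above rely on: a dotted schema
# names a database plus an owner, while a bare schema is an owner within the
# current database.
def _demo_owner_plus_db():
    dialect = MSDialect()
    assert _owner_plus_db(dialect, "mydb.dbo") == ["mydb", "dbo"]
    assert _owner_plus_db(dialect, "dbo") == (None, "dbo")
    return dialect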
class MSDialect(default.DefaultDialect):
name = 'mssql'
supports_default_values = True
supports_empty_insert = False
execution_ctx_cls = MSExecutionContext
use_scope_identity = True
max_identifier_length = 128
schema_name = "dbo"
colspecs = {
sqltypes.DateTime: _MSDateTime,
sqltypes.Date: _MSDate,
sqltypes.Time: TIME,
}
engine_config_types = default.DefaultDialect.engine_config_types.union([
('legacy_schema_aliasing', util.asbool),
])
ischema_names = ischema_names
supports_native_boolean = False
supports_unicode_binds = True
postfetch_lastrowid = True
server_version_info = ()
statement_compiler = MSSQLCompiler
ddl_compiler = MSDDLCompiler
type_compiler = MSTypeCompiler
preparer = MSIdentifierPreparer
construct_arguments = [
(sa_schema.PrimaryKeyConstraint, {
"clustered": False
}),
(sa_schema.UniqueConstraint, {
"clustered": False
}),
(sa_schema.Index, {
"clustered": False,
"include": None
})
]
def __init__(self,
query_timeout=None,
use_scope_identity=True,
max_identifier_length=None,
schema_name="dbo",
deprecate_large_types=None,
legacy_schema_aliasing=None, **opts):
self.query_timeout = int(query_timeout or 0)
self.schema_name = schema_name
self.use_scope_identity = use_scope_identity
self.max_identifier_length = int(max_identifier_length or 0) or \
self.max_identifier_length
self.deprecate_large_types = deprecate_large_types
if legacy_schema_aliasing is None:
self.legacy_schema_aliasing = True
self._warn_schema_aliasing = True
else:
self.legacy_schema_aliasing = legacy_schema_aliasing
self._warn_schema_aliasing = False
super(MSDialect, self).__init__(**opts)
def do_savepoint(self, connection, name):
# give the DBAPI a push
connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
super(MSDialect, self).do_savepoint(connection, name)
def do_release_savepoint(self, connection, name):
# SQL Server does not support RELEASE SAVEPOINT
pass
def initialize(self, connection):
super(MSDialect, self).initialize(connection)
self._setup_version_attributes()
def _setup_version_attributes(self):
if self.server_version_info[0] not in list(range(8, 17)):
            # FreeTDS with TDS version 4.2 seems to report a number
            # like "95.10.255" here; it's unclear what that is, so
            # emit a warning.
# Use TDS Version 7.0 through 7.3, per the MS information here:
# https://msdn.microsoft.com/en-us/library/dd339982.aspx
# and FreeTDS information here (7.3 highest supported version):
# http://www.freetds.org/userguide/choosingtdsprotocol.htm
util.warn(
"Unrecognized server version info '%s'. Version specific "
"behaviors may not function properly. If using ODBC "
"with FreeTDS, ensure TDS_VERSION 7.0 through 7.3, not "
"4.2, is configured in the FreeTDS configuration." %
".".join(str(x) for x in self.server_version_info))
if self.server_version_info >= MS_2005_VERSION and \
'implicit_returning' not in self.__dict__:
self.implicit_returning = True
if self.server_version_info >= MS_2008_VERSION:
self.supports_multivalues_insert = True
if self.deprecate_large_types is None:
self.deprecate_large_types = \
self.server_version_info >= MS_2012_VERSION
def _get_default_schema_name(self, connection):
if self.server_version_info < MS_2005_VERSION:
return self.schema_name
query = sql.text("""
SELECT default_schema_name FROM
sys.database_principals
WHERE principal_id=database_principal_id()
""")
default_schema_name = connection.scalar(query)
if default_schema_name is not None:
return util.text_type(default_schema_name)
else:
return self.schema_name
@_db_plus_owner
def has_table(self, connection, tablename, dbname, owner, schema):
columns = ischema.columns
whereclause = columns.c.table_name == tablename
if owner:
whereclause = sql.and_(whereclause,
columns.c.table_schema == owner)
s = sql.select([columns], whereclause)
c = connection.execute(s)
return c.first() is not None
@reflection.cache
def get_schema_names(self, connection, **kw):
s = sql.select([ischema.schemata.c.schema_name],
order_by=[ischema.schemata.c.schema_name]
)
schema_names = [r[0] for r in connection.execute(s)]
return schema_names
@reflection.cache
@_db_plus_owner_listing
def get_table_names(self, connection, dbname, owner, schema, **kw):
tables = ischema.tables
s = sql.select([tables.c.table_name],
sql.and_(
tables.c.table_schema == owner,
tables.c.table_type == 'BASE TABLE'
),
order_by=[tables.c.table_name]
)
table_names = [r[0] for r in connection.execute(s)]
return table_names
@reflection.cache
@_db_plus_owner_listing
def get_view_names(self, connection, dbname, owner, schema, **kw):
tables = ischema.tables
s = sql.select([tables.c.table_name],
sql.and_(
tables.c.table_schema == owner,
tables.c.table_type == 'VIEW'
),
order_by=[tables.c.table_name]
)
view_names = [r[0] for r in connection.execute(s)]
return view_names
@reflection.cache
@_db_plus_owner
def get_indexes(self, connection, tablename, dbname, owner, schema, **kw):
        # index reflection relies on the system catalogs,
        # which aren't usable below MS 2005
if self.server_version_info < MS_2005_VERSION:
return []
rp = connection.execute(
sql.text("select ind.index_id, ind.is_unique, ind.name "
"from sys.indexes as ind join sys.tables as tab on "
"ind.object_id=tab.object_id "
"join sys.schemas as sch on sch.schema_id=tab.schema_id "
"where tab.name = :tabname "
"and sch.name=:schname "
"and ind.is_primary_key=0",
bindparams=[
sql.bindparam('tabname', tablename,
sqltypes.String(convert_unicode=True)),
sql.bindparam('schname', owner,
sqltypes.String(convert_unicode=True))
],
typemap={
'name': sqltypes.Unicode()
}
)
)
indexes = {}
for row in rp:
indexes[row['index_id']] = {
'name': row['name'],
'unique': row['is_unique'] == 1,
'column_names': []
}
rp = connection.execute(
sql.text(
"select ind_col.index_id, ind_col.object_id, col.name "
"from sys.columns as col "
"join sys.tables as tab on tab.object_id=col.object_id "
"join sys.index_columns as ind_col on "
"(ind_col.column_id=col.column_id and "
"ind_col.object_id=tab.object_id) "
"join sys.schemas as sch on sch.schema_id=tab.schema_id "
"where tab.name=:tabname "
"and sch.name=:schname",
bindparams=[
sql.bindparam('tabname', tablename,
sqltypes.String(convert_unicode=True)),
sql.bindparam('schname', owner,
sqltypes.String(convert_unicode=True))
],
typemap={'name': sqltypes.Unicode()}
),
)
for row in rp:
if row['index_id'] in indexes:
indexes[row['index_id']]['column_names'].append(row['name'])
return list(indexes.values())
@reflection.cache
@_db_plus_owner
def get_view_definition(self, connection, viewname,
dbname, owner, schema, **kw):
rp = connection.execute(
sql.text(
"select definition from sys.sql_modules as mod, "
"sys.views as views, "
"sys.schemas as sch"
" where "
"mod.object_id=views.object_id and "
"views.schema_id=sch.schema_id and "
"views.name=:viewname and sch.name=:schname",
bindparams=[
sql.bindparam('viewname', viewname,
sqltypes.String(convert_unicode=True)),
sql.bindparam('schname', owner,
sqltypes.String(convert_unicode=True))
]
)
)
if rp:
view_def = rp.scalar()
return view_def
@reflection.cache
@_db_plus_owner
def get_columns(self, connection, tablename, dbname, owner, schema, **kw):
# Get base columns
columns = ischema.columns
if owner:
whereclause = sql.and_(columns.c.table_name == tablename,
columns.c.table_schema == owner)
else:
whereclause = columns.c.table_name == tablename
s = sql.select([columns], whereclause,
order_by=[columns.c.ordinal_position])
c = connection.execute(s)
cols = []
while True:
row = c.fetchone()
if row is None:
break
(name, type, nullable, charlen,
numericprec, numericscale, default, collation) = (
row[columns.c.column_name],
row[columns.c.data_type],
row[columns.c.is_nullable] == 'YES',
row[columns.c.character_maximum_length],
row[columns.c.numeric_precision],
row[columns.c.numeric_scale],
row[columns.c.column_default],
row[columns.c.collation_name]
)
coltype = self.ischema_names.get(type, None)
kwargs = {}
if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText,
MSNText, MSBinary, MSVarBinary,
sqltypes.LargeBinary):
if charlen == -1:
charlen = None
kwargs['length'] = charlen
if collation:
kwargs['collation'] = collation
if coltype is None:
util.warn(
"Did not recognize type '%s' of column '%s'" %
(type, name))
coltype = sqltypes.NULLTYPE
else:
if issubclass(coltype, sqltypes.Numeric) and \
coltype is not MSReal:
kwargs['scale'] = numericscale
kwargs['precision'] = numericprec
coltype = coltype(**kwargs)
cdict = {
'name': name,
'type': coltype,
'nullable': nullable,
'default': default,
'autoincrement': False,
}
cols.append(cdict)
# autoincrement and identity
colmap = {}
for col in cols:
colmap[col['name']] = col
        # We also run sp_columns to check for identity columns:
cursor = connection.execute("sp_columns @table_name = '%s', "
"@table_owner = '%s'"
% (tablename, owner))
ic = None
while True:
row = cursor.fetchone()
if row is None:
break
(col_name, type_name) = row[3], row[5]
if type_name.endswith("identity") and col_name in colmap:
ic = col_name
colmap[col_name]['autoincrement'] = True
colmap[col_name]['sequence'] = dict(
name='%s_identity' % col_name)
break
cursor.close()
if ic is not None and self.server_version_info >= MS_2005_VERSION:
table_fullname = "%s.%s" % (owner, tablename)
cursor = connection.execute(
"select ident_seed('%s'), ident_incr('%s')"
% (table_fullname, table_fullname)
)
row = cursor.first()
if row is not None and row[0] is not None:
colmap[ic]['sequence'].update({
'start': int(row[0]),
'increment': int(row[1])
})
return cols
@reflection.cache
@_db_plus_owner
def get_pk_constraint(self, connection, tablename,
dbname, owner, schema, **kw):
pkeys = []
TC = ischema.constraints
C = ischema.key_constraints.alias('C')
# Primary key constraints
s = sql.select([C.c.column_name,
TC.c.constraint_type,
C.c.constraint_name],
sql.and_(TC.c.constraint_name == C.c.constraint_name,
TC.c.table_schema == C.c.table_schema,
C.c.table_name == tablename,
C.c.table_schema == owner)
)
c = connection.execute(s)
constraint_name = None
for row in c:
if 'PRIMARY' in row[TC.c.constraint_type.name]:
pkeys.append(row[0])
if constraint_name is None:
constraint_name = row[C.c.constraint_name.name]
return {'constrained_columns': pkeys, 'name': constraint_name}
@reflection.cache
@_db_plus_owner
def get_foreign_keys(self, connection, tablename,
dbname, owner, schema, **kw):
RR = ischema.ref_constraints
C = ischema.key_constraints.alias('C')
R = ischema.key_constraints.alias('R')
# Foreign key constraints
s = sql.select([C.c.column_name,
R.c.table_schema, R.c.table_name, R.c.column_name,
RR.c.constraint_name, RR.c.match_option,
RR.c.update_rule,
RR.c.delete_rule],
sql.and_(C.c.table_name == tablename,
C.c.table_schema == owner,
C.c.constraint_name == RR.c.constraint_name,
R.c.constraint_name ==
RR.c.unique_constraint_name,
C.c.ordinal_position == R.c.ordinal_position
),
order_by=[RR.c.constraint_name, R.c.ordinal_position]
)
# group rows by constraint ID, to handle multi-column FKs
def fkey_rec():
return {
'name': None,
'constrained_columns': [],
'referred_schema': None,
'referred_table': None,
'referred_columns': []
}
fkeys = util.defaultdict(fkey_rec)
for r in connection.execute(s).fetchall():
scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r
rec = fkeys[rfknm]
rec['name'] = rfknm
if not rec['referred_table']:
rec['referred_table'] = rtbl
if schema is not None or owner != rschema:
if dbname:
rschema = dbname + "." + rschema
rec['referred_schema'] = rschema
local_cols, remote_cols = \
rec['constrained_columns'],\
rec['referred_columns']
local_cols.append(scol)
remote_cols.append(rcol)
return list(fkeys.values())
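# A hedged connection sketch (the DSN and credentials are placeholders, not
# part of the original module): the keyword arguments accepted by
# MSDialect.__init__ above can be passed straight through create_engine(),
# e.g.
#
#     from sqlalchemy import create_engine
#     engine = create_engine(
#         "mssql+pyodbc://user:password@mydsn",
#         deprecate_large_types=True,
#         legacy_schema_aliasing=False,
#     )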
|
mit
|
lshain-android-source/external-chromium_org
|
tools/telemetry/telemetry/core/chrome/inspector_console.py
|
29
|
1991
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
class InspectorConsole(object):
def __init__(self, inspector_backend):
self._inspector_backend = inspector_backend
self._inspector_backend.RegisterDomain(
'Console',
self._OnNotification,
self._OnClose)
self._message_output_stream = None
self._last_message = None
self._console_enabled = False
def _OnNotification(self, msg):
logging.debug('Notification: %s', json.dumps(msg, indent=2))
if msg['method'] == 'Console.messageAdded':
if msg['params']['message']['url'] == 'chrome://newtab/':
return
self._last_message = 'At %s:%i: %s' % (
msg['params']['message']['url'],
msg['params']['message']['line'],
msg['params']['message']['text'])
if self._message_output_stream:
self._message_output_stream.write(
'%s\n' % self._last_message)
elif msg['method'] == 'Console.messageRepeatCountUpdated':
if self._message_output_stream:
self._message_output_stream.write(
'%s\n' % self._last_message)
def _OnClose(self):
pass
# False positive in PyLint 0.25.1: http://www.logilab.org/89092
@property
def message_output_stream(self): # pylint: disable=E0202
return self._message_output_stream
@message_output_stream.setter
def message_output_stream(self, stream): # pylint: disable=E0202
self._message_output_stream = stream
self._UpdateConsoleEnabledState()
def _UpdateConsoleEnabledState(self):
    enabled = self._message_output_stream is not None
if enabled == self._console_enabled:
return
if enabled:
method_name = 'enable'
else:
method_name = 'disable'
self._inspector_backend.SyncRequest({
'method': 'Console.%s' % method_name
})
self._console_enabled = enabled
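# A hedged usage sketch (the backend argument stands in for an inspector
# backend exposing the RegisterDomain/SyncRequest interface used above; not
# part of the original file): assigning a stream enables the Console domain,
# clearing it disables the domain again.
def _DemoConsoleLogging(backend, stream):
  console = InspectorConsole(backend)
  console.message_output_stream = stream  # issues Console.enable
  return console  # later: console.message_output_stream = None -> disable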
|
bsd-3-clause
|
Adnn/django
|
tests/migrations/test_graph.py
|
167
|
11672
|
import warnings
from django.db.migrations.exceptions import (
CircularDependencyError, NodeNotFoundError,
)
from django.db.migrations.graph import RECURSION_DEPTH_WARNING, MigrationGraph
from django.test import SimpleTestCase
from django.utils.encoding import force_text
class GraphTests(SimpleTestCase):
"""
Tests the digraph structure.
"""
def test_simple_graph(self):
"""
Tests a basic dependency graph:
app_a: 0001 <-- 0002 <--- 0003 <-- 0004
/
app_b: 0001 <-- 0002 <-/
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_a", "0004"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_dependency("app_a.0004", ("app_a", "0004"), ("app_a", "0003"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_b", "0002"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_b", "0001"))
# Test root migration case
self.assertEqual(
graph.forwards_plan(("app_a", "0001")),
[('app_a', '0001')],
)
# Test branch B only
self.assertEqual(
graph.forwards_plan(("app_b", "0002")),
[("app_b", "0001"), ("app_b", "0002")],
)
# Test whole graph
self.assertEqual(
graph.forwards_plan(("app_a", "0004")),
[('app_b', '0001'), ('app_b', '0002'), ('app_a', '0001'), ('app_a', '0002'), ('app_a', '0003'), ('app_a', '0004')],
)
# Test reverse to b:0002
self.assertEqual(
graph.backwards_plan(("app_b", "0002")),
[('app_a', '0004'), ('app_a', '0003'), ('app_b', '0002')],
)
# Test roots and leaves
self.assertEqual(
graph.root_nodes(),
[('app_a', '0001'), ('app_b', '0001')],
)
self.assertEqual(
graph.leaf_nodes(),
[('app_a', '0004'), ('app_b', '0002')],
)
def test_complex_graph(self):
"""
Tests a complex dependency graph:
app_a: 0001 <-- 0002 <--- 0003 <-- 0004
\ \ / /
app_b: 0001 <-\ 0002 <-X /
\ \ /
app_c: \ 0001 <-- 0002 <-
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_a", "0004"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_node(("app_c", "0001"), None)
graph.add_node(("app_c", "0002"), None)
graph.add_dependency("app_a.0004", ("app_a", "0004"), ("app_a", "0003"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_b", "0002"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_b", "0001"))
graph.add_dependency("app_a.0004", ("app_a", "0004"), ("app_c", "0002"))
graph.add_dependency("app_c.0002", ("app_c", "0002"), ("app_c", "0001"))
graph.add_dependency("app_c.0001", ("app_c", "0001"), ("app_b", "0001"))
graph.add_dependency("app_c.0002", ("app_c", "0002"), ("app_a", "0002"))
# Test branch C only
self.assertEqual(
graph.forwards_plan(("app_c", "0002")),
[('app_b', '0001'), ('app_c', '0001'), ('app_a', '0001'), ('app_a', '0002'), ('app_c', '0002')],
)
# Test whole graph
self.assertEqual(
graph.forwards_plan(("app_a", "0004")),
[('app_b', '0001'), ('app_c', '0001'), ('app_a', '0001'), ('app_a', '0002'), ('app_c', '0002'), ('app_b', '0002'), ('app_a', '0003'), ('app_a', '0004')],
)
# Test reverse to b:0001
self.assertEqual(
graph.backwards_plan(("app_b", "0001")),
[('app_a', '0004'), ('app_c', '0002'), ('app_c', '0001'), ('app_a', '0003'), ('app_b', '0002'), ('app_b', '0001')],
)
# Test roots and leaves
self.assertEqual(
graph.root_nodes(),
[('app_a', '0001'), ('app_b', '0001'), ('app_c', '0001')],
)
self.assertEqual(
graph.leaf_nodes(),
[('app_a', '0004'), ('app_b', '0002'), ('app_c', '0002')],
)
def test_circular_graph(self):
"""
Tests a circular dependency graph.
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency("app_a.0001", ("app_a", "0001"), ("app_b", "0002"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_b", "0001"))
graph.add_dependency("app_b.0001", ("app_b", "0001"), ("app_a", "0003"))
# Test whole graph
self.assertRaises(
CircularDependencyError,
graph.forwards_plan, ("app_a", "0003"),
)
def test_circular_graph_2(self):
graph = MigrationGraph()
graph.add_node(('A', '0001'), None)
graph.add_node(('C', '0001'), None)
graph.add_node(('B', '0001'), None)
graph.add_dependency('A.0001', ('A', '0001'), ('B', '0001'))
graph.add_dependency('B.0001', ('B', '0001'), ('A', '0001'))
graph.add_dependency('C.0001', ('C', '0001'), ('B', '0001'))
self.assertRaises(
CircularDependencyError,
graph.forwards_plan, ('C', '0001')
)
def test_graph_recursive(self):
graph = MigrationGraph()
root = ("app_a", "1")
graph.add_node(root, None)
expected = [root]
for i in range(2, 750):
parent = ("app_a", str(i - 1))
child = ("app_a", str(i))
graph.add_node(child, None)
graph.add_dependency(str(i), child, parent)
expected.append(child)
leaf = expected[-1]
forwards_plan = graph.forwards_plan(leaf)
self.assertEqual(expected, forwards_plan)
backwards_plan = graph.backwards_plan(root)
self.assertEqual(expected[::-1], backwards_plan)
def test_graph_iterative(self):
graph = MigrationGraph()
root = ("app_a", "1")
graph.add_node(root, None)
expected = [root]
for i in range(2, 1000):
parent = ("app_a", str(i - 1))
child = ("app_a", str(i))
graph.add_node(child, None)
graph.add_dependency(str(i), child, parent)
expected.append(child)
leaf = expected[-1]
with warnings.catch_warnings(record=True) as w:
forwards_plan = graph.forwards_plan(leaf)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEqual(str(w[-1].message), RECURSION_DEPTH_WARNING)
self.assertEqual(expected, forwards_plan)
with warnings.catch_warnings(record=True) as w:
backwards_plan = graph.backwards_plan(root)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEqual(str(w[-1].message), RECURSION_DEPTH_WARNING)
self.assertEqual(expected[::-1], backwards_plan)
def test_plan_invalid_node(self):
"""
Tests for forwards/backwards_plan of nonexistent node.
"""
graph = MigrationGraph()
message = "Node ('app_b', '0001') not a valid node"
with self.assertRaisesMessage(NodeNotFoundError, message):
graph.forwards_plan(("app_b", "0001"))
with self.assertRaisesMessage(NodeNotFoundError, message):
graph.backwards_plan(("app_b", "0001"))
def test_missing_parent_nodes(self):
"""
Tests for missing parent nodes.
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
msg = "Migration app_a.0001 dependencies reference nonexistent parent node ('app_b', '0002')"
with self.assertRaisesMessage(NodeNotFoundError, msg):
graph.add_dependency("app_a.0001", ("app_a", "0001"), ("app_b", "0002"))
def test_missing_child_nodes(self):
"""
Tests for missing child nodes.
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
msg = "Migration app_a.0002 dependencies reference nonexistent child node ('app_a', '0002')"
with self.assertRaisesMessage(NodeNotFoundError, msg):
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
def test_infinite_loop(self):
"""
Tests a complex dependency graph:
app_a: 0001 <-
\
app_b: 0001 <- x 0002 <-
/ \
app_c: 0001<- <------------- x 0002
And apply squashing on app_c.
"""
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_node(("app_c", "0001_squashed_0002"), None)
graph.add_dependency("app_b.0001", ("app_b", "0001"), ("app_c", "0001_squashed_0002"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_a", "0001"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_b", "0001"))
graph.add_dependency("app_c.0001_squashed_0002", ("app_c", "0001_squashed_0002"), ("app_b", "0002"))
with self.assertRaises(CircularDependencyError):
graph.forwards_plan(("app_c", "0001_squashed_0002"))
def test_stringify(self):
graph = MigrationGraph()
self.assertEqual(force_text(graph), "Graph: 0 nodes, 0 edges")
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_b", "0002"))
self.assertEqual(force_text(graph), "Graph: 5 nodes, 3 edges")
self.assertEqual(repr(graph), "<MigrationGraph: nodes=5, edges=3>")
|
bsd-3-clause
|
nirmeshk/oh-mainline
|
vendor/packages/Django/django/views/decorators/cache.py
|
106
|
3973
|
from functools import wraps
from django.utils.decorators import decorator_from_middleware_with_args, available_attrs
from django.utils.cache import patch_cache_control, add_never_cache_headers
from django.middleware.cache import CacheMiddleware
def cache_page(*args, **kwargs):
"""
Decorator for views that tries getting the page from the cache and
populates the cache if the page isn't in the cache yet.
The cache is keyed by the URL and some data from the headers.
Additionally there is the key prefix that is used to distinguish different
cache areas in a multi-site setup. You could use the
sites.get_current_site().domain, for example, as that is unique across a Django
project.
Additionally, all headers from the response's Vary header will be taken
into account on caching -- just like the middleware does.
"""
# We need backwards compatibility with code which spells it this way:
# def my_view(): pass
# my_view = cache_page(my_view, 123)
# and this way:
# my_view = cache_page(123)(my_view)
# and this:
# my_view = cache_page(my_view, 123, key_prefix="foo")
# and this:
# my_view = cache_page(123, key_prefix="foo")(my_view)
# and possibly this way (?):
# my_view = cache_page(123, my_view)
# and also this way:
# my_view = cache_page(my_view)
# and also this way:
# my_view = cache_page()(my_view)
# We also add some asserts to give better error messages in case people are
# using other ways to call cache_page that no longer work.
cache_alias = kwargs.pop('cache', None)
key_prefix = kwargs.pop('key_prefix', None)
assert not kwargs, "The only keyword arguments are cache and key_prefix"
def warn():
import warnings
warnings.warn('The cache_page decorator must be called like: '
'cache_page(timeout, [cache=cache name], [key_prefix=key prefix]). '
'All other ways are deprecated.',
DeprecationWarning,
stacklevel=2)
if len(args) > 1:
assert len(args) == 2, "cache_page accepts at most 2 arguments"
warn()
if callable(args[0]):
return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[1], cache_alias=cache_alias, key_prefix=key_prefix)(args[0])
elif callable(args[1]):
return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[0], cache_alias=cache_alias, key_prefix=key_prefix)(args[1])
else:
assert False, "cache_page must be passed a view function if called with two arguments"
elif len(args) == 1:
if callable(args[0]):
warn()
return decorator_from_middleware_with_args(CacheMiddleware)(cache_alias=cache_alias, key_prefix=key_prefix)(args[0])
else:
# The One True Way
return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[0], cache_alias=cache_alias, key_prefix=key_prefix)
else:
warn()
return decorator_from_middleware_with_args(CacheMiddleware)(cache_alias=cache_alias, key_prefix=key_prefix)
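# A short usage sketch (hedged: assumes a configured Django project, and the
# view is a stand-in) of "The One True Way" described above:
#
#     @cache_page(60 * 15, key_prefix="site1")
#     def my_view(request):
#         ...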
def cache_control(**kwargs):
def _cache_controller(viewfunc):
@wraps(viewfunc, assigned=available_attrs(viewfunc))
def _cache_controlled(request, *args, **kw):
response = viewfunc(request, *args, **kw)
patch_cache_control(response, **kwargs)
return response
return _cache_controlled
return _cache_controller
def never_cache(view_func):
"""
Decorator that adds headers to a response so that it will
never be cached.
"""
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view_func(request, *args, **kwargs):
response = view_func(request, *args, **kwargs)
add_never_cache_headers(response)
return response
return _wrapped_view_func
|
agpl-3.0
|
simonzhangsm/voltdb
|
third_party/python/schemaobject/schemaobject/foreignkey.py
|
15
|
7854
|
import re
from schemaobject.collections import OrderedDict
REGEX_FK_REFERENCE_OPTIONS = r"""
`%s`(?:.(?!ON\ DELETE)(?!ON\ UPDATE))*
(?:\sON\sDELETE\s(?P<on_delete>(?:RESTRICT|CASCADE|SET\ NULL|NO\ ACTION)))?
(?:\sON\sUPDATE\s(?P<on_update>(?:RESTRICT|CASCADE|SET\ NULL|NO\ ACTION)))?
"""
def ForeignKeySchemaBuilder(table):
"""
Returns a dictionary loaded with all of the foreign keys available in the table.
``table`` must be an instance of TableSchema.
.. note::
This function is automatically called for you and set to
``schema.databases[name].tables[name].foreign_keys`` when you create an instance of SchemaObject
"""
conn = table.parent.parent.connection
fkeys = OrderedDict()
sql = """
SELECT K.CONSTRAINT_NAME,
K.TABLE_SCHEMA, K.TABLE_NAME, K.COLUMN_NAME,
K.REFERENCED_TABLE_SCHEMA, K.REFERENCED_TABLE_NAME, K.REFERENCED_COLUMN_NAME,
K.POSITION_IN_UNIQUE_CONSTRAINT
FROM information_schema.KEY_COLUMN_USAGE K, information_schema.TABLE_CONSTRAINTS T
WHERE K.CONSTRAINT_NAME = T.CONSTRAINT_NAME
AND T.CONSTRAINT_TYPE = 'FOREIGN KEY'
AND K.CONSTRAINT_SCHEMA='%s'
AND K.TABLE_NAME='%s'
"""
constraints = conn.execute(sql % (table.parent.name, table.name))
if not constraints:
return fkeys
table_def = conn.execute("SHOW CREATE TABLE `%s`.`%s`" % (table.parent.name, table.name))[0]['Create Table']
for fk in constraints:
n = fk['CONSTRAINT_NAME']
if n not in fkeys:
FKItem = ForeignKeySchema(name=n, parent=table)
FKItem.symbol = n
FKItem.table_schema = fk['TABLE_SCHEMA']
FKItem.table_name = fk['TABLE_NAME']
FKItem.referenced_table_schema = fk['REFERENCED_TABLE_SCHEMA']
FKItem.referenced_table_name = fk['REFERENCED_TABLE_NAME']
reference_options = re.search(REGEX_FK_REFERENCE_OPTIONS % n, table_def, re.X)
if reference_options:
                # If ON DELETE or ON UPDATE is not specified, the default action is RESTRICT.
FKItem.update_rule = reference_options.group('on_update') or 'RESTRICT'
FKItem.delete_rule = reference_options.group('on_delete') or 'RESTRICT'
fkeys[n] = FKItem
#columns for this fk
if fk['COLUMN_NAME'] not in fkeys[n].columns:
fkeys[n].columns.insert(fk['POSITION_IN_UNIQUE_CONSTRAINT'], fk['COLUMN_NAME'])
#referenced columns for this fk
if fk['REFERENCED_COLUMN_NAME'] not in fkeys[n].referenced_columns:
fkeys[n].referenced_columns.insert(fk['POSITION_IN_UNIQUE_CONSTRAINT'], fk['REFERENCED_COLUMN_NAME'])
return fkeys
class ForeignKeySchema(object):
"""
Object representation of a single foreign key.
Supports equality and inequality comparison of ForeignKeySchema.
``name`` is the column name.
``parent`` is an instance of TableSchema
.. note::
ForeignKeySchema objects are automatically created for you by ForeignKeySchemaBuilder
and loaded under ``schema.databases[name].tables[name].foreign_keys``
Example
>>> schema.databases['sakila'].tables['rental'].foreign_keys.keys()
['fk_rental_customer', 'fk_rental_inventory', 'fk_rental_staff']
Foreign Key Attributes
>>> schema.databases['sakila'].tables['rental'].foreign_keys['fk_rental_inventory'].name
'fk_rental_inventory'
>>> schema.databases['sakila'].tables['rental'].foreign_keys['fk_rental_inventory'].symbol
'fk_rental_inventory'
>>> schema.databases['sakila'].tables['rental'].foreign_keys['fk_rental_inventory'].table_schema
'sakila'
>>> schema.databases['sakila'].tables['rental'].foreign_keys['fk_rental_inventory'].table_name
'rental'
>>> schema.databases['sakila'].tables['rental'].foreign_keys['fk_rental_inventory'].columns
['inventory_id']
>>> schema.databases['sakila'].tables['rental'].foreign_keys['fk_rental_inventory'].referenced_table_name
'inventory'
>>> schema.databases['sakila'].tables['rental'].foreign_keys['fk_rental_inventory'].referenced_table_schema
'sakila'
>>> schema.databases['sakila'].tables['rental'].foreign_keys['fk_rental_inventory'].referenced_columns
['inventory_id']
#match_option will always be None in MySQL 5.x, 6.x
>>> schema.databases['sakila'].tables['rental'].foreign_keys['fk_rental_inventory'].match_option
>>> schema.databases['sakila'].tables['rental'].foreign_keys['fk_rental_inventory'].update_rule
'CASCADE'
>>> schema.databases['sakila'].tables['rental'].foreign_keys['fk_rental_inventory'].delete_rule
'RESTRICT'
"""
def __init__(self, name, parent):
self.parent = parent
#name of the fk constraint
self.name = name
self.symbol = name
#primary table info
self.table_schema = None
self.table_name = None
self.columns = []
#referenced table info
self.referenced_table_schema = None
self.referenced_table_name = None
self.referenced_columns = []
#constraint options
self.match_option = None #will always be none in mysql 5.0-6.0
self.update_rule = None
self.delete_rule = None
@classmethod
    def _format_referenced_col(cls, field, length):
"""
Generate the SQL to format referenced columns in a foreign key
"""
if length:
return "`%s`(%d)" % (field, length)
else:
return "`%s`" % (field)
def create(self):
"""
Generate the SQL to create (ADD) this foreign key
>>> schema.databases['sakila'].tables['rental'].foreign_keys['fk_rental_inventory'].create()
'ADD CONSTRAINT `fk_rental_inventory`
FOREIGN KEY `fk_rental_inventory` (`inventory_id`)
REFERENCES `inventory` (`inventory_id`)
ON DELETE RESTRICT ON UPDATE CASCADE'
.. note:
match_option is ignored when creating a foreign key.
"""
sql = []
sql.append("ADD CONSTRAINT `%s`" % self.symbol)
sql.append("FOREIGN KEY `%s` (%s)" % (self.symbol, ",".join([("`%s`" % c) for c in self.columns])))
if self.referenced_table_schema != self.table_schema:
sql.append("REFERENCES `%s`.`%s`" % (self.referenced_table_schema, self.referenced_table_name))
else:
sql.append("REFERENCES `%s`" % self.referenced_table_name)
sql.append("(%s)" % ",".join([("`%s`" % c) for c in self.referenced_columns]))
if self.delete_rule:
sql.append("ON DELETE %s" % self.delete_rule)
if self.update_rule:
sql.append("ON UPDATE %s" % self.update_rule)
return ' '.join(sql)
def drop(self):
"""
Generate the SQL to drop this foreign key
>>> schema.databases['sakila'].tables['rental'].foreign_keys['fk_rental_inventory'].drop()
'DROP FOREIGN KEY `fk_rental_inventory`'
"""
return "DROP FOREIGN KEY `%s`" % (self.symbol)
def __eq__(self, other):
if not isinstance(other, ForeignKeySchema):
return False
# table_schema and referenced_table_schema are ignored
return ((self.table_name == other.table_name)
and (self.referenced_table_name == other.referenced_table_name)
and (self.update_rule == other.update_rule)
and (self.delete_rule == other.delete_rule)
and (self.columns == other.columns)
and (self.referenced_columns == other.referenced_columns))
def __ne__(self, other):
return not self.__eq__(other)
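def _demo_fk_create_sql():
    # A hedged sketch (hypothetical helper, not part of the module):
    # populating a ForeignKeySchema by hand and rendering its ADD CONSTRAINT
    # clause, mirroring the create() docstring above.
    fk = ForeignKeySchema(name='fk_rental_inventory', parent=None)
    fk.table_schema = 'sakila'
    fk.table_name = 'rental'
    fk.columns = ['inventory_id']
    fk.referenced_table_schema = 'sakila'
    fk.referenced_table_name = 'inventory'
    fk.referenced_columns = ['inventory_id']
    fk.update_rule = 'CASCADE'
    fk.delete_rule = 'RESTRICT'
    return fk.create()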
|
agpl-3.0
|
Distrotech/scons
|
build/scons/build/lib/SCons/Tool/sunlink.py
|
2
|
2417
|
"""SCons.Tool.sunlink
Tool-specific initialization for the Sun Solaris (Forte) linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunlink.py 2014/01/04 01:12:18 root"
import os
import os.path
import SCons.Util
import link
ccLinker = None
# search for the acc compiler and linker front end
try:
dirs = os.listdir('/opt')
except (IOError, OSError):
# Not being able to read the directory because it doesn't exist
# (IOError) or isn't readable (OSError) is okay.
dirs = []
for d in dirs:
linker = '/opt/' + d + '/bin/CC'
if os.path.exists(linker):
ccLinker = linker
break
def generate(env):
"""Add Builders and construction variables for Forte to an Environment."""
link.generate(env)
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -G')
env['RPATHPREFIX'] = '-R'
env['RPATHSUFFIX'] = ''
env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'
def exists(env):
return ccLinker
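# A hedged usage sketch (not part of the tool): tools are normally selected
# through the generic mechanism, e.g.
#
#     env = Environment(tools=['default', 'sunlink'])
#
# which calls exists() above to probe for /opt/*/bin/CC and generate() to
# install the Forte-specific link settings.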
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mit
|
alfa-jor/addon
|
mediaserver/platformcode/html_info_window.py
|
3
|
9787
|
# -*- coding: utf-8 -*-
from core.tmdb import Tmdb
from platformcode import logger
class InfoWindow(object):
otmdb = None
item_title = ""
item_serie = ""
item_temporada = 0
item_episodio = 0
result = {}
@staticmethod
def get_language(lng):
        # Convert the language code to its English name
languages = {
'aa': 'Afar', 'ab': 'Abkhazian', 'af': 'Afrikaans', 'ak': 'Akan', 'sq': 'Albanian', 'am': 'Amharic',
'ar': 'Arabic', 'an': 'Aragonese', 'as': 'Assamese', 'av': 'Avaric', 'ae': 'Avestan',
'ay': 'Aymara', 'az': 'Azerbaijani', 'ba': 'Bashkir', 'bm': 'Bambara', 'eu': 'Basque',
'be': 'Belarusian', 'bn': 'Bengali', 'bh': 'Bihari languages', 'bi': 'Bislama',
'bo': 'Tibetan', 'bs': 'Bosnian', 'br': 'Breton', 'bg': 'Bulgarian', 'my': 'Burmese',
'ca': 'Catalan; Valencian', 'cs': 'Czech', 'ch': 'Chamorro', 'ce': 'Chechen', 'zh': 'Chinese',
'cu': 'Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic',
'cv': 'Chuvash', 'kw': 'Cornish', 'co': 'Corsican', 'cr': 'Cree', 'cy': 'Welsh',
'da': 'Danish', 'de': 'German', 'dv': 'Divehi; Dhivehi; Maldivian', 'nl': 'Dutch; Flemish',
'dz': 'Dzongkha', 'en': 'English', 'eo': 'Esperanto',
'et': 'Estonian', 'ee': 'Ewe', 'fo': 'Faroese', 'fa': 'Persian', 'fj': 'Fijian',
'fi': 'Finnish', 'fr': 'French', 'fy': 'Western Frisian', 'ff': 'Fulah',
'Ga': 'Georgian', 'gd': 'Gaelic; Scottish Gaelic', 'ga': 'Irish', 'gl': 'Galician',
'gv': 'Manx', 'el': 'Greek, Modern (1453-)', 'gn': 'Guarani', 'gu': 'Gujarati',
'ht': 'Haitian; Haitian Creole', 'ha': 'Hausa', 'he': 'Hebrew', 'hz': 'Herero', 'hi': 'Hindi',
'ho': 'Hiri Motu', 'hr': 'Croatian', 'hu': 'Hungarian', 'hy': 'Armenian', 'ig': 'Igbo',
'is': 'Icelandic', 'io': 'Ido', 'ii': 'Sichuan Yi; Nuosu', 'iu': 'Inuktitut',
'ie': 'Interlingue; Occidental', 'ia': 'Interlingua (International Auxiliary Language Association)',
'id': 'Indonesian', 'ik': 'Inupiaq', 'it': 'Italian', 'jv': 'Javanese',
'ja': 'Japanese', 'kl': 'Kalaallisut; Greenlandic', 'kn': 'Kannada', 'ks': 'Kashmiri',
'ka': 'Georgian', 'kr': 'Kanuri', 'kk': 'Kazakh', 'km': 'Central Khmer', 'ki': 'Kikuyu; Gikuyu',
'rw': 'Kinyarwanda', 'ky': 'Kirghiz; Kyrgyz', 'kv': 'Komi', 'kg': 'Kongo', 'ko': 'Korean',
'kj': 'Kuanyama; Kwanyama', 'ku': 'Kurdish', 'lo': 'Lao', 'la': 'Latin', 'lv': 'Latvian',
'li': 'Limburgan; Limburger; Limburgish', 'ln': 'Lingala', 'lt': 'Lithuanian',
'lb': 'Luxembourgish; Letzeburgesch', 'lu': 'Luba-Katanga', 'lg': 'Ganda', 'mk': 'Macedonian',
'mh': 'Marshallese', 'ml': 'Malayalam', 'mi': 'Maori', 'mr': 'Marathi', 'ms': 'Malay', 'Mi': 'Micmac',
'mg': 'Malagasy', 'mt': 'Maltese', 'mn': 'Mongolian', 'na': 'Nauru',
'nv': 'Navajo; Navaho', 'nr': 'Ndebele, South; South Ndebele', 'nd': 'Ndebele, North; North Ndebele',
'ng': 'Ndonga', 'ne': 'Nepali', 'nn': 'Norwegian Nynorsk; Nynorsk, Norwegian',
'nb': 'Bokmål, Norwegian; Norwegian Bokmål', 'no': 'Norwegian', 'oc': 'Occitan (post 1500)',
'oj': 'Ojibwa', 'or': 'Oriya', 'om': 'Oromo', 'os': 'Ossetian; Ossetic', 'pa': 'Panjabi; Punjabi',
'pi': 'Pali', 'pl': 'Polish', 'pt': 'Portuguese', 'ps': 'Pushto; Pashto', 'qu': 'Quechua',
'ro': 'Romanian; Moldavian; Moldovan', 'rn': 'Rundi', 'ru': 'Russian', 'sg': 'Sango', 'rm': 'Romansh',
'sa': 'Sanskrit', 'si': 'Sinhala; Sinhalese', 'sk': 'Slovak', 'sl': 'Slovenian', 'se': 'Northern Sami',
'sm': 'Samoan', 'sn': 'Shona', 'sd': 'Sindhi', 'so': 'Somali', 'st': 'Sotho, Southern', 'es': 'Spanish',
'sc': 'Sardinian', 'sr': 'Serbian', 'ss': 'Swati', 'su': 'Sundanese', 'sw': 'Swahili', 'sv': 'Swedish',
'ty': 'Tahitian', 'ta': 'Tamil', 'tt': 'Tatar', 'te': 'Telugu', 'tg': 'Tajik', 'tl': 'Tagalog',
'th': 'Thai', 'ti': 'Tigrinya', 'to': 'Tonga (Tonga Islands)', 'tn': 'Tswana', 'ts': 'Tsonga',
'tk': 'Turkmen', 'tr': 'Turkish', 'tw': 'Twi', 'ug': 'Uighur; Uyghur', 'uk': 'Ukrainian',
'ur': 'Urdu', 'uz': 'Uzbek', 've': 'Venda', 'vi': 'Vietnamese', 'vo': 'Volapük',
'wa': 'Walloon', 'wo': 'Wolof', 'xh': 'Xhosa', 'yi': 'Yiddish', 'yo': 'Yoruba', 'za': 'Zhuang; Chuang',
'zu': 'Zulu'}
return languages.get(lng, lng)
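    # For example, get_language('es') returns 'Spanish'; unknown codes fall
    # through unchanged, e.g. get_language('xx') -> 'xx'.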
def get_scraper_data(self, data_in):
self.otmdb = None
# logger.debug(str(data_in))
if self.listData:
            # Data common to all listings
infoLabels = self.scraper().get_infoLabels(origen=data_in)
if "original_language" in infoLabels:
infoLabels["language"] = self.get_language(infoLabels["original_language"])
if "vote_average" in data_in and "vote_count" in data_in:
infoLabels["puntuacion"] = str(data_in["vote_average"]) + "/10 (" + str(data_in["vote_count"]) + ")"
self.result = infoLabels
def start(self, handler, data, caption="Información del vídeo", item=None, scraper=Tmdb):
        # Capture the parameters
self.caption = caption
self.item = item
self.indexList = -1
self.listData = []
self.handler = handler
self.scraper = scraper
logger.debug(data)
if type(data) == list:
self.listData = data
self.indexList = 0
data = self.listData[self.indexList]
self.get_scraper_data(data)
ID = self.update_window()
return self.onClick(ID)
def update_window(self):
JsonData = {}
JsonData["action"] = "OpenInfo"
JsonData["data"] = {}
JsonData["data"]["buttons"] = len(self.listData) > 0
JsonData["data"]["previous"] = self.indexList > 0
JsonData["data"]["next"] = self.indexList + 1 < len(self.listData)
JsonData["data"]["count"] = "(%s/%s)" % (self.indexList + 1, len(self.listData))
JsonData["data"]["title"] = self.caption
JsonData["data"]["fanart"] = self.result.get("fanart", "")
JsonData["data"]["thumbnail"] = self.result.get("thumbnail", "")
JsonData["data"]["lines"] = []
if self.result.get("mediatype", "movie") == "movie":
JsonData["data"]["lines"].append({"title": "Título:", "text": self.result.get("title", "N/A")})
JsonData["data"]["lines"].append(
{"title": "Título Original:", "text": self.result.get("originaltitle", "N/A")})
JsonData["data"]["lines"].append({"title": "Idioma Original:", "text": self.result.get("language", "N/A")})
JsonData["data"]["lines"].append({"title": "Puntuación:", "text": self.result.get("puntuacion", "N/A")})
JsonData["data"]["lines"].append({"title": "Lanzamiento:", "text": self.result.get("release_date", "N/A")})
JsonData["data"]["lines"].append({"title": "Generos:", "text": self.result.get("genre", "N/A")})
JsonData["data"]["lines"].append({"title": "", "text": ""})
else:
JsonData["data"]["lines"].append({"title": "Serie:", "text": self.result.get("title", "N/A")})
JsonData["data"]["lines"].append({"title": "Idioma Original:", "text": self.result.get("language", "N/A")})
JsonData["data"]["lines"].append({"title": "Puntuación:", "text": self.result.get("puntuacion", "N/A")})
JsonData["data"]["lines"].append({"title": "Generos:", "text": self.result.get("genre", "N/A")})
if self.result.get("season"):
JsonData["data"]["lines"].append(
{"title": "Titulo temporada:", "text": self.result.get("temporada_nombre", "N/A")})
JsonData["data"]["lines"].append({"title": "Temporada:",
"text": self.result.get("season", "N/A") + " de " + self.result.get(
"seasons", "N/A")})
JsonData["data"]["lines"].append({"title": "", "text": ""})
if self.result.get("episode"):
JsonData["data"]["lines"].append({"title": "Titulo:", "text": self.result.get("episode_title", "N/A")})
JsonData["data"]["lines"].append({"title": "Episodio:",
"text": self.result.get("episode", "N/A") + " de " + self.result.get(
"episodes", "N/A")})
JsonData["data"]["lines"].append({"title": "Emisión:", "text": self.result.get("date", "N/A")})
if self.result.get("plot"):
JsonData["data"]["lines"].append({"title": "Sinopsis:", "text": self.result["plot"]})
else:
JsonData["data"]["lines"].append({"title": "", "text": ""})
ID = self.handler.send_message(JsonData)
return ID
def onClick(self, ID):
while True:
response = self.handler.get_data(ID)
if response == "ok":
return self.listData[self.indexList]
elif response == "close":
return None
elif response == "next" and self.indexList < len(self.listData) - 1:
self.indexList += 1
self.get_scraper_data(self.listData[self.indexList])
ID = self.update_window()
elif response == "previous" and self.indexList > 0:
self.indexList -= 1
self.get_scraper_data(self.listData[self.indexList])
ID = self.update_window()
|
gpl-3.0
|