repo_name | path | copies | size | content | license
---|---|---|---|---|---
oasis-open/cti-pattern-validator
|
setup.py
|
1
|
1795
|
#!/usr/bin/env python
from setuptools import find_packages, setup
with open('README.rst') as f:
readme = f.read()
doc_requires = [
'sphinx',
'sphinx-prompt',
]
test_requires = [
'coverage',
'pytest',
'pytest-cov',
]
dev_requires = doc_requires + test_requires + [
'bumpversion',
'check-manifest',
'pre-commit',
# tox is listed here rather than in test_requires because test_requires
# are installed into every tox environment, and tox itself should not be.
'tox',
]
setup(
name='stix2-patterns',
version='1.3.2',
description='Validate STIX 2 Patterns.',
long_description=readme,
long_description_content_type='text/x-rst',
url="https://github.com/oasis-open/cti-pattern-validator",
author='OASIS Cyber Threat Intelligence Technical Committee',
author_email='cti-users@lists.oasis-open.org',
maintainer='Chris Lenk',
maintainer_email='clenk@mitre.org',
python_requires=">=3.6",
packages=find_packages(),
install_requires=[
'antlr4-python3-runtime~=4.9.0',
'six',
],
package_data={
'stix2patterns.test.v20': ['spec_examples.txt'],
'stix2patterns.test.v21': ['spec_examples.txt'],
},
entry_points={
'console_scripts': [
'validate-patterns = stix2patterns.validator:main',
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
extras_require={
'dev': dev_requires,
'docs': doc_requires,
'test': test_requires,
},
)
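# Editor sketch (not part of the original file): the extras declared above
# can be installed from a local checkout, for example:
#
#     pip install -e .          # runtime dependencies only
#     pip install -e .[test]    # plus pytest/coverage
#     pip install -e .[dev]     # docs + test + release tooling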
|
bsd-3-clause
|
zerc/django
|
tests/flatpages_tests/test_forms.py
|
155
|
4568
|
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.flatpages.forms import FlatpageForm
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from django.utils import translation
@modify_settings(INSTALLED_APPS={'append': ['django.contrib.flatpages', ]})
@override_settings(SITE_ID=1)
class FlatpageAdminFormTests(TestCase):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
def setUp(self):
# Site fields cache needs to be cleared after flatpages is added to
# INSTALLED_APPS
Site._meta._expire_cache()
self.form_data = {
'title': "A test page",
'content': "This is a test",
'sites': [settings.SITE_ID],
}
def test_flatpage_admin_form_url_validation(self):
"The flatpage admin form correctly validates urls"
self.assertTrue(FlatpageForm(data=dict(url='/new_flatpage/', **self.form_data)).is_valid())
self.assertTrue(FlatpageForm(data=dict(url='/some.special~chars/', **self.form_data)).is_valid())
self.assertTrue(FlatpageForm(data=dict(url='/some.very_special~chars-here/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a space/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a % char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a ! char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a & char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a ? char/', **self.form_data)).is_valid())
def test_flatpage_requires_leading_slash(self):
form = FlatpageForm(data=dict(url='no_leading_slash/', **self.form_data))
with translation.override('en'):
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['url'], ["URL is missing a leading slash."])
@override_settings(APPEND_SLASH=True,
MIDDLEWARE_CLASSES=['django.middleware.common.CommonMiddleware'])
def test_flatpage_requires_trailing_slash_with_append_slash(self):
form = FlatpageForm(data=dict(url='/no_trailing_slash', **self.form_data))
with translation.override('en'):
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['url'], ["URL is missing a trailing slash."])
@override_settings(APPEND_SLASH=False,
MIDDLEWARE_CLASSES=['django.middleware.common.CommonMiddleware'])
def test_flatpage_doesnt_requires_trailing_slash_without_append_slash(self):
form = FlatpageForm(data=dict(url='/no_trailing_slash', **self.form_data))
self.assertTrue(form.is_valid())
def test_flatpage_admin_form_url_uniqueness_validation(self):
"The flatpage admin form correctly enforces url uniqueness among flatpages of the same site"
data = dict(url='/myflatpage1/', **self.form_data)
FlatpageForm(data=data).save()
f = FlatpageForm(data=data)
with translation.override('en'):
self.assertFalse(f.is_valid())
self.assertEqual(
f.errors,
{'__all__': ['Flatpage with url /myflatpage1/ already exists for site example.com']})
def test_flatpage_admin_form_edit(self):
"""
Existing flatpages can be edited in the admin form without triggering
the url-uniqueness validation.
"""
existing = FlatPage.objects.create(
url="/myflatpage1/", title="Some page", content="The content")
existing.sites.add(settings.SITE_ID)
data = dict(url='/myflatpage1/', **self.form_data)
f = FlatpageForm(data=data, instance=existing)
self.assertTrue(f.is_valid(), f.errors)
updated = f.save()
self.assertEqual(updated.title, "A test page")
def test_flatpage_nosites(self):
data = dict(url='/myflatpage1/', **self.form_data)
data.update({'sites': ''})
f = FlatpageForm(data=data)
self.assertFalse(f.is_valid())
self.assertEqual(
f.errors,
{'sites': [translation.ugettext('This field is required.')]})
|
bsd-3-clause
|
Bysmyyr/chromium-crosswalk
|
tools/usb_gadget/gadget_test.py
|
37
|
12748
|
#!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import uuid
import mock
import gadget
import usb_constants
import usb_descriptors
device_desc = usb_descriptors.DeviceDescriptor(
idVendor=0x18D1, # Google Inc.
idProduct=0xFF00,
bcdUSB=0x0200,
iManufacturer=1,
iProduct=2,
iSerialNumber=3,
bNumConfigurations=1,
bcdDevice=0x0100)
fs_config_desc = usb_descriptors.ConfigurationDescriptor(
bmAttributes=0xC0,
MaxPower=50)
fs_interface_desc = usb_descriptors.InterfaceDescriptor(
bInterfaceNumber=0
)
fs_config_desc.AddInterface(fs_interface_desc)
fs_bulk_in_endpoint_desc = usb_descriptors.EndpointDescriptor(
bEndpointAddress=0x01,
bmAttributes=usb_constants.TransferType.BULK,
wMaxPacketSize=64,
bInterval=0
)
fs_interface_desc.AddEndpoint(fs_bulk_in_endpoint_desc)
fs_bulk_out_endpoint_desc = usb_descriptors.EndpointDescriptor(
bEndpointAddress=0x81,
bmAttributes=usb_constants.TransferType.BULK,
wMaxPacketSize=64,
bInterval=0
)
fs_interface_desc.AddEndpoint(fs_bulk_out_endpoint_desc)
fs_alt_interface_desc = usb_descriptors.InterfaceDescriptor(
bInterfaceNumber=0,
bAlternateSetting=1
)
fs_config_desc.AddInterface(fs_alt_interface_desc)
fs_interrupt_in_endpoint_desc = usb_descriptors.EndpointDescriptor(
bEndpointAddress=0x01,
bmAttributes=usb_constants.TransferType.INTERRUPT,
wMaxPacketSize=64,
bInterval=1
)
fs_alt_interface_desc.AddEndpoint(fs_interrupt_in_endpoint_desc)
fs_interrupt_out_endpoint_desc = usb_descriptors.EndpointDescriptor(
bEndpointAddress=0x81,
bmAttributes=usb_constants.TransferType.INTERRUPT,
wMaxPacketSize=64,
bInterval=1
)
fs_alt_interface_desc.AddEndpoint(fs_interrupt_out_endpoint_desc)
hs_config_desc = usb_descriptors.ConfigurationDescriptor(
bmAttributes=0xC0,
MaxPower=50)
hs_interface_desc = usb_descriptors.InterfaceDescriptor(
bInterfaceNumber=0
)
hs_config_desc.AddInterface(hs_interface_desc)
hs_bulk_in_endpoint_desc = usb_descriptors.EndpointDescriptor(
bEndpointAddress=0x01,
bmAttributes=usb_constants.TransferType.BULK,
wMaxPacketSize=512,
bInterval=0
)
hs_interface_desc.AddEndpoint(hs_bulk_in_endpoint_desc)
hs_bulk_out_endpoint_desc = usb_descriptors.EndpointDescriptor(
bEndpointAddress=0x81,
bmAttributes=usb_constants.TransferType.BULK,
wMaxPacketSize=512,
bInterval=0
)
hs_interface_desc.AddEndpoint(hs_bulk_out_endpoint_desc)
hs_alt_interface_desc = usb_descriptors.InterfaceDescriptor(
bInterfaceNumber=0,
bAlternateSetting=1
)
hs_config_desc.AddInterface(hs_alt_interface_desc)
hs_interrupt_in_endpoint_desc = usb_descriptors.EndpointDescriptor(
bEndpointAddress=0x01,
bmAttributes=usb_constants.TransferType.INTERRUPT,
wMaxPacketSize=256,
bInterval=1
)
hs_alt_interface_desc.AddEndpoint(hs_interrupt_in_endpoint_desc)
hs_interrupt_out_endpoint_desc = usb_descriptors.EndpointDescriptor(
bEndpointAddress=0x81,
bmAttributes=usb_constants.TransferType.INTERRUPT,
wMaxPacketSize=256,
bInterval=1
)
hs_alt_interface_desc.AddEndpoint(hs_interrupt_out_endpoint_desc)
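# Editor note (added): in USB, bit 7 of bEndpointAddress encodes direction
# (1 = IN, device-to-host; 0 = OUT). A minimal check, for illustration only:
#
#     def is_in_endpoint(address):
#         return bool(address & 0x80)
#
#     assert is_in_endpoint(0x81) and not is_in_endpoint(0x01)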
class GadgetTest(unittest.TestCase):
def test_get_descriptors(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
self.assertEquals(g.GetDeviceDescriptor(), device_desc)
self.assertEquals(g.GetFullSpeedConfigurationDescriptor(), fs_config_desc)
self.assertEquals(g.GetHighSpeedConfigurationDescriptor(), hs_config_desc)
with self.assertRaisesRegexp(RuntimeError, 'not connected'):
g.GetConfigurationDescriptor()
def test_connect_full_speed(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
g.Connected(mock.Mock(), usb_constants.Speed.FULL)
self.assertTrue(g.IsConnected())
self.assertEquals(g.GetSpeed(), usb_constants.Speed.FULL)
self.assertEquals(g.GetConfigurationDescriptor(), fs_config_desc)
g.Disconnected()
self.assertFalse(g.IsConnected())
def test_connect_high_speed(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
g.Connected(mock.Mock(), usb_constants.Speed.HIGH)
self.assertTrue(g.IsConnected())
self.assertEquals(g.GetSpeed(), usb_constants.Speed.HIGH)
self.assertEquals(g.GetConfigurationDescriptor(), hs_config_desc)
g.Disconnected()
self.assertFalse(g.IsConnected())
def test_string_index_out_of_range(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
with self.assertRaisesRegexp(ValueError, 'index out of range'):
g.AddStringDescriptor(0, 'Hello world!')
def test_language_id_out_of_range(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
with self.assertRaisesRegexp(ValueError, 'language code out of range'):
g.AddStringDescriptor(1, 'Hello world!', lang=-1)
def test_get_languages(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
g.AddStringDescriptor(1, 'Hello world!')
desc = g.ControlRead(0x80, 6, 0x0300, 0, 255)
self.assertEquals(desc, '\x04\x03\x09\x04')
def test_get_string_descriptor(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
g.AddStringDescriptor(1, 'Hello world!')
desc = g.ControlRead(0x80, 6, 0x0301, 0x0409, 255)
self.assertEquals(desc, '\x1A\x03H\0e\0l\0l\0o\0 \0w\0o\0r\0l\0d\0!\0')
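# Editor note (added): for GET_DESCRIPTOR, wValue packs
# (descriptor type << 8) | index, so 0x0301 above requests string
# descriptor 1, and wIndex 0x0409 selects US English; the reply is length,
# type (0x03), then the string as UTF-16LE.
#
#     w_value = (0x03 << 8) | 1  # == 0x0301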
def test_get_missing_string_descriptor(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
g.AddStringDescriptor(1, 'Hello world!')
desc = g.ControlRead(0x80, 6, 0x0302, 0x0409, 255)
self.assertEquals(desc, None)
def test_get_missing_string_language(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
g.AddStringDescriptor(1, 'Hello world!')
desc = g.ControlRead(0x80, 6, 0x0301, 0x040C, 255)
self.assertEquals(desc, None)
def test_class_and_vendor_transfers(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
self.assertIsNone(g.ControlRead(0xA0, 0, 0, 0, 0))
self.assertIsNone(g.ControlRead(0xC0, 0, 0, 0, 0))
self.assertIsNone(g.ControlWrite(0x20, 0, 0, 0, ''))
self.assertIsNone(g.ControlWrite(0x40, 0, 0, 0, ''))
def test_set_configuration(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
chip = mock.Mock()
g.Connected(chip, usb_constants.Speed.HIGH)
g.ControlWrite(0, 9, 1, 0, 0)
chip.StartEndpoint.assert_has_calls([
mock.call(hs_bulk_in_endpoint_desc),
mock.call(hs_bulk_out_endpoint_desc)
])
def test_set_configuration_zero(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
chip = mock.Mock()
g.Connected(chip, usb_constants.Speed.HIGH)
g.ControlWrite(0, 9, 1, 0, 0)
chip.StartEndpoint.reset_mock()
g.ControlWrite(0, 9, 0, 0, 0)
chip.StopEndpoint.assert_has_calls([
mock.call(0x01),
mock.call(0x81)
])
def test_set_bad_configuration(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
g.Connected(mock.Mock(), usb_constants.Speed.HIGH)
self.assertIsNone(g.ControlWrite(0, 9, 2, 0, 0))
def test_set_interface(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
chip = mock.Mock()
g.Connected(chip, usb_constants.Speed.HIGH)
self.assertTrue(g.ControlWrite(0, 9, 1, 0, 0))
chip.reset_mock()
self.assertTrue(g.ControlWrite(1, 11, 1, 0, 0))
chip.StopEndpoint.assert_has_calls([
mock.call(0x01),
mock.call(0x81)
])
chip.StartEndpoint.assert_has_calls([
mock.call(hs_interrupt_in_endpoint_desc),
mock.call(hs_interrupt_out_endpoint_desc)
])
chip.reset_mock()
self.assertTrue(g.ControlWrite(1, 11, 0, 0, 0))
chip.StopEndpoint.assert_has_calls([
mock.call(0x01),
mock.call(0x81)
])
chip.StartEndpoint.assert_has_calls([
mock.call(hs_bulk_in_endpoint_desc),
mock.call(hs_bulk_out_endpoint_desc)
])
def test_set_bad_interface(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
g.Connected(mock.Mock(), usb_constants.Speed.HIGH)
self.assertTrue(g.ControlWrite(0, 9, 1, 0, 0))
self.assertIsNone(g.ControlWrite(1, 11, 0, 1, 0))
def test_send_packet(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
chip = mock.Mock()
g.Connected(chip, usb_constants.Speed.HIGH)
g.SendPacket(0x81, 'Hello world!')
chip.SendPacket.assert_called_once_with(0x81, 'Hello world!')
def test_send_packet_disconnected(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
with self.assertRaisesRegexp(RuntimeError, 'not connected'):
g.SendPacket(0x81, 'Hello world!')
g.Connected(mock.Mock(), usb_constants.Speed.HIGH)
g.SendPacket(0x81, 'Hello world!')
g.Disconnected()
with self.assertRaisesRegexp(RuntimeError, 'not connected'):
g.SendPacket(0x81, 'Hello world!')
def test_send_invalid_endpoint(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
chip = mock.Mock()
g.Connected(chip, usb_constants.Speed.HIGH)
with self.assertRaisesRegexp(ValueError, 'non-input endpoint'):
g.SendPacket(0x01, 'Hello world!')
def test_receive_packet(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
self.assertIsNone(g.ReceivePacket(0x01, 'Hello world!'))
def test_halt_endpoint(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
chip = mock.Mock()
g.Connected(chip, usb_constants.Speed.HIGH)
g.HaltEndpoint(0x01)
chip.HaltEndpoint.assert_called_once_with(0x01)
def test_get_microsoft_os_string_descriptor(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
g.EnableMicrosoftOSDescriptorsV1(vendor_code=0x42)
os_string_descriptor = g.ControlRead(0x80,
usb_constants.Request.GET_DESCRIPTOR,
0x03EE,
0x0000,
0x12)
self.assertEqual(os_string_descriptor,
"\x12\x03M\x00S\x00F\x00T\x001\x000\x000\x00\x42\x00")
def test_get_microsoft_os_compat_id_descriptor(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
g.EnableMicrosoftOSDescriptorsV1(vendor_code=0x42)
g.SetMicrosoftCompatId(0, 'WINUSB')
chip = mock.Mock()
g.Connected(chip, usb_constants.Speed.HIGH)
expected_compatid_header = \
"\x28\x00\x00\x00\x00\x01\x04\x00\x01\0\0\0\0\0\0\0"
compatid_header = g.ControlRead(0xC0, 0x42, 0x0000, 0x0004, 0x0010)
self.assertEqual(compatid_header, expected_compatid_header)
compatid_descriptor = g.ControlRead(0xC0, 0x42, 0x0000, 0x0004, 0x0028)
self.assertEqual(compatid_descriptor,
expected_compatid_header +
"\x00\x01WINUSB\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0")
def test_get_bos_descriptor(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
self.assertIsNone(g.ControlRead(0x80, 0x06, 0x0F00, 0x0000, 5))
container_id = uuid.uuid4()
g.AddDeviceCapabilityDescriptor(usb_descriptors.ContainerIdDescriptor(
ContainerID=container_id.bytes_le))
bos_descriptor_header = g.ControlRead(0x80, 0x06, 0x0F00, 0x0000, 5)
self.assertEquals('\x05\x0F\x19\x00\x01', bos_descriptor_header)
bos_descriptor = g.ControlRead(0x80, 0x06, 0x0F00, 0x0000, 25)
self.assertEquals(
'\x05\x0F\x19\x00\x01\x14\x10\x04\x00' + container_id.bytes_le,
bos_descriptor)
def test_get_microsoft_os_20_descriptor_set(self):
g = gadget.Gadget(device_desc, fs_config_desc, hs_config_desc)
g.EnableMicrosoftOSDescriptorsV2(vendor_code=0x42)
g.SetMicrosoftCompatId(0, 'WINUSB')
chip = mock.Mock()
g.Connected(chip, usb_constants.Speed.HIGH)
bos_descriptor = g.ControlRead(0x80, 0x06, 0x0F00, 0x0000, 33)
self.assertEquals(
'\x05\x0F\x21\x00\x01' +
'\x1C\x10\x05\x00' +
uuid.UUID('{D8DD60DF-4589-4CC7-9CD2-659D9E648A9F}').bytes_le +
'\x00\x00\x03\x06\x2E\x00\x42\x00',
bos_descriptor)
descriptor_set = g.ControlRead(0xC0, 0x42, 0x0000, 0x0007, 48)
self.assertEquals(
'\x0A\x00\x00\x00\x00\x00\x03\x06\x2E\x00' +
'\x08\x00\x01\x00\x00\x00\x24\x00' +
'\x08\x00\x02\x00\x00\x00\x1C\x00' +
'\x14\x00\x03\x00WINUSB\0\0\0\0\0\0\0\0\0\0',
descriptor_set)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
alpodrezov/ordering_lunch
|
xavchik/settings.py
|
1
|
2241
|
"""
Django settings for xavchik project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(#@6cjvgq1^pp0*o*^8hs20ozo!27do1&-^nqc92ol%4d8)(5l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'xavchik',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'xavchik.urls'
WSGI_APPLICATION = 'xavchik.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
('static', '/Volumes/Macintosh HD 2 2/work_projects/python/ordering_lunch/xavchik/static'),
)
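# Editor sketch (added): the absolute path above is machine-specific; a more
# portable equivalent, assuming the static files live under the project tree,
# would be:
#
#     STATICFILES_DIRS = (
#         ('static', os.path.join(BASE_DIR, 'xavchik', 'static')),
#     )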
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
|
unlicense
|
splotz90/urh
|
src/urh/signalprocessing/SimulatorItem.py
|
1
|
2278
|
class SimulatorItem(object):
protocol_manager = None
expression_parser = None
def __init__(self):
self.__parentItem = None
self.__childItems = []
self.logging_active = True
self.is_valid = True
def check(self):
return True
def get_pos(self):
if self.parent() is not None:
return self.parent().children.index(self)
return 0
def index(self):
if self.parent() is None:
return ""
item = self
result = str(item.get_pos() + 1)
while item.parent().parent() is not None:
item = item.parent()
result = str(item.get_pos() + 1) + "." + result
return result
def insert_child(self, pos, child):
child.set_parent(self)
self.children.insert(pos, child)
def delete(self):
for child in self.children[:]:
child.set_parent(None)
self.set_parent(None)
def parent(self):
return self.__parentItem
def set_parent(self, value):
if self.parent() is not None:
self.parent().children.remove(self)
self.__parentItem = value
@property
def children(self):
return self.__childItems
def child_count(self) -> int:
return len(self.children)
def next_sibling(self):
result = None
index = self.get_pos()
if self.parent() and index < self.parent().child_count() - 1:
result = self.parent().children[index + 1]
return result
def prev_sibling(self):
result = None
index = self.get_pos()
if self.parent() and index > 0:
result = self.parent().children[index - 1]
return result
def next(self):
if self.child_count():
return self.children[0]
curr = self
while curr is not None:
if curr.next_sibling() is not None:
return curr.next_sibling()
curr = curr.parent()
return None
def prev(self):
if self.prev_sibling() is not None:
curr = self.prev_sibling()
else:
return self.parent()
while curr.child_count():
curr = curr.children[-1]
return curr
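# Editor sketch (added): index() renders an item's position relative to an
# implicit root as a dotted path, e.g.:
#
#     root = SimulatorItem()
#     first = SimulatorItem()
#     nested = SimulatorItem()
#     root.insert_child(0, first)
#     first.insert_child(0, nested)
#     assert first.index() == "1" and nested.index() == "1.1"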
|
gpl-3.0
|
dgladkov/django
|
django/utils/dates.py
|
590
|
2296
|
"Commonly-used date structures"
from django.utils.translation import pgettext_lazy, ugettext_lazy as _
WEEKDAYS = {
0: _('Monday'), 1: _('Tuesday'), 2: _('Wednesday'), 3: _('Thursday'), 4: _('Friday'),
5: _('Saturday'), 6: _('Sunday')
}
WEEKDAYS_ABBR = {
0: _('Mon'), 1: _('Tue'), 2: _('Wed'), 3: _('Thu'), 4: _('Fri'),
5: _('Sat'), 6: _('Sun')
}
WEEKDAYS_REV = {
'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4,
'saturday': 5, 'sunday': 6
}
MONTHS = {
1: _('January'), 2: _('February'), 3: _('March'), 4: _('April'), 5: _('May'), 6: _('June'),
7: _('July'), 8: _('August'), 9: _('September'), 10: _('October'), 11: _('November'),
12: _('December')
}
MONTHS_3 = {
1: _('jan'), 2: _('feb'), 3: _('mar'), 4: _('apr'), 5: _('may'), 6: _('jun'),
7: _('jul'), 8: _('aug'), 9: _('sep'), 10: _('oct'), 11: _('nov'), 12: _('dec')
}
MONTHS_3_REV = {
'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'aug': 8,
'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12
}
MONTHS_AP = { # month names in Associated Press style
1: pgettext_lazy('abbrev. month', 'Jan.'),
2: pgettext_lazy('abbrev. month', 'Feb.'),
3: pgettext_lazy('abbrev. month', 'March'),
4: pgettext_lazy('abbrev. month', 'April'),
5: pgettext_lazy('abbrev. month', 'May'),
6: pgettext_lazy('abbrev. month', 'June'),
7: pgettext_lazy('abbrev. month', 'July'),
8: pgettext_lazy('abbrev. month', 'Aug.'),
9: pgettext_lazy('abbrev. month', 'Sept.'),
10: pgettext_lazy('abbrev. month', 'Oct.'),
11: pgettext_lazy('abbrev. month', 'Nov.'),
12: pgettext_lazy('abbrev. month', 'Dec.')
}
MONTHS_ALT = { # required for long date representation by some locales
1: pgettext_lazy('alt. month', 'January'),
2: pgettext_lazy('alt. month', 'February'),
3: pgettext_lazy('alt. month', 'March'),
4: pgettext_lazy('alt. month', 'April'),
5: pgettext_lazy('alt. month', 'May'),
6: pgettext_lazy('alt. month', 'June'),
7: pgettext_lazy('alt. month', 'July'),
8: pgettext_lazy('alt. month', 'August'),
9: pgettext_lazy('alt. month', 'September'),
10: pgettext_lazy('alt. month', 'October'),
11: pgettext_lazy('alt. month', 'November'),
12: pgettext_lazy('alt. month', 'December')
}
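# Editor sketch (added): every value here is a lazy translation object, so it
# resolves against the locale active at render time, e.g.:
#
#     from django.utils.dates import WEEKDAYS
#     print(WEEKDAYS[0])  # "Monday" under an English locale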
|
bsd-3-clause
|
kawamon/hue
|
desktop/core/ext-py/Django-1.11.29/django/core/serializers/pyyaml.py
|
51
|
2844
|
"""
YAML serializer.
Requires PyYAML (http://pyyaml.org/), but that's checked for in __init__.
"""
import collections
import decimal
import sys
from io import StringIO
import yaml
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import (
Deserializer as PythonDeserializer, Serializer as PythonSerializer,
)
from django.db import models
from django.utils import six
# Use the C (faster) implementation if possible
try:
from yaml import CSafeLoader as SafeLoader
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
class DjangoSafeDumper(SafeDumper):
def represent_decimal(self, data):
return self.represent_scalar('tag:yaml.org,2002:str', str(data))
def represent_ordered_dict(self, data):
return self.represent_mapping('tag:yaml.org,2002:map', data.items())
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
DjangoSafeDumper.add_representer(collections.OrderedDict, DjangoSafeDumper.represent_ordered_dict)
class Serializer(PythonSerializer):
"""
Convert a queryset to YAML.
"""
internal_use_only = False
def handle_field(self, obj, field):
# A nasty special case: base YAML doesn't support serialization of time
# types (as opposed to dates or datetimes, which it does support). Since
# we want to use the "safe" serializer for better interoperability, we
# need to do something with those pesky times. Converting 'em to strings
# isn't perfect, but it's better than a "!!python/time" type which would
# halt deserialization under any other language.
if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
self._current[field.name] = str(getattr(obj, field.name))
else:
super(Serializer, self).handle_field(obj, field)
def end_serialization(self):
yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)
def getvalue(self):
# Grand-parent super
return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of YAML data.
"""
if isinstance(stream_or_string, bytes):
stream_or_string = stream_or_string.decode('utf-8')
if isinstance(stream_or_string, six.string_types):
stream = StringIO(stream_or_string)
else:
stream = stream_or_string
try:
for obj in PythonDeserializer(yaml.load(stream, Loader=SafeLoader), **options):
yield obj
except GeneratorExit:
raise
except Exception as e:
# Map to deserializer error
six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
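# Editor sketch (added, assuming a Django model ``Event`` with a TimeField):
# thanks to handle_field above, times round-trip as plain strings:
#
#     from django.core import serializers
#     text = serializers.serialize('yaml', Event.objects.all())
#     objs = list(serializers.deserialize('yaml', text))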
|
apache-2.0
|
freedomofpress/securedrop
|
securedrop/alembic/versions/e0a525cbab83_add_column_to_track_source_deletion_of_.py
|
2
|
2076
|
"""add column to track source deletion of replies
Revision ID: e0a525cbab83
Revises: 2d0ce3ee5bdc
Create Date: 2018-08-02 00:07:59.242510
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "e0a525cbab83"
down_revision = "2d0ce3ee5bdc"
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
conn.execute("PRAGMA legacy_alter_table=ON")
# Schema migration
op.rename_table("replies", "replies_tmp")
# Add new column.
op.add_column("replies_tmp", sa.Column("deleted_by_source", sa.Boolean()))
# Populate deleted_by_source column in replies_tmp table.
replies = conn.execute(sa.text("SELECT * FROM replies_tmp")).fetchall()
for reply in replies:
conn.execute(
sa.text(
"""UPDATE replies_tmp SET deleted_by_source=0 WHERE
id=:id"""
).bindparams(id=reply.id)
)
# Now create new table with not null constraint applied to
# deleted_by_source.
op.create_table(
"replies",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("journalist_id", sa.Integer(), nullable=True),
sa.Column("source_id", sa.Integer(), nullable=True),
sa.Column("filename", sa.String(length=255), nullable=False),
sa.Column("size", sa.Integer(), nullable=False),
sa.Column("deleted_by_source", sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(["journalist_id"], ["journalists.id"]),
sa.ForeignKeyConstraint(["source_id"], ["sources.id"]),
sa.PrimaryKeyConstraint("id"),
)
# Data Migration: move all replies into the new table.
conn.execute(
"""
INSERT INTO replies
SELECT id, journalist_id, source_id, filename, size, deleted_by_source
FROM replies_tmp
"""
)
# Now delete the old table.
op.drop_table("replies_tmp")
def downgrade():
with op.batch_alter_table("replies", schema=None) as batch_op:
batch_op.drop_column("deleted_by_source")
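# Editor note (added): the rename/backfill/recreate dance in upgrade() is the
# standard SQLite workaround for adding a NOT NULL column, since SQLite cannot
# tighten constraints in place. Alembic's batch mode can often achieve the
# same effect; a sketch, under that assumption:
#
#     with op.batch_alter_table("replies") as batch_op:
#         batch_op.add_column(sa.Column("deleted_by_source", sa.Boolean(),
#                                       nullable=False,
#                                       server_default=sa.false()))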
|
agpl-3.0
|
beblount/Steer-Clear-Backend-Web
|
env/Lib/site-packages/sqlalchemy/event/base.py
|
60
|
9540
|
# event/base.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base implementation classes.
The public-facing ``Events`` serves as the base class for an event interface;
its public attributes represent different kinds of events. These attributes
are mirrored onto a ``_Dispatch`` class, which serves as a container for
collections of listener functions. These collections are represented both
at the class level of a particular ``_Dispatch`` class as well as within
instances of ``_Dispatch``.
"""
from __future__ import absolute_import
import weakref
from .. import util
from .attr import _JoinedListener, \
_EmptyListener, _ClsLevelDispatch
_registrars = util.defaultdict(list)
def _is_event_name(name):
return not name.startswith('_') and name != 'dispatch'
class _UnpickleDispatch(object):
"""Serializable callable that re-generates an instance of
:class:`_Dispatch` given a particular :class:`.Events` subclass.
"""
def __call__(self, _instance_cls):
for cls in _instance_cls.__mro__:
if 'dispatch' in cls.__dict__:
return cls.__dict__['dispatch'].\
dispatch_cls._for_class(_instance_cls)
else:
raise AttributeError("No class with a 'dispatch' member present.")
class _Dispatch(object):
"""Mirror the event listening definitions of an Events class with
listener collections.
Classes which define a "dispatch" member will return a
non-instantiated :class:`._Dispatch` subclass when the member
is accessed at the class level. When the "dispatch" member is
accessed at the instance level of its owner, an instance
of the :class:`._Dispatch` class is returned.
A :class:`._Dispatch` class is generated for each :class:`.Events`
class defined, by the :func:`._create_dispatcher_class` function.
The original :class:`.Events` classes remain untouched.
This decouples the construction of :class:`.Events` subclasses from
the implementation used by the event internals, and allows
inspecting tools like Sphinx to work in an unsurprising
way against the public API.
"""
# in one ORM edge case, an attribute is added to _Dispatch,
# so __dict__ is used in just that case and potentially others.
__slots__ = '_parent', '_instance_cls', '__dict__', '_empty_listeners'
_empty_listener_reg = weakref.WeakKeyDictionary()
def __init__(self, parent, instance_cls=None):
self._parent = parent
self._instance_cls = instance_cls
if instance_cls:
try:
self._empty_listeners = self._empty_listener_reg[instance_cls]
except KeyError:
self._empty_listeners = \
self._empty_listener_reg[instance_cls] = dict(
(ls.name, _EmptyListener(ls, instance_cls))
for ls in parent._event_descriptors
)
else:
self._empty_listeners = {}
def __getattr__(self, name):
# assign EmptyListeners as attributes on demand
# to reduce startup time for new dispatch objects
try:
ls = self._empty_listeners[name]
except KeyError:
raise AttributeError(name)
else:
setattr(self, ls.name, ls)
return ls
@property
def _event_descriptors(self):
for k in self._event_names:
yield getattr(self, k)
def _for_class(self, instance_cls):
return self.__class__(self, instance_cls)
def _for_instance(self, instance):
instance_cls = instance.__class__
return self._for_class(instance_cls)
@property
def _listen(self):
return self._events._listen
def _join(self, other):
"""Create a 'join' of this :class:`._Dispatch` and another.
This new dispatcher will dispatch events to both
:class:`._Dispatch` objects.
"""
if '_joined_dispatch_cls' not in self.__class__.__dict__:
cls = type(
"Joined%s" % self.__class__.__name__,
(_JoinedDispatcher, ), {'__slots__': self._event_names}
)
self.__class__._joined_dispatch_cls = cls
return self._joined_dispatch_cls(self, other)
def __reduce__(self):
return _UnpickleDispatch(), (self._instance_cls, )
def _update(self, other, only_propagate=True):
"""Populate from the listeners in another :class:`_Dispatch`
object."""
for ls in other._event_descriptors:
if isinstance(ls, _EmptyListener):
continue
getattr(self, ls.name).\
for_modify(self)._update(ls, only_propagate=only_propagate)
def _clear(self):
for ls in self._event_descriptors:
ls.for_modify(self).clear()
class _EventMeta(type):
"""Intercept new Event subclasses and create
associated _Dispatch classes."""
def __init__(cls, classname, bases, dict_):
_create_dispatcher_class(cls, classname, bases, dict_)
return type.__init__(cls, classname, bases, dict_)
def _create_dispatcher_class(cls, classname, bases, dict_):
"""Create a :class:`._Dispatch` class corresponding to an
:class:`.Events` class."""
# there's all kinds of ways to do this,
# i.e. make a Dispatch class that shares the '_listen' method
# of the Event class, this is the straight monkeypatch.
if hasattr(cls, 'dispatch'):
dispatch_base = cls.dispatch.__class__
else:
dispatch_base = _Dispatch
event_names = [k for k in dict_ if _is_event_name(k)]
dispatch_cls = type("%sDispatch" % classname,
(dispatch_base, ), {'__slots__': event_names})
dispatch_cls._event_names = event_names
dispatch_inst = cls._set_dispatch(cls, dispatch_cls)
for k in dispatch_cls._event_names:
setattr(dispatch_inst, k, _ClsLevelDispatch(cls, dict_[k]))
_registrars[k].append(cls)
for super_ in dispatch_cls.__bases__:
if issubclass(super_, _Dispatch) and super_ is not _Dispatch:
for ls in super_._events.dispatch._event_descriptors:
setattr(dispatch_inst, ls.name, ls)
dispatch_cls._event_names.append(ls.name)
if getattr(cls, '_dispatch_target', None):
cls._dispatch_target.dispatch = dispatcher(cls)
def _remove_dispatcher(cls):
for k in cls.dispatch._event_names:
_registrars[k].remove(cls)
if not _registrars[k]:
del _registrars[k]
class Events(util.with_metaclass(_EventMeta, object)):
"""Define event listening functions for a particular target type."""
@staticmethod
def _set_dispatch(cls, dispatch_cls):
# this allows an Events subclass to define additional utility
# methods made available to the target via
# "self.dispatch._events.<utilitymethod>"
# @staticmethod to allow easy "super" calls while in a metaclass
# constructor.
cls.dispatch = dispatch_cls(None)
dispatch_cls._events = cls
return cls.dispatch
@classmethod
def _accept_with(cls, target):
# Mapper, ClassManager, Session override this to
# also accept classes, scoped_sessions, sessionmakers, etc.
if hasattr(target, 'dispatch') and (
isinstance(target.dispatch, cls.dispatch.__class__) or
(
isinstance(target.dispatch, type) and
isinstance(target.dispatch, cls.dispatch.__class__)
) or
(
isinstance(target.dispatch, _JoinedDispatcher) and
isinstance(target.dispatch.parent, cls.dispatch.__class__)
)
):
return target
else:
return None
@classmethod
def _listen(cls, event_key, propagate=False, insert=False, named=False):
event_key.base_listen(propagate=propagate, insert=insert, named=named)
@classmethod
def _remove(cls, event_key):
event_key.remove()
@classmethod
def _clear(cls):
cls.dispatch._clear()
class _JoinedDispatcher(object):
"""Represent a connection between two _Dispatch objects."""
__slots__ = 'local', 'parent', '_instance_cls'
def __init__(self, local, parent):
self.local = local
self.parent = parent
self._instance_cls = self.local._instance_cls
def __getattr__(self, name):
# assign _JoinedListeners as attributes on demand
# to reduce startup time for new dispatch objects
ls = getattr(self.local, name)
jl = _JoinedListener(self.parent, ls.name, ls)
setattr(self, ls.name, jl)
return jl
@property
def _listen(self):
return self.parent._listen
class dispatcher(object):
"""Descriptor used by target classes to
deliver the _Dispatch class at the class level
and produce new _Dispatch instances for target
instances.
"""
def __init__(self, events):
self.dispatch_cls = events.dispatch
self.events = events
def __get__(self, obj, cls):
if obj is None:
return self.dispatch_cls
obj.__dict__['dispatch'] = disp = self.dispatch_cls._for_instance(obj)
return disp
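# Editor sketch (added): none of these classes are used directly; listeners
# arrive through the public ``sqlalchemy.event`` API and land in the
# _Dispatch machinery above, e.g.:
#
#     from sqlalchemy import create_engine, event
#
#     engine = create_engine('sqlite://')
#
#     @event.listens_for(engine, 'connect')
#     def on_connect(dbapi_conn, connection_record):
#         pass  # dispatched via the generated _Dispatch class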
|
mit
|
shsingh/ansible
|
lib/ansible/modules/network/sros/sros_config.py
|
6
|
11718
|
#!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = """
---
module: sros_config
version_added: "2.2"
author: "Peter Sprygada (@privateip)"
short_description: Manage Nokia SR OS device configuration
description:
- Nokia SR OS configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with SR OS configuration sections in
a deterministic way.
extends_documentation_fragment: sros
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser. The I(lines) argument only supports current
context lines. See EXAMPLES
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is mutually
exclusive with I(lines), I(parents).
version_added: "2.2"
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line.
If match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
default: line
choices: ['line', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
default: line
choices: ['line', 'block']
force:
description:
- The force argument instructs the module to not consider the
current devices running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve the
equivalent, set C(match=none), which is idempotent. This argument
will be removed in a future release.
type: bool
version_added: "2.2"
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. If the C(backup_options) value is not given,
the backup file is written to the C(backup) folder in the playbook
root directory. If the directory does not exist, it is created.
type: bool
default: 'no'
version_added: "2.2"
config:
description:
- The C(config) argument allows the playbook designer to supply
the base configuration to be used to validate configuration
changes necessary. If this argument is provided, the module
will not download the running-config from the remote node.
version_added: "2.2"
defaults:
description:
- This argument specifies whether or not to collect all defaults
when getting the remote device running config. When enabled,
the module will get the current config by issuing the command
C(admin display-config detail).
type: bool
default: 'no'
aliases: ['detail']
version_added: "2.2"
save:
description:
- The C(save) argument instructs the module to save the
running-config to the startup-config at the conclusion of the module
running. If check mode is specified, this argument is ignored.
type: bool
default: 'no'
version_added: "2.2"
backup_options:
description:
- This is a dict object containing configurable options related to backup file path.
The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
to I(no) this option will be silently ignored.
suboptions:
filename:
description:
- The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and date
in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
configuration file will be stored. If the directory does not exist it will be first
created and the filename is either the value of C(filename) or default filename
as described in C(filename) options description. If the path value is not given
in that case a I(backup) directory will be created in the current working directory
and backup configuration will be copied in C(filename) within I(backup) directory.
type: path
type: dict
version_added: "2.8"
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
---
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
---
- name: enable rollback location
sros_config:
lines: configure system rollback rollback-location "cf3:/ansible"
provider: "{{ cli }}"
- name: set system name to {{ inventory_hostname }} using one line
sros_config:
lines:
- configure system name "{{ inventory_hostname }}"
provider: "{{ cli }}"
- name: set system name to {{ inventory_hostname }} using parents
sros_config:
lines:
- 'name "{{ inventory_hostname }}"'
parents:
- configure
- system
provider: "{{ cli }}"
backup: yes
- name: load config from file
sros_config:
src: "{{ inventory_hostname }}.cfg"
provider: "{{ cli }}"
save: yes
- name: invalid use of lines
sros_config:
lines:
- service
- vpls 1000 customer foo 1 create
- description "invalid lines example"
provider: "{{ cli }}"
- name: valid use of lines
sros_config:
lines:
- description "invalid lines example"
parents:
- service
- vpls 1000 customer foo 1 create
provider: "{{ cli }}"
- name: configurable backup path
sros_config:
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['config system name "sros01"']
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['config system name "sros01"']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/sros_config.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.module_utils.network.sros.sros import sros_argument_spec, check_args
from ansible.module_utils.network.sros.sros import load_config, run_commands, get_config
def get_active_config(module):
contents = module.params['config']
if not contents:
flags = []
if module.params['defaults']:
flags = ['detail']
return get_config(module, flags)
return contents
def get_candidate(module):
candidate = NetworkConfig(indent=4)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def run(module, result):
match = module.params['match']
candidate = get_candidate(module)
if match != 'none':
config_text = get_active_config(module)
config = NetworkConfig(indent=4, contents=config_text)
configobjs = candidate.difference(config)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands')
commands = commands.split('\n')
result['commands'] = commands
result['updates'] = commands
# send the configuration commands to the device and merge
# them with the current running config
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
def main():
""" main entry point for module execution
"""
backup_spec = dict(
filename=dict(),
dir_path=dict(type='path')
)
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
match=dict(default='line', choices=['line', 'none']),
config=dict(),
defaults=dict(type='bool', default=False, aliases=['detail']),
backup=dict(type='bool', default=False),
backup_options=dict(type='dict', options=backup_spec),
save=dict(type='bool', default=False),
)
argument_spec.update(sros_argument_spec)
mutually_exclusive = [('lines', 'src'),
('parents', 'src')]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
result = dict(changed=False, warnings=list())
warnings = list()
check_args(module, warnings)
if warnings:
result['warnings'] = warnings
if module.params['backup']:
result['__backup__'] = get_config(module)
run(module, result)
if module.params['save']:
if not module.check_mode:
run_commands(module, ['admin save'])
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
dhruvsrivastava/OJ
|
flask/lib/python2.7/site-packages/flask/testsuite/signals.py
|
554
|
4807
|
# -*- coding: utf-8 -*-
"""
flask.testsuite.signals
~~~~~~~~~~~~~~~~~~~~~~~
Signalling.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from flask.testsuite import FlaskTestCase
class SignalsTestCase(FlaskTestCase):
def test_template_rendered(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('simple_template.html', whiskey=42)
recorded = []
def record(sender, template, context):
recorded.append((template, context))
flask.template_rendered.connect(record, app)
try:
app.test_client().get('/')
self.assert_equal(len(recorded), 1)
template, context = recorded[0]
self.assert_equal(template.name, 'simple_template.html')
self.assert_equal(context['whiskey'], 42)
finally:
flask.template_rendered.disconnect(record, app)
def test_request_signals(self):
app = flask.Flask(__name__)
calls = []
def before_request_signal(sender):
calls.append('before-signal')
def after_request_signal(sender, response):
self.assert_equal(response.data, b'stuff')
calls.append('after-signal')
@app.before_request
def before_request_handler():
calls.append('before-handler')
@app.after_request
def after_request_handler(response):
calls.append('after-handler')
response.data = 'stuff'
return response
@app.route('/')
def index():
calls.append('handler')
return 'ignored anyway'
flask.request_started.connect(before_request_signal, app)
flask.request_finished.connect(after_request_signal, app)
try:
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'stuff')
self.assert_equal(calls, ['before-signal', 'before-handler',
'handler', 'after-handler',
'after-signal'])
finally:
flask.request_started.disconnect(before_request_signal, app)
flask.request_finished.disconnect(after_request_signal, app)
def test_request_exception_signal(self):
app = flask.Flask(__name__)
recorded = []
@app.route('/')
def index():
1 // 0
def record(sender, exception):
recorded.append(exception)
flask.got_request_exception.connect(record, app)
try:
self.assert_equal(app.test_client().get('/').status_code, 500)
self.assert_equal(len(recorded), 1)
self.assert_true(isinstance(recorded[0], ZeroDivisionError))
finally:
flask.got_request_exception.disconnect(record, app)
def test_appcontext_signals(self):
app = flask.Flask(__name__)
recorded = []
def record_push(sender, **kwargs):
recorded.append('push')
def record_pop(sender, **kwargs):
recorded.append('pop')
@app.route('/')
def index():
return 'Hello'
flask.appcontext_pushed.connect(record_push, app)
flask.appcontext_popped.connect(record_pop, app)
try:
with app.test_client() as c:
rv = c.get('/')
self.assert_equal(rv.data, b'Hello')
self.assert_equal(recorded, ['push'])
self.assert_equal(recorded, ['push', 'pop'])
finally:
flask.appcontext_pushed.disconnect(record_push, app)
flask.appcontext_popped.disconnect(record_pop, app)
def test_flash_signal(self):
app = flask.Flask(__name__)
app.config['SECRET_KEY'] = 'secret'
@app.route('/')
def index():
flask.flash('This is a flash message', category='notice')
return flask.redirect('/other')
recorded = []
def record(sender, message, category):
recorded.append((message, category))
flask.message_flashed.connect(record, app)
try:
client = app.test_client()
with client.session_transaction():
client.get('/')
self.assert_equal(len(recorded), 1)
message, category = recorded[0]
self.assert_equal(message, 'This is a flash message')
self.assert_equal(category, 'notice')
finally:
flask.message_flashed.disconnect(record, app)
def suite():
suite = unittest.TestSuite()
if flask.signals_available:
suite.addTest(unittest.makeSuite(SignalsTestCase))
return suite
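# Editor note (added): signal support requires the optional ``blinker``
# package; without it ``flask.signals_available`` is False (checked in
# suite() above) and connecting to a signal raises a RuntimeError, e.g.:
#
#     import flask
#     if flask.signals_available:
#         flask.request_started.connect(my_handler, app)  # my_handler: yours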
|
bsd-3-clause
|
bks7/mzbench
|
lib/mzbench_api_client.py
|
2
|
6734
|
from urllib import urlencode
import json
import os
import sys
import requests
import multipart
class MZBenchAPIException(Exception):
pass
def start(host, script_file, script_content,
node_commit = None, nodes = None, deallocate_after_bench = None,
provision_nodes = None, exclusive_node_usage = None, benchmark_name = None,
emails=[], includes=[], env={}
):
"""Starts a bench
:param host: MZBench API server host with port
:type host: str
:param script_file: Scenario filename for dashboard
:type script_file: str or unicode
:param script_content: Scenario content to execute
:type script_content: str or unicode
:param node_commit: Commit or branch name for MZBench node, default is "master"
:type node_commit: str
:param nodes: Number of nodes to allocate or node list, 1 by default
:type nodes: int or list of strings
:param deallocate_after_bench: Deallocate nodes after bench is over
:type deallocate_after_bench: "true" or "false"
:param provision_nodes: Install required software
:type provision_nodes: "true" or "false"
:param exclusive_node_usage: Allocate exclusive nodes if allocator supports this mode
:type exclusive_node_usage: "true" or "false"
:param benchmark_name: Set benchmark name
:type benchmark_name: str or unicode
:param emails: Emails to notify on bench results
:type emails: List of strings
:param includes: List of files to include
:type includes: List of strings
:param env: Dictionary of environment variables to substitute
:type env: Dictionary
:returns: Operation status
:rtype: Dictionary
"""
if nodes is not None:
if isinstance(nodes, int):
params = [('nodes', nodes)]
else:
params = [('nodes', ','.join(nodes))]
else:
params = []
if deallocate_after_bench is not None:
params += [('deallocate_after_bench', deallocate_after_bench)]
if provision_nodes is not None:
params += [('provision_nodes', provision_nodes)]
if exclusive_node_usage is not None:
params += [('exclusive_node_usage', exclusive_node_usage)]
if benchmark_name is not None:
params += [('benchmark_name', benchmark_name)]
if node_commit is not None:
params += [('node_commit', node_commit)]
params += [('email', email) for email in emails]
params += [(k, v) for k, v in env.iteritems()]
files = [('bench',
{'filename': os.path.basename(script_file),
'content': script_content})]
for inc in includes:
script_dir = os.path.dirname(script_file)
filename = os.path.join(script_dir, inc)
try:
with open(filename) as fi:
files.append(
('include', {'filename': inc, 'content': fi.read()}))
except IOError:
print "Warning: resource file '%s' is not found on the local machine" % filename
body, headers = multipart.encode_multipart({}, files)
return assert_successful_post(
host,
'/start',
params,
data=body, headers=headers)
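# Editor sketch (added; the host and file names are hypothetical):
#
#     with open('loop.bdl') as f:
#         resp = start('mzbench.example.com:4800', 'loop.bdl', f.read(),
#                      nodes=2, benchmark_name='smoke test')
#     print resp  # Python 2 module, hence the statement form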
def restart(host, bench_id):
"""Creates a copy of a bench
:param host: MZBench API server host with port
:type host: str
:param bench_id: benchmark run id to copy
:type bench_id: int
:returns: operation status
:rtype: dict
"""
return assert_successful_get(host, '/restart', {'id': bench_id})
def logs(host, bench_id):
"""Outputs logs for a bench
:param host: MZBench API server host with port
:type host: str
:param bench_id: benchmark run id
:type bench_id: int
:returns: logs
:rtype: generator of str
"""
for x in stream_lines(host, '/logs', {'id': bench_id}):
yield x
def data(host, bench_id):
"""Outputs CSV data for a bench
:param host: MZBench API server host with port
:type host: str
:param bench_id: benchmark run id
:type bench_id: int
:returns: CSV data
:rtype: generator of str
"""
for x in stream_lines(host, '/data', {'id': bench_id}):
yield x
def status(host, bench_id, wait=False):
"""Get bench status
:param host: MZBench API server host with port
:type host: str
:param bench_id: benchmark run id
:type bench_id: int
:returns: benchmark status
:rtype: dict
"""
return assert_successful_get(
host,
'/status',
{'id': bench_id,
'wait': 'true' if wait else 'false'})
def stop(host, bench_id):
"""Stop a bench
:param host: MZBench API server host with port
:type host: str
:param bench_id: benchmark run id
:type bench_id: int
:returns: operation status
:rtype: dict
"""
return assert_successful_get(
host,
'/stop',
{'id': bench_id})
def stream_lines(host, endpoint, args):
try:
response = requests.get(
'http://' + host + endpoint + '?' + urlencode(args),
stream=True)
for line in response.iter_lines(chunk_size=1):
try:
yield line
except ValueError:
print line
if response.status_code == 200:
pass
else:
raise MZBenchAPIException('Server call to {0} failed with code {1}'.format(endpoint, response.status_code))
except requests.exceptions.ConnectionError as e:
raise MZBenchAPIException('Connect to "{0}" failed with message: {1}'.format(host, e))
def assert_successful_request(perform_request):
def wrapped(*args, **kwargs):
try:
response = perform_request(*args, **kwargs)
if response.status_code == 200:
return response.json()
else:
print 'Server call with arguments {0} failed with code {1}'.format(args, response.status_code)
print 'Response body:'
try:
data = json.loads(response.text)
json.dump(data, sys.stdout, indent=4)
except:
raise MZBenchAPIException(response.text)
except requests.exceptions.ConnectionError as e:
raise MZBenchAPIException('Connect to "{0}" failed with message: {1}'.format(args[0], e))
return wrapped
@assert_successful_request
def assert_successful_get(host, endpoint, args):
return requests.get(
'http://' + host + endpoint + '?' + urlencode(args))
@assert_successful_request
def assert_successful_post(host, endpoint, args, data=None, headers=None):
return requests.post(
'http://' + host + endpoint + '?' + urlencode(args),
data=data,
headers=headers)
|
bsd-3-clause
|
zalf-lsa/monica
|
installer/Hohenfinow2/python/run-producer.py
|
1
|
2337
|
#!/usr/bin/python
# -*- coding: UTF-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/. */
# Authors:
# Michael Berg-Mohnicke <michael.berg@zalf.de>
#
# Maintainers:
# Currently maintained by the authors.
#
# This file has been created at the Institute of
# Landscape Systems Analysis at the ZALF.
# Copyright (C) Leibniz Centre for Agricultural Landscape Research (ZALF)
import json
import sys
import zmq
import os
import monica_io
#print sys.path
#print "pyzmq version: ", zmq.pyzmq_version(), " zmq version: ", zmq.zmq_version()
def run_producer(server = {"server": None, "port": None}, shared_id = None):
context = zmq.Context()
socket = context.socket(zmq.PUSH)
config = {
"port": server["port"] if server["port"] else "6666",
"server": server["server"] if server["server"] else "localhost",
"sim.json": os.path.join(os.path.dirname(__file__), '../sim-min.json'),
"crop.json": os.path.join(os.path.dirname(__file__), '../crop-min.json'),
"site.json": os.path.join(os.path.dirname(__file__), '../site-min.json'),
"climate.csv": os.path.join(os.path.dirname(__file__), '../climate-min.csv'),
"shared_id": shared_id
}
# read commandline args only if script is invoked directly from commandline
if len(sys.argv) > 1 and __name__ == "__main__":
for arg in sys.argv[1:]:
k, v = arg.split("=")
if k in config:
config[k] = v
print "config:", config
socket.connect("tcp://" + config["server"] + ":" + config["port"])
with open(config["sim.json"]) as _:
sim_json = json.load(_)
with open(config["site.json"]) as _:
site_json = json.load(_)
with open(config["crop.json"]) as _:
crop_json = json.load(_)
with open(config["climate.csv"]) as _:
climate_csv = _.read()
env = monica_io.create_env_json_from_json_config({
"crop": crop_json,
"site": site_json,
"sim": sim_json,
"climate": climate_csv
})
#print env
# add a shared ID if the env is to be sent to routable monicas
if config["shared_id"]:
env["sharedId"] = config["shared_id"]
socket.send_json(env)
print "done"
if __name__ == "__main__":
run_producer()
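# Example invocations (hypothetical paths and ports), matching the
# key=value command-line parsing above:
#
#   python run-producer.py server=localhost port=6666 sim.json=../sim-min.json
#
# or from another script that supplies the connection info directly:
#
#   import run_producer
#   run_producer.run_producer(server={"server": "monica-host", "port": "6666"})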
|
mpl-2.0
|
veger/ansible
|
test/units/modules/network/slxos/test_slxos_l3_interface.py
|
30
|
3426
|
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from units.compat.mock import patch
from ansible.modules.network.slxos import slxos_l3_interface
from units.modules.utils import set_module_args
from .slxos_module import TestSlxosModule, load_fixture
class TestSlxosL3InterfaceModule(TestSlxosModule):
module = slxos_l3_interface
def setUp(self):
super(TestSlxosL3InterfaceModule, self).setUp()
self._patch_get_config = patch(
'ansible.modules.network.slxos.slxos_l3_interface.get_config'
)
self._patch_load_config = patch(
'ansible.modules.network.slxos.slxos_l3_interface.load_config'
)
self._get_config = self._patch_get_config.start()
self._load_config = self._patch_load_config.start()
def tearDown(self):
super(TestSlxosL3InterfaceModule, self).tearDown()
self._patch_get_config.stop()
self._patch_load_config.stop()
def load_fixtures(self, commands=None):
config_file = 'slxos_config_config.cfg'
self._get_config.return_value = load_fixture(config_file)
self._load_config.return_value = None
def test_slxos_l3_interface_ipv4_address(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 0/2',
ipv4='192.168.4.1/24'
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface Ethernet 0/2',
'ip address 192.168.4.1/24'
],
'changed': True
}
)
def test_slxos_l3_interface_absent(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 0/9',
state='absent'
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface Ethernet 0/9',
'no ip address',
'no ipv6 address'
],
'changed': True
}
)
def test_slxos_l3_interface_invalid_argument(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 0/1',
shawshank='Redemption'
))
result = self.execute_module(failed=True)
self.assertEqual(result['failed'], True)
self.assertTrue(re.match(
r'Unsupported parameters for \((basic.py|basic.pyc)\) module: '
'shawshank Supported parameters include: aggregate, ipv4, ipv6, '
'name, state',
result['msg']
))
|
gpl-3.0
|
valtech-mooc/edx-platform
|
cms/djangoapps/contentstore/tests/test_course_settings.py
|
17
|
49152
|
"""
Tests for Studio Course Settings.
"""
import datetime
import json
import copy
import mock
from mock import patch
import unittest
from django.utils.timezone import UTC
from django.test.utils import override_settings
from django.conf import settings
from models.settings.course_details import (CourseDetails, CourseSettingsEncoder)
from models.settings.course_grading import CourseGradingModel
from contentstore.utils import EXTRA_TAB_PANELS, reverse_course_url, reverse_usage_url
from xmodule.modulestore.tests.factories import CourseFactory
from models.settings.course_metadata import CourseMetadata
from xmodule.fields import Date
from .utils import CourseTestCase
from xmodule.modulestore.django import modulestore
from contentstore.views.component import ADVANCED_COMPONENT_POLICY_KEY
import ddt
from xmodule.modulestore import ModuleStoreEnum
from util.milestones_helpers import seed_milestone_relationship_types
def get_url(course_id, handler_name='settings_handler'):
return reverse_course_url(handler_name, course_id)
class CourseDetailsTestCase(CourseTestCase):
"""
Tests the first course settings page (course dates, overview, etc.).
"""
def test_virgin_fetch(self):
details = CourseDetails.fetch(self.course.id)
self.assertEqual(details.org, self.course.location.org, "Org not copied into")
self.assertEqual(details.course_id, self.course.location.course, "Course_id not copied into")
self.assertEqual(details.run, self.course.location.name, "Course name not copied into")
self.assertEqual(details.course_image_name, self.course.course_image)
self.assertIsNotNone(details.start_date.tzinfo)
self.assertIsNone(details.end_date, "end date somehow initialized " + str(details.end_date))
self.assertIsNone(details.enrollment_start, "enrollment_start date somehow initialized " + str(details.enrollment_start))
self.assertIsNone(details.enrollment_end, "enrollment_end date somehow initialized " + str(details.enrollment_end))
self.assertIsNone(details.syllabus, "syllabus somehow initialized" + str(details.syllabus))
self.assertIsNone(details.intro_video, "intro_video somehow initialized" + str(details.intro_video))
self.assertIsNone(details.effort, "effort somehow initialized" + str(details.effort))
def test_encoder(self):
details = CourseDetails.fetch(self.course.id)
jsondetails = json.dumps(details, cls=CourseSettingsEncoder)
jsondetails = json.loads(jsondetails)
self.assertEqual(jsondetails['course_image_name'], self.course.course_image)
self.assertIsNone(jsondetails['end_date'], "end date somehow initialized ")
self.assertIsNone(jsondetails['enrollment_start'], "enrollment_start date somehow initialized ")
self.assertIsNone(jsondetails['enrollment_end'], "enrollment_end date somehow initialized ")
self.assertIsNone(jsondetails['syllabus'], "syllabus somehow initialized")
self.assertIsNone(jsondetails['intro_video'], "intro_video somehow initialized")
self.assertIsNone(jsondetails['effort'], "effort somehow initialized")
def test_ooc_encoder(self):
"""
Test the encoder out of its original constrained purpose to see if it functions for general use
"""
details = {
'number': 1,
'string': 'string',
'datetime': datetime.datetime.now(UTC())
}
jsondetails = json.dumps(details, cls=CourseSettingsEncoder)
jsondetails = json.loads(jsondetails)
self.assertEquals(1, jsondetails['number'])
self.assertEqual(jsondetails['string'], 'string')
def test_update_and_fetch(self):
jsondetails = CourseDetails.fetch(self.course.id)
jsondetails.syllabus = "<a href='foo'>bar</a>"
# encode - decode to convert date fields and other data which changes form
self.assertEqual(
CourseDetails.update_from_json(self.course.id, jsondetails.__dict__, self.user).syllabus,
jsondetails.syllabus, "After set syllabus"
)
jsondetails.short_description = "Short Description"
self.assertEqual(
CourseDetails.update_from_json(self.course.id, jsondetails.__dict__, self.user).short_description,
jsondetails.short_description, "After set short_description"
)
jsondetails.overview = "Overview"
self.assertEqual(
CourseDetails.update_from_json(self.course.id, jsondetails.__dict__, self.user).overview,
jsondetails.overview, "After set overview"
)
jsondetails.intro_video = "intro_video"
self.assertEqual(
CourseDetails.update_from_json(self.course.id, jsondetails.__dict__, self.user).intro_video,
jsondetails.intro_video, "After set intro_video"
)
jsondetails.effort = "effort"
self.assertEqual(
CourseDetails.update_from_json(self.course.id, jsondetails.__dict__, self.user).effort,
jsondetails.effort, "After set effort"
)
jsondetails.start_date = datetime.datetime(2010, 10, 1, 0, tzinfo=UTC())
self.assertEqual(
CourseDetails.update_from_json(self.course.id, jsondetails.__dict__, self.user).start_date,
jsondetails.start_date
)
jsondetails.course_image_name = "an_image.jpg"
self.assertEqual(
CourseDetails.update_from_json(self.course.id, jsondetails.__dict__, self.user).course_image_name,
jsondetails.course_image_name
)
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
def test_marketing_site_fetch(self):
settings_details_url = get_url(self.course.id)
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
response = self.client.get_html(settings_details_url)
self.assertNotContains(response, "Course Summary Page")
self.assertNotContains(response, "Send a note to students via email")
self.assertContains(response, "course summary page will not be viewable")
self.assertContains(response, "Course Start Date")
self.assertContains(response, "Course End Date")
self.assertContains(response, "Enrollment Start Date")
self.assertContains(response, "Enrollment End Date")
self.assertContains(response, "not the dates shown on your course summary page")
self.assertContains(response, "Introducing Your Course")
self.assertContains(response, "Course Image")
self.assertContains(response, "Course Short Description")
self.assertNotContains(response, "Course Overview")
self.assertNotContains(response, "Course Introduction Video")
self.assertNotContains(response, "Requirements")
    @unittest.skipUnless(settings.FEATURES.get('ENTRANCE_EXAMS', False), 'ENTRANCE_EXAMS feature flag disabled')
def test_entrance_exam_created_updated_and_deleted_successfully(self):
seed_milestone_relationship_types()
settings_details_url = get_url(self.course.id)
data = {
'entrance_exam_enabled': 'true',
'entrance_exam_minimum_score_pct': '60',
'syllabus': 'none',
'short_description': 'empty',
'overview': '',
'effort': '',
'intro_video': ''
}
response = self.client.post(settings_details_url, data=json.dumps(data), content_type='application/json',
HTTP_ACCEPT='application/json')
self.assertEquals(response.status_code, 200)
course = modulestore().get_course(self.course.id)
self.assertTrue(course.entrance_exam_enabled)
self.assertEquals(course.entrance_exam_minimum_score_pct, .60)
# Update the entrance exam
data['entrance_exam_enabled'] = "true"
data['entrance_exam_minimum_score_pct'] = "80"
response = self.client.post(
settings_details_url,
data=json.dumps(data),
content_type='application/json',
HTTP_ACCEPT='application/json'
)
self.assertEquals(response.status_code, 200)
course = modulestore().get_course(self.course.id)
self.assertTrue(course.entrance_exam_enabled)
self.assertEquals(course.entrance_exam_minimum_score_pct, .80)
# Delete the entrance exam
data['entrance_exam_enabled'] = "false"
response = self.client.post(
settings_details_url,
data=json.dumps(data),
content_type='application/json',
HTTP_ACCEPT='application/json'
)
course = modulestore().get_course(self.course.id)
self.assertEquals(response.status_code, 200)
self.assertFalse(course.entrance_exam_enabled)
self.assertEquals(course.entrance_exam_minimum_score_pct, None)
    @unittest.skipUnless(settings.FEATURES.get('ENTRANCE_EXAMS', False), 'ENTRANCE_EXAMS feature flag disabled')
def test_entrance_exam_store_default_min_score(self):
"""
test that creating an entrance exam should store the default value, if key missing in json request
or entrance_exam_minimum_score_pct is an empty string
"""
seed_milestone_relationship_types()
settings_details_url = get_url(self.course.id)
test_data_1 = {
'entrance_exam_enabled': 'true',
'syllabus': 'none',
'short_description': 'empty',
'overview': '',
'effort': '',
'intro_video': ''
}
response = self.client.post(
settings_details_url,
data=json.dumps(test_data_1),
content_type='application/json',
HTTP_ACCEPT='application/json'
)
self.assertEquals(response.status_code, 200)
course = modulestore().get_course(self.course.id)
self.assertTrue(course.entrance_exam_enabled)
# entrance_exam_minimum_score_pct is not present in the request so default value should be saved.
self.assertEquals(course.entrance_exam_minimum_score_pct, .5)
        # add entrance_exam_minimum_score_pct with an empty value in the json request.
test_data_2 = {
'entrance_exam_enabled': 'true',
'entrance_exam_minimum_score_pct': '',
'syllabus': 'none',
'short_description': 'empty',
'overview': '',
'effort': '',
'intro_video': ''
}
response = self.client.post(
settings_details_url,
data=json.dumps(test_data_2),
content_type='application/json',
HTTP_ACCEPT='application/json'
)
self.assertEquals(response.status_code, 200)
course = modulestore().get_course(self.course.id)
self.assertTrue(course.entrance_exam_enabled)
self.assertEquals(course.entrance_exam_minimum_score_pct, .5)
def test_editable_short_description_fetch(self):
settings_details_url = get_url(self.course.id)
with mock.patch.dict('django.conf.settings.FEATURES', {'EDITABLE_SHORT_DESCRIPTION': False}):
response = self.client.get_html(settings_details_url)
self.assertNotContains(response, "Course Short Description")
def test_regular_site_fetch(self):
settings_details_url = get_url(self.course.id)
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': False}):
response = self.client.get_html(settings_details_url)
self.assertContains(response, "Course Summary Page")
self.assertContains(response, "Send a note to students via email")
self.assertNotContains(response, "course summary page will not be viewable")
self.assertContains(response, "Course Start Date")
self.assertContains(response, "Course End Date")
self.assertContains(response, "Enrollment Start Date")
self.assertContains(response, "Enrollment End Date")
self.assertNotContains(response, "not the dates shown on your course summary page")
self.assertContains(response, "Introducing Your Course")
self.assertContains(response, "Course Image")
self.assertContains(response, "Course Short Description")
self.assertContains(response, "Course Overview")
self.assertContains(response, "Course Introduction Video")
self.assertContains(response, "Requirements")
class CourseDetailsViewTest(CourseTestCase):
"""
Tests for modifying content on the first course settings page (course dates, overview, etc.).
"""
def setUp(self):
super(CourseDetailsViewTest, self).setUp()
def alter_field(self, url, details, field, val):
"""
Change the one field to the given value and then invoke the update post to see if it worked.
"""
setattr(details, field, val)
        # Need to partially serialize the payload because the mock doesn't handle it correctly
payload = copy.copy(details.__dict__)
payload['start_date'] = CourseDetailsViewTest.convert_datetime_to_iso(details.start_date)
payload['end_date'] = CourseDetailsViewTest.convert_datetime_to_iso(details.end_date)
payload['enrollment_start'] = CourseDetailsViewTest.convert_datetime_to_iso(details.enrollment_start)
payload['enrollment_end'] = CourseDetailsViewTest.convert_datetime_to_iso(details.enrollment_end)
resp = self.client.ajax_post(url, payload)
self.compare_details_with_encoding(json.loads(resp.content), details.__dict__, field + str(val))
@staticmethod
def convert_datetime_to_iso(datetime_obj):
"""
Use the xblock serializer to convert the datetime
"""
return Date().to_json(datetime_obj)
def test_update_and_fetch(self):
details = CourseDetails.fetch(self.course.id)
        # resp should be json from here on
url = get_url(self.course.id)
resp = self.client.get_json(url)
self.compare_details_with_encoding(json.loads(resp.content), details.__dict__, "virgin get")
utc = UTC()
self.alter_field(url, details, 'start_date', datetime.datetime(2012, 11, 12, 1, 30, tzinfo=utc))
self.alter_field(url, details, 'start_date', datetime.datetime(2012, 11, 1, 13, 30, tzinfo=utc))
self.alter_field(url, details, 'end_date', datetime.datetime(2013, 2, 12, 1, 30, tzinfo=utc))
self.alter_field(url, details, 'enrollment_start', datetime.datetime(2012, 10, 12, 1, 30, tzinfo=utc))
self.alter_field(url, details, 'enrollment_end', datetime.datetime(2012, 11, 15, 1, 30, tzinfo=utc))
self.alter_field(url, details, 'short_description', "Short Description")
self.alter_field(url, details, 'overview', "Overview")
self.alter_field(url, details, 'intro_video', "intro_video")
self.alter_field(url, details, 'effort', "effort")
self.alter_field(url, details, 'course_image_name', "course_image_name")
def compare_details_with_encoding(self, encoded, details, context):
"""
compare all of the fields of the before and after dicts
"""
self.compare_date_fields(details, encoded, context, 'start_date')
self.compare_date_fields(details, encoded, context, 'end_date')
self.compare_date_fields(details, encoded, context, 'enrollment_start')
self.compare_date_fields(details, encoded, context, 'enrollment_end')
self.assertEqual(details['short_description'], encoded['short_description'], context + " short_description not ==")
self.assertEqual(details['overview'], encoded['overview'], context + " overviews not ==")
self.assertEqual(details['intro_video'], encoded.get('intro_video', None), context + " intro_video not ==")
self.assertEqual(details['effort'], encoded['effort'], context + " efforts not ==")
self.assertEqual(details['course_image_name'], encoded['course_image_name'], context + " images not ==")
def compare_date_fields(self, details, encoded, context, field):
"""
Compare the given date fields between the before and after doing json deserialization
"""
if details[field] is not None:
date = Date()
if field in encoded and encoded[field] is not None:
dt1 = date.from_json(encoded[field])
dt2 = details[field]
self.assertEqual(dt1, dt2, msg="{} != {} at {}".format(dt1, dt2, context))
else:
self.fail(field + " missing from encoded but in details at " + context)
elif field in encoded and encoded[field] is not None:
self.fail(field + " included in encoding but missing from details at " + context)
@mock.patch.dict("django.conf.settings.FEATURES", {'ENABLE_PREREQUISITE_COURSES': True, 'MILESTONES_APP': True})
def test_pre_requisite_course_list_present(self):
seed_milestone_relationship_types()
settings_details_url = get_url(self.course.id)
response = self.client.get_html(settings_details_url)
self.assertContains(response, "Prerequisite Course")
@mock.patch.dict("django.conf.settings.FEATURES", {'ENABLE_PREREQUISITE_COURSES': True, 'MILESTONES_APP': True})
def test_pre_requisite_course_update_and_fetch(self):
seed_milestone_relationship_types()
url = get_url(self.course.id)
resp = self.client.get_json(url)
course_detail_json = json.loads(resp.content)
# assert pre_requisite_courses is initialized
self.assertEqual([], course_detail_json['pre_requisite_courses'])
        # update prerequisite courses with new course keys
pre_requisite_course = CourseFactory.create(org='edX', course='900', run='test_run')
pre_requisite_course2 = CourseFactory.create(org='edX', course='902', run='test_run')
pre_requisite_course_keys = [unicode(pre_requisite_course.id), unicode(pre_requisite_course2.id)]
course_detail_json['pre_requisite_courses'] = pre_requisite_course_keys
self.client.ajax_post(url, course_detail_json)
# fetch updated course to assert pre_requisite_courses has new values
resp = self.client.get_json(url)
course_detail_json = json.loads(resp.content)
self.assertEqual(pre_requisite_course_keys, course_detail_json['pre_requisite_courses'])
# remove pre requisite course
course_detail_json['pre_requisite_courses'] = []
self.client.ajax_post(url, course_detail_json)
resp = self.client.get_json(url)
course_detail_json = json.loads(resp.content)
self.assertEqual([], course_detail_json['pre_requisite_courses'])
@mock.patch.dict("django.conf.settings.FEATURES", {'ENABLE_PREREQUISITE_COURSES': True, 'MILESTONES_APP': True})
def test_invalid_pre_requisite_course(self):
seed_milestone_relationship_types()
url = get_url(self.course.id)
resp = self.client.get_json(url)
course_detail_json = json.loads(resp.content)
        # update prerequisite courses with one valid and one invalid key
pre_requisite_course = CourseFactory.create(org='edX', course='900', run='test_run')
pre_requisite_course_keys = [unicode(pre_requisite_course.id), 'invalid_key']
course_detail_json['pre_requisite_courses'] = pre_requisite_course_keys
response = self.client.ajax_post(url, course_detail_json)
self.assertEqual(400, response.status_code)
@ddt.ddt
class CourseGradingTest(CourseTestCase):
"""
Tests for the course settings grading page.
"""
def test_initial_grader(self):
test_grader = CourseGradingModel(self.course)
self.assertIsNotNone(test_grader.graders)
self.assertIsNotNone(test_grader.grade_cutoffs)
def test_fetch_grader(self):
test_grader = CourseGradingModel.fetch(self.course.id)
self.assertIsNotNone(test_grader.graders, "No graders")
self.assertIsNotNone(test_grader.grade_cutoffs, "No cutoffs")
for i, grader in enumerate(test_grader.graders):
subgrader = CourseGradingModel.fetch_grader(self.course.id, i)
self.assertDictEqual(grader, subgrader, str(i) + "th graders not equal")
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_update_from_json(self, store):
self.course = CourseFactory.create(default_store=store)
test_grader = CourseGradingModel.fetch(self.course.id)
altered_grader = CourseGradingModel.update_from_json(self.course.id, test_grader.__dict__, self.user)
self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__, "Noop update")
test_grader.graders[0]['weight'] = test_grader.graders[0].get('weight') * 2
altered_grader = CourseGradingModel.update_from_json(self.course.id, test_grader.__dict__, self.user)
self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__, "Weight[0] * 2")
# test for bug LMS-11485
with modulestore().bulk_operations(self.course.id):
new_grader = test_grader.graders[0].copy()
new_grader['type'] += '_foo'
new_grader['short_label'] += '_foo'
new_grader['id'] = len(test_grader.graders)
test_grader.graders.append(new_grader)
            # don't use the altered cached definition; fetch a fresh one
CourseGradingModel.update_from_json(self.course.id, test_grader.__dict__, self.user)
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__)
test_grader.grade_cutoffs['D'] = 0.3
altered_grader = CourseGradingModel.update_from_json(self.course.id, test_grader.__dict__, self.user)
self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__, "cutoff add D")
test_grader.grace_period = {'hours': 4, 'minutes': 5, 'seconds': 0}
altered_grader = CourseGradingModel.update_from_json(self.course.id, test_grader.__dict__, self.user)
self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__, "4 hour grace period")
def test_update_grader_from_json(self):
test_grader = CourseGradingModel.fetch(self.course.id)
altered_grader = CourseGradingModel.update_grader_from_json(
self.course.id, test_grader.graders[1], self.user
)
self.assertDictEqual(test_grader.graders[1], altered_grader, "Noop update")
test_grader.graders[1]['min_count'] = test_grader.graders[1].get('min_count') + 2
altered_grader = CourseGradingModel.update_grader_from_json(
self.course.id, test_grader.graders[1], self.user)
self.assertDictEqual(test_grader.graders[1], altered_grader, "min_count[1] + 2")
test_grader.graders[1]['drop_count'] = test_grader.graders[1].get('drop_count') + 1
altered_grader = CourseGradingModel.update_grader_from_json(
self.course.id, test_grader.graders[1], self.user)
        self.assertDictEqual(test_grader.graders[1], altered_grader, "drop_count[1] + 1")
def test_update_cutoffs_from_json(self):
test_grader = CourseGradingModel.fetch(self.course.id)
CourseGradingModel.update_cutoffs_from_json(self.course.id, test_grader.grade_cutoffs, self.user)
# Unlike other tests, need to actually perform a db fetch for this test since update_cutoffs_from_json
# simply returns the cutoffs you send into it, rather than returning the db contents.
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertDictEqual(test_grader.grade_cutoffs, altered_grader.grade_cutoffs, "Noop update")
test_grader.grade_cutoffs['D'] = 0.3
CourseGradingModel.update_cutoffs_from_json(self.course.id, test_grader.grade_cutoffs, self.user)
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertDictEqual(test_grader.grade_cutoffs, altered_grader.grade_cutoffs, "cutoff add D")
test_grader.grade_cutoffs['Pass'] = 0.75
CourseGradingModel.update_cutoffs_from_json(self.course.id, test_grader.grade_cutoffs, self.user)
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertDictEqual(test_grader.grade_cutoffs, altered_grader.grade_cutoffs, "cutoff change 'Pass'")
def test_delete_grace_period(self):
test_grader = CourseGradingModel.fetch(self.course.id)
CourseGradingModel.update_grace_period_from_json(
self.course.id, test_grader.grace_period, self.user
)
# update_grace_period_from_json doesn't return anything, so query the db for its contents.
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertEqual(test_grader.grace_period, altered_grader.grace_period, "Noop update")
test_grader.grace_period = {'hours': 15, 'minutes': 5, 'seconds': 30}
CourseGradingModel.update_grace_period_from_json(
self.course.id, test_grader.grace_period, self.user)
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertDictEqual(test_grader.grace_period, altered_grader.grace_period, "Adding in a grace period")
test_grader.grace_period = {'hours': 1, 'minutes': 10, 'seconds': 0}
# Now delete the grace period
CourseGradingModel.delete_grace_period(self.course.id, self.user)
# update_grace_period_from_json doesn't return anything, so query the db for its contents.
altered_grader = CourseGradingModel.fetch(self.course.id)
# Once deleted, the grace period should simply be None
self.assertEqual(None, altered_grader.grace_period, "Delete grace period")
def test_update_section_grader_type(self):
# Get the descriptor and the section_grader_type and assert they are the default values
descriptor = modulestore().get_item(self.course.location)
section_grader_type = CourseGradingModel.get_section_grader_type(self.course.location)
self.assertEqual('notgraded', section_grader_type['graderType'])
self.assertEqual(None, descriptor.format)
self.assertEqual(False, descriptor.graded)
# Change the default grader type to Homework, which should also mark the section as graded
CourseGradingModel.update_section_grader_type(self.course, 'Homework', self.user)
descriptor = modulestore().get_item(self.course.location)
section_grader_type = CourseGradingModel.get_section_grader_type(self.course.location)
self.assertEqual('Homework', section_grader_type['graderType'])
self.assertEqual('Homework', descriptor.format)
self.assertEqual(True, descriptor.graded)
# Change the grader type back to notgraded, which should also unmark the section as graded
CourseGradingModel.update_section_grader_type(self.course, 'notgraded', self.user)
descriptor = modulestore().get_item(self.course.location)
section_grader_type = CourseGradingModel.get_section_grader_type(self.course.location)
self.assertEqual('notgraded', section_grader_type['graderType'])
self.assertEqual(None, descriptor.format)
self.assertEqual(False, descriptor.graded)
def test_get_set_grader_types_ajax(self):
"""
Test configuring the graders via ajax calls
"""
grader_type_url_base = get_url(self.course.id, 'grading_handler')
# test get whole
response = self.client.get_json(grader_type_url_base)
whole_model = json.loads(response.content)
self.assertIn('graders', whole_model)
self.assertIn('grade_cutoffs', whole_model)
self.assertIn('grace_period', whole_model)
# test post/update whole
whole_model['grace_period'] = {'hours': 1, 'minutes': 30, 'seconds': 0}
response = self.client.ajax_post(grader_type_url_base, whole_model)
self.assertEqual(200, response.status_code)
response = self.client.get_json(grader_type_url_base)
whole_model = json.loads(response.content)
self.assertEqual(whole_model['grace_period'], {'hours': 1, 'minutes': 30, 'seconds': 0})
# test get one grader
self.assertGreater(len(whole_model['graders']), 1) # ensure test will make sense
response = self.client.get_json(grader_type_url_base + '/1')
grader_sample = json.loads(response.content)
self.assertEqual(grader_sample, whole_model['graders'][1])
# test add grader
new_grader = {
"type": "Extra Credit",
"min_count": 1,
"drop_count": 2,
"short_label": None,
"weight": 15,
}
response = self.client.ajax_post(
'{}/{}'.format(grader_type_url_base, len(whole_model['graders'])),
new_grader
)
self.assertEqual(200, response.status_code)
grader_sample = json.loads(response.content)
new_grader['id'] = len(whole_model['graders'])
self.assertEqual(new_grader, grader_sample)
# test delete grader
response = self.client.delete(grader_type_url_base + '/1', HTTP_ACCEPT="application/json")
self.assertEqual(204, response.status_code)
response = self.client.get_json(grader_type_url_base)
updated_model = json.loads(response.content)
new_grader['id'] -= 1 # one fewer and the id mutates
self.assertIn(new_grader, updated_model['graders'])
self.assertNotIn(whole_model['graders'][1], updated_model['graders'])
def setup_test_set_get_section_grader_ajax(self):
"""
Populate the course, grab a section, get the url for the assignment type access
"""
self.populate_course()
sections = modulestore().get_items(self.course.id, qualifiers={'category': "sequential"})
# see if test makes sense
self.assertGreater(len(sections), 0, "No sections found")
section = sections[0] # just take the first one
return reverse_usage_url('xblock_handler', section.location)
def test_set_get_section_grader_ajax(self):
"""
Test setting and getting section grades via the grade as url
"""
grade_type_url = self.setup_test_set_get_section_grader_ajax()
response = self.client.ajax_post(grade_type_url, {'graderType': u'Homework'})
self.assertEqual(200, response.status_code)
response = self.client.get_json(grade_type_url + '?fields=graderType')
self.assertEqual(json.loads(response.content).get('graderType'), u'Homework')
# and unset
response = self.client.ajax_post(grade_type_url, {'graderType': u'notgraded'})
self.assertEqual(200, response.status_code)
response = self.client.get_json(grade_type_url + '?fields=graderType')
self.assertEqual(json.loads(response.content).get('graderType'), u'notgraded')
class CourseMetadataEditingTest(CourseTestCase):
"""
Tests for CourseMetadata.
"""
def setUp(self):
CourseTestCase.setUp(self)
self.fullcourse = CourseFactory.create()
self.course_setting_url = get_url(self.course.id, 'advanced_settings_handler')
self.fullcourse_setting_url = get_url(self.fullcourse.id, 'advanced_settings_handler')
def test_fetch_initial_fields(self):
test_model = CourseMetadata.fetch(self.course)
self.assertIn('display_name', test_model, 'Missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], self.course.display_name)
test_model = CourseMetadata.fetch(self.fullcourse)
self.assertNotIn('graceperiod', test_model, 'blacklisted field leaked in')
self.assertIn('display_name', test_model, 'full missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], self.fullcourse.display_name)
self.assertIn('rerandomize', test_model, 'Missing rerandomize metadata field')
        self.assertIn('showanswer', test_model, 'Missing showanswer metadata field')
        self.assertIn('xqa_key', test_model, 'Missing xqa_key metadata field')
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': True})
def test_fetch_giturl_present(self):
"""
If feature flag ENABLE_EXPORT_GIT is on, show the setting as a non-deprecated Advanced Setting.
"""
test_model = CourseMetadata.fetch(self.fullcourse)
self.assertIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': False})
def test_fetch_giturl_not_present(self):
"""
If feature flag ENABLE_EXPORT_GIT is off, don't show the setting at all on the Advanced Settings page.
"""
test_model = CourseMetadata.fetch(self.fullcourse)
self.assertNotIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': False})
def test_validate_update_filtered_off(self):
"""
If feature flag is off, then giturl must be filtered.
"""
# pylint: disable=unused-variable
is_valid, errors, test_model = CourseMetadata.validate_and_update_from_json(
self.course,
{
"giturl": {"value": "http://example.com"},
},
user=self.user
)
self.assertNotIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': True})
def test_validate_update_filtered_on(self):
"""
If feature flag is on, then giturl must not be filtered.
"""
# pylint: disable=unused-variable
is_valid, errors, test_model = CourseMetadata.validate_and_update_from_json(
self.course,
{
"giturl": {"value": "http://example.com"},
},
user=self.user
)
self.assertIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': True})
def test_update_from_json_filtered_on(self):
"""
If feature flag is on, then giturl must be updated.
"""
test_model = CourseMetadata.update_from_json(
self.course,
{
"giturl": {"value": "http://example.com"},
},
user=self.user
)
self.assertIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': False})
def test_update_from_json_filtered_off(self):
"""
If feature flag is on, then giturl must not be updated.
"""
test_model = CourseMetadata.update_from_json(
self.course,
{
"giturl": {"value": "http://example.com"},
},
user=self.user
)
self.assertNotIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EDXNOTES': True})
def test_edxnotes_present(self):
"""
If feature flag ENABLE_EDXNOTES is on, show the setting as a non-deprecated Advanced Setting.
"""
test_model = CourseMetadata.fetch(self.fullcourse)
self.assertIn('edxnotes', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EDXNOTES': False})
def test_edxnotes_not_present(self):
"""
If feature flag ENABLE_EDXNOTES is off, don't show the setting at all on the Advanced Settings page.
"""
test_model = CourseMetadata.fetch(self.fullcourse)
self.assertNotIn('edxnotes', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EDXNOTES': False})
def test_validate_update_filtered_edxnotes_off(self):
"""
If feature flag is off, then edxnotes must be filtered.
"""
# pylint: disable=unused-variable
is_valid, errors, test_model = CourseMetadata.validate_and_update_from_json(
self.course,
{
"edxnotes": {"value": "true"},
},
user=self.user
)
self.assertNotIn('edxnotes', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EDXNOTES': True})
def test_validate_update_filtered_edxnotes_on(self):
"""
If feature flag is on, then edxnotes must not be filtered.
"""
# pylint: disable=unused-variable
is_valid, errors, test_model = CourseMetadata.validate_and_update_from_json(
self.course,
{
"edxnotes": {"value": "true"},
},
user=self.user
)
self.assertIn('edxnotes', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EDXNOTES': True})
def test_update_from_json_filtered_edxnotes_on(self):
"""
If feature flag is on, then edxnotes must be updated.
"""
test_model = CourseMetadata.update_from_json(
self.course,
{
"edxnotes": {"value": "true"},
},
user=self.user
)
self.assertIn('edxnotes', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EDXNOTES': False})
def test_update_from_json_filtered_edxnotes_off(self):
"""
If feature flag is off, then edxnotes must not be updated.
"""
test_model = CourseMetadata.update_from_json(
self.course,
{
"edxnotes": {"value": "true"},
},
user=self.user
)
self.assertNotIn('edxnotes', test_model)
def test_validate_and_update_from_json_correct_inputs(self):
is_valid, errors, test_model = CourseMetadata.validate_and_update_from_json(
self.course,
{
"advertised_start": {"value": "start A"},
"days_early_for_beta": {"value": 2},
"advanced_modules": {"value": ['combinedopenended']},
},
user=self.user
)
self.assertTrue(is_valid)
self.assertTrue(len(errors) == 0)
self.update_check(test_model)
# fresh fetch to ensure persistence
fresh = modulestore().get_course(self.course.id)
test_model = CourseMetadata.fetch(fresh)
self.update_check(test_model)
# Tab gets tested in test_advanced_settings_munge_tabs
self.assertIn('advanced_modules', test_model, 'Missing advanced_modules')
self.assertEqual(test_model['advanced_modules']['value'], ['combinedopenended'], 'advanced_module is not updated')
def test_validate_and_update_from_json_wrong_inputs(self):
# input incorrectly formatted data
is_valid, errors, test_model = CourseMetadata.validate_and_update_from_json(
self.course,
{
"advertised_start": {"value": 1, "display_name": "Course Advertised Start Date", },
"days_early_for_beta": {"value": "supposed to be an integer",
"display_name": "Days Early for Beta Users", },
"advanced_modules": {"value": 1, "display_name": "Advanced Module List", },
},
user=self.user
)
# Check valid results from validate_and_update_from_json
self.assertFalse(is_valid)
self.assertEqual(len(errors), 3)
self.assertFalse(test_model)
error_keys = set([error_obj['model']['display_name'] for error_obj in errors])
test_keys = set(['Advanced Module List', 'Course Advertised Start Date', 'Days Early for Beta Users'])
self.assertEqual(error_keys, test_keys)
# try fresh fetch to ensure no update happened
fresh = modulestore().get_course(self.course.id)
test_model = CourseMetadata.fetch(fresh)
self.assertNotEqual(test_model['advertised_start']['value'], 1, 'advertised_start should not be updated to a wrong value')
self.assertNotEqual(test_model['days_early_for_beta']['value'], "supposed to be an integer",
                            'days_early_for_beta should not be updated to a wrong value')
def test_correct_http_status(self):
json_data = json.dumps({
"advertised_start": {"value": 1, "display_name": "Course Advertised Start Date", },
"days_early_for_beta": {
"value": "supposed to be an integer",
"display_name": "Days Early for Beta Users",
},
"advanced_modules": {"value": 1, "display_name": "Advanced Module List", },
})
response = self.client.ajax_post(self.course_setting_url, json_data)
self.assertEqual(400, response.status_code)
def test_update_from_json(self):
test_model = CourseMetadata.update_from_json(
self.course,
{
"advertised_start": {"value": "start A"},
"days_early_for_beta": {"value": 2},
},
user=self.user
)
self.update_check(test_model)
# try fresh fetch to ensure persistence
fresh = modulestore().get_course(self.course.id)
test_model = CourseMetadata.fetch(fresh)
self.update_check(test_model)
# now change some of the existing metadata
test_model = CourseMetadata.update_from_json(
fresh,
{
"advertised_start": {"value": "start B"},
"display_name": {"value": "jolly roger"},
},
user=self.user
)
self.assertIn('display_name', test_model, 'Missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], 'jolly roger', "not expected value")
self.assertIn('advertised_start', test_model, 'Missing revised advertised_start metadata field')
self.assertEqual(test_model['advertised_start']['value'], 'start B', "advertised_start not expected value")
def update_check(self, test_model):
"""
checks that updates were made
"""
self.assertIn('display_name', test_model, 'Missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], self.course.display_name)
self.assertIn('advertised_start', test_model, 'Missing new advertised_start metadata field')
self.assertEqual(test_model['advertised_start']['value'], 'start A', "advertised_start not expected value")
self.assertIn('days_early_for_beta', test_model, 'Missing days_early_for_beta metadata field')
self.assertEqual(test_model['days_early_for_beta']['value'], 2, "days_early_for_beta not expected value")
def test_http_fetch_initial_fields(self):
response = self.client.get_json(self.course_setting_url)
test_model = json.loads(response.content)
self.assertIn('display_name', test_model, 'Missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], self.course.display_name)
response = self.client.get_json(self.fullcourse_setting_url)
test_model = json.loads(response.content)
self.assertNotIn('graceperiod', test_model, 'blacklisted field leaked in')
self.assertIn('display_name', test_model, 'full missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], self.fullcourse.display_name)
self.assertIn('rerandomize', test_model, 'Missing rerandomize metadata field')
        self.assertIn('showanswer', test_model, 'Missing showanswer metadata field')
        self.assertIn('xqa_key', test_model, 'Missing xqa_key metadata field')
def test_http_update_from_json(self):
response = self.client.ajax_post(self.course_setting_url, {
"advertised_start": {"value": "start A"},
"days_early_for_beta": {"value": 2},
})
test_model = json.loads(response.content)
self.update_check(test_model)
response = self.client.get_json(self.course_setting_url)
test_model = json.loads(response.content)
self.update_check(test_model)
# now change some of the existing metadata
response = self.client.ajax_post(self.course_setting_url, {
"advertised_start": {"value": "start B"},
"display_name": {"value": "jolly roger"}
})
test_model = json.loads(response.content)
self.assertIn('display_name', test_model, 'Missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], 'jolly roger', "not expected value")
self.assertIn('advertised_start', test_model, 'Missing revised advertised_start metadata field')
self.assertEqual(test_model['advertised_start']['value'], 'start B', "advertised_start not expected value")
def test_advanced_components_munge_tabs(self):
"""
Test that adding and removing specific advanced components adds and removes tabs.
"""
self.assertNotIn(EXTRA_TAB_PANELS.get("open_ended"), self.course.tabs)
self.assertNotIn(EXTRA_TAB_PANELS.get("notes"), self.course.tabs)
self.client.ajax_post(self.course_setting_url, {
ADVANCED_COMPONENT_POLICY_KEY: {"value": ["combinedopenended"]}
})
course = modulestore().get_course(self.course.id)
self.assertIn(EXTRA_TAB_PANELS.get("open_ended"), course.tabs)
self.assertNotIn(EXTRA_TAB_PANELS.get("notes"), course.tabs)
self.client.ajax_post(self.course_setting_url, {
ADVANCED_COMPONENT_POLICY_KEY: {"value": []}
})
course = modulestore().get_course(self.course.id)
self.assertNotIn(EXTRA_TAB_PANELS.get("open_ended"), course.tabs)
@patch.dict(settings.FEATURES, {'ENABLE_EDXNOTES': True})
def test_course_settings_munge_tabs(self):
"""
Test that adding and removing specific course settings adds and removes tabs.
"""
self.assertNotIn(EXTRA_TAB_PANELS.get("edxnotes"), self.course.tabs)
self.client.ajax_post(self.course_setting_url, {
"edxnotes": {"value": True}
})
course = modulestore().get_course(self.course.id)
self.assertIn(EXTRA_TAB_PANELS.get("edxnotes"), course.tabs)
self.client.ajax_post(self.course_setting_url, {
"edxnotes": {"value": False}
})
course = modulestore().get_course(self.course.id)
self.assertNotIn(EXTRA_TAB_PANELS.get("edxnotes"), course.tabs)
class CourseGraderUpdatesTest(CourseTestCase):
"""
Test getting, deleting, adding, & updating graders
"""
def setUp(self):
"""Compute the url to use in tests"""
super(CourseGraderUpdatesTest, self).setUp()
self.url = get_url(self.course.id, 'grading_handler')
self.starting_graders = CourseGradingModel(self.course).graders
def test_get(self):
"""Test getting a specific grading type record."""
resp = self.client.get_json(self.url + '/0')
self.assertEqual(resp.status_code, 200)
obj = json.loads(resp.content)
self.assertEqual(self.starting_graders[0], obj)
def test_delete(self):
"""Test deleting a specific grading type record."""
resp = self.client.delete(self.url + '/0', HTTP_ACCEPT="application/json")
self.assertEqual(resp.status_code, 204)
current_graders = CourseGradingModel.fetch(self.course.id).graders
self.assertNotIn(self.starting_graders[0], current_graders)
self.assertEqual(len(self.starting_graders) - 1, len(current_graders))
def test_update(self):
"""Test updating a specific grading type record."""
grader = {
"id": 0,
"type": "manual",
"min_count": 5,
"drop_count": 10,
"short_label": "yo momma",
"weight": 17.3,
}
resp = self.client.ajax_post(self.url + '/0', grader)
self.assertEqual(resp.status_code, 200)
obj = json.loads(resp.content)
self.assertEqual(obj, grader)
current_graders = CourseGradingModel.fetch(self.course.id).graders
self.assertEqual(len(self.starting_graders), len(current_graders))
def test_add(self):
"""Test adding a grading type record."""
        # The same url also changes the whole grading model (graceperiod, cutoffs, and grading types) when
        # the grading_index is None, so None cannot be used to imply "add a grading type"; instead, an
        # out-of-bounds index implies "create item".
grader = {
"type": "manual",
"min_count": 5,
"drop_count": 10,
"short_label": "yo momma",
"weight": 17.3,
}
resp = self.client.ajax_post('{}/{}'.format(self.url, len(self.starting_graders) + 1), grader)
self.assertEqual(resp.status_code, 200)
obj = json.loads(resp.content)
self.assertEqual(obj['id'], len(self.starting_graders))
del obj['id']
self.assertEqual(obj, grader)
current_graders = CourseGradingModel.fetch(self.course.id).graders
self.assertEqual(len(self.starting_graders) + 1, len(current_graders))
|
agpl-3.0
|
mavit/ansible
|
test/units/modules/network/nxos/test_nxos_nxapi.py
|
12
|
3065
|
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_nxapi
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosNxapiModule(TestNxosModule):
module = nxos_nxapi
def setUp(self):
super(TestNxosNxapiModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_nxapi.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_nxapi.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_capabilities = patch('ansible.modules.network.nxos.nxos_nxapi.get_capabilities')
self.get_capabilities = self.mock_get_capabilities.start()
self.get_capabilities.return_value = {'device_info': {'network_os_platform': 'N7K-C7018', 'network_os_version': '8.3(1)'}, 'network_api': 'cliconf'}
def tearDown(self):
super(TestNxosNxapiModule, self).tearDown()
self.mock_run_commands.stop()
self.mock_load_config.stop()
self.mock_get_capabilities.stop()
def load_fixtures(self, commands=None, device=''):
def load_from_file(*args, **kwargs):
module, commands = args
module_name = self.module.__name__.rsplit('.', 1)[1]
output = list()
for command in commands:
filename = str(command).split(' | ')[0].replace(' ', '_')
output.append(load_fixture(module_name, filename, device))
return output
self.run_commands.side_effect = load_from_file
self.load_config.return_value = None
def test_nxos_nxapi_no_change(self):
set_module_args(dict(http=True, https=False, http_port=80, https_port=443, sandbox=False))
self.execute_module_devices(changed=False, commands=[])
def test_nxos_nxapi_disable(self):
set_module_args(dict(state='absent'))
self.execute_module_devices(changed=True, commands=['no feature nxapi'])
def test_nxos_nxapi_no_http(self):
set_module_args(dict(https=True, http=False, https_port=8443))
self.execute_module_devices(changed=True, commands=['no nxapi http', 'nxapi https port 8443'])
|
gpl-3.0
|
sudosurootdev/external_chromium_org
|
tools/telemetry/telemetry/unittest/decorators_unittest.py
|
69
|
1511
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry import decorators
_counter = 0
class Foo(object):
@decorators.Cache
def GetCountCached(self, _):
global _counter
_counter = _counter + 1
return _counter
def CreateFooUncached(_):
return Foo()
@decorators.Cache
def CreateFooCached(_):
return Foo()
class DecoratorsUnitTest(unittest.TestCase):
# pylint: disable=C0102
def testCacheDecorator(self):
self.assertNotEquals(CreateFooUncached(1), CreateFooUncached(2))
self.assertNotEquals(CreateFooCached(1), CreateFooCached(2))
self.assertNotEquals(CreateFooUncached(1), CreateFooUncached(1))
self.assertEquals(CreateFooCached(1), CreateFooCached(1))
def testCacheableMemberCachesOnlyForSameArgs(self):
foo = Foo()
value_of_one = foo.GetCountCached(1)
self.assertEquals(value_of_one, foo.GetCountCached(1))
self.assertNotEquals(value_of_one, foo.GetCountCached(2))
def testCacheableMemberHasSeparateCachesForSiblingInstances(self):
foo = Foo()
sibling_foo = Foo()
self.assertNotEquals(foo.GetCountCached(1), sibling_foo.GetCountCached(1))
def testCacheableMemberHasSeparateCachesForNextGenerationInstances(self):
foo = Foo()
last_generation_count = foo.GetCountCached(1)
foo = None
foo = Foo()
self.assertNotEquals(last_generation_count, foo.GetCountCached(1))
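# For reference, a minimal sketch of a memoizing decorator consistent with
# the behaviour exercised above: results are cached per argument tuple,
# methods get a separate cache per instance, and a brand-new instance starts
# with an empty cache. This is a hypothetical illustration, not the actual
# telemetry.decorators.Cache implementation.
import functools

def _SketchCache(obj):
  @functools.wraps(obj)
  def Cacher(*args):
    # For methods, hang the memo dict off the bound instance so sibling and
    # next-generation instances each get a fresh cache; for plain functions,
    # keep the memo dict on the wrapper itself.
    holder = args[0] if args and hasattr(args[0], '__dict__') else Cacher
    cache = holder.__dict__.setdefault('_sketch_cache', {})
    key = (obj.__name__,) + args
    if key not in cache:
      cache[key] = obj(*args)
    return cache[key]
  return Cacher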
|
bsd-3-clause
|
cmelange/ansible
|
lib/ansible/modules/cloud/ovirt/ovirt_nics.py
|
5
|
7994
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_nics
short_description: Module to manage network interfaces of Virtual Machines in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage network interfaces of Virtual Machines in oVirt."
options:
name:
description:
- "Name of the network interface to manage."
required: true
vm:
description:
- "Name of the Virtual Machine to manage."
required: true
state:
description:
- "Should the Virtual Machine NIC be present/absent/plugged/unplugged."
choices: ['present', 'absent', 'plugged', 'unplugged']
default: present
network:
description:
            - "Logical network that the VM network interface should use;
               by default, the Empty network is used if no network is specified."
profile:
description:
- "Virtual network interface profile to be attached to VM network interface."
interface:
description:
- "Type of the network interface."
choices: ['virtio', 'e1000', 'rtl8139', 'pci_passthrough', 'rtl8139_virtio', 'spapr_vlan']
default: 'virtio'
mac_address:
description:
- "Custom MAC address of the network interface, by default it's obtained from MAC pool."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add NIC to VM
- ovirt_nics:
state: present
vm: myvm
name: mynic
interface: e1000
mac_address: 00:1a:4a:16:01:56
profile: ovirtmgmt
network: ovirtmgmt
# Plug NIC to VM
- ovirt_nics:
state: plugged
vm: myvm
name: mynic
# Unplug NIC from VM
- ovirt_nics:
state: unplugged
vm: myvm
name: mynic
# Remove NIC from VM
- ovirt_nics:
state: absent
vm: myvm
name: mynic
'''
RETURN = '''
id:
description: ID of the network interface which is managed
returned: On success if network interface is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
nic:
description: "Dictionary of all the network interface attributes. Network interface attributes can be found on your oVirt instance
                  at the following url: https://ovirt.example.com/ovirt-engine/api/model#types/nic."
returned: On success if network interface is found.
'''
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_link_name,
ovirt_full_argument_spec,
search_by_name,
)
class VmNicsModule(BaseModule):
def __init__(self, *args, **kwargs):
super(VmNicsModule, self).__init__(*args, **kwargs)
self.vnic_id = None
@property
def vnic_id(self):
return self._vnic_id
@vnic_id.setter
def vnic_id(self, vnic_id):
self._vnic_id = vnic_id
def build_entity(self):
return otypes.Nic(
name=self._module.params.get('name'),
interface=otypes.NicInterface(
self._module.params.get('interface')
) if self._module.params.get('interface') else None,
vnic_profile=otypes.VnicProfile(
id=self.vnic_id,
) if self.vnic_id else None,
mac=otypes.Mac(
address=self._module.params.get('mac_address')
) if self._module.params.get('mac_address') else None,
)
def update_check(self, entity):
return (
equal(self._module.params.get('interface'), str(entity.interface)) and
equal(self._module.params.get('profile'), get_link_name(self._connection, entity.vnic_profile)) and
equal(self._module.params.get('mac_address'), entity.mac.address)
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent', 'plugged', 'unplugged'],
default='present'
),
vm=dict(required=True),
name=dict(required=True),
interface=dict(default=None),
profile=dict(default=None),
network=dict(default=None),
mac_address=dict(default=None),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
try:
# Locate the service that manages the virtual machines and use it to
# search for the NIC:
auth = module.params.pop('auth')
connection = create_connection(auth)
vms_service = connection.system_service().vms_service()
# Locate the VM, where we will manage NICs:
vm_name = module.params.get('vm')
vm = search_by_name(vms_service, vm_name)
if vm is None:
raise Exception("VM '%s' was not found." % vm_name)
# Locate the service that manages the virtual machines NICs:
vm_service = vms_service.vm_service(vm.id)
nics_service = vm_service.nics_service()
vmnics_module = VmNicsModule(
connection=connection,
module=module,
service=nics_service,
)
# Find vNIC id of the network interface (if any):
profile = module.params.get('profile')
if profile and module.params['network']:
cluster_name = get_link_name(connection, vm.cluster)
dcs_service = connection.system_service().data_centers_service()
dc = dcs_service.list(search='Clusters.name=%s' % cluster_name)[0]
networks_service = dcs_service.service(dc.id).networks_service()
network = search_by_name(networks_service, module.params['network'])
for vnic in connection.system_service().vnic_profiles_service().list():
if vnic.name == profile and vnic.network.id == network.id:
vmnics_module.vnic_id = vnic.id
# Handle appropriate action:
state = module.params['state']
if state == 'present':
ret = vmnics_module.create()
elif state == 'absent':
ret = vmnics_module.remove()
elif state == 'plugged':
vmnics_module.create()
ret = vmnics_module.action(
action='activate',
action_condition=lambda nic: not nic.plugged,
wait_condition=lambda nic: nic.plugged,
)
elif state == 'unplugged':
vmnics_module.create()
ret = vmnics_module.action(
action='deactivate',
action_condition=lambda nic: nic.plugged,
wait_condition=lambda nic: not nic.plugged,
)
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
|
gpl-3.0
|
3nids/QGIS
|
tests/src/python/test_qgsmaprenderercache.py
|
18
|
13383
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsMapRendererCache.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '1/02/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsMapRendererCache,
QgsRectangle,
QgsVectorLayer,
QgsProject,
QgsMapToPixel)
from qgis.testing import start_app, unittest
from qgis.PyQt.QtCore import QCoreApplication
from qgis.PyQt.QtGui import QImage
from time import sleep
start_app()
class TestQgsMapRendererCache(unittest.TestCase):
def testSetCacheImages(self):
cache = QgsMapRendererCache()
        # image not yet set
im = cache.cacheImage('littlehands')
self.assertTrue(im.isNull())
self.assertFalse(cache.hasCacheImage('littlehands'))
# set image
im = QImage(200, 200, QImage.Format_RGB32)
cache.setCacheImage('littlehands', im)
self.assertFalse(im.isNull())
self.assertEqual(cache.cacheImage('littlehands'), im)
self.assertTrue(cache.hasCacheImage('littlehands'))
        # test another unset image when the cache has images
self.assertTrue(cache.cacheImage('bad').isNull())
self.assertFalse(cache.hasCacheImage('bad'))
# clear cache image
cache.clearCacheImage('not in cache') # no crash!
cache.clearCacheImage('littlehands')
im = cache.cacheImage('littlehands')
self.assertTrue(im.isNull())
self.assertFalse(cache.hasCacheImage('littlehands'))
# clear whole cache
im = QImage(200, 200, QImage.Format_RGB32)
cache.setCacheImage('littlehands', im)
self.assertFalse(im.isNull())
self.assertTrue(cache.hasCacheImage('littlehands'))
cache.clear()
im = cache.cacheImage('littlehands')
self.assertTrue(im.isNull())
self.assertFalse(cache.hasCacheImage('littlehands'))
def testInit(self):
cache = QgsMapRendererCache()
extent = QgsRectangle(1, 2, 3, 4)
self.assertFalse(cache.init(extent, 1000))
# add a cache image
im = QImage(200, 200, QImage.Format_RGB32)
cache.setCacheImage('layer', im)
self.assertFalse(cache.cacheImage('layer').isNull())
self.assertTrue(cache.hasCacheImage('layer'))
        # re-init without changing extent or scale
self.assertTrue(cache.init(extent, 1000))
# image should still be in cache
self.assertFalse(cache.cacheImage('layer').isNull())
self.assertTrue(cache.hasCacheImage('layer'))
# reinit with different scale
self.assertFalse(cache.init(extent, 2000))
# cache should be cleared
self.assertTrue(cache.cacheImage('layer').isNull())
self.assertFalse(cache.hasCacheImage('layer'))
        # re-add image to cache
cache.setCacheImage('layer', im)
self.assertFalse(cache.cacheImage('layer').isNull())
self.assertTrue(cache.hasCacheImage('layer'))
# change extent
self.assertFalse(cache.init(QgsRectangle(11, 12, 13, 14), 2000))
# cache should be cleared
self.assertTrue(cache.cacheImage('layer').isNull())
self.assertFalse(cache.hasCacheImage('layer'))
def testRequestRepaintSimple(self):
""" test requesting repaint with a single dependent layer """
layer = QgsVectorLayer("Point?field=fldtxt:string",
"layer", "memory")
QgsProject.instance().addMapLayers([layer])
self.assertTrue(layer.isValid())
# add image to cache
cache = QgsMapRendererCache()
im = QImage(200, 200, QImage.Format_RGB32)
cache.setCacheImage('xxx', im, [layer])
self.assertFalse(cache.cacheImage('xxx').isNull())
self.assertTrue(cache.hasCacheImage('xxx'))
# trigger repaint on layer
layer.triggerRepaint()
# cache image should be cleared
self.assertTrue(cache.cacheImage('xxx').isNull())
self.assertFalse(cache.hasCacheImage('xxx'))
QgsProject.instance().removeMapLayer(layer.id())
# test that cache is also cleared on deferred update
layer = QgsVectorLayer("Point?field=fldtxt:string",
"layer", "memory")
cache.setCacheImage('xxx', im, [layer])
layer.triggerRepaint(True)
self.assertFalse(cache.hasCacheImage('xxx'))
def testInvalidateCacheForLayer(self):
""" test invalidating the cache for a layer """
layer = QgsVectorLayer("Point?field=fldtxt:string",
"layer", "memory")
QgsProject.instance().addMapLayers([layer])
self.assertTrue(layer.isValid())
# add image to cache
cache = QgsMapRendererCache()
im = QImage(200, 200, QImage.Format_RGB32)
cache.setCacheImage('xxx', im, [layer])
self.assertFalse(cache.cacheImage('xxx').isNull())
self.assertTrue(cache.hasCacheImage('xxx'))
# invalidate cache for layer
cache.invalidateCacheForLayer(layer)
# cache image should be cleared
self.assertTrue(cache.cacheImage('xxx').isNull())
self.assertFalse(cache.hasCacheImage('xxx'))
QgsProject.instance().removeMapLayer(layer.id())
def testRequestRepaintMultiple(self):
""" test requesting repaint with multiple dependent layers """
layer1 = QgsVectorLayer("Point?field=fldtxt:string",
"layer1", "memory")
layer2 = QgsVectorLayer("Point?field=fldtxt:string",
"layer2", "memory")
QgsProject.instance().addMapLayers([layer1, layer2])
self.assertTrue(layer1.isValid())
self.assertTrue(layer2.isValid())
# add image to cache - no dependent layers
cache = QgsMapRendererCache()
im1 = QImage(200, 200, QImage.Format_RGB32)
cache.setCacheImage('nolayer', im1)
self.assertFalse(cache.cacheImage('nolayer').isNull())
self.assertTrue(cache.hasCacheImage('nolayer'))
# trigger repaint on layer
layer1.triggerRepaint()
layer1.triggerRepaint() # do this a couple of times - we don't want errors due to multiple disconnects, etc
layer2.triggerRepaint()
layer2.triggerRepaint()
# cache image should still exist - it's not dependent on layers
self.assertFalse(cache.cacheImage('nolayer').isNull())
self.assertTrue(cache.hasCacheImage('nolayer'))
# image depends on 1 layer
im_l1 = QImage(200, 200, QImage.Format_RGB32)
cache.setCacheImage('im1', im_l1, [layer1])
# image depends on 2 layers
im_l1_l2 = QImage(200, 200, QImage.Format_RGB32)
cache.setCacheImage('im1_im2', im_l1_l2, [layer1, layer2])
# image depends on 2nd layer alone
im_l2 = QImage(200, 200, QImage.Format_RGB32)
cache.setCacheImage('im2', im_l2, [layer2])
self.assertFalse(cache.cacheImage('im1').isNull())
self.assertTrue(cache.hasCacheImage('im1'))
self.assertFalse(cache.cacheImage('im1_im2').isNull())
self.assertTrue(cache.hasCacheImage('im1_im2'))
self.assertFalse(cache.cacheImage('im2').isNull())
self.assertTrue(cache.hasCacheImage('im2'))
# trigger repaint layer 1 (check twice - don't want disconnect errors)
for i in range(2):
layer1.triggerRepaint()
# should be cleared
self.assertTrue(cache.cacheImage('im1').isNull())
self.assertFalse(cache.hasCacheImage('im1'))
self.assertTrue(cache.cacheImage('im1_im2').isNull())
self.assertFalse(cache.hasCacheImage('im1_im2'))
# should be retained
self.assertTrue(cache.hasCacheImage('im2'))
self.assertFalse(cache.cacheImage('im2').isNull())
self.assertEqual(cache.cacheImage('im2'), im_l2)
self.assertTrue(cache.hasCacheImage('nolayer'))
self.assertFalse(cache.cacheImage('nolayer').isNull())
self.assertEqual(cache.cacheImage('nolayer'), im1)
# trigger repaint layer 2
for i in range(2):
layer2.triggerRepaint()
# should be cleared
self.assertFalse(cache.hasCacheImage('im1'))
self.assertTrue(cache.cacheImage('im1').isNull())
self.assertFalse(cache.hasCacheImage('im1_im2'))
self.assertTrue(cache.cacheImage('im1_im2').isNull())
self.assertFalse(cache.hasCacheImage('im2'))
self.assertTrue(cache.cacheImage('im2').isNull())
# should be retained
self.assertTrue(cache.hasCacheImage('nolayer'))
self.assertFalse(cache.cacheImage('nolayer').isNull())
self.assertEqual(cache.cacheImage('nolayer'), im1)
def testDependentLayers(self):
# bad layer tests
cache = QgsMapRendererCache()
self.assertEqual(cache.dependentLayers('not a layer'), [])
layer1 = QgsVectorLayer("Point?field=fldtxt:string",
"layer1", "memory")
layer2 = QgsVectorLayer("Point?field=fldtxt:string",
"layer2", "memory")
im = QImage(200, 200, QImage.Format_RGB32)
cache.setCacheImage('no depends', im, [])
self.assertEqual(cache.dependentLayers('no depends'), [])
cache.setCacheImage('depends', im, [layer1, layer2])
self.assertEqual(set(cache.dependentLayers('depends')), set([layer1, layer2]))
def testLayerRemoval(self):
"""test that cached image is cleared when a dependent layer is removed"""
cache = QgsMapRendererCache()
layer1 = QgsVectorLayer("Point?field=fldtxt:string",
"layer1", "memory")
layer2 = QgsVectorLayer("Point?field=fldtxt:string",
"layer2", "memory")
im = QImage(200, 200, QImage.Format_RGB32)
cache.setCacheImage('depends', im, [layer1, layer2])
cache.setCacheImage('depends2', im, [layer1])
cache.setCacheImage('depends3', im, [layer2])
cache.setCacheImage('no depends', im, [])
self.assertTrue(cache.hasCacheImage('depends'))
self.assertTrue(cache.hasCacheImage('depends2'))
self.assertTrue(cache.hasCacheImage('depends3'))
self.assertTrue(cache.hasCacheImage('no depends'))
# try deleting a layer
layer2 = None
self.assertFalse(cache.hasCacheImage('depends'))
self.assertTrue(cache.hasCacheImage('depends2'))
self.assertFalse(cache.hasCacheImage('depends3'))
self.assertTrue(cache.hasCacheImage('no depends'))
layer1 = None
self.assertFalse(cache.hasCacheImage('depends'))
self.assertFalse(cache.hasCacheImage('depends2'))
self.assertFalse(cache.hasCacheImage('depends3'))
self.assertTrue(cache.hasCacheImage('no depends'))
def testClearOnLayerAutoRefresh(self):
""" test that cache is cleared when layer auto refresh is triggered """
cache = QgsMapRendererCache()
layer1 = QgsVectorLayer("Point?field=fldtxt:string",
"layer1", "memory")
im = QImage(200, 200, QImage.Format_RGB32)
cache.setCacheImage('l1', im, [layer1])
self.assertTrue(cache.hasCacheImage('l1'))
layer1.setAutoRefreshInterval(100)
layer1.setAutoRefreshEnabled(True)
self.assertTrue(cache.hasCacheImage('l1'))
# wait a second...
sleep(1)
for i in range(100):
QCoreApplication.processEvents()
# cache should be cleared
self.assertFalse(cache.hasCacheImage('l1'))
def testSetCacheImageDifferentParams(self):
"""
Test setting cache image with different parameters
"""
cache = QgsMapRendererCache()
cache.updateParameters(QgsRectangle(1, 1, 3, 3), QgsMapToPixel(5))
im = QImage(200, 200, QImage.Format_RGB32)
cache.setCacheImage('im1', im, [])
self.assertEqual(cache.cacheImage('im1').width(), 200)
# if existing cached image exists with matching parameters, we don't store a new image -- old
# one should still be retained
im = QImage(201, 201, QImage.Format_RGB32)
cache.setCacheImageWithParameters('im1', im, QgsRectangle(1, 1, 3, 4), QgsMapToPixel(5), [])
self.assertEqual(cache.cacheImage('im1').width(), 200)
cache.setCacheImageWithParameters('im1', im, QgsRectangle(1, 1, 3, 3), QgsMapToPixel(6), [])
self.assertEqual(cache.cacheImage('im1').width(), 200)
# replace with matching parameters
cache.setCacheImageWithParameters('im1', im, QgsRectangle(1, 1, 3, 3), QgsMapToPixel(5), [])
self.assertEqual(cache.cacheImage('im1').width(), 201)
im = QImage(202, 202, QImage.Format_RGB32)
cache.setCacheImage('im1', im, [])
self.assertEqual(cache.cacheImage('im1').width(), 202)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
svirusxxx/cjdns
|
node_build/dependencies/libuv/build/gyp/test/mac/gyptest-action-envvars.py
|
100
|
1073
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that env vars work with actions, with relative directory paths.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
  # The xcode-ninja generator incorrectly handles gypfiles which are not
  # at the project root.
# cf. https://code.google.com/p/gyp/issues/detail?id=460
if test.format == 'xcode-ninja':
test.skip_test()
CHDIR = 'action-envvars'
test.run_gyp('action/action.gyp', chdir=CHDIR)
test.build('action/action.gyp', 'action', chdir=CHDIR, SYMROOT='../build')
result_file = test.built_file_path('result', chdir=CHDIR)
test.must_exist(result_file)
test.must_contain(result_file, 'Test output')
other_result_file = test.built_file_path('other_result', chdir=CHDIR)
test.must_exist(other_result_file)
test.must_contain(other_result_file, 'Other output')
test.pass_test()
|
gpl-3.0
|
aimas/TuniErp-8.0
|
openerp/osv/expression.py
|
65
|
57592
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Domain expression processing
The main duty of this module is to compile a domain expression into an
SQL query. A lot of things remain to be documented here; as a first
step in the right direction, the tests in test_osv_expression.yml
may give you some additional information.
For legacy reasons, a domain uses an inconsistent two-level abstract
syntax (domains are regular Python data structures). At the first
level, a domain is an expression made of terms (sometimes called
leaves) and (domain) operators used in prefix notation. The available
operators at this level are '!', '&', and '|'. '!' is a unary 'not',
'&' is a binary 'and', and '|' is a binary 'or'. For instance, here
is a possible domain. (<term> stands for an arbitrary term, more on
this later.)::
['&', '!', <term1>, '|', <term2>, <term3>]
It is equivalent to this pseudo code using infix notation::
(not <term1>) and (<term2> or <term3>)
The second level of syntax deals with the term representation. A term
is a triple of the form (left, operator, right). That is, a term uses
infix notation, and its available operators and possible left and
right operands differ from those of the previous level. Here is a
possible term::
('company_id.name', '=', 'OpenERP')
The left and right operands don't have the same possible values. The
left operand is a field name (related to the model for which the domain
applies). Actually, the field name can use the dot-notation to
traverse relationships. The right operand is a Python value whose
type should match the used operator and field type. In the above
example, a string is used because the name field of a company has type
string, and because we use the '=' operator. When appropriate, an 'in'
operator can be used, in which case the right operand should be a list.
Note: the syntax could have been made more uniform, but this
would hide an important limitation of the domain syntax. Say that the
term representation was ['=', 'company_id.name', 'OpenERP']. Used in a
complete domain, this would look like::
['!', ['=', 'company_id.name', 'OpenERP']]
and you would be tempted to believe something like this would be
possible::
['!', ['=', 'company_id.name', ['&', ..., ...]]]
That is, a domain could be a valid operand. But this is not the
case. A domain is really limited to a two-level nature, and cannot
take a recursive form: a domain is not a valid second-level operand.
Unaccent - Accent-insensitive search
OpenERP will use the SQL function 'unaccent' when available for the
'ilike' and 'not ilike' operators, and enabled in the configuration.
Normally the 'unaccent' function is obtained from `the PostgreSQL
'unaccent' contrib module
<http://developer.postgresql.org/pgdocs/postgres/unaccent.html>`_.
.. todo: The following explanation should be moved to some external
installation guide
The steps to install the module might differ on specific PostgreSQL
versions. We give here some instruction for PostgreSQL 9.x on a
Ubuntu system.
Ubuntu doesn't yet ship PostgreSQL 9.x, so an alternative package
source is used. We use Martin Pitt's PPA available at
`ppa:pitti/postgresql
<https://launchpad.net/~pitti/+archive/postgresql>`_.
.. code-block:: sh
> sudo add-apt-repository ppa:pitti/postgresql
> sudo apt-get update
Once the package list is up-to-date, you have to install PostgreSQL
9.0 and its contrib modules.
.. code-block:: sh
> sudo apt-get install postgresql-9.0 postgresql-contrib-9.0
When you want to enable unaccent on some database:
.. code-block:: sh
> psql9 <database> -f /usr/share/postgresql/9.0/contrib/unaccent.sql
Here :program:`psql9` is an alias for the newly installed PostgreSQL
9.0 tool, together with the correct port if necessary (for instance if
PostgreSQL 8.4 is running on 5432). (Other aliases can be used for
createdb and dropdb.)
.. code-block:: sh
> alias psql9='/usr/lib/postgresql/9.0/bin/psql -p 5433'
You can check unaccent is working:
.. code-block:: sh
> psql9 <database> -c"select unaccent('hélène')"
Finally, to instruct OpenERP to really use the unaccent function, you have to
start the server with the ``--unaccent`` flag.
"""
import collections
import logging
import traceback
import openerp.modules
from . import fields
from ..models import MAGIC_COLUMNS, BaseModel
import openerp.tools as tools
# Domain operators.
NOT_OPERATOR = '!'
OR_OPERATOR = '|'
AND_OPERATOR = '&'
DOMAIN_OPERATORS = (NOT_OPERATOR, OR_OPERATOR, AND_OPERATOR)
# List of available term operators. It is also possible to use the '<>'
# operator, which is strictly the same as '!='; the latter should be preferred
# for consistency. This list doesn't contain '<>' as it is simplified to '!='
# by the normalize_leaf() function (so the later part of the code deals with
# only one representation).
# Internals (i.e. not available to the user) 'inselect' and 'not inselect'
# operators are also used. In this case its right operand has the form (subselect, params).
TERM_OPERATORS = ('=', '!=', '<=', '<', '>', '>=', '=?', '=like', '=ilike',
'like', 'not like', 'ilike', 'not ilike', 'in', 'not in',
'child_of')
# A subset of the above operators, with a 'negative' semantic. When the
# expressions 'in NEGATIVE_TERM_OPERATORS' or 'not in NEGATIVE_TERM_OPERATORS' are used in the code
# below, this doesn't necessarily mean that any of those NEGATIVE_TERM_OPERATORS is
# legal in the processed term.
NEGATIVE_TERM_OPERATORS = ('!=', 'not like', 'not ilike', 'not in')
TRUE_LEAF = (1, '=', 1)
FALSE_LEAF = (0, '=', 1)
TRUE_DOMAIN = [TRUE_LEAF]
FALSE_DOMAIN = [FALSE_LEAF]
_logger = logging.getLogger(__name__)
# --------------------------------------------------
# Generic domain manipulation
# --------------------------------------------------
def normalize_domain(domain):
"""Returns a normalized version of ``domain_expr``, where all implicit '&' operators
have been made explicit. One property of normalized domain expressions is that they
can be easily combined together as if they were single domain components.
"""
assert isinstance(domain, (list, tuple)), "Domains to normalize must have a 'domain' form: a list or tuple of domain components"
if not domain:
return TRUE_DOMAIN
result = []
expected = 1 # expected number of expressions
op_arity = {NOT_OPERATOR: 1, AND_OPERATOR: 2, OR_OPERATOR: 2}
for token in domain:
if expected == 0: # more than expected, like in [A, B]
result[0:0] = [AND_OPERATOR] # put an extra '&' in front
expected = 1
result.append(token)
if isinstance(token, (list, tuple)): # domain term
expected -= 1
else:
expected += op_arity.get(token, 0) - 1
assert expected == 0, 'This domain is syntactically not correct: %s' % (domain)
return result
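# A doctest-style sketch of normalize_domain (illustrative, not from the
# original source): the implicit '&' between two adjacent terms is made
# explicit.
#
# >>> normalize_domain([('a', '=', 1), ('b', '=', 2)])
# ['&', ('a', '=', 1), ('b', '=', 2)]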
def combine(operator, unit, zero, domains):
"""Returns a new domain expression where all domain components from ``domains``
have been added together using the binary operator ``operator``. The given
domains must be normalized.
:param unit: the identity element of the domains "set" with regard to the operation
performed by ``operator``, i.e the domain component ``i`` which, when
combined with any domain ``x`` via ``operator``, yields ``x``.
E.g. [(1,'=',1)] is the typical unit for AND_OPERATOR: adding it
to any domain component gives the same domain.
:param zero: the absorbing element of the domains "set" with regard to the operation
performed by ``operator``, i.e the domain component ``z`` which, when
combined with any domain ``x`` via ``operator``, yields ``z``.
E.g. [(1,'=',1)] is the typical zero for OR_OPERATOR: as soon as
you see it in a domain component the resulting domain is the zero.
:param domains: a list of normalized domains.
"""
result = []
count = 0
for domain in domains:
if domain == unit:
continue
if domain == zero:
return zero
if domain:
result += domain
count += 1
result = [operator] * (count - 1) + result
return result
def AND(domains):
"""AND([D1,D2,...]) returns a domain representing D1 and D2 and ... """
return combine(AND_OPERATOR, TRUE_DOMAIN, FALSE_DOMAIN, domains)
def OR(domains):
"""OR([D1,D2,...]) returns a domain representing D1 or D2 or ... """
return combine(OR_OPERATOR, FALSE_DOMAIN, TRUE_DOMAIN, domains)
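# Illustrative sketches for AND/OR (not from the original source); note how
# the zero domain short-circuits the combination:
#
# >>> AND([[('a', '=', 1)], [('b', '=', 2)]])
# ['&', ('a', '=', 1), ('b', '=', 2)]
# >>> OR([TRUE_DOMAIN, [('b', '=', 2)]])
# [(1, '=', 1)]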
def distribute_not(domain):
""" Distribute any '!' domain operators found inside a normalized domain.
    Because we don't use SQL semantics for processing a 'left not in right'
    query (i.e. our 'not in' is not simply translated to an SQL 'not in'),
    a '! left in right' cannot simply be processed by __leaf_to_sql by
    first emitting code for 'left in right' and then wrapping the result
    with 'not (...)', as that would produce a plain SQL 'not in'.
This function is thus responsible for pushing any '!' domain operators
inside the terms themselves. For example::
['!','&',('user_id','=',4),('partner_id','in',[1,2])]
will be turned into:
['|',('user_id','!=',4),('partner_id','not in',[1,2])]
"""
def negate(leaf):
"""Negates and returns a single domain leaf term,
using the opposite operator if possible"""
left, operator, right = leaf
mapping = {
'<': '>=',
'>': '<=',
'<=': '>',
'>=': '<',
'=': '!=',
'!=': '=',
}
if operator in ('in', 'like', 'ilike'):
operator = 'not ' + operator
return [(left, operator, right)]
if operator in ('not in', 'not like', 'not ilike'):
operator = operator[4:]
return [(left, operator, right)]
if operator in mapping:
operator = mapping[operator]
return [(left, operator, right)]
return [NOT_OPERATOR, (left, operator, right)]
def distribute_negate(domain):
"""Negate the domain ``subtree`` rooted at domain[0],
leaving the rest of the domain intact, and return
(negated_subtree, untouched_domain_rest)
"""
if is_leaf(domain[0]):
return negate(domain[0]), domain[1:]
if domain[0] == AND_OPERATOR:
done1, todo1 = distribute_negate(domain[1:])
done2, todo2 = distribute_negate(todo1)
return [OR_OPERATOR] + done1 + done2, todo2
if domain[0] == OR_OPERATOR:
done1, todo1 = distribute_negate(domain[1:])
done2, todo2 = distribute_negate(todo1)
return [AND_OPERATOR] + done1 + done2, todo2
if not domain:
return []
if domain[0] != NOT_OPERATOR:
return [domain[0]] + distribute_not(domain[1:])
if domain[0] == NOT_OPERATOR:
done, todo = distribute_negate(domain[1:])
return done + distribute_not(todo)
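# An additional illustrative sketch (not from the original source): a '!'
# applied to a single leaf is absorbed into the leaf's operator itself.
#
# >>> distribute_not(['!', ('name', 'like', 'foo')])
# [('name', 'not like', 'foo')]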
# --------------------------------------------------
# Generic leaf manipulation
# --------------------------------------------------
def _quote(to_quote):
if '"' not in to_quote:
return '"%s"' % to_quote
return to_quote
def generate_table_alias(src_table_alias, joined_tables=[]):
""" Generate a standard table alias name. An alias is generated as following:
- the base is the source table name (that can already be an alias)
- then, each joined table is added in the alias using a 'link field name'
that is used to render unique aliases for a given path
- returns a tuple composed of the alias, and the full table alias to be
added in a from condition with quoting done
Examples:
- src_table_alias='res_users', join_tables=[]:
alias = ('res_users','"res_users"')
- src_model='res_users', join_tables=[(res.partner, 'parent_id')]
alias = ('res_users__parent_id', '"res_partner" as "res_users__parent_id"')
    :param string src_table_alias: source table name (may already be an alias)
:param list joined_tables: list of tuples
(dst_model, link_field)
:return tuple: (table_alias, alias statement for from clause with quotes added)
"""
alias = src_table_alias
if not joined_tables:
return '%s' % alias, '%s' % _quote(alias)
for link in joined_tables:
alias += '__' + link[1]
    assert len(alias) < 64, 'Table alias name %s exceeds the 64-character limit accepted by default in PostgreSQL.' % alias
return '%s' % alias, '%s as %s' % (_quote(joined_tables[-1][0]), _quote(alias))
def get_alias_from_query(from_query):
""" :param string from_query: is something like :
- '"res_partner"' OR
- '"res_partner" as "res_users__partner_id"''
"""
from_splitted = from_query.split(' as ')
if len(from_splitted) > 1:
return from_splitted[0].replace('"', ''), from_splitted[1].replace('"', '')
else:
return from_splitted[0].replace('"', ''), from_splitted[0].replace('"', '')
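# e.g. (illustrative, not from the original source):
# >>> get_alias_from_query('"res_partner" as "res_users__partner_id"')
# ('res_partner', 'res_users__partner_id')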
def normalize_leaf(element):
""" Change a term's operator to some canonical form, simplifying later
processing. """
if not is_leaf(element):
return element
left, operator, right = element
original = operator
operator = operator.lower()
if operator == '<>':
operator = '!='
if isinstance(right, bool) and operator in ('in', 'not in'):
_logger.warning("The domain term '%s' should use the '=' or '!=' operator." % ((left, original, right),))
operator = '=' if operator == 'in' else '!='
if isinstance(right, (list, tuple)) and operator in ('=', '!='):
_logger.warning("The domain term '%s' should use the 'in' or 'not in' operator." % ((left, original, right),))
operator = 'in' if operator == '=' else 'not in'
return left, operator, right
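# e.g. (illustrative, not from the original source): the legacy '<>' operator
# is canonicalized to '!=':
#
# >>> normalize_leaf(('name', '<>', 'foo'))
# ('name', '!=', 'foo')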
def is_operator(element):
""" Test whether an object is a valid domain operator. """
return isinstance(element, basestring) and element in DOMAIN_OPERATORS
def is_leaf(element, internal=False):
""" Test whether an object is a valid domain term:
- is a list or tuple
- with 3 elements
        - second element is a valid operator
:param tuple element: a leaf in form (left, operator, right)
    :param boolean internal: whether to allow the internal 'inselect'
        operator in the term. This should normally be left as False.
    Note (old TODO): change the share wizard to use this function.
"""
INTERNAL_OPS = TERM_OPERATORS + ('<>',)
if internal:
INTERNAL_OPS += ('inselect', 'not inselect')
return (isinstance(element, tuple) or isinstance(element, list)) \
and len(element) == 3 \
and element[1] in INTERNAL_OPS \
and ((isinstance(element[0], basestring) and element[0])
or tuple(element) in (TRUE_LEAF, FALSE_LEAF))
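# e.g. (illustrative, not from the original source):
# >>> is_leaf(('name', '=', 'foo'))
# True
# >>> is_leaf('&')  # an operator, not a term
# False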
# --------------------------------------------------
# SQL utils
# --------------------------------------------------
def select_from_where(cr, select_field, from_table, where_field, where_ids, where_operator):
# todo: merge into parent query as sub-query
res = []
if where_ids:
if where_operator in ['<', '>', '>=', '<=']:
cr.execute('SELECT "%s" FROM "%s" WHERE "%s" %s %%s' % \
(select_field, from_table, where_field, where_operator),
(where_ids[0],)) # TODO shouldn't this be min/max(where_ids) ?
res = [r[0] for r in cr.fetchall()]
else: # TODO where_operator is supposed to be 'in'? It is called with child_of...
for i in range(0, len(where_ids), cr.IN_MAX):
subids = where_ids[i:i + cr.IN_MAX]
cr.execute('SELECT "%s" FROM "%s" WHERE "%s" IN %%s' % \
(select_field, from_table, where_field), (tuple(subids),))
res.extend([r[0] for r in cr.fetchall()])
return res
def select_distinct_from_where_not_null(cr, select_field, from_table):
cr.execute('SELECT distinct("%s") FROM "%s" where "%s" is not null' % (select_field, from_table, select_field))
return [r[0] for r in cr.fetchall()]
def get_unaccent_wrapper(cr):
if openerp.modules.registry.RegistryManager.get(cr.dbname).has_unaccent:
return lambda x: "unaccent(%s)" % (x,)
return lambda x: x
# --------------------------------------------------
# ExtendedLeaf class for managing leafs and contexts
# -------------------------------------------------
class ExtendedLeaf(object):
""" Class wrapping a domain leaf, and giving some services and management
        features on it. In particular it manages join contexts to be able to
construct queries through multiple models.
"""
# --------------------------------------------------
# Join / Context manipulation
# running examples:
# - res_users.name, like, foo: name is on res_partner, not on res_users
# - res_partner.bank_ids.name, like, foo: bank_ids is a one2many with _auto_join
# - res_partner.state_id.name, like, foo: state_id is a many2one with _auto_join
# A join:
# - link between src_table and dst_table, using src_field and dst_field
# i.e.: inherits: res_users.partner_id = res_partner.id
# i.e.: one2many: res_partner.id = res_partner_bank.partner_id
# i.e.: many2one: res_partner.state_id = res_country_state.id
# - done in the context of a field
# i.e.: inherits: 'partner_id'
# i.e.: one2many: 'bank_ids'
# i.e.: many2one: 'state_id'
# - table names use aliases: initial table followed by the context field
# names, joined using a '__'
# i.e.: inherits: res_partner as res_users__partner_id
# i.e.: one2many: res_partner_bank as res_partner__bank_ids
# i.e.: many2one: res_country_state as res_partner__state_id
# - join condition use aliases
# i.e.: inherits: res_users.partner_id = res_users__partner_id.id
    #       i.e.: one2many: res_partner.id = res_partner__bank_ids.partner_id
# i.e.: many2one: res_partner.state_id = res_partner__state_id.id
# Variables explanation:
# - src_table: working table before the join
# -> res_users, res_partner, res_partner
# - dst_table: working table after the join
# -> res_partner, res_partner_bank, res_country_state
# - src_table_link_name: field name used to link the src table, not
# necessarily a field (because 'id' is not a field instance)
# i.e.: inherits: 'partner_id', found in the inherits of the current table
# i.e.: one2many: 'id', not a field
# i.e.: many2one: 'state_id', the current field name
# - dst_table_link_name: field name used to link the dst table, not
# necessarily a field (because 'id' is not a field instance)
# i.e.: inherits: 'id', not a field
# i.e.: one2many: 'partner_id', _fields_id of the current field
# i.e.: many2one: 'id', not a field
# - context_field_name: field name used as a context to make the alias
# i.e.: inherits: 'partner_id': found in the inherits of the current table
# i.e.: one2many: 'bank_ids': current field name
# i.e.: many2one: 'state_id': current field name
# --------------------------------------------------
def __init__(self, leaf, model, join_context=None, internal=False):
""" Initialize the ExtendedLeaf
:attr [string, tuple] leaf: operator or tuple-formatted domain
expression
:attr obj model: current working model
:attr list _models: list of chained models, updated when
adding joins
:attr list join_context: list of join contexts. This is a list of
tuples like ``(lhs, table, lhs_col, col, link)``
where
lhs
source (left hand) model
model
destination (right hand) model
lhs_col
source model column for join condition
col
destination model column for join condition
link
link column between source and destination model
that is not necessarily (but generally) a real column used
in the condition (i.e. in many2one); this link is used to
compute aliases
"""
assert isinstance(model, BaseModel), 'Invalid leaf creation without table'
self.join_context = join_context or []
self.leaf = leaf
# normalize the leaf's operator
self.normalize_leaf()
# set working variables; handle the context stack and previous tables
self.model = model
self._models = []
for item in self.join_context:
self._models.append(item[0])
self._models.append(model)
# check validity
self.check_leaf(internal)
def __str__(self):
return '<osv.ExtendedLeaf: %s on %s (ctx: %s)>' % (str(self.leaf), self.model._table, ','.join(self._get_context_debug()))
def generate_alias(self):
links = [(context[1]._table, context[4]) for context in self.join_context]
alias, alias_statement = generate_table_alias(self._models[0]._table, links)
return alias
def add_join_context(self, model, lhs_col, table_col, link):
""" See above comments for more details. A join context is a tuple like:
``(lhs, model, lhs_col, col, link)``
After adding the join, the model of the current leaf is updated.
"""
self.join_context.append((self.model, model, lhs_col, table_col, link))
self._models.append(model)
self.model = model
def get_join_conditions(self):
conditions = []
alias = self._models[0]._table
for context in self.join_context:
previous_alias = alias
alias += '__' + context[4]
conditions.append('"%s"."%s"="%s"."%s"' % (previous_alias, context[2], alias, context[3]))
return conditions
def get_tables(self):
tables = set()
links = []
for context in self.join_context:
links.append((context[1]._table, context[4]))
alias, alias_statement = generate_table_alias(self._models[0]._table, links)
tables.add(alias_statement)
return tables
def _get_context_debug(self):
names = ['"%s"."%s"="%s"."%s" (%s)' % (item[0]._table, item[2], item[1]._table, item[3], item[4]) for item in self.join_context]
return names
# --------------------------------------------------
# Leaf manipulation
# --------------------------------------------------
def check_leaf(self, internal=False):
""" Leaf validity rules:
- a valid leaf is an operator or a leaf
- a valid leaf has a field objects unless
- it is not a tuple
- it is an inherited field
- left is id, operator is 'child_of'
- left is in MAGIC_COLUMNS
"""
if not is_operator(self.leaf) and not is_leaf(self.leaf, internal):
raise ValueError("Invalid leaf %s" % str(self.leaf))
def is_operator(self):
return is_operator(self.leaf)
def is_true_leaf(self):
return self.leaf == TRUE_LEAF
def is_false_leaf(self):
return self.leaf == FALSE_LEAF
def is_leaf(self, internal=False):
return is_leaf(self.leaf, internal=internal)
def normalize_leaf(self):
self.leaf = normalize_leaf(self.leaf)
return True
def create_substitution_leaf(leaf, new_elements, new_model=None, internal=False):
""" From a leaf, create a new leaf (based on the new_elements tuple
and new_model), that will have the same join context. Used to
insert equivalent leafs in the processing stack. """
if new_model is None:
new_model = leaf.model
new_join_context = [tuple(context) for context in leaf.join_context]
new_leaf = ExtendedLeaf(new_elements, new_model, join_context=new_join_context, internal=internal)
return new_leaf
class expression(object):
""" Parse a domain expression
Use a real polish notation
Leafs are still in a ('foo', '=', 'bar') format
For more info: http://christophe-simonis-at-tiny.blogspot.com/2008/08/new-new-domain-notation.html
"""
def __init__(self, cr, uid, exp, table, context):
""" Initialize expression object and automatically parse the expression
right after initialization.
        :param exp: expression (using the domain ('foo', '=', 'bar') format)
:param table: root model
:attr list result: list that will hold the result of the parsing
as a list of ExtendedLeaf
:attr list joins: list of join conditions, such as
(res_country_state."id" = res_partner."state_id")
:attr root_model: base model for the query
:attr list expression: the domain expression, that will be normalized
and prepared
"""
self._unaccent = get_unaccent_wrapper(cr)
self.joins = []
self.root_model = table
# normalize and prepare the expression for parsing
self.expression = distribute_not(normalize_domain(exp))
# parse the domain expression
self.parse(cr, uid, context=context)
# ----------------------------------------
# Leafs management
# ----------------------------------------
def get_tables(self):
""" Returns the list of tables for SQL queries, like select from ... """
tables = []
for leaf in self.result:
for table in leaf.get_tables():
if table not in tables:
tables.append(table)
table_name = _quote(self.root_model._table)
if table_name not in tables:
tables.append(table_name)
return tables
# ----------------------------------------
# Parsing
# ----------------------------------------
def parse(self, cr, uid, context):
""" Transform the leaves of the expression
The principle is to pop elements from a leaf stack one at a time.
        Each leaf is processed. The processing is an if/elif list of various
cases that appear in the leafs (many2one, function fields, ...).
Two things can happen as a processing result:
- the leaf has been modified and/or new leafs have to be introduced
in the expression; they are pushed into the leaf stack, to be
processed right after
- the leaf is added to the result
Some internal var explanation:
:var list path: left operand seen as a sequence of field names
("foo.bar" -> ["foo", "bar"])
:var obj model: model object, model containing the field
(the name provided in the left operand)
:var obj field: the field corresponding to `path[0]`
:var obj column: the column corresponding to `path[0]`
:var obj comodel: relational model of field (field.comodel)
(res_partner.bank_ids -> res.partner.bank)
"""
def to_ids(value, comodel, context=None, limit=None):
""" Normalize a single id or name, or a list of those, into a list of ids
:param {int,long,basestring,list,tuple} value:
                if int or long -> return [value]
                if basestring -> treat it as a list of one basestring, then
                if list of basestrings ->
                    perform a name_search on comodel for each name and
                    return the list of related ids
"""
names = []
if isinstance(value, basestring):
names = [value]
elif value and isinstance(value, (tuple, list)) and all(isinstance(item, basestring) for item in value):
names = value
elif isinstance(value, (int, long)):
return [value]
if names:
name_get_list = [name_get[0] for name in names for name_get in comodel.name_search(cr, uid, name, [], 'ilike', context=context, limit=limit)]
return list(set(name_get_list))
return list(value)
def child_of_domain(left, ids, left_model, parent=None, prefix='', context=None):
""" Return a domain implementing the child_of operator for [(left,child_of,ids)],
either as a range using the parent_left/right tree lookup fields
(when available), or as an expanded [(left,in,child_ids)] """
if left_model._parent_store and (not left_model.pool._init):
# TODO: Improve where joins are implemented for many with '.', replace by:
# doms += ['&',(prefix+'.parent_left','<',o.parent_right),(prefix+'.parent_left','>=',o.parent_left)]
doms = []
for o in left_model.browse(cr, uid, ids, context=context):
if doms:
doms.insert(0, OR_OPERATOR)
doms += [AND_OPERATOR, ('parent_left', '<', o.parent_right), ('parent_left', '>=', o.parent_left)]
if prefix:
return [(left, 'in', left_model.search(cr, uid, doms, context=context))]
return doms
else:
def recursive_children(ids, model, parent_field):
if not ids:
return []
ids2 = model.search(cr, uid, [(parent_field, 'in', ids)], context=context)
return ids + recursive_children(ids2, model, parent_field)
return [(left, 'in', recursive_children(ids, left_model, parent or left_model._parent_name))]
def pop():
""" Pop a leaf to process. """
return self.stack.pop()
def push(leaf):
""" Push a leaf to be processed right after. """
self.stack.append(leaf)
def push_result(leaf):
""" Push a leaf to the results. This leaf has been fully processed
and validated. """
self.result.append(leaf)
self.result = []
self.stack = [ExtendedLeaf(leaf, self.root_model) for leaf in self.expression]
# process from right to left; expression is from left to right
self.stack.reverse()
while self.stack:
# Get the next leaf to process
leaf = pop()
# Get working variables
if leaf.is_operator():
left, operator, right = leaf.leaf, None, None
elif leaf.is_true_leaf() or leaf.is_false_leaf():
# because we consider left as a string
left, operator, right = ('%s' % leaf.leaf[0], leaf.leaf[1], leaf.leaf[2])
else:
left, operator, right = leaf.leaf
path = left.split('.', 1)
model = leaf.model
field = model._fields.get(path[0])
column = model._columns.get(path[0])
comodel = model.pool.get(getattr(field, 'comodel_name', None))
# ----------------------------------------
# SIMPLE CASE
# 1. leaf is an operator
# 2. leaf is a true/false leaf
# -> add directly to result
# ----------------------------------------
if leaf.is_operator() or leaf.is_true_leaf() or leaf.is_false_leaf():
push_result(leaf)
# ----------------------------------------
# FIELD NOT FOUND
# -> from inherits'd fields -> work on the related model, and add
# a join condition
# -> ('id', 'child_of', '..') -> use a 'to_ids'
            # -> but if it is one of the _log_access special fields, add directly to
# result
# TODO: make these fields explicitly available in self.columns instead!
# -> else: crash
# ----------------------------------------
elif not column and path[0] in model._inherit_fields:
# comments about inherits'd fields
# { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
            #     field_column_obj, original_parent_model), ... }
next_model = model.pool[model._inherit_fields[path[0]][0]]
leaf.add_join_context(next_model, model._inherits[next_model._name], 'id', model._inherits[next_model._name])
push(leaf)
elif left == 'id' and operator == 'child_of':
ids2 = to_ids(right, model, context)
dom = child_of_domain(left, ids2, model)
for dom_leaf in reversed(dom):
new_leaf = create_substitution_leaf(leaf, dom_leaf, model)
push(new_leaf)
elif not column and path[0] in MAGIC_COLUMNS:
push_result(leaf)
elif not field:
raise ValueError("Invalid field %r in leaf %r" % (left, str(leaf)))
# ----------------------------------------
# PATH SPOTTED
# -> many2one or one2many with _auto_join:
# - add a join, then jump into linked column: column.remaining on
# src_table is replaced by remaining on dst_table, and set for re-evaluation
# - if a domain is defined on the column, add it into evaluation
# on the relational table
# -> many2one, many2many, one2many: replace by an equivalent computed
# domain, given by recursively searching on the remaining of the path
# -> note: hack about columns.property should not be necessary anymore
# as after transforming the column, it will go through this loop once again
# ----------------------------------------
elif len(path) > 1 and column._type == 'many2one' and column._auto_join:
# res_partner.state_id = res_partner__state_id.id
leaf.add_join_context(comodel, path[0], 'id', path[0])
push(create_substitution_leaf(leaf, (path[1], operator, right), comodel))
elif len(path) > 1 and column._type == 'one2many' and column._auto_join:
# res_partner.id = res_partner__bank_ids.partner_id
leaf.add_join_context(comodel, 'id', column._fields_id, path[0])
domain = column._domain(model) if callable(column._domain) else column._domain
push(create_substitution_leaf(leaf, (path[1], operator, right), comodel))
if domain:
domain = normalize_domain(domain)
for elem in reversed(domain):
push(create_substitution_leaf(leaf, elem, comodel))
push(create_substitution_leaf(leaf, AND_OPERATOR, comodel))
elif len(path) > 1 and column._auto_join:
raise NotImplementedError('_auto_join attribute not supported on many2many column %s' % left)
elif len(path) > 1 and column._type == 'many2one':
right_ids = comodel.search(cr, uid, [(path[1], operator, right)], context=context)
leaf.leaf = (path[0], 'in', right_ids)
push(leaf)
# Making search easier when there is a left operand as column.o2m or column.m2m
elif len(path) > 1 and column._type in ['many2many', 'one2many']:
right_ids = comodel.search(cr, uid, [(path[1], operator, right)], context=context)
table_ids = model.search(cr, uid, [(path[0], 'in', right_ids)], context=dict(context, active_test=False))
leaf.leaf = ('id', 'in', table_ids)
push(leaf)
elif not column:
# Non-stored field should provide an implementation of search.
if not field.search:
# field does not support search!
_logger.error("Non-stored field %s cannot be searched.", field)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(''.join(traceback.format_stack()))
# Ignore it: generate a dummy leaf.
domain = []
else:
# Let the field generate a domain.
recs = model.browse(cr, uid, [], context)
domain = field.determine_domain(recs, operator, right)
if not domain:
leaf.leaf = TRUE_LEAF
push(leaf)
else:
for elem in reversed(domain):
push(create_substitution_leaf(leaf, elem, model))
# -------------------------------------------------
# FUNCTION FIELD
# -> not stored: error if no _fnct_search, otherwise handle the result domain
# -> stored: management done in the remaining of parsing
# -------------------------------------------------
elif isinstance(column, fields.function) and not column.store:
# this is a function field that is not stored
if not column._fnct_search:
_logger.error(
"Field '%s' (%s) can not be searched: "
"non-stored function field without fnct_search",
column.string, left)
# avoid compiling stack trace if not needed
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(''.join(traceback.format_stack()))
# ignore it: generate a dummy leaf
fct_domain = []
else:
fct_domain = column.search(cr, uid, model, left, [leaf.leaf], context=context)
if not fct_domain:
leaf.leaf = TRUE_LEAF
push(leaf)
else:
# we assume that the expression is valid
# we create a dummy leaf for forcing the parsing of the resulting expression
for domain_element in reversed(fct_domain):
push(create_substitution_leaf(leaf, domain_element, model))
# self.push(create_substitution_leaf(leaf, TRUE_LEAF, model))
# self.push(create_substitution_leaf(leaf, AND_OPERATOR, model))
# -------------------------------------------------
# RELATIONAL FIELDS
# -------------------------------------------------
# Applying recursivity on field(one2many)
elif column._type == 'one2many' and operator == 'child_of':
ids2 = to_ids(right, comodel, context)
if column._obj != model._name:
dom = child_of_domain(left, ids2, comodel, prefix=column._obj)
else:
dom = child_of_domain('id', ids2, model, parent=left)
for dom_leaf in reversed(dom):
push(create_substitution_leaf(leaf, dom_leaf, model))
elif column._type == 'one2many':
call_null = True
if right is not False:
if isinstance(right, basestring):
ids2 = [x[0] for x in comodel.name_search(cr, uid, right, [], operator, context=context, limit=None)]
if ids2:
operator = 'in'
elif isinstance(right, collections.Iterable):
ids2 = right
else:
ids2 = [right]
if not ids2:
if operator in ['like', 'ilike', 'in', '=']:
                            # no result found with the given search criteria
call_null = False
push(create_substitution_leaf(leaf, FALSE_LEAF, model))
else:
ids2 = select_from_where(cr, column._fields_id, comodel._table, 'id', ids2, operator)
if ids2:
call_null = False
o2m_op = 'not in' if operator in NEGATIVE_TERM_OPERATORS else 'in'
push(create_substitution_leaf(leaf, ('id', o2m_op, ids2), model))
if call_null:
o2m_op = 'in' if operator in NEGATIVE_TERM_OPERATORS else 'not in'
push(create_substitution_leaf(leaf, ('id', o2m_op, select_distinct_from_where_not_null(cr, column._fields_id, comodel._table)), model))
elif column._type == 'many2many':
rel_table, rel_id1, rel_id2 = column._sql_names(model)
#FIXME
if operator == 'child_of':
def _rec_convert(ids):
if comodel == model:
return ids
return select_from_where(cr, rel_id1, rel_table, rel_id2, ids, operator)
ids2 = to_ids(right, comodel, context)
dom = child_of_domain('id', ids2, comodel)
ids2 = comodel.search(cr, uid, dom, context=context)
push(create_substitution_leaf(leaf, ('id', 'in', _rec_convert(ids2)), model))
else:
call_null_m2m = True
if right is not False:
if isinstance(right, basestring):
res_ids = [x[0] for x in comodel.name_search(cr, uid, right, [], operator, context=context)]
if res_ids:
operator = 'in'
else:
if not isinstance(right, list):
res_ids = [right]
else:
res_ids = right
if not res_ids:
if operator in ['like', 'ilike', 'in', '=']:
                                # no result found with the given search criteria
call_null_m2m = False
push(create_substitution_leaf(leaf, FALSE_LEAF, model))
else:
operator = 'in' # operator changed because ids are directly related to main object
else:
call_null_m2m = False
m2m_op = 'not in' if operator in NEGATIVE_TERM_OPERATORS else 'in'
push(create_substitution_leaf(leaf, ('id', m2m_op, select_from_where(cr, rel_id1, rel_table, rel_id2, res_ids, operator) or [0]), model))
if call_null_m2m:
m2m_op = 'in' if operator in NEGATIVE_TERM_OPERATORS else 'not in'
push(create_substitution_leaf(leaf, ('id', m2m_op, select_distinct_from_where_not_null(cr, rel_id1, rel_table)), model))
elif column._type == 'many2one':
if operator == 'child_of':
ids2 = to_ids(right, comodel, context)
if column._obj != model._name:
dom = child_of_domain(left, ids2, comodel, prefix=column._obj)
else:
dom = child_of_domain('id', ids2, model, parent=left)
for dom_leaf in reversed(dom):
push(create_substitution_leaf(leaf, dom_leaf, model))
else:
def _get_expression(comodel, cr, uid, left, right, operator, context=None):
if context is None:
context = {}
c = context.copy()
c['active_test'] = False
                    # Special treatment for ill-formed domains
operator = (operator in ['<', '>', '<=', '>=']) and 'in' or operator
dict_op = {'not in': '!=', 'in': '=', '=': 'in', '!=': 'not in'}
if isinstance(right, tuple):
right = list(right)
if (not isinstance(right, list)) and operator in ['not in', 'in']:
operator = dict_op[operator]
elif isinstance(right, list) and operator in ['!=', '=']: # for domain (FIELD,'=',['value1','value2'])
operator = dict_op[operator]
res_ids = [x[0] for x in comodel.name_search(cr, uid, right, [], operator, limit=None, context=c)]
if operator in NEGATIVE_TERM_OPERATORS:
res_ids.append(False) # TODO this should not be appended if False was in 'right'
return left, 'in', res_ids
# resolve string-based m2o criterion into IDs
if isinstance(right, basestring) or \
right and isinstance(right, (tuple, list)) and all(isinstance(item, basestring) for item in right):
push(create_substitution_leaf(leaf, _get_expression(comodel, cr, uid, left, right, operator, context=context), model))
else:
# right == [] or right == False and all other cases are handled by __leaf_to_sql()
push_result(leaf)
# -------------------------------------------------
# OTHER FIELDS
# -> datetime fields: manage time part of the datetime
# column when it is not there
# -> manage translatable fields
# -------------------------------------------------
else:
if column._type == 'datetime' and right and len(right) == 10:
if operator in ('>', '<='):
right += ' 23:59:59'
else:
right += ' 00:00:00'
push(create_substitution_leaf(leaf, (left, operator, right), model))
elif column.translate and right:
need_wildcard = operator in ('like', 'ilike', 'not like', 'not ilike')
sql_operator = {'=like': 'like', '=ilike': 'ilike'}.get(operator, operator)
if need_wildcard:
right = '%%%s%%' % right
inselect_operator = 'inselect'
if sql_operator in NEGATIVE_TERM_OPERATORS:
# negate operator (fix lp:1071710)
sql_operator = sql_operator[4:] if sql_operator[:3] == 'not' else '='
inselect_operator = 'not inselect'
unaccent = self._unaccent if sql_operator.endswith('like') else lambda x: x
instr = unaccent('%s')
if sql_operator == 'in':
                        # params will be flattened by to_sql() => expand the placeholders
instr = '(%s)' % ', '.join(['%s'] * len(right))
subselect = """WITH temp_irt_current (id, name) as (
SELECT ct.id, coalesce(it.value,ct.{quote_left})
FROM {current_table} ct
LEFT JOIN ir_translation it ON (it.name = %s and
it.lang = %s and
it.type = %s and
it.res_id = ct.id and
it.value != '')
)
SELECT id FROM temp_irt_current WHERE {name} {operator} {right} order by name
""".format(current_table=model._table, quote_left=_quote(left), name=unaccent('name'),
operator=sql_operator, right=instr)
params = (
model._name + ',' + left,
context.get('lang') or 'en_US',
'model',
right,
)
push(create_substitution_leaf(leaf, ('id', inselect_operator, (subselect, params)), model, internal=True))
else:
push_result(leaf)
# ----------------------------------------
# END OF PARSING FULL DOMAIN
# -> generate joins
# ----------------------------------------
joins = set()
for leaf in self.result:
joins |= set(leaf.get_join_conditions())
self.joins = list(joins)
def __leaf_to_sql(self, eleaf):
model = eleaf.model
leaf = eleaf.leaf
left, operator, right = leaf
# final sanity checks - should never fail
assert operator in (TERM_OPERATORS + ('inselect', 'not inselect')), \
"Invalid operator %r in domain term %r" % (operator, leaf)
assert leaf in (TRUE_LEAF, FALSE_LEAF) or left in model._fields \
or left in MAGIC_COLUMNS, "Invalid field %r in domain term %r" % (left, leaf)
assert not isinstance(right, BaseModel), \
"Invalid value %r in domain term %r" % (right, leaf)
table_alias = '"%s"' % (eleaf.generate_alias())
if leaf == TRUE_LEAF:
query = 'TRUE'
params = []
elif leaf == FALSE_LEAF:
query = 'FALSE'
params = []
elif operator == 'inselect':
query = '(%s."%s" in (%s))' % (table_alias, left, right[0])
params = right[1]
elif operator == 'not inselect':
query = '(%s."%s" not in (%s))' % (table_alias, left, right[0])
params = right[1]
elif operator in ['in', 'not in']:
# Two cases: right is a boolean or a list. The boolean case is an
# abuse and handled for backward compatibility.
if isinstance(right, bool):
_logger.warning("The domain term '%s' should use the '=' or '!=' operator." % (leaf,))
if operator == 'in':
r = 'NOT NULL' if right else 'NULL'
else:
r = 'NULL' if right else 'NOT NULL'
query = '(%s."%s" IS %s)' % (table_alias, left, r)
params = []
elif isinstance(right, (list, tuple)):
params = list(right)
check_nulls = False
for i in range(len(params))[::-1]:
if params[i] == False:
check_nulls = True
del params[i]
if params:
if left == 'id':
instr = ','.join(['%s'] * len(params))
else:
ss = model._columns[left]._symbol_set
instr = ','.join([ss[0]] * len(params))
params = map(ss[1], params)
query = '(%s."%s" %s (%s))' % (table_alias, left, operator, instr)
else:
# The case for (left, 'in', []) or (left, 'not in', []).
query = 'FALSE' if operator == 'in' else 'TRUE'
if check_nulls and operator == 'in':
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
elif not check_nulls and operator == 'not in':
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
elif check_nulls and operator == 'not in':
query = '(%s AND %s."%s" IS NOT NULL)' % (query, table_alias, left) # needed only for TRUE.
else: # Must not happen
raise ValueError("Invalid domain term %r" % (leaf,))
elif right == False and (left in model._columns) and model._columns[left]._type == "boolean" and (operator == '='):
query = '(%s."%s" IS NULL or %s."%s" = false )' % (table_alias, left, table_alias, left)
params = []
elif (right is False or right is None) and (operator == '='):
query = '%s."%s" IS NULL ' % (table_alias, left)
params = []
elif right == False and (left in model._columns) and model._columns[left]._type == "boolean" and (operator == '!='):
query = '(%s."%s" IS NOT NULL and %s."%s" != false)' % (table_alias, left, table_alias, left)
params = []
elif (right is False or right is None) and (operator == '!='):
query = '%s."%s" IS NOT NULL' % (table_alias, left)
params = []
elif operator == '=?':
if right is False or right is None:
# '=?' is a short-circuit that makes the term TRUE if right is None or False
query = 'TRUE'
params = []
else:
# '=?' behaves like '=' in other cases
query, params = self.__leaf_to_sql(
create_substitution_leaf(eleaf, (left, '=', right), model))
elif left == 'id':
query = '%s.id %s %%s' % (table_alias, operator)
params = right
else:
need_wildcard = operator in ('like', 'ilike', 'not like', 'not ilike')
sql_operator = {'=like': 'like', '=ilike': 'ilike'}.get(operator, operator)
cast = '::text' if sql_operator.endswith('like') else ''
if left in model._columns:
format = need_wildcard and '%s' or model._columns[left]._symbol_set[0]
unaccent = self._unaccent if sql_operator.endswith('like') else lambda x: x
column = '%s.%s' % (table_alias, _quote(left))
query = '(%s %s %s)' % (unaccent(column + cast), sql_operator, unaccent(format))
elif left in MAGIC_COLUMNS:
query = "(%s.\"%s\"%s %s %%s)" % (table_alias, left, cast, sql_operator)
params = right
else: # Must not happen
raise ValueError("Invalid field %r in domain term %r" % (left, leaf))
add_null = False
if need_wildcard:
if isinstance(right, str):
str_utf8 = right
elif isinstance(right, unicode):
str_utf8 = right.encode('utf-8')
else:
str_utf8 = str(right)
params = '%%%s%%' % str_utf8
add_null = not str_utf8
elif left in model._columns:
params = model._columns[left]._symbol_set[1](right)
if add_null:
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
if isinstance(params, basestring):
params = [params]
return query, params
def to_sql(self):
stack = []
params = []
# Process the domain from right to left, using a stack, to generate a SQL expression.
self.result.reverse()
for leaf in self.result:
if leaf.is_leaf(internal=True):
q, p = self.__leaf_to_sql(leaf)
params.insert(0, p)
stack.append(q)
elif leaf.leaf == NOT_OPERATOR:
stack.append('(NOT (%s))' % (stack.pop(),))
else:
ops = {AND_OPERATOR: ' AND ', OR_OPERATOR: ' OR '}
q1 = stack.pop()
q2 = stack.pop()
stack.append('(%s %s %s)' % (q1, ops[leaf.leaf], q2,))
assert len(stack) == 1
query = stack[0]
joins = ' AND '.join(self.joins)
if joins:
query = '(%s) AND %s' % (joins, query)
return query, tools.flatten(params)
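        # Illustrative only (assumed model/table): for the domain
        # [('name', '=', 'foo')] parsed against res_partner, to_sql() yields
        # roughly ('("res_partner"."name" = %s)', ['foo']).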
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
JacobFischer/Joueur.py
|
games/anarchy/building.py
|
1
|
3963
|
# Building: A basic building. It does nothing besides burn down. Other Buildings inherit from this class.
# DO NOT MODIFY THIS FILE
# Never try to directly create an instance of this class, or modify its member variables.
# Instead, you should only be reading its variables and calling its functions.
from games.anarchy.game_object import GameObject
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class Building(GameObject):
"""The class representing the Building in the Anarchy game.
A basic building. It does nothing besides burn down. Other Buildings inherit from this class.
"""
def __init__(self):
"""Initializes a Building with basic logic as provided by the Creer code generator."""
GameObject.__init__(self)
# private attributes to hold the properties so they appear read only
self._bribed = False
self._building_east = None
self._building_north = None
self._building_south = None
self._building_west = None
self._fire = 0
self._health = 0
self._is_headquarters = False
self._owner = None
self._x = 0
self._y = 0
@property
def bribed(self):
"""When True this building has already been bribed this turn and cannot be bribed again this turn.
:rtype: bool
"""
return self._bribed
@property
def building_east(self):
"""The Building directly to the east of this building, or None if not present.
:rtype: games.anarchy.building.Building
"""
return self._building_east
@property
def building_north(self):
"""The Building directly to the north of this building, or None if not present.
:rtype: games.anarchy.building.Building
"""
return self._building_north
@property
def building_south(self):
"""The Building directly to the south of this building, or None if not present.
:rtype: games.anarchy.building.Building
"""
return self._building_south
@property
def building_west(self):
"""The Building directly to the west of this building, or None if not present.
:rtype: games.anarchy.building.Building
"""
return self._building_west
@property
def fire(self):
"""How much fire is currently burning the building, and thus how much damage it will take at the end of its owner's turn. 0 means no fire.
:rtype: int
"""
return self._fire
@property
def health(self):
"""How much health this building currently has. When this reaches 0 the Building has been burned down.
:rtype: int
"""
return self._health
@property
def is_headquarters(self):
"""True if this is the Headquarters of the owning player, False otherwise. Burning this down wins the game for the other Player.
:rtype: bool
"""
return self._is_headquarters
@property
def owner(self):
"""The player that owns this building. If it burns down (health reaches 0) that player gets an additional bribe(s).
:rtype: games.anarchy.player.Player
"""
return self._owner
@property
def x(self):
"""The location of the Building along the x-axis.
:rtype: int
"""
return self._x
@property
def y(self):
"""The location of the Building along the y-axis.
:rtype: int
"""
return self._y
# <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# if you want to add any client side logic (such as state checking functions) this is where you can add them
# <<-- /Creer-Merge: functions -->>
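# --- Illustration only; a hypothetical client-side helper, not Creer output ---
# The four building_* properties link each Building to its grid neighbors, so
# game logic can walk the map without touching coordinates:
#
#     def walk_east(building):
#         """Yield this building and every neighbor to its east, in order."""
#         while building is not None:
#             yield building
#             building = building.building_east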
|
mit
|
gento/dionaea
|
modules/python/scripts/pptp/include/packets.py
|
1
|
4630
|
#********************************************************************************
#* Dionaea
#* - catches bugs -
#*
#*
#*
#* Copyright (C) 2015 Tan Kean Siong
#*
#* This program is free software; you can redistribute it and/or
#* modify it under the terms of the GNU General Public License
#* as published by the Free Software Foundation; either version 2
#* of the License, or (at your option) any later version.
#*
#* This program is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#* GNU General Public License for more details.
#*
#* You should have received a copy of the GNU General Public License
#* along with this program; if not, write to the Free Software
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#*
#*
#* contact nepenthesdev@gmail.com
#*
#*******************************************************************************/
from dionaea.smb.include.packet import *
from dionaea.smb.include.fieldtypes import *
#PPTP Control Message Types
PPTP_CTRMSG_TYPE_STARTCTRCON_REQUEST = 0x01
PPTP_CTRMSG_TYPE_STARTCTRCON_REPLY = 0x02
PPTP_CTRMSG_TYPE_OUTGOINGCALL_REQUEST = 0x07
PPTP_CTRMSG_TYPE_OUTGOINGCALL_REPLY = 0x08
#PPP Link Control Protocol Types
PPP_LCP_Configuration_Request = 0x01
# https://www.ietf.org/rfc/rfc2637.txt
class PPTP_StartControlConnection_Request(Packet):
name="PPTP Start-Control-Connection-Request"
controlmessage_type = PPTP_CTRMSG_TYPE_STARTCTRCON_REQUEST
fields_desc =[
XShortField("Length",0),
XShortField("MessageType",0),
XIntField("MagicCookie",0),
XShortField("ControlMessageType",0),
XShortField("Reserved",0),
XShortField("ProtocolVersion",0),
XShortField("Reserved",0),
XIntField("FramingCapabilites",0),
XIntField("BearerCapabilites",0),
XShortField("MaxChannels",0),
XShortField("FirmwareRevision",0),
StrFixedLenField("HostName", "", 64),
StrFixedLenField("VendorName", "", 64),
]
class PPTP_StartControlConnection_Reply(Packet):
name="PPTP Start-Control-Connection-Reply"
controlmessage_type = PPTP_CTRMSG_TYPE_STARTCTRCON_REPLY
fields_desc =[
XShortField("Length",0x9c),
XShortField("MessageType",0x01),
XIntField("MagicCookie",0x1a2b3c4d),
XShortField("ControlMessageType",0x02),
XShortField("Reserved",0),
LEShortField("ProtocolVersion",0x01),
ByteField("ResultCode",0x01),
ByteField("ErrorCode",0x00),
LEIntField("FramingCapabilites",0),
LEIntField("BearerCapabilites",0),
XShortField("MaxChannels",1),
XShortField("FirmwareRevision",1),
StrFixedLenField("HostName", "", 64),
StrFixedLenField("VendorName", "", 64),
]
class PPTP_OutgoingCall_Request(Packet):
name="PPTP Outgoing-Call-Request"
controlmessage_type = PPTP_CTRMSG_TYPE_OUTGOINGCALL_REQUEST
fields_desc =[
XShortField("Length",0),
XShortField("MessageType",0),
XIntField("MagicCookie",0),
XShortField("ControlMessageType",0),
XShortField("Reserved",0),
XShortField("CallID",0),
XShortField("CallSerialNumber",0),
XIntField("MinBPS",0),
XIntField("MaxBPS",0),
XIntField("BearerType",0),
XIntField("FramingType",0),
XShortField("PacketWindowSize",0),
XShortField("PacketProcessingDelay",0),
XShortField("PacketNumberLength",0),
XShortField("Reserved",0),
StrFixedLenField("PhoneNumber", "", 64),
StrFixedLenField("Subaddress", "", 64),
]
class PPTP_OutgoingCall_Reply(Packet):
name="PPTP Outgoing-Call-Reply"
controlmessage_type = PPTP_CTRMSG_TYPE_OUTGOINGCALL_REPLY
fields_desc =[
XShortField("Length",0x20),
XShortField("MessageType",0x01),
XIntField("MagicCookie",0x1a2b3c4d),
XShortField("ControlMessageType",0x08),
XShortField("Reserved",0),
XShortField("CallID",0x480),
XShortField("PeerCallID",0),
ByteField("ResultCode",0x01),
ByteField("ErrorCode",0x00),
XShortField("CauseCode",0),
XIntField("ConnectSpeed",0x05F5E100),
XShortField("PacketWindowSize",0x2000),
XShortField("PacketProcessingDelay",0),
XShortField("PacketNumberLength",0),
XShortField("PhysicalChannelID",0),
]
class PPTP(Packet):
name="PPTP"
fields_desc =[
ByteField("Address",0),
ByteField("Control",0),
XShortField("Protocol",0),
]
class PPP_LCP_Configuration_Request(Packet):
name="PPP LCP_Configuration_Request"
    controlmessage_type = PPP_LCP_Configuration_Request  # the module-level constant 0x01 above, not this class
fields_desc =[
ByteField("Code",0),
ByteField("Identifier",0),
XShortField("Length",0),
StrFixedLenField("Options", b"", length_from=lambda pkt: pkt.Length-4),
]
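# --- Illustration only ---
# The *_Reply classes bake RFC 2637 constants into their field defaults (note
# the 0x1a2b3c4d magic cookie), so a handler can answer a request by simply
# instantiating one. A sketch assuming dionaea's scapy-style Packet API, where
# .build() serializes a packet to wire bytes:
#
#     reply = PPTP_StartControlConnection_Reply(HostName=b"dionaea",
#                                               VendorName=b"dionaea")
#     raw = reply.build()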
|
gpl-2.0
|
MillerDix/NEChromeX
|
flaskTest/venv/lib/python2.7/site-packages/pip/utils/appdirs.py
|
340
|
8811
|
"""
This code was taken from https://github.com/ActiveState/appdirs and modified
to suit our purposes.
"""
from __future__ import absolute_import
import os
import sys
from pip.compat import WINDOWS, expanduser
from pip._vendor.six import PY2, text_type
def user_cache_dir(appname):
r"""
Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
Typical user cache directories are:
macOS: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Windows: C:\Users\<username>\AppData\Local\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go
in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the
non-roaming app data dir (the default returned by `user_data_dir`). Apps
typically put cache data somewhere *under* the given dir here. Some
examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
"""
if WINDOWS:
# Get the base path
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
# When using Python 2, return paths as bytes on Windows like we do on
# other operating systems. See helper function docs for more details.
if PY2 and isinstance(path, text_type):
path = _win_path_to_bytes(path)
# Add our app name and Cache directory to it
path = os.path.join(path, appname, "Cache")
elif sys.platform == "darwin":
# Get the base path
path = expanduser("~/Library/Caches")
# Add our app name to it
path = os.path.join(path, appname)
else:
# Get the base path
path = os.getenv("XDG_CACHE_HOME", expanduser("~/.cache"))
# Add our app name to it
path = os.path.join(path, appname)
return path
def user_data_dir(appname, roaming=False):
"""
Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
macOS: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in
$XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\ ...
...Application Data\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local ...
...Settings\Application Data\<AppName>
Win 7 (not roaming): C:\\Users\<username>\AppData\Local\<AppName>
Win 7 (roaming): C:\\Users\<username>\AppData\Roaming\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if WINDOWS:
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.join(os.path.normpath(_get_win_folder(const)), appname)
elif sys.platform == "darwin":
path = os.path.join(
expanduser('~/Library/Application Support/'),
appname,
)
else:
path = os.path.join(
os.getenv('XDG_DATA_HOME', expanduser("~/.local/share")),
appname,
)
return path
def user_config_dir(appname, roaming=True):
"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"roaming" (boolean, default True) can be set False to not use the
Windows roaming appdata directory. That means that for users on a
Windows network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
    Typical user config directories are:
macOS: same as user_data_dir
Unix: ~/.config/<AppName>
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default "~/.config/<AppName>".
"""
if WINDOWS:
path = user_data_dir(appname, roaming=roaming)
elif sys.platform == "darwin":
path = user_data_dir(appname)
else:
path = os.getenv('XDG_CONFIG_HOME', expanduser("~/.config"))
path = os.path.join(path, appname)
return path
# for the discussion regarding site_config_dirs locations
# see <https://github.com/pypa/pip/issues/1733>
def site_config_dirs(appname):
"""Return a list of potential user-shared config dirs for this application.
"appname" is the name of application.
    Typical site config directories are:
macOS: /Library/Application Support/<AppName>/
Unix: /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in
$XDG_CONFIG_DIRS
Win XP: C:\Documents and Settings\All Users\Application ...
...Data\<AppName>\
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory
on Vista.)
Win 7: Hidden, but writeable on Win 7:
C:\ProgramData\<AppName>\
"""
if WINDOWS:
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
pathlist = [os.path.join(path, appname)]
elif sys.platform == 'darwin':
pathlist = [os.path.join('/Library/Application Support', appname)]
else:
# try looking in $XDG_CONFIG_DIRS
xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
if xdg_config_dirs:
pathlist = [
os.path.join(expanduser(x), appname)
for x in xdg_config_dirs.split(os.pathsep)
]
else:
pathlist = []
# always look in /etc directly as well
pathlist.append('/etc')
return pathlist
# -- Windows support functions --
def _get_win_folder_from_registry(csidl_name):
"""
This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
directory, _type = _winreg.QueryValueEx(key, shell_folder_name)
return directory
def _get_win_folder_with_ctypes(csidl_name):
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
if WINDOWS:
try:
import ctypes
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
_get_win_folder = _get_win_folder_from_registry
def _win_path_to_bytes(path):
"""Encode Windows paths to bytes. Only used on Python 2.
Motivation is to be consistent with other operating systems where paths
are also returned as bytes. This avoids problems mixing bytes and Unicode
elsewhere in the codebase. For more details and discussion see
<https://github.com/pypa/pip/issues/3463>.
If encoding using ASCII and MBCS fails, return the original Unicode path.
"""
for encoding in ('ASCII', 'MBCS'):
try:
return path.encode(encoding)
except (UnicodeEncodeError, LookupError):
pass
return path
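# --- Illustration only ---
# Typical use: derive the per-user locations once and join paths under them.
# On Linux with default XDG settings the functions above resolve to:
#
#     user_cache_dir("pip")    # -> "/home/<user>/.cache/pip"
#     user_data_dir("pip")     # -> "/home/<user>/.local/share/pip"
#     user_config_dir("pip")   # -> "/home/<user>/.config/pip"
#     site_config_dirs("pip")  # -> ["/etc/xdg/pip", "/etc"]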
|
mit
|
rcmorano/gecosws-config-assistant
|
firstboot_lib/FirstbootEntry.py
|
2
|
1362
|
import os
import types
import firstbootconfig
import firstboot.pages
from xdg.IniFile import *
class FirstbootEntry(IniFile):
default_group = 'Firstboot Entry'
def __init__(self):
self.content = dict()
self.config_path = '/var/lib/firstboot'
self.config_file = os.path.join(self.config_path, 'firstboot.conf')
if not os.path.exists(self.config_path):
os.makedirs(self.config_path)
if not os.path.exists(self.config_file):
self._create_config_file()
IniFile.parse(self, self.config_file, [self.default_group])
def _create_config_file(self):
fd = open(self.config_file, 'w')
        if fd is not None:
fd.write('[Firstboot Entry]\n')
fd.write('firststart=0\n')
fd.write('\n')
fd.write('[LinkToServer]\n')
fd.write('url=http://GECOS-SERVER/auth/config/\n')
fd.write('\n')
fd.close()
def get_firststart(self):
fs = self.get('firststart').strip()
fs = bool(int(fs))
return fs
def set_firststart(self, value):
self.set('firststart', value)
self.write()
def get_url(self):
return self.get('url', group='LinkToServer')
def set_url(self, value):
self.set('url', value, group='LinkToServer')
self.write()
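# --- Illustration only ---
# Usage sketch: instantiation lazily creates /var/lib/firstboot/firstboot.conf
# with its default sections, after which callers read and persist settings:
#
#     entry = FirstbootEntry()
#     if not entry.get_firststart():
#         entry.set_url('http://my-gecos-server/auth/config/')  # hypothetical URL
#         entry.set_firststart(1)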
|
gpl-2.0
|
dmilith/SublimeText3-dmilith
|
Packages/pyte/all/pyte/graphics.py
|
1
|
3441
|
# -*- coding: utf-8 -*-
"""
pyte.graphics
~~~~~~~~~~~~~
This module defines graphic-related constants, mostly taken from
:manpage:`console_codes(4)` and
http://pueblo.sourceforge.net/doc/manual/ansi_color_codes.html.
:copyright: (c) 2011-2012 by Selectel.
:copyright: (c) 2012-2017 by pyte authors and contributors,
see AUTHORS for details.
:license: LGPL, see LICENSE for more details.
"""
from __future__ import unicode_literals
#: A mapping of ANSI text style codes to style names: "+" means the
#: attribute is set, "-" means it is reset; example:
#:
#: >>> text[1]
#: '+bold'
#: >>> text[9]
#: '+strikethrough'
TEXT = {
1: "+bold",
3: "+italics",
4: "+underscore",
7: "+reverse",
9: "+strikethrough",
22: "-bold",
23: "-italics",
24: "-underscore",
27: "-reverse",
29: "-strikethrough",
}
#: A mapping of ANSI foreground color codes to color names.
#:
#: >>> FG_ANSI[30]
#: 'black'
#: >>> FG_ANSI[38]
#: 'default'
FG_ANSI = {
30: "black",
31: "red",
32: "green",
33: "brown",
34: "blue",
35: "magenta",
36: "cyan",
37: "white",
39: "default" # white.
}
#: An alias to :data:`~pyte.graphics.FG_ANSI` for compatibility.
FG = FG_ANSI
#: A mapping of non-standard ``aixterm`` foreground color codes to
#: color names. These are high intensity colors and thus should be
#: complemented by ``+bold``.
FG_AIXTERM = {
90: "black",
91: "red",
92: "green",
93: "brown",
94: "blue",
95: "magenta",
96: "cyan",
97: "white"
}
#: A mapping of ANSI background color codes to color names.
#:
#: >>> BG_ANSI[40]
#: 'black'
#: >>> BG_ANSI[48]
#: 'default'
BG_ANSI = {
40: "black",
41: "red",
42: "green",
43: "brown",
44: "blue",
45: "magenta",
46: "cyan",
47: "white",
49: "default" # black.
}
#: An alias to :data:`~pyte.graphics.BG_ANSI` for compatibility.
BG = BG_ANSI
#: A mapping of non-standard ``aixterm`` background color codes to
#: color names. These are high intensity colors and thus should be
#: complemented by ``+bold``.
BG_AIXTERM = {
100: "black",
101: "red",
102: "green",
103: "brown",
104: "blue",
105: "magenta",
106: "cyan",
107: "white"
}
#: SGR code for foreground in 256 or True color mode.
FG_256 = 38
#: SGR code for background in 256 or True color mode.
BG_256 = 48
#: A table of 256 foreground or background colors.
# The following code is part of the Pygments project (BSD licensed).
FG_BG_256 = [
(0x00, 0x00, 0x00), # 0
(0xcd, 0x00, 0x00), # 1
(0x00, 0xcd, 0x00), # 2
(0xcd, 0xcd, 0x00), # 3
(0x00, 0x00, 0xee), # 4
(0xcd, 0x00, 0xcd), # 5
(0x00, 0xcd, 0xcd), # 6
(0xe5, 0xe5, 0xe5), # 7
(0x7f, 0x7f, 0x7f), # 8
(0xff, 0x00, 0x00), # 9
(0x00, 0xff, 0x00), # 10
(0xff, 0xff, 0x00), # 11
(0x5c, 0x5c, 0xff), # 12
(0xff, 0x00, 0xff), # 13
(0x00, 0xff, 0xff), # 14
(0xff, 0xff, 0xff), # 15
]
# colors 16..231: the 6x6x6 color cube
valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
for i in range(216):
r = valuerange[(i // 36) % 6]
g = valuerange[(i // 6) % 6]
b = valuerange[i % 6]
FG_BG_256.append((r, g, b))
# colors 232..255: grayscale
for i in range(24):
v = 8 + i * 10
FG_BG_256.append((v, v, v))
FG_BG_256 = ["{0:02x}{1:02x}{2:02x}".format(r, g, b) for r, g, b in FG_BG_256]
|
mit
|
gisce/OCB
|
addons/account_chart/__init__.py
|
526
|
1055
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
BrainTech/openbci
|
obci/logic/logic_speller_peer.py
|
1
|
1099
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author:
# Mateusz Kruszyński <mateusz.kruszynski@gmail.com>
#
import time
from obci.utils import tags_helper
from multiplexer.multiplexer_constants import peers, types
from obci.logic import logic_helper
from obci.logic.logic_decision_peer import LogicDecision
from obci.logic.engines.speller_engine import SpellerEngine
from obci.utils import context as ctx
from obci.configs import settings, variables_pb2
from obci.utils.openbci_logging import log_crash
class LogicSpeller(LogicDecision, SpellerEngine):
"""A class for creating a manifest file with metadata."""
@log_crash
def __init__(self, addresses):
LogicDecision.__init__(self, addresses=addresses)
context = ctx.get_new_context()
context['logger'] = self.logger
SpellerEngine.__init__(self, self.config.param_values(), context)
self.ready()
self._update_letters()
def _run_post_actions(self, p_decision):
self._update_letters()
if __name__ == "__main__":
LogicSpeller(settings.MULTIPLEXER_ADDRESSES).loop()
|
gpl-3.0
|
ethanbao/artman
|
artman/pipelines/core_generation.py
|
1
|
2951
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipelines that run protoc core codegen. The generated core library for each
language contains the well known types, defined by protobuf, for that language.
"""
from artman.pipelines import code_generation as code_gen
from artman.tasks import protoc_tasks
from artman.tasks import package_metadata_tasks
from artman.utils import task_utils
class CoreProtoPipeline(code_gen.CodeGenerationPipelineBase):
def __init__(self, **kwargs):
super(CoreProtoPipeline, self).__init__(
get_core_task_factory(kwargs['language']), **kwargs)
class CoreTaskFactoryBase(code_gen.TaskFactoryBase):
def get_tasks(self, **kwargs):
return task_utils.instantiate_tasks(
self._get_core_codegen_tasks(**kwargs), kwargs)
def _get_core_codegen_tasks(self, **kwargs):
raise NotImplementedError('Subclass must implement abstract method')
def get_validate_kwargs(self):
return code_gen.COMMON_REQUIRED
def get_invalid_kwargs(self):
return []
class _GoCoreTaskFactory(CoreTaskFactoryBase):
"""Responsible for the protobuf flow for Go language."""
def _get_core_codegen_tasks(self, **kwargs):
return [
protoc_tasks.ProtoCodeGenTask,
protoc_tasks.GoCopyTask,
]
def get_validate_kwargs(self):
return ['gapic_api_yaml', 'gapic_code_dir'] + code_gen.COMMON_REQUIRED
class _CSharpCoreTaskFactory(CoreTaskFactoryBase):
def _get_core_codegen_tasks(self, **kwargs):
return [protoc_tasks.ProtoCodeGenTask]
class _JavaCoreTaskFactory(CoreTaskFactoryBase):
"""Responsible for the core protobuf flow for Java language."""
def _get_core_codegen_tasks(self, **kwargs):
return [protoc_tasks.ProtoDescGenTask,
protoc_tasks.ProtoCodeGenTask,
package_metadata_tasks.PackageMetadataConfigGenTask,
package_metadata_tasks.ProtoPackageMetadataGenTask,
protoc_tasks.JavaProtoCopyTask]
_CORE_TASK_FACTORY_DICT = {
'go': _GoCoreTaskFactory,
'csharp': _CSharpCoreTaskFactory,
'java': _JavaCoreTaskFactory,
}
def get_core_task_factory(language):
cls = _CORE_TASK_FACTORY_DICT.get(language)
if cls:
return cls()
else:
raise ValueError('No core task factory found for language: '
+ language)
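# --- Illustration only ---
# Supporting core codegen for another language means registering one more
# factory in _CORE_TASK_FACTORY_DICT. A hypothetical sketch (the task list is
# an assumption, mirroring the C# factory above):
#
#     class _PythonCoreTaskFactory(CoreTaskFactoryBase):
#         def _get_core_codegen_tasks(self, **kwargs):
#             return [protoc_tasks.ProtoCodeGenTask]
#
#     _CORE_TASK_FACTORY_DICT['python'] = _PythonCoreTaskFactory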
|
apache-2.0
|
dbiesecke/dbiesecke.github.io
|
repo/script.module.urlresolver/lib/urlresolver/plugins/waaw.py
|
2
|
6528
|
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re, urllib, json
from lib import helpers
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class WaawResolver(UrlResolver):
name = "waaw"
domains = ["waaw.tv", "hqq.watch", "netu.tv", "hqq.tv", "waaw1.tv"]
pattern = "(?://|\.)((?:waaw1?|netu|hqq)\.(?:tv|watch))/(?:watch_video\.php\?v|.+?vid)=([a-zA-Z0-9]+)"
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'User-Agent': common.FF_USER_AGENT,
'Referer': 'https://waaw.tv/watch_video.php?v=%s&post=1' % media_id}
html = self.net.http_GET(web_url, headers=headers).content
if html:
try:
wise = re.search('''<script type=["']text/javascript["']>\s*;?(eval.*?)</script>''', html,
re.DOTALL | re.I).groups()[0]
data_unwise = self.jswise(wise).replace("\\", "")
try:
at = re.search('at=(\w+)', data_unwise, re.I).groups()[0]
except:
at = ""
try:
http_referer = re.search('http_referer=(.*?)&', data_unwise, re.I).groups()[0]
except:
http_referer = ""
player_url = "http://hqq.watch/sec/player/embed_player.php?iss=&vid=%s&at=%s&autoplayed=yes&referer=on&http_referer=%s&pass=&embed_from=&need_captcha=0&hash_from=&secured=0" % (
media_id, at, http_referer)
headers.update({'Referer': web_url})
data_player = self.net.http_GET(player_url, headers=headers).content
data_unescape = re.findall('document.write\(unescape\("([^"]+)"', data_player)
data = ""
for d in data_unescape:
data += urllib.unquote(d)
data_unwise_player = ""
wise = ""
wise = re.search('''<script type=["']text/javascript["']>\s*;?(eval.*?)</script>''', data_player,
re.DOTALL | re.I)
if wise:
data_unwise_player = self.jswise(wise.group(1)).replace("\\", "")
try:
vars_data = re.search('/player/get_md5.php",\s*\{(.*?)\}', data, re.DOTALL | re.I).groups()[0]
except:
vars_data = ""
matches = re.findall('\s*([^:]+):\s*([^,]*)[,"]', vars_data)
params = {}
for key, value in matches:
if key == "adb":
params[key] = "0/"
elif '"' in value:
params[key] = value.replace('"', '')
else:
try:
value_var = re.search('var\s*%s\s*=\s*"([^"]+)"' % value, data, re.I).groups()[0]
except:
value_var = ""
if not value_var and data_unwise_player:
try:
value_var = \
re.search('var\s*%s\s*=\s*"([^"]+)"' % value, data_unwise_player, re.I).groups()[0]
except:
value_var = ""
params[key] = value_var
params = urllib.urlencode(params)
headers.update({'X-Requested-With': 'XMLHttpRequest', 'Referer': player_url})
data = ""
data = self.net.http_GET("http://hqq.watch/player/get_md5.php?" + params, headers=headers).content
url_data = json.loads(data)
media_url = "https:" + self.tb(url_data["obf_link"].replace("#", "")) + ".mp4.m3u8"
if media_url:
del headers['X-Requested-With']
headers.update({'Origin': 'https://hqq.watch'})
return media_url + helpers.append_headers(headers)
except Exception as e:
raise ResolverError(e)
raise ResolverError('Video not found')
def tb(self, b_m3u8_2):
j = 0
s2 = ""
while j < len(b_m3u8_2):
s2 += "\\u0" + b_m3u8_2[j:(j + 3)]
j += 3
return s2.decode('unicode-escape').encode('ASCII', 'ignore')
## loop2unobfuscated
def jswise(self, wise):
while True:
wise = re.search("var\s.+?\('([^']+)','([^']+)','([^']+)','([^']+)'\)", wise, re.DOTALL)
if not wise: break
ret = wise = self.js_wise(wise.groups())
return ret
## js2python
def js_wise(self, wise):
w, i, s, e = wise
        v0 = 0
        v1 = 0
        v2 = 0
        v3 = []
        v4 = []
while True:
if v0 < 5:
v4.append(w[v0])
elif v0 < len(w):
v3.append(w[v0])
v0 += 1
if v1 < 5:
v4.append(i[v1])
elif v1 < len(i):
v3.append(i[v1])
v1 += 1
if v2 < 5:
v4.append(s[v2])
elif v2 < len(s):
v3.append(s[v2])
v2 += 1
if len(w) + len(i) + len(s) + len(e) == len(v3) + len(v4) + len(e): break
v5 = "".join(v3);
v6 = "".join(v4)
v1 = 0
v7 = []
for v0 in range(0, len(v3), 2):
v8 = -1
if ord(v6[v1]) % 2: v8 = 1
v7.append(chr(int(v5[v0:v0 + 2], 36) - v8))
v1 += 1
if v1 >= len(v4): v1 = 0
return "".join(v7)
def get_url(self, host, media_id):
return self._default_get_url(host, media_id,
template='http://hqq.watch/player/embed_player.php?vid={media_id}&autoplay=no')
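# --- Illustration only ---
# tb() above rebuilds the stream URL from the obfuscated link: each 3-character
# chunk is the low 12 bits of one code point, padded into a \u0XXX escape. The
# same decoding as a small Python 3 sketch:
#
#     def decode_obf(link):
#         return "".join(chr(int(link[j:j + 3], 16))
#                        for j in range(0, len(link), 3))
#
#     decode_obf("06d033075038")  # -> 'm3u8'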
|
mit
|
aleksandra-tarkowska/django
|
django/contrib/gis/db/backends/oracle/models.py
|
86
|
2268
|
"""
The GeometryColumns and SpatialRefSys models for the Oracle spatial
backend.
It should be noted that Oracle Spatial does not have database tables
named according to the OGC standard, so the closest analogs are used.
For example, the `USER_SDO_GEOM_METADATA` is used for the GeometryColumns
model and the `SDO_COORD_REF_SYS` is used for the SpatialRefSys model.
"""
from django.contrib.gis.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class OracleGeometryColumns(models.Model):
"Maps to the Oracle USER_SDO_GEOM_METADATA table."
table_name = models.CharField(max_length=32)
column_name = models.CharField(max_length=1024)
srid = models.IntegerField(primary_key=True)
# TODO: Add support for `diminfo` column (type MDSYS.SDO_DIM_ARRAY).
class Meta:
app_label = 'gis'
db_table = 'USER_SDO_GEOM_METADATA'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the feature table
name.
"""
return 'table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store the feature
geometry column.
"""
return 'column_name'
def __str__(self):
return '%s - %s (SRID: %s)' % (self.table_name, self.column_name, self.srid)
class OracleSpatialRefSys(models.Model, SpatialRefSysMixin):
"Maps to the Oracle MDSYS.CS_SRS table."
cs_name = models.CharField(max_length=68)
srid = models.IntegerField(primary_key=True)
auth_srid = models.IntegerField()
auth_name = models.CharField(max_length=256)
wktext = models.CharField(max_length=2046)
# Optional geometry representing the bounds of this coordinate
# system. By default, all are NULL in the table.
cs_bounds = models.PolygonField(null=True)
objects = models.GeoManager()
class Meta:
app_label = 'gis'
db_table = 'CS_SRS'
managed = False
@property
def wkt(self):
return self.wktext
@classmethod
def wkt_col(cls):
return 'wktext'
|
bsd-3-clause
|
jcpowermac/ansible
|
lib/ansible/modules/cloud/vmware/vmware_maintenancemode.py
|
10
|
6537
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, VMware, Inc.
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_maintenancemode
short_description: Place a host into maintenance mode
description:
    - This module can be used for placing an ESXi host into maintenance mode.
- Support for VSAN compliant maintenance mode when selected.
author:
- "Jay Jahns <jjahns@vmware.com>"
- "Abhijeet Kasurde (@akasurde)"
version_added: "2.1"
notes:
- Tested on vSphere 5.5, 6.0 and 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
esxi_hostname:
description:
- Name of the host as defined in vCenter.
required: True
    vsan:
description:
- Specify which VSAN compliant mode to enter.
choices:
- 'ensureObjectAccessibility'
- 'evacuateAllData'
- 'noAction'
required: False
evacuate:
description:
- If True, evacuate all powered off VMs.
choices:
- True
- False
default: False
required: False
timeout:
description:
- Specify a timeout for the operation.
required: False
default: 0
state:
description:
- Enter or exit maintenance mode.
choices:
- present
- absent
default: present
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Enter VSAN-Compliant Maintenance Mode
vmware_maintenancemode:
hostname: vc_host
username: vc_user
password: vc_pass
esxi_hostname: esxi.host.example
vsan: ensureObjectAccessibility
evacuate: yes
timeout: 3600
state: present
'''
RETURN = '''
hostsystem:
description: Name of vim reference
returned: always
type: string
sample: "'vim.HostSystem:host-236'"
hostname:
description: Name of host in vCenter
returned: always
type: string
sample: "esxi.local.domain"
status:
description: Action taken
returned: always
type: string
sample: "ENTER"
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, TaskError, vmware_argument_spec, wait_for_task
from ansible.module_utils._text import to_native
class VmwareMaintenanceMgr(PyVmomi):
def __init__(self, module):
super(VmwareMaintenanceMgr, self).__init__(module)
self.esxi_hostname = self.module.params.get('esxi_hostname')
self.vsan = self.module.params.get('vsan', None)
self.host = self.find_hostsystem_by_name(host_name=self.esxi_hostname)
if not self.host:
self.module.fail_json(msg='Host %s not found in vCenter' % self.esxi_hostname)
def EnterMaintenanceMode(self):
if self.host.runtime.inMaintenanceMode:
self.module.exit_json(changed=False,
hostsystem=str(self.host),
hostname=self.esxi_hostname,
status='NO_ACTION',
msg='Host %s already in maintenance mode' % self.esxi_hostname)
spec = vim.host.MaintenanceSpec()
if self.vsan:
spec.vsanMode = vim.vsan.host.DecommissionMode()
spec.vsanMode.objectAction = self.vsan
try:
task = self.host.EnterMaintenanceMode_Task(self.module.params['timeout'],
self.module.params['evacuate'],
spec)
success, result = wait_for_task(task)
self.module.exit_json(changed=success,
hostsystem=str(self.host),
hostname=self.esxi_hostname,
status='ENTER',
msg='Host %s entered maintenance mode' % self.esxi_hostname)
except TaskError as e:
self.module.fail_json(msg='Host %s failed to enter maintenance mode due to %s' % (self.esxi_hostname, to_native(e)))
def ExitMaintenanceMode(self):
if not self.host.runtime.inMaintenanceMode:
self.module.exit_json(changed=False,
hostsystem=str(self.host),
hostname=self.esxi_hostname,
status='NO_ACTION',
msg='Host %s not in maintenance mode' % self.esxi_hostname)
try:
task = self.host.ExitMaintenanceMode_Task(self.module.params['timeout'])
success, result = wait_for_task(task)
self.module.exit_json(changed=success,
hostsystem=str(self.host),
hostname=self.esxi_hostname,
status='EXIT',
msg='Host %s exited maintenance mode' % self.esxi_hostname)
except TaskError as e:
self.module.fail_json(msg='Host %s failed to exit maintenance mode due to %s' % (self.esxi_hostname, to_native(e)))
def main():
spec = vmware_argument_spec()
spec.update(dict(esxi_hostname=dict(type='str', required=True),
vsan=dict(type='str', choices=['ensureObjectAccessibility',
'evacuateAllData',
'noAction']
),
evacuate=dict(type='bool', default=False),
timeout=dict(default=0, type='int'),
state=dict(required=False, default='present', choices=['present', 'absent'])
)
)
module = AnsibleModule(argument_spec=spec)
host_maintenance_mgr = VmwareMaintenanceMgr(module=module)
if module.params['state'] == 'present':
host_maintenance_mgr.EnterMaintenanceMode()
elif module.params['state'] == 'absent':
host_maintenance_mgr.ExitMaintenanceMode()
if __name__ == '__main__':
main()
|
gpl-3.0
|
vrv/tensorflow
|
tensorflow/contrib/cloud/python/ops/bigquery_reader_ops_test.py
|
26
|
9667
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for BigQueryReader Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import re
import socket
import threading
from six.moves import SimpleHTTPServer
from six.moves import socketserver
from tensorflow.contrib.cloud.python.ops import bigquery_reader_ops as cloud
from tensorflow.core.example import example_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
_PROJECT = "test-project"
_DATASET = "test-dataset"
_TABLE = "test-table"
# List representation of the test rows in the 'test-table' in BigQuery.
# The schema for each row is: [int64, string, float].
# The values for rows are generated such that some columns have null values. The
# general formula here is:
# - The int64 column is present in every row.
# - The string column is only available in even rows.
# - The float column is only available in every third row.
_ROWS = [[0, "s_0", 0.1], [1, None, None], [2, "s_2", None], [3, None, 3.1],
[4, "s_4", None], [5, None, None], [6, "s_6", 6.1], [7, None, None],
[8, "s_8", None], [9, None, 9.1]]
# Schema for 'test-table'.
# The schema currently has three columns: int64, string, and float
_SCHEMA = {
"kind": "bigquery#table",
"id": "test-project:test-dataset.test-table",
"schema": {
"fields": [{
"name": "int64_col",
"type": "INTEGER",
"mode": "NULLABLE"
}, {
"name": "string_col",
"type": "STRING",
"mode": "NULLABLE"
}, {
"name": "float_col",
"type": "FLOAT",
"mode": "NULLABLE"
}]
}
}
def _ConvertRowToExampleProto(row):
"""Converts the input row to an Example proto.
Args:
row: Input Row instance.
Returns:
An Example proto initialized with row values.
"""
example = example_pb2.Example()
example.features.feature["int64_col"].int64_list.value.append(row[0])
if row[1] is not None:
example.features.feature["string_col"].bytes_list.value.append(
compat.as_bytes(row[1]))
if row[2] is not None:
example.features.feature["float_col"].float_list.value.append(row[2])
return example
class IPv6TCPServer(socketserver.TCPServer):
address_family = socket.AF_INET6
class FakeBigQueryServer(threading.Thread):
"""Fake http server to return schema and data for sample table."""
def __init__(self, address, port):
"""Creates a FakeBigQueryServer.
Args:
address: Server address
port: Server port. Pass 0 to automatically pick an empty port.
"""
threading.Thread.__init__(self)
self.handler = BigQueryRequestHandler
try:
self.httpd = socketserver.TCPServer((address, port), self.handler)
self.host_port = "{}:{}".format(*self.httpd.server_address)
except IOError:
self.httpd = IPv6TCPServer((address, port), self.handler)
self.host_port = "[{}]:{}".format(*self.httpd.server_address)
def run(self):
self.httpd.serve_forever()
def shutdown(self):
self.httpd.shutdown()
self.httpd.socket.close()
class BigQueryRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Responds to BigQuery HTTP requests.
Attributes:
num_rows: num_rows in the underlying table served by this class.
"""
num_rows = 0
def do_GET(self):
if "data?maxResults=" not in self.path:
# This is a schema request.
_SCHEMA["numRows"] = self.num_rows
response = json.dumps(_SCHEMA)
else:
# This is a data request.
#
# Extract max results and start index.
max_results = int(re.findall(r"maxResults=(\d+)", self.path)[0])
start_index = int(re.findall(r"startIndex=(\d+)", self.path)[0])
# Send the rows as JSON.
rows = []
for row in _ROWS[start_index:start_index + max_results]:
row_json = {
"f": [{
"v": str(row[0])
}, {
"v": str(row[1]) if row[1] is not None else None
}, {
"v": str(row[2]) if row[2] is not None else None
}]
}
rows.append(row_json)
response = json.dumps({
"kind": "bigquery#table",
"id": "test-project:test-dataset.test-table",
"rows": rows
})
self.send_response(200)
self.end_headers()
self.wfile.write(compat.as_bytes(response))
def _SetUpQueue(reader):
"""Sets up a queue for a reader."""
queue = data_flow_ops.FIFOQueue(8, [types_pb2.DT_STRING], shapes=())
key, value = reader.read(queue)
queue.enqueue_many(reader.partitions()).run()
queue.close().run()
return key, value
class BigQueryReaderOpsTest(test.TestCase):
def setUp(self):
super(BigQueryReaderOpsTest, self).setUp()
self.server = FakeBigQueryServer("localhost", 0)
self.server.start()
logging.info("server address is %s", self.server.host_port)
# An override to bypass the GCP auth token retrieval logic
# in google_auth_provider.cc.
os.environ["GOOGLE_AUTH_TOKEN_FOR_TESTING"] = "not-used"
def tearDown(self):
self.server.shutdown()
super(BigQueryReaderOpsTest, self).tearDown()
def _ReadAndCheckRowsUsingFeatures(self, num_rows):
self.server.handler.num_rows = num_rows
with self.test_session() as sess:
feature_configs = {
"int64_col":
parsing_ops.FixedLenFeature(
[1], dtype=dtypes.int64),
"string_col":
parsing_ops.FixedLenFeature(
[1], dtype=dtypes.string, default_value="s_default"),
}
reader = cloud.BigQueryReader(
project_id=_PROJECT,
dataset_id=_DATASET,
table_id=_TABLE,
num_partitions=4,
features=feature_configs,
timestamp_millis=1,
test_end_point=self.server.host_port)
key, value = _SetUpQueue(reader)
seen_rows = []
features = parsing_ops.parse_example(
array_ops.reshape(value, [1]), feature_configs)
for _ in range(num_rows):
int_value, str_value = sess.run(
[features["int64_col"], features["string_col"]])
# Parse values returned from the session.
self.assertEqual(int_value.shape, (1, 1))
self.assertEqual(str_value.shape, (1, 1))
int64_col = int_value[0][0]
string_col = str_value[0][0]
seen_rows.append(int64_col)
# Compare.
expected_row = _ROWS[int64_col]
self.assertEqual(int64_col, expected_row[0])
self.assertEqual(
compat.as_str(string_col), ("s_%d" % int64_col) if expected_row[1]
else "s_default")
self.assertItemsEqual(seen_rows, range(num_rows))
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
def testReadingSingleRowUsingFeatures(self):
self._ReadAndCheckRowsUsingFeatures(1)
def testReadingMultipleRowsUsingFeatures(self):
self._ReadAndCheckRowsUsingFeatures(10)
def testReadingMultipleRowsUsingColumns(self):
num_rows = 10
self.server.handler.num_rows = num_rows
with self.test_session() as sess:
reader = cloud.BigQueryReader(
project_id=_PROJECT,
dataset_id=_DATASET,
table_id=_TABLE,
num_partitions=4,
columns=["int64_col", "float_col", "string_col"],
timestamp_millis=1,
test_end_point=self.server.host_port)
key, value = _SetUpQueue(reader)
seen_rows = []
for row_index in range(num_rows):
returned_row_id, example_proto = sess.run([key, value])
example = example_pb2.Example()
example.ParseFromString(example_proto)
self.assertIn("int64_col", example.features.feature)
feature = example.features.feature["int64_col"]
self.assertEqual(len(feature.int64_list.value), 1)
int64_col = feature.int64_list.value[0]
seen_rows.append(int64_col)
# Create our expected Example.
expected_example = example_pb2.Example()
expected_example = _ConvertRowToExampleProto(_ROWS[int64_col])
# Compare.
self.assertProtoEquals(example, expected_example)
self.assertEqual(row_index, int(returned_row_id))
self.assertItemsEqual(seen_rows, range(num_rows))
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
if __name__ == "__main__":
test.main()
|
apache-2.0
|
Universal-Model-Converter/UMC3.0a
|
data/Python/x86/Lib/lib2to3/tests/test_util.py
|
126
|
21245
|
""" Test suite for the code in fixer_util """
# Testing imports
from . import support
# Python imports
import os.path
# Local imports
from lib2to3.pytree import Node, Leaf
from lib2to3 import fixer_util
from lib2to3.fixer_util import Attr, Name, Call, Comma
from lib2to3.pgen2 import token
def parse(code, strip_levels=0):
# The topmost node is file_input, which we don't care about.
# The next-topmost node is a *_stmt node, which we also don't care about
tree = support.parse_string(code)
for i in range(strip_levels):
tree = tree.children[0]
tree.parent = None
return tree
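# e.g. parse("(a, b)", strip_levels=2) peels off file_input and simple_stmt,
# returning the bare expression node -- the shape is_tuple()/is_list() expect.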
class MacroTestCase(support.TestCase):
def assertStr(self, node, string):
if isinstance(node, (tuple, list)):
node = Node(fixer_util.syms.simple_stmt, node)
self.assertEqual(str(node), string)
class Test_is_tuple(support.TestCase):
def is_tuple(self, string):
return fixer_util.is_tuple(parse(string, strip_levels=2))
def test_valid(self):
self.assertTrue(self.is_tuple("(a, b)"))
self.assertTrue(self.is_tuple("(a, (b, c))"))
self.assertTrue(self.is_tuple("((a, (b, c)),)"))
self.assertTrue(self.is_tuple("(a,)"))
self.assertTrue(self.is_tuple("()"))
def test_invalid(self):
self.assertFalse(self.is_tuple("(a)"))
self.assertFalse(self.is_tuple("('foo') % (b, c)"))
class Test_is_list(support.TestCase):
def is_list(self, string):
return fixer_util.is_list(parse(string, strip_levels=2))
def test_valid(self):
self.assertTrue(self.is_list("[]"))
self.assertTrue(self.is_list("[a]"))
self.assertTrue(self.is_list("[a, b]"))
self.assertTrue(self.is_list("[a, [b, c]]"))
self.assertTrue(self.is_list("[[a, [b, c]],]"))
def test_invalid(self):
self.assertFalse(self.is_list("[]+[]"))
class Test_Attr(MacroTestCase):
def test(self):
call = parse("foo()", strip_levels=2)
self.assertStr(Attr(Name("a"), Name("b")), "a.b")
self.assertStr(Attr(call, Name("b")), "foo().b")
def test_returns(self):
attr = Attr(Name("a"), Name("b"))
self.assertEqual(type(attr), list)
class Test_Name(MacroTestCase):
def test(self):
self.assertStr(Name("a"), "a")
self.assertStr(Name("foo.foo().bar"), "foo.foo().bar")
self.assertStr(Name("a", prefix="b"), "ba")
class Test_Call(MacroTestCase):
def _Call(self, name, args=None, prefix=None):
"""Help the next test"""
children = []
if isinstance(args, list):
for arg in args:
children.append(arg)
children.append(Comma())
children.pop()
return Call(Name(name), children, prefix)
def test(self):
kids = [None,
[Leaf(token.NUMBER, 1), Leaf(token.NUMBER, 2),
Leaf(token.NUMBER, 3)],
[Leaf(token.NUMBER, 1), Leaf(token.NUMBER, 3),
Leaf(token.NUMBER, 2), Leaf(token.NUMBER, 4)],
[Leaf(token.STRING, "b"), Leaf(token.STRING, "j", prefix=" ")]
]
self.assertStr(self._Call("A"), "A()")
self.assertStr(self._Call("b", kids[1]), "b(1,2,3)")
self.assertStr(self._Call("a.b().c", kids[2]), "a.b().c(1,3,2,4)")
self.assertStr(self._Call("d", kids[3], prefix=" "), " d(b, j)")
class Test_does_tree_import(support.TestCase):
def _find_bind_rec(self, name, node):
# Search a tree for a binding -- used to find the starting
# point for these tests.
c = fixer_util.find_binding(name, node)
if c: return c
for child in node.children:
c = self._find_bind_rec(name, child)
if c: return c
def does_tree_import(self, package, name, string):
node = parse(string)
# Find the binding of start -- that's what we'll go from
node = self._find_bind_rec('start', node)
return fixer_util.does_tree_import(package, name, node)
def try_with(self, string):
failing_tests = (("a", "a", "from a import b"),
("a.d", "a", "from a.d import b"),
("d.a", "a", "from d.a import b"),
(None, "a", "import b"),
(None, "a", "import b, c, d"))
for package, name, import_ in failing_tests:
n = self.does_tree_import(package, name, import_ + "\n" + string)
self.assertFalse(n)
n = self.does_tree_import(package, name, string + "\n" + import_)
self.assertFalse(n)
passing_tests = (("a", "a", "from a import a"),
("x", "a", "from x import a"),
("x", "a", "from x import b, c, a, d"),
("x.b", "a", "from x.b import a"),
("x.b", "a", "from x.b import b, c, a, d"),
(None, "a", "import a"),
(None, "a", "import b, c, a, d"))
for package, name, import_ in passing_tests:
n = self.does_tree_import(package, name, import_ + "\n" + string)
self.assertTrue(n)
n = self.does_tree_import(package, name, string + "\n" + import_)
self.assertTrue(n)
def test_in_function(self):
self.try_with("def foo():\n\tbar.baz()\n\tstart=3")
class Test_find_binding(support.TestCase):
def find_binding(self, name, string, package=None):
return fixer_util.find_binding(name, parse(string), package)
def test_simple_assignment(self):
self.assertTrue(self.find_binding("a", "a = b"))
self.assertTrue(self.find_binding("a", "a = [b, c, d]"))
self.assertTrue(self.find_binding("a", "a = foo()"))
self.assertTrue(self.find_binding("a", "a = foo().foo.foo[6][foo]"))
self.assertFalse(self.find_binding("a", "foo = a"))
self.assertFalse(self.find_binding("a", "foo = (a, b, c)"))
def test_tuple_assignment(self):
self.assertTrue(self.find_binding("a", "(a,) = b"))
self.assertTrue(self.find_binding("a", "(a, b, c) = [b, c, d]"))
self.assertTrue(self.find_binding("a", "(c, (d, a), b) = foo()"))
self.assertTrue(self.find_binding("a", "(a, b) = foo().foo[6][foo]"))
self.assertFalse(self.find_binding("a", "(foo, b) = (b, a)"))
self.assertFalse(self.find_binding("a", "(foo, (b, c)) = (a, b, c)"))
def test_list_assignment(self):
self.assertTrue(self.find_binding("a", "[a] = b"))
self.assertTrue(self.find_binding("a", "[a, b, c] = [b, c, d]"))
self.assertTrue(self.find_binding("a", "[c, [d, a], b] = foo()"))
self.assertTrue(self.find_binding("a", "[a, b] = foo().foo[a][foo]"))
self.assertFalse(self.find_binding("a", "[foo, b] = (b, a)"))
self.assertFalse(self.find_binding("a", "[foo, [b, c]] = (a, b, c)"))
def test_invalid_assignments(self):
self.assertFalse(self.find_binding("a", "foo.a = 5"))
self.assertFalse(self.find_binding("a", "foo[a] = 5"))
self.assertFalse(self.find_binding("a", "foo(a) = 5"))
self.assertFalse(self.find_binding("a", "foo(a, b) = 5"))
def test_simple_import(self):
self.assertTrue(self.find_binding("a", "import a"))
self.assertTrue(self.find_binding("a", "import b, c, a, d"))
self.assertFalse(self.find_binding("a", "import b"))
self.assertFalse(self.find_binding("a", "import b, c, d"))
def test_from_import(self):
self.assertTrue(self.find_binding("a", "from x import a"))
self.assertTrue(self.find_binding("a", "from a import a"))
self.assertTrue(self.find_binding("a", "from x import b, c, a, d"))
self.assertTrue(self.find_binding("a", "from x.b import a"))
self.assertTrue(self.find_binding("a", "from x.b import b, c, a, d"))
self.assertFalse(self.find_binding("a", "from a import b"))
self.assertFalse(self.find_binding("a", "from a.d import b"))
self.assertFalse(self.find_binding("a", "from d.a import b"))
def test_import_as(self):
self.assertTrue(self.find_binding("a", "import b as a"))
self.assertTrue(self.find_binding("a", "import b as a, c, a as f, d"))
self.assertFalse(self.find_binding("a", "import a as f"))
self.assertFalse(self.find_binding("a", "import b, c as f, d as e"))
def test_from_import_as(self):
self.assertTrue(self.find_binding("a", "from x import b as a"))
self.assertTrue(self.find_binding("a", "from x import g as a, d as b"))
self.assertTrue(self.find_binding("a", "from x.b import t as a"))
self.assertTrue(self.find_binding("a", "from x.b import g as a, d"))
self.assertFalse(self.find_binding("a", "from a import b as t"))
self.assertFalse(self.find_binding("a", "from a.d import b as t"))
self.assertFalse(self.find_binding("a", "from d.a import b as t"))
def test_simple_import_with_package(self):
self.assertTrue(self.find_binding("b", "import b"))
self.assertTrue(self.find_binding("b", "import b, c, d"))
self.assertFalse(self.find_binding("b", "import b", "b"))
self.assertFalse(self.find_binding("b", "import b, c, d", "c"))
def test_from_import_with_package(self):
self.assertTrue(self.find_binding("a", "from x import a", "x"))
self.assertTrue(self.find_binding("a", "from a import a", "a"))
self.assertTrue(self.find_binding("a", "from x import *", "x"))
self.assertTrue(self.find_binding("a", "from x import b, c, a, d", "x"))
self.assertTrue(self.find_binding("a", "from x.b import a", "x.b"))
self.assertTrue(self.find_binding("a", "from x.b import *", "x.b"))
self.assertTrue(self.find_binding("a", "from x.b import b, c, a, d", "x.b"))
self.assertFalse(self.find_binding("a", "from a import b", "a"))
self.assertFalse(self.find_binding("a", "from a.d import b", "a.d"))
self.assertFalse(self.find_binding("a", "from d.a import b", "a.d"))
self.assertFalse(self.find_binding("a", "from x.y import *", "a.b"))
def test_import_as_with_package(self):
self.assertFalse(self.find_binding("a", "import b.c as a", "b.c"))
self.assertFalse(self.find_binding("a", "import a as f", "f"))
self.assertFalse(self.find_binding("a", "import a as f", "a"))
def test_from_import_as_with_package(self):
# Because it would take a lot of special-case code in the fixers
# to deal with from foo import bar as baz, we'll simply always
        # fail if there is a "from ... import ... as ..."
self.assertFalse(self.find_binding("a", "from x import b as a", "x"))
self.assertFalse(self.find_binding("a", "from x import g as a, d as b", "x"))
self.assertFalse(self.find_binding("a", "from x.b import t as a", "x.b"))
self.assertFalse(self.find_binding("a", "from x.b import g as a, d", "x.b"))
self.assertFalse(self.find_binding("a", "from a import b as t", "a"))
self.assertFalse(self.find_binding("a", "from a import b as t", "b"))
self.assertFalse(self.find_binding("a", "from a import b as t", "t"))
def test_function_def(self):
self.assertTrue(self.find_binding("a", "def a(): pass"))
self.assertTrue(self.find_binding("a", "def a(b, c, d): pass"))
self.assertTrue(self.find_binding("a", "def a(): b = 7"))
self.assertFalse(self.find_binding("a", "def d(b, (c, a), e): pass"))
self.assertFalse(self.find_binding("a", "def d(a=7): pass"))
self.assertFalse(self.find_binding("a", "def d(a): pass"))
self.assertFalse(self.find_binding("a", "def d(): a = 7"))
s = """
def d():
def a():
pass"""
self.assertFalse(self.find_binding("a", s))
def test_class_def(self):
self.assertTrue(self.find_binding("a", "class a: pass"))
self.assertTrue(self.find_binding("a", "class a(): pass"))
self.assertTrue(self.find_binding("a", "class a(b): pass"))
self.assertTrue(self.find_binding("a", "class a(b, c=8): pass"))
self.assertFalse(self.find_binding("a", "class d: pass"))
self.assertFalse(self.find_binding("a", "class d(a): pass"))
self.assertFalse(self.find_binding("a", "class d(b, a=7): pass"))
self.assertFalse(self.find_binding("a", "class d(b, *a): pass"))
self.assertFalse(self.find_binding("a", "class d(b, **a): pass"))
self.assertFalse(self.find_binding("a", "class d: a = 7"))
s = """
class d():
class a():
pass"""
self.assertFalse(self.find_binding("a", s))
def test_for(self):
self.assertTrue(self.find_binding("a", "for a in r: pass"))
self.assertTrue(self.find_binding("a", "for a, b in r: pass"))
self.assertTrue(self.find_binding("a", "for (a, b) in r: pass"))
self.assertTrue(self.find_binding("a", "for c, (a,) in r: pass"))
self.assertTrue(self.find_binding("a", "for c, (a, b) in r: pass"))
self.assertTrue(self.find_binding("a", "for c in r: a = c"))
self.assertFalse(self.find_binding("a", "for c in a: pass"))
def test_for_nested(self):
s = """
for b in r:
for a in b:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for a, c in b:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for (a, c) in b:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for (a,) in b:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for c, (a, d) in b:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for c in b:
a = 7"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for c in b:
d = a"""
self.assertFalse(self.find_binding("a", s))
s = """
for b in r:
for c in a:
d = 7"""
self.assertFalse(self.find_binding("a", s))
def test_if(self):
self.assertTrue(self.find_binding("a", "if b in r: a = c"))
self.assertFalse(self.find_binding("a", "if a in r: d = e"))
def test_if_nested(self):
s = """
if b in r:
if c in d:
a = c"""
self.assertTrue(self.find_binding("a", s))
s = """
if b in r:
if c in d:
c = a"""
self.assertFalse(self.find_binding("a", s))
def test_while(self):
self.assertTrue(self.find_binding("a", "while b in r: a = c"))
self.assertFalse(self.find_binding("a", "while a in r: d = e"))
def test_while_nested(self):
s = """
while b in r:
while c in d:
a = c"""
self.assertTrue(self.find_binding("a", s))
s = """
while b in r:
while c in d:
c = a"""
self.assertFalse(self.find_binding("a", s))
def test_try_except(self):
s = """
try:
a = 6
except:
b = 8"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except KeyError:
pass
except:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except:
b = 6"""
self.assertFalse(self.find_binding("a", s))
def test_try_except_nested(self):
s = """
try:
try:
a = 6
except:
pass
except:
b = 8"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except:
try:
a = 6
except:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except:
try:
pass
except:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
try:
b = 8
except KeyError:
pass
except:
a = 6
except:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
pass
except:
try:
b = 8
except KeyError:
pass
except:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except:
b = 6"""
self.assertFalse(self.find_binding("a", s))
s = """
try:
try:
b = 8
except:
c = d
except:
try:
b = 6
except:
t = 8
except:
o = y"""
self.assertFalse(self.find_binding("a", s))
def test_try_except_finally(self):
s = """
try:
c = 6
except:
b = 8
finally:
a = 9"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
finally:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
finally:
b = 6"""
self.assertFalse(self.find_binding("a", s))
s = """
try:
b = 8
except:
b = 9
finally:
b = 6"""
self.assertFalse(self.find_binding("a", s))
def test_try_except_finally_nested(self):
s = """
try:
c = 6
except:
b = 8
finally:
try:
a = 9
except:
b = 9
finally:
c = 9"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
finally:
try:
pass
finally:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
finally:
try:
b = 6
finally:
b = 7"""
self.assertFalse(self.find_binding("a", s))
class Test_touch_import(support.TestCase):
def test_after_docstring(self):
node = parse('"""foo"""\nbar()')
fixer_util.touch_import(None, "foo", node)
self.assertEqual(str(node), '"""foo"""\nimport foo\nbar()\n\n')
def test_after_imports(self):
node = parse('"""foo"""\nimport bar\nbar()')
fixer_util.touch_import(None, "foo", node)
self.assertEqual(str(node), '"""foo"""\nimport bar\nimport foo\nbar()\n\n')
def test_beginning(self):
node = parse('bar()')
fixer_util.touch_import(None, "foo", node)
self.assertEqual(str(node), 'import foo\nbar()\n\n')
def test_from_import(self):
node = parse('bar()')
fixer_util.touch_import("html", "escape", node)
self.assertEqual(str(node), 'from html import escape\nbar()\n\n')
def test_name_import(self):
node = parse('bar()')
fixer_util.touch_import(None, "cgi", node)
self.assertEqual(str(node), 'import cgi\nbar()\n\n')
class Test_find_indentation(support.TestCase):
def test_nothing(self):
fi = fixer_util.find_indentation
node = parse("node()")
self.assertEqual(fi(node), u"")
node = parse("")
self.assertEqual(fi(node), u"")
def test_simple(self):
fi = fixer_util.find_indentation
node = parse("def f():\n x()")
self.assertEqual(fi(node), u"")
self.assertEqual(fi(node.children[0].children[4].children[2]), u" ")
node = parse("def f():\n x()\n y()")
self.assertEqual(fi(node.children[0].children[4].children[4]), u" ")
|
mit
|
calamityman/ansible-modules-extras
|
database/postgresql/postgresql_ext.py
|
26
|
5791
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: postgresql_ext
short_description: Add or remove PostgreSQL extensions from a database.
description:
- Add or remove PostgreSQL extensions from a database.
version_added: "1.9"
options:
name:
description:
- name of the extension to add or remove
required: true
default: null
db:
description:
- name of the database to add or remove the extension to/from
required: true
default: null
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
default: localhost
port:
description:
- Database port to connect to.
required: false
default: 5432
state:
description:
- The database extension state
required: false
default: present
choices: [ "present", "absent" ]
notes:
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module.
requirements: [ psycopg2 ]
author: "Daniel Schep (@dschep)"
'''
EXAMPLES = '''
# Adds postgis to the database "acme"
- postgresql_ext: name=postgis db=acme
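# Removes postgis from the database "acme" (an illustrative sketch based on
# the documented "state" option, not from the original examples)
- postgresql_ext: name=postgis db=acme state=absent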
'''
try:
import psycopg2
import psycopg2.extras
except ImportError:
postgresqldb_found = False
else:
postgresqldb_found = True
class NotSupportedError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def ext_exists(cursor, ext):
query = "SELECT * FROM pg_extension WHERE extname=%(ext)s"
cursor.execute(query, {'ext': ext})
return cursor.rowcount == 1
def ext_delete(cursor, ext):
if ext_exists(cursor, ext):
query = "DROP EXTENSION \"%s\"" % ext
cursor.execute(query)
return True
else:
return False
def ext_create(cursor, ext):
if not ext_exists(cursor, ext):
query = 'CREATE EXTENSION "%s"' % ext
cursor.execute(query)
return True
else:
return False
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default="postgres"),
login_password=dict(default=""),
login_host=dict(default=""),
port=dict(default="5432"),
db=dict(required=True),
ext=dict(required=True, aliases=['name']),
state=dict(default="present", choices=["absent", "present"]),
),
supports_check_mode = True
)
if not postgresqldb_found:
module.fail_json(msg="the python psycopg2 module is required")
db = module.params["db"]
ext = module.params["ext"]
port = module.params["port"]
state = module.params["state"]
changed = False
    # To use default values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host":"host",
"login_user":"user",
"login_password":"password",
"port":"port"
}
kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
if k in params_map and v != '' )
try:
db_connection = psycopg2.connect(database=db, **kw)
        # Enable autocommit so we can create extensions
if psycopg2.__version__ >= '2.4.2':
db_connection.autocommit = True
else:
db_connection.set_isolation_level(psycopg2
.extensions
.ISOLATION_LEVEL_AUTOCOMMIT)
cursor = db_connection.cursor(
cursor_factory=psycopg2.extras.DictCursor)
except Exception, e:
module.fail_json(msg="unable to connect to database: %s" % e)
try:
if module.check_mode:
if state == "present":
changed = not ext_exists(cursor, ext)
elif state == "absent":
changed = ext_exists(cursor, ext)
else:
if state == "absent":
changed = ext_delete(cursor, ext)
elif state == "present":
changed = ext_create(cursor, ext)
except NotSupportedError, e:
module.fail_json(msg=str(e))
except Exception, e:
module.fail_json(msg="Database query failed: %s" % e)
module.exit_json(changed=changed, db=db, ext=ext)
# import module snippets
from ansible.module_utils.basic import *
main()
|
gpl-3.0
|
kevinsung/OpenFermion
|
src/openfermion/utils/_testing_utils.py
|
1
|
13228
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions useful for tests."""
import collections
import itertools
import numpy
from scipy.linalg import qr
from openfermion.ops import (DiagonalCoulombHamiltonian,
InteractionOperator,
QuadraticHamiltonian,
QubitOperator)
def random_qubit_operator(n_qubits=16,
max_num_terms=16,
max_many_body_order=16,
seed=None):
prng = numpy.random.RandomState(seed)
op = QubitOperator()
num_terms = prng.randint(1, max_num_terms+1)
for _ in range(num_terms):
many_body_order = prng.randint(max_many_body_order+1)
term = []
for _ in range(many_body_order):
index = prng.randint(n_qubits)
action = prng.choice(('X', 'Y', 'Z'))
term.append((index, action))
coefficient = prng.randn()
op += QubitOperator(term, coefficient)
return op
def haar_random_vector(n, seed=None):
"""Generate an n dimensional Haar randomd vector."""
if seed is not None:
numpy.random.seed(seed)
vector = numpy.random.randn(n).astype(complex)
vector += 1.j * numpy.random.randn(n).astype(complex)
normalization = numpy.sqrt(vector.dot(numpy.conjugate(vector)))
return vector / normalization
def random_antisymmetric_matrix(n, real=False, seed=None):
"""Generate a random n x n antisymmetric matrix."""
if seed is not None:
numpy.random.seed(seed)
if real:
rand_mat = numpy.random.randn(n, n)
else:
rand_mat = numpy.random.randn(n, n) + 1.j * numpy.random.randn(n, n)
antisymmetric_mat = rand_mat - rand_mat.T
return antisymmetric_mat
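# Illustrative check (a sketch, not part of the original module): an
# antisymmetric matrix satisfies A == -A.T.
#     A = random_antisymmetric_matrix(4, seed=1)
#     assert numpy.allclose(A, -A.T)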
def random_diagonal_coulomb_hamiltonian(n_qubits, real=False, seed=None):
"""Generate a random instance of DiagonalCoulombHamiltonian.
Args:
n_qubits: The number of qubits
        real: Whether to use only real numbers in the one-body term
        seed: A random number generator seed.
    """
if seed is not None:
numpy.random.seed(seed)
one_body = random_hermitian_matrix(n_qubits, real=real)
two_body = random_hermitian_matrix(n_qubits, real=True)
constant = numpy.random.randn()
return DiagonalCoulombHamiltonian(one_body, two_body, constant)
def random_hermitian_matrix(n, real=False, seed=None):
"""Generate a random n x n Hermitian matrix."""
if seed is not None:
numpy.random.seed(seed)
if real:
rand_mat = numpy.random.randn(n, n)
else:
rand_mat = numpy.random.randn(n, n) + 1.j * numpy.random.randn(n, n)
hermitian_mat = rand_mat + rand_mat.T.conj()
return hermitian_mat
def random_interaction_operator(
n_orbitals, expand_spin=False, real=True, seed=None):
"""Generate a random instance of InteractionOperator.
Args:
n_orbitals: The number of orbitals.
expand_spin: Whether to expand each orbital symmetrically into two
spin orbitals. Note that if this option is set to True, then
the total number of orbitals will be doubled.
real: Whether to use only real numbers.
seed: A random number generator seed.
"""
if seed is not None:
numpy.random.seed(seed)
if real:
dtype = float
else:
dtype = complex
# The constant has to be real.
constant = numpy.random.randn()
# The one-body tensor is a random Hermitian matrix.
one_body_coefficients = random_hermitian_matrix(n_orbitals, real)
# Generate random two-body coefficients.
two_body_coefficients = numpy.zeros((n_orbitals, n_orbitals,
n_orbitals, n_orbitals), dtype)
for p, q, r, s in itertools.product(range(n_orbitals), repeat=4):
coeff = numpy.random.randn()
if not real and len(set([p,q,r,s])) >= 3:
coeff += 1.j * numpy.random.randn()
# Four point symmetry.
two_body_coefficients[p, q, r, s] = coeff
two_body_coefficients[q, p, s, r] = coeff
two_body_coefficients[s, r, q, p] = coeff.conjugate()
two_body_coefficients[r, s, p, q] = coeff.conjugate()
# Eight point symmetry.
if real:
two_body_coefficients[r, q, p, s] = coeff
two_body_coefficients[p, s, r, q] = coeff
two_body_coefficients[s, p, q, r] = coeff
two_body_coefficients[q, r, s, p] = coeff
# If requested, expand to spin orbitals.
if expand_spin:
n_spin_orbitals = 2 * n_orbitals
# Expand one-body tensor.
one_body_coefficients = numpy.kron(one_body_coefficients, numpy.eye(2))
# Expand two-body tensor.
new_two_body_coefficients = numpy.zeros((
n_spin_orbitals, n_spin_orbitals,
n_spin_orbitals, n_spin_orbitals), dtype=complex)
for p, q, r, s in itertools.product(range(n_orbitals), repeat=4):
coefficient = two_body_coefficients[p, q, r, s]
# Mixed spin.
new_two_body_coefficients[2 * p, 2 * q + 1, 2 * r + 1, 2 * s] = (
coefficient)
new_two_body_coefficients[2 * p + 1, 2 * q, 2 * r, 2 * s + 1] = (
coefficient)
# Same spin.
new_two_body_coefficients[2 * p, 2 * q, 2 * r, 2 * s] = coefficient
new_two_body_coefficients[2 * p + 1, 2 * q + 1,
2 * r + 1, 2 * s + 1] = coefficient
two_body_coefficients = new_two_body_coefficients
# Create the InteractionOperator.
interaction_operator = InteractionOperator(
constant, one_body_coefficients, two_body_coefficients)
return interaction_operator
def random_quadratic_hamiltonian(n_orbitals,
conserves_particle_number=False,
real=False,
expand_spin=False,
seed=None):
"""Generate a random instance of QuadraticHamiltonian.
Args:
n_orbitals(int): the number of orbitals
conserves_particle_number(bool): whether the returned Hamiltonian
should conserve particle number
real(bool): whether to use only real numbers
expand_spin: Whether to expand each orbital symmetrically into two
spin orbitals. Note that if this option is set to True, then
            the total number of orbitals will be doubled.
        seed: A random number generator seed.
    Returns:
QuadraticHamiltonian
"""
if seed is not None:
numpy.random.seed(seed)
constant = numpy.random.randn()
chemical_potential = numpy.random.randn()
hermitian_mat = random_hermitian_matrix(n_orbitals, real)
if conserves_particle_number:
antisymmetric_mat = None
else:
antisymmetric_mat = random_antisymmetric_matrix(n_orbitals, real)
if expand_spin:
hermitian_mat = numpy.kron(hermitian_mat, numpy.eye(2))
if antisymmetric_mat is not None:
antisymmetric_mat = numpy.kron(antisymmetric_mat, numpy.eye(2))
return QuadraticHamiltonian(hermitian_mat, antisymmetric_mat,
constant, chemical_potential)
def random_unitary_matrix(n, real=False, seed=None):
"""Obtain a random n x n unitary matrix."""
if seed is not None:
numpy.random.seed(seed)
if real:
rand_mat = numpy.random.randn(n, n)
else:
rand_mat = numpy.random.randn(n, n) + 1.j * numpy.random.randn(n, n)
Q, _ = qr(rand_mat)
return Q
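# Illustrative check (a sketch, not part of the original module): a unitary
# matrix satisfies Q.conj().T.dot(Q) == I.
#     Q = random_unitary_matrix(4, seed=1)
#     assert numpy.allclose(Q.conj().T.dot(Q), numpy.eye(4))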
class EqualsTester(object):
"""Tests equality against user-provided disjoint equivalence groups."""
def __init__(self, test_case):
self.groups = [(_ClassUnknownToSubjects(),)]
self.test_case = test_case
def add_equality_group(self, *group_items):
"""Tries to add a disjoint equivalence group to the equality tester.
        This method asserts that items within the group must all be equal to
each other, but not equal to any items in other groups that have been
or will be added.
Args:
*group_items: The items making up the equivalence group.
Raises:
            AssertionError: Items within the group are not equal to each other, or
items in another group are equal to items within the new group,
or the items violate the equals-implies-same-hash rule.
"""
self.test_case.assertIsNotNone(group_items)
# Check that group items are equivalent to each other.
for v1, v2 in itertools.product(group_items, repeat=2):
# Binary operators should always work.
self.test_case.assertTrue(v1 == v2)
self.test_case.assertTrue(not v1 != v2)
# __eq__ and __ne__ should both be correct or not implemented.
self.test_case.assertTrue(
hasattr(v1, '__eq__') == hasattr(v1, '__ne__'))
# Careful: python2 int doesn't have __eq__ or __ne__.
if hasattr(v1, '__eq__'):
eq = v1.__eq__(v2)
ne = v1.__ne__(v2)
self.test_case.assertIn(
(eq, ne),
[(True, False),
(NotImplemented, False),
(NotImplemented, NotImplemented)])
# Check that this group's items don't overlap with other groups.
for other_group in self.groups:
for v1, v2 in itertools.product(group_items, other_group):
# Binary operators should always work.
self.test_case.assertTrue(not v1 == v2)
self.test_case.assertTrue(v1 != v2)
# __eq__ and __ne__ should both be correct or not implemented.
self.test_case.assertTrue(
hasattr(v1, '__eq__') == hasattr(v1, '__ne__'))
# Careful: python2 int doesn't have __eq__ or __ne__.
if hasattr(v1, '__eq__'):
eq = v1.__eq__(v2)
ne = v1.__ne__(v2)
self.test_case.assertIn(
(eq, ne),
[(False, True),
(NotImplemented, True),
(NotImplemented, NotImplemented)])
# Check that group items hash to the same thing, or are all unhashable.
hashes = [hash(v) if isinstance(v, collections.Hashable) else None
for v in group_items]
if len(set(hashes)) > 1:
examples = ((v1, h1, v2, h2)
for v1, h1 in zip(group_items, hashes)
for v2, h2 in zip(group_items, hashes)
if h1 != h2)
example = next(examples)
raise AssertionError(
'Items in the same group produced different hashes. '
'Example: hash({}) is {} but hash({}) is {}.'.format(*example))
# Remember this group, to enable disjoint checks vs later groups.
self.groups.append(group_items)
def make_equality_pair(self, factory):
"""Tries to add a disjoint (item, item) group to the equality tester.
Uses the factory method to produce two different objects containing
        equal items. Asserts that the two objects are equal, but not equal to
any items in other groups that have been or will be added. Adds the
pair as a group.
Args:
factory (Callable[[], Any]): A method for producing independent
copies of an item.
Raises:
            AssertionError: The factory produces items not equal to each other, or
                items in another group are equal to items from the factory, or
                the items violate the equals-implies-same-hash rule.
"""
self.add_equality_group(factory(), factory())
class _ClassUnknownToSubjects(object):
"""Equality methods should be able to deal with the unexpected."""
def __eq__(self, other):
return isinstance(other, _ClassUnknownToSubjects)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(_ClassUnknownToSubjects)
def module_importable(module):
"""Without importing it, returns whether python module is importable.
Args:
module (string): Name of module.
Returns:
bool
"""
import sys
if sys.version_info >= (3, 4):
from importlib import util
plug_spec = util.find_spec(module)
else:
import pkgutil
plug_spec = pkgutil.find_loader(module)
if plug_spec is None:
return False
else:
return True
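# Example usage (a sketch, not part of the original module;
# run_optional_tests is a hypothetical caller):
#     if module_importable('scipy'):
#         run_optional_tests()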
|
apache-2.0
|
DragonQuiz/MCEdit-Unified
|
albow/extended_widgets.py
|
2
|
10322
|
# -*- coding: UTF-8 -*-
# extended_widgets.py
# Moved albow related stuff from mceutils.
import resource_packs
from controls import ValueDisplay
from dialogs import alert, ask, Dialog
from controls import Button, Label, ValueButton, CheckBox, AttrRef
from widget import Widget
import root
from layout import Column, Row
from translate import _
from menu import Menu
from fields import FloatField, IntField, TextFieldWrapped, TextField
from datetime import timedelta, datetime
class HotkeyColumn(Widget):
is_gl_container = True
#-# Translation live update preparation
def __init__(self, items, keysColumn=None, buttonsColumn=None, item_spacing=None):
self.items = items
self.item_spacing = item_spacing
self.keysColumn = keysColumn
self.buttonsColumn = buttonsColumn
Widget.__init__(self)
self.buildWidgets()
def set_update_translation(self, v):
if v:
self.buildWidgets()
def buildWidgets(self):
keysColumn = self.keysColumn
buttonsColumn = self.buttonsColumn
items = self.items
item_spacing = self.item_spacing
if keysColumn is None or True:
keysColumn = []
if buttonsColumn is None or True:
buttonsColumn = []
labels = []
for w in self.subwidgets:
for _w in w.subwidgets:
w.remove(_w)
self.remove(w)
for t in items:
if len(t) == 3:
(hotkey, title, action) = t
tooltipText = None
else:
(hotkey, title, action, tooltipText) = t
if isinstance(title, (str, unicode)):
button = Button(title, action=action)
else:
button = ValueButton(ref=title, action=action, width=200)
button.anchor = self.anchor
label = Label(hotkey, width=100, margin=button.margin)
label.anchor = "wh"
label.height = button.height
labels.append(label)
if tooltipText:
button.tooltipText = tooltipText
keysColumn.append(label)
buttonsColumn.append(button)
self.buttons = list(buttonsColumn)
#.#
        if item_spacing is None:
buttonsColumn = Column(buttonsColumn)
else:
buttonsColumn = Column(buttonsColumn, spacing=item_spacing)
#.#
buttonsColumn.anchor = self.anchor
#.#
        if item_spacing is None:
keysColumn = Column(keysColumn)
else:
keysColumn = Column(keysColumn, spacing=item_spacing)
commandRow = Row((keysColumn, buttonsColumn))
self.labels = labels
self.add(commandRow)
self.shrink_wrap()
self.invalidate()
#-#
class MenuButton(Button):
def __init__(self, title, choices, **kw):
Button.__init__(self, title, **kw)
self.choices = choices
self.menu = Menu(title, ((c, c) for c in choices))
def action(self):
index = self.menu.present(self, (0, 0))
if index == -1:
return
self.menu_picked(index)
def menu_picked(self, index):
pass
class ChoiceButton(ValueButton):
align = "c"
choose = None
def __init__(self, choices, scrolling=True, scroll_items=30, **kw):
# passing an empty list of choices is ill-advised
if 'choose' in kw:
self.choose = kw.pop('choose')
#-# Translation live update preparation
self.scrolling = scrolling
self.scroll_items = scroll_items
self.choices = choices or ["[UNDEFINED]"]
ValueButton.__init__(self, action=self.showMenu, **kw)
self.calc_width()
#-#
self.choiceIndex = 0
#-# Translation live update preparation
def set_update_translation(self, v):
ValueButton.set_update_translation(self, v)
self.menu.set_update_translation(v)
def calc_width(self):
widths = [self.font.size(_(c))[0] for c in self.choices] + [self.width]
if len(widths):
self.width = max(widths) + self.margin * 2
def calc_size(self):
ValueButton.calc_size(self)
self.calc_width()
#-#
def showMenu(self):
choiceIndex = self.menu.present(self, (0, 0))
if choiceIndex != -1:
self.choiceIndex = choiceIndex
if self.choose:
self.choose()
def get_value(self):
return self.selectedChoice
@property
def selectedChoice(self):
if self.choiceIndex >= len(self.choices) or self.choiceIndex < 0:
return ""
return self.choices[self.choiceIndex]
@selectedChoice.setter
def selectedChoice(self, val):
idx = self.choices.index(val)
if idx != -1:
self.choiceIndex = idx
@property
def choices(self):
return self._choices
@choices.setter
def choices(self, ch):
self._choices = ch
self.menu = Menu("", [(name, "pickMenu") for name in self._choices],
self.scrolling, self.scroll_items)
def CheckBoxLabel(title, *args, **kw):
tooltipText = kw.pop('tooltipText', None)
cb = CheckBox(*args, **kw)
lab = Label(title, fg_color=cb.fg_color)
lab.mouse_down = cb.mouse_down
if tooltipText:
cb.tooltipText = tooltipText
lab.tooltipText = tooltipText
class CBRow(Row):
margin = 0
@property
def value(self):
return self.checkbox.value
@value.setter
def value(self, val):
self.checkbox.value = val
row = CBRow((lab, cb))
row.checkbox = cb
return row
def FloatInputRow(title, *args, **kw):
return Row((Label(title, tooltipText=kw.get('tooltipText')), FloatField(*args, **kw)))
def IntInputRow(title, *args, **kw):
return Row((Label(title, tooltipText=kw.get('tooltipText')), IntField(*args, **kw)))
def TextInputRow(title, *args, **kw):
return Row((Label(title, tooltipText=kw.get('tooltipText')), TextFieldWrapped(*args, **kw)))
def BasicTextInputRow(title, *args, **kw):
return Row((Label(title, tooltipText=kw.get('tooltipText')), TextField(*args, **kw)))
def showProgress(progressText, progressIterator, cancel=False):
"""Show the progress for a long-running synchronous operation.
progressIterator should be a generator-like object that can return
either None, for an indeterminate indicator,
    a float value between 0.0 and 1.0 for a determinate indicator,
    a string, to update the progress info label,
    or a tuple of (float value, string) to set the progress and update the label"""
class ProgressWidget(Dialog):
progressFraction = 0.0
firstDraw = False
root = None
def draw(self, surface):
if self.root is None:
self.root = self.get_root()
Widget.draw(self, surface)
frameStart = datetime.now()
frameInterval = timedelta(0, 1, 0) / 2
amount = None
try:
while datetime.now() < frameStart + frameInterval:
amount = progressIterator.next()
if self.firstDraw is False:
self.firstDraw = True
break
except StopIteration:
self.dismiss()
infoText = ""
if amount is not None:
if isinstance(amount, tuple):
if len(amount) > 2:
infoText = ": " + amount[2]
amount, max = amount[:2]
else:
max = amount
maxwidth = (self.width - self.margin * 2)
if amount is None:
self.progressBar.width = maxwidth
self.progressBar.bg_color = (255, 255, 25, 255)
elif isinstance(amount, basestring):
self.statusText = amount
else:
self.progressAmount = amount
if isinstance(amount, (int, float)):
self.progressFraction = float(amount) / (float(max) or 1)
self.progressBar.width = maxwidth * self.progressFraction
self.statusText = str("{0} / {1}".format(amount, max))
else:
self.statusText = str(amount)
if infoText:
self.statusText += infoText
@property
def estimateText(self):
delta = (datetime.now() - self.startTime)
progressPercent = (int(self.progressFraction * 10000))
left = delta * (10000 - progressPercent) / (progressPercent or 1)
return _("Time left: {0}").format(left)
def cancel(self):
if cancel:
self.dismiss(False)
def idleevent(self, evt):
self.invalidate()
def key_down(self, event):
pass
def key_up(self, event):
pass
def mouse_up(self, event):
try:
if "SelectionTool" in str(self.root.editor.currentTool):
if self.root.get_nudge_block().count > 0:
self.root.get_nudge_block().mouse_up(event)
except:
pass
widget = ProgressWidget()
widget.progressText = _(progressText)
widget.statusText = ""
widget.progressAmount = 0.0
progressLabel = ValueDisplay(ref=AttrRef(widget, 'progressText'), width=550)
statusLabel = ValueDisplay(ref=AttrRef(widget, 'statusText'), width=550)
estimateLabel = ValueDisplay(ref=AttrRef(widget, 'estimateText'), width=550)
progressBar = Widget(size=(550, 20), bg_color=(150, 150, 150, 255))
widget.progressBar = progressBar
col = (progressLabel, statusLabel, estimateLabel, progressBar)
if cancel:
cancelButton = Button("Cancel", action=widget.cancel, fg_color=(255, 0, 0, 255))
col += (Column((cancelButton,), align="r"),)
widget.add(Column(col))
widget.shrink_wrap()
widget.startTime = datetime.now()
if widget.present():
return widget.progressAmount
else:
return "Canceled"
|
isc
|
kdart/pycopia
|
core/pycopia/stringmatch.py
|
1
|
4548
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Match plain strings like they were re module objects.
The StringExpression object implements a subset of re compile expressions.
This allows for a more consistent interface for the match types. Since
string.find is about 10 times faster than an RE search with a plain string,
this should speed up matches in that case by about that much, while
keeping a consistent interface.
"""
from __future__ import print_function
from __future__ import division
class StringMatchObject(object):
def __init__(self, start, end, string, pos, endpos, re):
self._start = start
self._end = end
self.string = string
self.pos = pos
self.endpos = endpos
self.lastgroup = None
self.lastindex = None
self.re = re # not really an RE.
def __repr__(self):
return "{0}(start={1!r}, end={2!r}, string={3!r}, pos={4!r}, endpos={5!r}, re={6!r})".format(self.__class__.__name__,
self._start, self._end, self.string, self.pos, self.endpos, self.re)
def expand(self, template):
raise NotImplementedError
def group(self, *args):
if args and args[0] == 0:
return self.string[self._start:self._end]
else:
raise IndexError("no such group")
def groups(self, default=None):
return ()
def groupdict(self, default=None):
return {}
def start(self, group=0):
if group == 0:
return self._start
else:
raise IndexError("no such group")
def end(self, group=0):
if group == 0:
return self._end
else:
raise IndexError("no such group")
def span(self, group=0):
if group == 0:
return self._start, self._end
else:
return -1, -1
def __nonzero__(self):
return 1
# an object that looks like a compiled regular expression, but does exact
# string matching. should be much faster in that case.
class StringExpression(object):
def __init__(self, patt, flags=0):
self.pattern = patt
# bogus attributes to simulate compiled REs from re module.
self.flags = flags
self.groupindex = {}
def __repr__(self):
return "{0}(patt={1!r}, flags={2!r})".format(self.__class__.__name__,
self.pattern, self.flags)
def search(self, text, pos=0, endpos=2147483647):
n = text.find(self.pattern, pos, endpos)
if n >= 0:
return StringMatchObject(n, n+len(self.pattern), text, pos, endpos, self)
else:
return None
match = search # match is same as search for strings
    def split(self, text, maxsplit=0):
        # re semantics: maxsplit=0 means unlimited; str.split uses -1 for that
        return text.split(self.pattern, maxsplit or -1)
    def findall(self, string, pos=0, endpos=2147483647):
        rv = []
        i = pos
        while i >= 0:
            i = string.find(self.pattern, i, endpos)
            if i >= 0:
                rv.append(self.pattern)
                # step past this occurrence so the scan makes progress
                i += max(1, len(self.pattern))
        return rv
    def finditer(self, string, pos=0, endpos=2147483647):
        while 1:
            mo = self.search(string, pos, endpos)
            if mo:
                yield mo
                # resume the search after this match so the scan makes progress
                pos = max(mo.end(), pos + 1)
            else:
                return
def sub(self, repl, string, count=2147483647):
return string.replace(self.pattern, repl, count)
    def subn(self, repl, string, count=2147483647):
        i = 0
        N = 0
        while i >= 0 and N < count:
            i = string.find(self.pattern, i)
            if i >= 0:
                N += 1
                # step past this occurrence so the scan makes progress
                i += max(1, len(self.pattern))
        return string.replace(self.pattern, repl, count), N
# factory function to "compile" EXACT patterns (which are strings)
def compile_exact(string, flags=0):
return StringExpression(string, flags)
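# Example (a sketch mirroring the self-test below): compiled exact patterns
# expose a subset of the re module's match-object API.
#     mo = compile_exact("me").search("matchme")
#     assert mo.span() == (5, 7)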
def _test(argv):
cs = compile_exact("me")
mo = cs.search("matchme")
assert mo is not None
print(mo.span())
assert mo.span() == (5,7)
if __name__ == "__main__":
import sys
_test(sys.argv)
|
apache-2.0
|
maas/python-libmaas
|
maas/client/utils/multipart.py
|
3
|
6112
|
# Copyright 2016-2017 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoding of MIME multipart data."""
__all__ = ["encode_multipart_data"]
from collections import Iterable, Mapping
from email.generator import BytesGenerator
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from io import BytesIO, IOBase
from itertools import chain
import mimetypes
def get_content_type(*names):
"""Return the MIME content type for the file with the given name."""
for name in names:
if name is not None:
mimetype, encoding = mimetypes.guess_type(name)
if mimetype is not None:
if isinstance(mimetype, bytes):
return mimetype.decode("ascii")
else:
return mimetype
else:
return "application/octet-stream"
def make_bytes_payload(name, content):
payload = MIMEApplication(content)
payload.add_header("Content-Disposition", "form-data", name=name)
return payload
def make_string_payload(name, content):
payload = MIMEApplication(content.encode("utf-8"), charset="utf-8")
payload.add_header("Content-Disposition", "form-data", name=name)
payload.set_type("text/plain")
return payload
def make_file_payload(name, content):
payload = MIMEApplication(content.read())
payload.add_header("Content-Disposition", "form-data", name=name, filename=name)
names = name, getattr(content, "name", None)
payload.set_type(get_content_type(*names))
return payload
def make_payloads(name, content):
"""Constructs payload(s) for the given `name` and `content`.
If `content` is a byte string, this calls `make_bytes_payload` to
construct the payload, which this then yields.
If `content` is a unicode string, this calls `make_string_payload`.
If `content` is file-like -- it inherits from `IOBase` or `file` --
this calls `make_file_payload`.
If `content` is iterable, this calls `make_payloads` for each item,
with the same name, and then re-yields each payload generated.
If `content` is callable, this calls it with no arguments, and then
uses the result as a context manager. This can be useful if the
callable returns an open file, for example, because the context
protocol means it will be closed after use.
This raises `AssertionError` if it encounters anything else.
"""
if content is None:
yield make_bytes_payload(name, b"")
elif isinstance(content, bool):
if content:
yield make_bytes_payload(name, b"true")
else:
yield make_bytes_payload(name, b"false")
elif isinstance(content, int):
yield make_bytes_payload(name, b"%d" % content)
elif isinstance(content, bytes):
yield make_bytes_payload(name, content)
elif isinstance(content, str):
yield make_string_payload(name, content)
elif isinstance(content, IOBase):
yield make_file_payload(name, content)
elif callable(content):
with content() as content:
for payload in make_payloads(name, content):
yield payload
elif isinstance(content, Iterable):
for part in content:
for payload in make_payloads(name, part):
yield payload
else:
raise AssertionError("%r is unrecognised: %r" % (name, content))
def build_multipart_message(data):
message = MIMEMultipart("form-data")
for name, content in data:
for payload in make_payloads(name, content):
message.attach(payload)
return message
def encode_multipart_message(message):
# The message must be multipart.
assert message.is_multipart()
# The body length cannot yet be known.
assert "Content-Length" not in message
# So line-endings can be fixed-up later on, component payloads must have
# no Content-Length and their Content-Transfer-Encoding must be base64
# (and not quoted-printable, which Django doesn't appear to understand).
for part in message.get_payload():
assert "Content-Length" not in part
assert part["Content-Transfer-Encoding"] == "base64"
# Flatten the message without headers.
buf = BytesIO()
generator = BytesGenerator(buf, False) # Don't mangle "^From".
generator._write_headers = lambda self: None # Ignore.
generator.flatten(message)
# Ensure the body has CRLF-delimited lines. See
# http://bugs.python.org/issue1349106.
body = b"\r\n".join(buf.getvalue().splitlines())
# Only now is it safe to set the content length.
message.add_header("Content-Length", "%d" % len(body))
return message.items(), body
def encode_multipart_data(data=(), files=()):
"""Create a MIME multipart payload from L{data} and L{files}.
**Note** that this function is deprecated. Use `build_multipart_message`
and `encode_multipart_message` instead.
@param data: A mapping of names (ASCII strings) to data (byte string).
@param files: A mapping of names (ASCII strings) to file objects ready to
be read.
    @return: A 2-tuple of C{(body, headers)}, where C{body} is a byte string
and C{headers} is a dict of headers to add to the enclosing request in
which this payload will travel.
"""
if isinstance(data, Mapping):
data = data.items()
if isinstance(files, Mapping):
files = files.items()
message = build_multipart_message(chain(data, files))
headers, body = encode_multipart_message(message)
return body, dict(headers)
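# Example usage (a sketch, not part of the original module; the field names
# and file are hypothetical):
#     body, headers = encode_multipart_data(
#         data={"op": "create"},
#         files={"blob": open("blob.bin", "rb")})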
|
agpl-3.0
|
transition-robotics/paparazzi
|
sw/tools/tcp_aircraft_server/tcp_aircraft_server.py
|
46
|
2721
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#Copyright 2014, Antoine Drouin
from __future__ import print_function
import logging, base64, socket
from gi.repository import GLib, GObject
import ivy.ivy as ivy
ivylogger = logging.getLogger('Ivy')
ivylogger.setLevel(logging.CRITICAL)
import phoenix.messages
import phoenix.pprz_transport
from os import path, getenv
# if PAPARAZZI_HOME not set, then assume the tree containing this
# file is a reasonable substitute
home_dir = getenv("PAPARAZZI_HOME", path.normpath(path.join(
path.dirname(path.abspath(__file__)), '../../../')))
default_ivybus = '127.255.255.255:2010'
class Server(ivy.IvyServer):
def __init__(self, bus, tcp_port=4242):
ivy.IvyServer.__init__(self, 'TCP_aircraft_server', usesDaemons=True)
self.nb_msgs = 0
self.nb_bytes = 0
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cs.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
cs.bind(('', tcp_port))
cs.listen(1)
GObject.io_add_watch(cs, GObject.IO_IN, self.handle_conn)
print("server listening on {:d}".format(tcp_port))
self.transp = phoenix.pprz_transport.Transport(check_crc=False, debug=False)
self.protocol = phoenix.messages.Protocol(path=path.join(home_dir, "conf/messages.xml"), debug=True)
self.start(bus)
GObject.timeout_add(500, self.periodic, priority=GObject.PRIORITY_HIGH)
def handle_conn(self, sock, cond):
conn, addr = sock.accept()
print("Connection from {}".format(addr))
GObject.io_add_watch(conn, GObject.IO_IN, self.handle_data)
return True
def handle_data(self, sock, cond):
buf = sock.recv(4096)
if not len(buf):
print("Connection closed.")
return False
else:
#print phoenix.hex_of_bin(buf)
msgs = self.transp.parse_many(buf)
for hdr, payload in msgs:
msg = self.protocol.get_message_by_id("telemetry", hdr.msgid)
try:
ivy_str = '{} {} {}'.format(hdr.acid, msg.name, ' '.join([str(v) for v in msg.unpack_values(payload)]))
#print ivy_str
self.send_msg(ivy_str)
self.nb_msgs += 1
self.nb_bytes += len(payload)
except:
print('FAILED', msg.name)
return True
def periodic(self):
print('msgs {} ({} bytes)'.format(self.nb_msgs, self.nb_bytes))
return True
if __name__ == '__main__':
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
server = Server(default_ivybus)
GLib.MainLoop().run()
|
gpl-2.0
|
jishnu7/silpa
|
src/silpa/common/urlrelay.py
|
3
|
12299
|
# Copyright (c) 2006-2008 L. C. Rees
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''RESTful WSGI URL dispatcher.'''
import re
import time
import copy
import sys
import random
import threading
from fnmatch import translate
__author__ = 'L.C. Rees (lcrees@gmail.com)'
__revision__ = '0.7'
__all__ = ['URLRelay', 'url', 'register']
def synchronized(func):
'''Decorator to lock and unlock a method (Phillip J. Eby).
@param func Method to decorate
'''
def wrapper(self, *__args, **__kw):
self._lock.acquire()
try:
return func(self, *__args, **__kw)
finally:
self._lock.release()
wrapper.__name__ = func.__name__
wrapper.__dict__ = func.__dict__
wrapper.__doc__ = func.__doc__
return wrapper
def _handler(environ, start_response):
start_response('404 Not Found', [('content-type', 'text/plain')])
return ['Requested URL was not found on this server.']
def pattern_compile(pattern, pattern_type):
'''Compile pattern.'''
# Convert glob expression to regex
if pattern_type == 'glob': pattern = translate(pattern)
return re.compile(pattern)
class _Registry(object):
'''Maintains order of URL preference while updating the central URL path
registry.'''
_register = list()
def __iter__(self):
'''Iterator for registry.'''
return iter(self._register)
def add(self, pattern, mapping):
'''Add tuple to registry.
@param pattern URL pattern
@param mapping WSGI application mapping
'''
self._register.append((pattern, mapping))
def get(self):
'''Returns current registry.'''
return tuple(self._register)
# URL registry
_reg = _Registry()
def register(pattern, application, method=None):
'''Registers a pattern, application, and optional HTTP method.
@param pattern URL pattern
@param application WSGI application
@param method HTTP method (default: None)
'''
if method is None:
_reg.add(pattern, application)
# Handle URL/method combinations
else:
# Update any existing registry entry
for entry in _reg:
if entry[0] == pattern:
entry[1][method] = application
return None
# Add new registry entry
_reg.add(pattern, {method:application})
def url(pattern, method=None):
    '''Decorator for registering a path pattern/application pair.
@param pattern Regex pattern for path
@param method HTTP method (default: None)
'''
def decorator(application):
register(pattern, application, method)
return application
return decorator
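# Example (an illustrative sketch; the application below is hypothetical):
#     @url(r'^/hello$', method='GET')
#     def hello(environ, start_response):
#         start_response('200 OK', [('content-type', 'text/plain')])
#         return ['Hello']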
class lazy(object):
'''Lazily assign attributes on an instance upon first use.'''
def __init__(self, method):
self.method = method
self.name = method.__name__
def __get__(self, instance, cls):
if instance is None: return self
value = self.method(instance)
setattr(instance, self.name, value)
return value
class MemoryCache(object):
'''Base Cache class.'''
def __init__(self, **kw):
# Set timeout
timeout = kw.get('timeout', 300)
try:
timeout = int(timeout)
except (ValueError, TypeError):
timeout = 300
self.timeout = timeout
# Get random seed
random.seed()
self._cache = dict()
# Set max entries
max_entries = kw.get('max_entries', 300)
try:
self._max_entries = int(max_entries)
except (ValueError, TypeError):
self._max_entries = 300
# Set maximum number of items to cull if over max
self._maxcull = kw.get('maxcull', 10)
self._lock = threading.Condition()
def __getitem__(self, key):
'''Fetch a given key from the cache.'''
return self.get(key)
def __setitem__(self, key, value):
'''Set a value in the cache. '''
self.set(key, value)
def __delitem__(self, key):
'''Delete a key from the cache.'''
self.delete(key)
def __contains__(self, key):
'''Tell if a given key is in the cache.'''
return self.get(key) is not None
def get(self, key, default=None):
'''Fetch a given key from the cache. If the key does not exist, return
default, which itself defaults to None.
@param key Keyword of item in cache.
@param default Default value (default: None)
'''
values = self._cache.get(key)
if values is None:
value = default
elif values[0] < time.time():
self.delete(key)
value = default
else:
value = values[1]
return copy.deepcopy(value)
def set(self, key, value):
'''Set a value in the cache.
@param key Keyword of item in cache.
@param value Value to be inserted in cache.
'''
# Cull timed out values if over max # of entries
if len(self._cache) >= self._max_entries: self._cull()
# Set value and timeout in cache
self._cache[key] = (time.time() + self.timeout, value)
def delete(self, key):
'''Delete a key from the cache, failing silently.
@param key Keyword of item in cache.
'''
try:
del self._cache[key]
except KeyError:
pass
def keys(self):
'''Returns a list of keys in the cache.'''
return self._cache.keys()
def _cull(self):
'''Remove items in cache to make room.'''
num, maxcull = 0, self._maxcull
# Cull number of items allowed (set by self._maxcull)
for key in self.keys():
# Remove only maximum # of items allowed by maxcull
if num <= maxcull:
# Remove items if expired
if self.get(key) is None: num += 1
else: break
# Remove any additional items up to max # of items allowed by maxcull
while len(self.keys()) >= self._max_entries and num <= maxcull:
# Cull remainder of allowed quota at random
self.delete(random.choice(self.keys()))
num += 1
class URLRelay(object):
'''Passes HTTP requests to a WSGI callable based on URL path component and
HTTP request method.
'''
def __init__(self, **kw):
# Values can be 'regex' or 'glob'
pattern_type = kw.get('pattern_type', 'regex')
# Add any iterable of pairs consisting of a path pattern and either a
# callback name or a dictionary of HTTP method/callback names
self._paths = tuple(
(pattern_compile(u, pattern_type), v)
for u, v in kw.get('paths', _reg.get())
)
# Shortcut for full module search path
self._modpath = kw.get('modpath', '')
# 404 handler
self._response = kw.get('handler', _handler)
# Default function
self._default = kw.get('default')
# Set maximum number of items to cull from cache if over max
self._maxcull = kw.get('maxcull', 10)
# Set cache max entries
self._max_entries = kw.get('max_entries', 300)
# Set cache time out
self._timeout = kw.get('timeout', 300)
def __call__(self, env, start_response):
try:
# Fetch app and any positional or keyword arguments in path
app, arg, kw = self.resolve(env['PATH_INFO'], env['REQUEST_METHOD'])
# Place args in environ dictionary
env['wsgiorg.routing_args'] = (arg, kw)
except (ImportError, AttributeError):
# Return 404 handler for any exceptions
return self._response(env, start_response)
return app(env, start_response)
@lazy
def _cache(self):
'''URL <-> callable mapping Cache.'''
return MemoryCache(
timeout=self._timeout,
maxcull=self._maxcull,
max_entries=self._max_entries,
)
def _getapp(self, app):
        '''Loads a callable based on its name.
        @param app A WSGI application'''
if isinstance(app, basestring):
try:
# Add shortcut to module if present
dot = app.rindex('.')
# Import module
return getattr(__import__(app[:dot], '', '', ['']), app[dot+1:])
# If nothing but module name, import the module
except ValueError:
return __import__(app, '', '', [''])
return app
def _loadapp(self, app):
'''Loads an application based on its name.
@param app Web application name'''
# Add module shortcut to module string
if self._modpath != '' and isinstance(app, basestring):
app = '.'.join([self._modpath, app])
newapp = self._getapp(app)
return newapp
def resolve(self, path, method):
'''Fetches a WSGI app based on URL path component and method.
@param path URL path component
@param method HTTP method
'''
key = ':'.join([path, method])
# Try fetching app from cache
app = self._cache.get(key)
if app is not None: return app
# Loop through path patterns -> applications
for pattern, app in self._paths:
# Test path for match
search = pattern.search(path)
# Continue with next iteration if no match
if not search: continue
# Get any app specifically linked to an HTTP method
if isinstance(app, dict):
app = app.get(method)
if app is None: continue
app = self._loadapp(app)
# Ensure we have a callable
assert hasattr(app, '__call__')
# Extract any keyword arguments in the path
kw = search.groupdict()
# Extract any positional arguments
args = tuple(i for i in search.groups() if i not in kw)
# Cache app, positional and keyword arguments
self._cache[key] = (app, args, kw)
return app, args, kw
# Return default app if no matching path and default app is set
if self._default is not None:
default = self._loadapp(self._default)
return default, (), {}
raise ImportError()
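# Example usage (a sketch, not part of the original module): URLRelay is
# itself a WSGI application, so it can be served with the stdlib reference
# server.
#     from wsgiref.simple_server import make_server
#     make_server('', 8000, URLRelay()).serve_forever()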
|
agpl-3.0
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/Python/bin/2.78/scripts/addons/add_curve_sapling/presets/callistemon.py
|
2
|
2255
|
{'handleType': '0', 'rotate': (99.5, 137.5, 137.5, 137.5), 'baseSize_s': 0.1600000560283661, 'af2': 1.0, 'pruneRatio': 0.75, 'radiusTweak': (1.0, 1.0, 1.0, 1.0), 'pruneWidthPeak': 0.5, 'boneStep': (1, 1, 1, 1), 'nrings': 0, 'leafScale': 0.4000000059604645, 'makeMesh': False, 'baseSize': 0.30000001192092896, 'lengthV': (0.0, 0.10000000149011612, 0.0, 0.0), 'shapeS': '10', 'pruneBase': 0.11999999731779099, 'af3': 4.0, 'loopFrames': 0, 'horzLeaves': True, 'curveRes': (8, 5, 3, 1), 'minRadius': 0.001500000013038516, 'leafDist': '6', 'rotateV': (15.0, 0.0, 0.0, 0.0), 'bevel': True, 'curveBack': (0.0, 0.0, 0.0, 0.0), 'leafScaleV': 0.15000000596046448, 'prunePowerHigh': 0.5, 'rootFlare': 1.0, 'prune': False, 'branches': (0, 55, 10, 1), 'taperCrown': 0.5, 'useArm': False, 'splitBias': 0.5499999523162842, 'segSplits': (0.10000000149011612, 0.5, 0.20000000298023224, 0.0), 'resU': 4, 'useParentAngle': True, 'ratio': 0.014999999664723873, 'taper': (1.0, 1.0, 1.0, 1.0), 'length': (0.800000011920929, 0.6000000238418579, 0.5, 0.10000000149011612), 'scale0': 1.0, 'scaleV': 2.0, 'leafRotate': 137.5, 'shape': '7', 'scaleV0': 0.10000000149011612, 'leaves': 150, 'scale': 5.0, 'leafShape': 'hex', 'prunePowerLow': 0.0010000000474974513, 'splitAngle': (18.0, 18.0, 22.0, 0.0), 'seed': 0, 'showLeaves': True, 'downAngle': (0.0, 26.209999084472656, 52.55999755859375, 30.0), 'leafDownAngle': 30.0, 'autoTaper': True, 'rMode': 'rotate', 'leafScaleX': 0.20000000298023224, 'leafScaleT': 0.10000000149011612, 'gust': 1.0, 'armAnim': False, 'wind': 1.0, 'leafRotateV': 15.0, 'baseSplits': 3, 'attractOut': (0.0, 0.800000011920929, 0.0, 0.0), 'armLevels': 2, 'leafAnim': False, 'ratioPower': 1.2000000476837158, 'splitHeight': 0.20000000298023224, 'splitByLen': True, 'af1': 1.0, 'branchDist': 1.5, 'closeTip': False, 'previewArm': False, 'attractUp': (3.5, -1.899843692779541, 0.0, 0.0), 'bevelRes': 1, 'pruneWidth': 0.3400000035762787, 'gustF': 0.07500000298023224, 'leafangle': -12.0, 'curveV': (20.0, 50.0, 75.0, 0.0), 'useOldDownAngle': True, 'leafDownAngleV': -10.0, 'frameRate': 1.0, 'splitAngleV': (5.0, 5.0, 5.0, 0.0), 'levels': 2, 'downAngleV': (0.0, 10.0, 10.0, 10.0), 'customShape': (0.5, 1.0, 0.30000001192092896, 0.5), 'curve': (0.0, -15.0, 0.0, 0.0)}
|
gpl-3.0
|
openvapour/ryu
|
ryu/contrib/ovs/db/schema.py
|
50
|
10545
|
# Copyright (c) 2009, 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from ovs.db import error
import ovs.db.parser
from ovs.db import types
def _check_id(name, json):
if name.startswith('_'):
raise error.Error('names beginning with "_" are reserved', json)
elif not ovs.db.parser.is_identifier(name):
raise error.Error("name must be a valid id", json)
class DbSchema(object):
"""Schema for an OVSDB database."""
def __init__(self, name, version, tables):
self.name = name
self.version = version
self.tables = tables
# "isRoot" was not part of the original schema definition. Before it
# was added, there was no support for garbage collection. So, for
# backward compatibility, if the root set is empty then assume that
# every table is in the root set.
if self.__root_set_size() == 0:
for table in self.tables.itervalues():
table.is_root = True
# Find the "ref_table"s referenced by "ref_table_name"s.
#
# Also force certain columns to be persistent, as explained in
# __check_ref_table(). This requires 'is_root' to be known, so this
# must follow the loop updating 'is_root' above.
for table in self.tables.itervalues():
for column in table.columns.itervalues():
self.__follow_ref_table(column, column.type.key, "key")
self.__follow_ref_table(column, column.type.value, "value")
def __root_set_size(self):
"""Returns the number of tables in the schema's root set."""
n_root = 0
for table in self.tables.itervalues():
if table.is_root:
n_root += 1
return n_root
@staticmethod
def from_json(json):
parser = ovs.db.parser.Parser(json, "database schema")
name = parser.get("name", ['id'])
version = parser.get_optional("version", [str, unicode])
parser.get_optional("cksum", [str, unicode])
tablesJson = parser.get("tables", [dict])
parser.finish()
if (version is not None and
not re.match('[0-9]+\.[0-9]+\.[0-9]+$', version)):
raise error.Error('schema version "%s" not in format x.y.z'
% version)
tables = {}
for tableName, tableJson in tablesJson.iteritems():
_check_id(tableName, json)
tables[tableName] = TableSchema.from_json(tableJson, tableName)
return DbSchema(name, version, tables)
def to_json(self):
# "isRoot" was not part of the original schema definition. Before it
# was added, there was no support for garbage collection. So, for
# backward compatibility, if every table is in the root set then do not
# output "isRoot" in table schemas.
default_is_root = self.__root_set_size() == len(self.tables)
tables = {}
for table in self.tables.itervalues():
tables[table.name] = table.to_json(default_is_root)
json = {"name": self.name, "tables": tables}
if self.version:
json["version"] = self.version
return json
def copy(self):
return DbSchema.from_json(self.to_json())
def __follow_ref_table(self, column, base, base_name):
if not base or base.type != types.UuidType or not base.ref_table_name:
return
base.ref_table = self.tables.get(base.ref_table_name)
if not base.ref_table:
raise error.Error("column %s %s refers to undefined table %s"
% (column.name, base_name, base.ref_table_name),
tag="syntax error")
if base.is_strong_ref() and not base.ref_table.is_root:
# We cannot allow a strong reference to a non-root table to be
# ephemeral: if it is the only reference to a row, then replaying
# the database log from disk will cause the referenced row to be
# deleted, even though it did exist in memory. If there are
# references to that row later in the log (to modify it, to delete
# it, or just to point to it), then this will yield a transaction
# error.
column.persistent = True
class IdlSchema(DbSchema):
def __init__(self, name, version, tables, idlPrefix, idlHeader):
DbSchema.__init__(self, name, version, tables)
self.idlPrefix = idlPrefix
self.idlHeader = idlHeader
@staticmethod
def from_json(json):
parser = ovs.db.parser.Parser(json, "IDL schema")
idlPrefix = parser.get("idlPrefix", [str, unicode])
idlHeader = parser.get("idlHeader", [str, unicode])
subjson = dict(json)
del subjson["idlPrefix"]
del subjson["idlHeader"]
schema = DbSchema.from_json(subjson)
return IdlSchema(schema.name, schema.version, schema.tables,
idlPrefix, idlHeader)
def column_set_from_json(json, columns):
if json is None:
return tuple(columns)
elif type(json) != list:
raise error.Error("array of distinct column names expected", json)
else:
for column_name in json:
if type(column_name) not in [str, unicode]:
raise error.Error("array of distinct column names expected",
json)
elif column_name not in columns:
raise error.Error("%s is not a valid column name"
% column_name, json)
if len(set(json)) != len(json):
# Duplicate.
raise error.Error("array of distinct column names expected", json)
return tuple([columns[column_name] for column_name in json])
class TableSchema(object):
def __init__(self, name, columns, mutable=True, max_rows=sys.maxint,
is_root=True, indexes=[]):
self.name = name
self.columns = columns
self.mutable = mutable
self.max_rows = max_rows
self.is_root = is_root
self.indexes = indexes
@staticmethod
def from_json(json, name):
parser = ovs.db.parser.Parser(json, "table schema for table %s" % name)
columns_json = parser.get("columns", [dict])
mutable = parser.get_optional("mutable", [bool], True)
max_rows = parser.get_optional("maxRows", [int])
is_root = parser.get_optional("isRoot", [bool], False)
indexes_json = parser.get_optional("indexes", [list], [])
parser.finish()
        if max_rows is None:
max_rows = sys.maxint
elif max_rows <= 0:
raise error.Error("maxRows must be at least 1", json)
if not columns_json:
raise error.Error("table must have at least one column", json)
columns = {}
for column_name, column_json in columns_json.iteritems():
_check_id(column_name, json)
columns[column_name] = ColumnSchema.from_json(column_json,
column_name)
indexes = []
for index_json in indexes_json:
index = column_set_from_json(index_json, columns)
if not index:
raise error.Error("index must have at least one column", json)
elif len(index) == 1:
index[0].unique = True
for column in index:
if not column.persistent:
raise error.Error("ephemeral columns (such as %s) may "
"not be indexed" % column.name, json)
indexes.append(index)
return TableSchema(name, columns, mutable, max_rows, is_root, indexes)
def to_json(self, default_is_root=False):
"""Returns this table schema serialized into JSON.
The "isRoot" member is included in the JSON only if its value would
differ from 'default_is_root'. Ordinarily 'default_is_root' should be
        false, because ordinarily a table would not be part of the root set
        if its "isRoot" member is omitted.  However, garbage collection was not
        originally included in OVSDB, so in older schemas that do not include
any "isRoot" members, every table is implicitly part of the root set.
To serialize such a schema in a way that can be read by older OVSDB
tools, specify 'default_is_root' as True.
"""
json = {}
if not self.mutable:
json["mutable"] = False
if default_is_root != self.is_root:
json["isRoot"] = self.is_root
json["columns"] = columns = {}
for column in self.columns.itervalues():
if not column.name.startswith("_"):
columns[column.name] = column.to_json()
if self.max_rows != sys.maxint:
json["maxRows"] = self.max_rows
if self.indexes:
json["indexes"] = []
for index in self.indexes:
json["indexes"].append([column.name for column in index])
return json
class ColumnSchema(object):
def __init__(self, name, mutable, persistent, type_):
self.name = name
self.mutable = mutable
self.persistent = persistent
self.type = type_
self.unique = False
@staticmethod
def from_json(json, name):
parser = ovs.db.parser.Parser(json, "schema for column %s" % name)
mutable = parser.get_optional("mutable", [bool], True)
ephemeral = parser.get_optional("ephemeral", [bool], False)
type_ = types.Type.from_json(parser.get("type", [dict, str, unicode]))
parser.finish()
return ColumnSchema(name, mutable, not ephemeral, type_)
def to_json(self):
json = {"type": self.type.to_json()}
if not self.mutable:
json["mutable"] = False
if not self.persistent:
json["ephemeral"] = True
return json
|
apache-2.0
|
2014c2g2/2014c2
|
wsgi/static/Brython2.1.0-20140419-113919/Lib/_thread.py
|
740
|
4879
|
"""Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import _thread
except ImportError:
import _dummy_thread as _thread
"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
'interrupt_main', 'LockType']
# A dummy value
TIMEOUT_MAX = 2**31
# NOTE: this module can be imported early in the extension building process,
# and so top level imports of other modules should be avoided. Instead, all
# imports are done when needed on a function-by-function basis. Since threads
# are disabled, the import lock should not be an issue anyway (??).
error = RuntimeError
def start_new_thread(function, args, kwargs={}):
"""Dummy implementation of _thread.start_new_thread().
Compatibility is maintained by making sure that ``args`` is a
tuple and ``kwargs`` is a dictionary. If an exception is raised
and it is SystemExit (which can be done by _thread.exit()) it is
caught and nothing is done; all other exceptions are printed out
by using traceback.print_exc().
    If the executed function calls interrupt_main, the KeyboardInterrupt will be
raised when the function returns.
"""
if type(args) != type(tuple()):
raise TypeError("2nd arg must be a tuple")
if type(kwargs) != type(dict()):
raise TypeError("3rd arg must be a dict")
global _main
_main = False
try:
function(*args, **kwargs)
except SystemExit:
pass
except:
import traceback
traceback.print_exc()
_main = True
global _interrupt
if _interrupt:
_interrupt = False
raise KeyboardInterrupt
def exit():
"""Dummy implementation of _thread.exit()."""
raise SystemExit
def get_ident():
"""Dummy implementation of _thread.get_ident().
Since this module should only be used when _threadmodule is not
available, it is safe to assume that the current process is the
only thread. Thus a constant can be safely returned.
"""
return -1
def allocate_lock():
"""Dummy implementation of _thread.allocate_lock()."""
return LockType()
def stack_size(size=None):
"""Dummy implementation of _thread.stack_size()."""
if size is not None:
raise error("setting thread stack size not supported")
return 0
class LockType(object):
"""Class implementing dummy implementation of _thread.LockType.
Compatibility is maintained by maintaining self.locked_status
which is a boolean that stores the state of the lock. Pickling of
the lock, though, should not be done since if the _thread module is
then used with an unpickled ``lock()`` from here problems could
occur from this class not having atomic methods.
"""
def __init__(self):
self.locked_status = False
def acquire(self, waitflag=None, timeout=-1):
"""Dummy implementation of acquire().
For blocking calls, self.locked_status is automatically set to
True and returned appropriately based on value of
``waitflag``. If it is non-blocking, then the value is
actually checked and not set if it is already acquired. This
is all done so that threading.Condition's assert statements
aren't triggered and throw a little fit.
"""
if waitflag is None or waitflag:
self.locked_status = True
return True
else:
if not self.locked_status:
self.locked_status = True
return True
else:
if timeout > 0:
import time
time.sleep(timeout)
return False
__enter__ = acquire
def __exit__(self, typ, val, tb):
self.release()
def release(self):
"""Release the dummy lock."""
# XXX Perhaps shouldn't actually bother to test? Could lead
# to problems for complex, threaded code.
if not self.locked_status:
raise error
self.locked_status = False
return True
def locked(self):
return self.locked_status
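# Usage sketch: the dummy lock supports the same context-manager protocol
# as a real _thread lock.
#
#     lock = allocate_lock()
#     with lock:
#         assert lock.locked()
#     assert not lock.locked()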
# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True
def interrupt_main():
"""Set _interrupt flag to True to have start_new_thread raise
KeyboardInterrupt upon exiting."""
if _main:
raise KeyboardInterrupt
else:
global _interrupt
_interrupt = True
# Brython-specific to avoid circular references between threading and _threading_local
class _local:
pass
|
gpl-2.0
|
germanovm/vdsm
|
vdsm/v2v.py
|
1
|
26018
|
# Copyright 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
"""
When importing a VM, a thread starts a new virt-v2v process.
Information on the progress and the status of the process (i.e. the job)
is fed back via getVdsStats() with the fields progress and status.
progress is a number which represents the percentage of a single disk copy;
status is a way to feed back information on the job (init, error, etc.)
"""
from collections import namedtuple
from contextlib import closing, contextmanager
import errno
import logging
import os
import re
import signal
import threading
import xml.etree.ElementTree as ET
import libvirt
from vdsm.constants import P_VDSM_RUN
from vdsm.define import errCode, doneCode
from vdsm import libvirtconnection, response
from vdsm.infra import zombiereaper
from vdsm.utils import traceback, CommandPath, execCmd, NICENESS, IOCLASS
import caps
_lock = threading.Lock()
_jobs = {}
_V2V_DIR = os.path.join(P_VDSM_RUN, 'v2v')
_VIRT_V2V = CommandPath('virt-v2v', '/usr/bin/virt-v2v')
_OVF_RESOURCE_CPU = 3
_OVF_RESOURCE_MEMORY = 4
_OVF_RESOURCE_NETWORK = 10
# OVF Specification:
# https://www.iso.org/obp/ui/#iso:std:iso-iec:17203:ed-1:v1:en
_OVF_NS = 'http://schemas.dmtf.org/ovf/envelope/1'
_RASD_NS = 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' \
'CIM_ResourceAllocationSettingData'
ImportProgress = namedtuple('ImportProgress',
['current_disk', 'disk_count', 'description'])
DiskProgress = namedtuple('DiskProgress', ['progress'])
class STATUS:
'''
STARTING: request granted and starting the import process
COPYING_DISK: copying disk in progress
    ABORTED: user initiated abort
FAILED: error during import process
DONE: convert process successfully finished
'''
STARTING = 'starting'
COPYING_DISK = 'copying_disk'
ABORTED = 'aborted'
FAILED = 'error'
DONE = 'done'
class V2VError(Exception):
''' Base class for v2v errors '''
class ClientError(Exception):
    ''' Base class for client errors '''
class InvalidVMConfiguration(ValueError):
''' Unexpected error while parsing libvirt domain xml '''
class OutputParserError(V2VError):
''' Error while parsing virt-v2v output '''
class JobExistsError(ClientError):
''' Job already exists in _jobs collection '''
err_name = 'JobExistsError'
class VolumeError(ClientError):
''' Error preparing volume '''
class NoSuchJob(ClientError):
    ''' Job does not exist in the _jobs collection '''
err_name = 'NoSuchJob'
class JobNotDone(ClientError):
''' Import process still in progress '''
err_name = 'JobNotDone'
class NoSuchOvf(V2VError):
    ''' Ovf path does not exist in /var/run/vdsm/v2v/ '''
err_name = 'V2VNoSuchOvf'
class V2VProcessError(V2VError):
    ''' virt-v2v process had an error during execution '''
class InvalidInputError(ClientError):
''' Invalid input received '''
def supported():
return not (caps.getos() in (caps.OSName.RHEVH, caps.OSName.RHEL)
and caps.osversion()['version'].startswith('6'))
def get_external_vms(uri, username, password):
if not supported():
return errCode["noimpl"]
try:
conn = libvirtconnection.open_connection(uri=uri,
username=username,
passwd=password)
except libvirt.libvirtError as e:
        logging.error('error connecting to hypervisor: %r', e.message)
return {'status': {'code': errCode['V2VConnection']['status']['code'],
'message': e.message}}
with closing(conn):
vms = []
for vm in conn.listAllDomains():
root = ET.fromstring(vm.XMLDesc(0))
params = {}
_add_vm_info(vm, params)
try:
_add_general_info(root, params)
except InvalidVMConfiguration as e:
logging.error('error parsing domain xml, msg: %s xml: %s',
e.message, vm.XMLDesc(0))
continue
_add_networks(root, params)
_add_disks(root, params)
for disk in params['disks']:
_add_disk_info(conn, disk)
vms.append(params)
return {'status': doneCode, 'vmList': vms}
def convert_external_vm(uri, username, password, vminfo, job_id, irs):
job = ImportVm.from_libvirt(uri, username, password, vminfo, job_id, irs)
job.start()
_add_job(job_id, job)
return {'status': doneCode}
def convert_ova(ova_path, vminfo, job_id, irs):
job = ImportVm.from_ova(ova_path, vminfo, job_id, irs)
job.start()
_add_job(job_id, job)
return response.success()
def get_ova_info(ova_path):
ns = {'ovf': _OVF_NS, 'rasd': _RASD_NS}
try:
root = ET.fromstring(_read_ovf_from_ova(ova_path))
except ET.ParseError as e:
raise V2VError('Error reading ovf from ova, position: %r' % e.position)
vm = {}
_add_general_ovf_info(vm, root, ns)
_add_disks_ovf_info(vm, root, ns)
_add_networks_ovf_info(vm, root, ns)
return response.success(vmList=vm)
def get_converted_vm(job_id):
try:
job = _get_job(job_id)
_validate_job_done(job)
ovf = _read_ovf(job_id)
except ClientError as e:
logging.info('Converted VM error %s', e)
return errCode[e.err_name]
except V2VError as e:
logging.error('Converted VM error %s', e)
return errCode[e.err_name]
return {'status': doneCode, 'ovf': ovf}
def delete_job(job_id):
try:
job = _get_job(job_id)
_validate_job_finished(job)
_remove_job(job_id)
except ClientError as e:
logging.info('Cannot delete job, error: %s', e)
return errCode[e.err_name]
return {'status': doneCode}
def abort_job(job_id):
try:
job = _get_job(job_id)
job.abort()
except ClientError as e:
logging.info('Cannot abort job, error: %s', e)
return errCode[e.err_name]
return {'status': doneCode}
def get_jobs_status():
ret = {}
with _lock:
items = tuple(_jobs.items())
for job_id, job in items:
ret[job_id] = {
'status': job.status,
'description': job.description,
'progress': job.progress
}
return ret
def _add_job(job_id, job):
with _lock:
if job_id in _jobs:
raise JobExistsError("Job %r exists" % job_id)
_jobs[job_id] = job
def _get_job(job_id):
with _lock:
if job_id not in _jobs:
raise NoSuchJob("No such job %r" % job_id)
return _jobs[job_id]
def _remove_job(job_id):
with _lock:
if job_id not in _jobs:
raise NoSuchJob("No such job %r" % job_id)
del _jobs[job_id]
def _validate_job_done(job):
if job.status != STATUS.DONE:
raise JobNotDone("Job %r is %s" % (job.id, job.status))
def _validate_job_finished(job):
if job.status not in (STATUS.DONE, STATUS.FAILED, STATUS.ABORTED):
raise JobNotDone("Job %r is %s" % (job.id, job.status))
def _read_ovf(job_id):
file_name = os.path.join(_V2V_DIR, "%s.ovf" % job_id)
try:
with open(file_name, 'r') as f:
return f.read()
except IOError as e:
if e.errno != errno.ENOENT:
raise
raise NoSuchOvf("No such ovf %r" % file_name)
def get_storage_domain_path(path):
'''
prepareImage returns /prefix/sdUUID/images/imgUUID/volUUID
    we need the storage domain's absolute path, so we go up 3 levels
'''
return path.rsplit(os.sep, 3)[0]
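# Worked example (hypothetical UUIDs):
#   get_storage_domain_path('/rhev/data/sd-uuid/images/img-uuid/vol-uuid')
#   returns '/rhev/data/sd-uuid'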
@contextmanager
def password_file(job_id, file_name, password):
fd = os.open(file_name, os.O_WRONLY | os.O_CREAT, 0o600)
try:
os.write(fd, password.value)
finally:
os.close(fd)
try:
yield
finally:
try:
os.remove(file_name)
except Exception:
logging.exception("Job %r error removing passwd file: %s",
job_id, file_name)
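# Usage sketch (path pattern taken from _from_libvirt_command below): the
# password is written to a mode-0600 file for the lifetime of the block
# and removed afterwards.
#
#     with password_file(job_id, os.path.join(_V2V_DIR, '%s.tmp' % job_id),
#                        password):
#         pass  # run virt-v2v with --password-file pointing at that path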
class ImportVm(object):
TERM_DELAY = 30
PROC_WAIT_TIMEOUT = 30
def __init__(self, vminfo, job_id, irs):
'''
do not use directly, use a factory method instead!
'''
self._vminfo = vminfo
self._id = job_id
self._irs = irs
self._status = STATUS.STARTING
self._description = ''
self._disk_progress = 0
self._disk_count = 1
self._current_disk = 1
self._aborted = False
self._prepared_volumes = []
self._uri = None
self._username = None
self._password = None
self._passwd_file = None
self._create_command = None
self._run_command = None
self._ova_path = None
@classmethod
def from_libvirt(cls, uri, username, password, vminfo, job_id, irs):
obj = cls(vminfo, job_id, irs)
obj._uri = uri
obj._username = username
obj._password = password
obj._passwd_file = os.path.join(_V2V_DIR, "%s.tmp" % job_id)
obj._create_command = obj._from_libvirt_command
obj._run_command = obj._run_with_password
return obj
@classmethod
def from_ova(cls, ova_path, vminfo, job_id, irs):
obj = cls(vminfo, job_id, irs)
obj._ova_path = ova_path
obj._create_command = obj._from_ova_command
obj._run_command = obj._run
return obj
def start(self):
t = threading.Thread(target=self._run_command)
t.daemon = True
t.start()
@property
def id(self):
return self._id
@property
def status(self):
return self._status
@property
def description(self):
return self._description
@property
def progress(self):
'''
        progress is a flat composition of the per-disk progress values,
        so it is not 100% accurate - each disk takes an equal portion,
        i.e. with 2 disks the first covers 0-50 and the second 50-100
'''
        completed = (self._current_disk - 1) * 100
return (completed + self._disk_progress) / self._disk_count
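    # Worked example: with disk_count == 2, current_disk == 2 and the second
    # disk at 30%, completed = (2 - 1) * 100 = 100, so the overall progress
    # is (100 + 30) / 2 = 65.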
def _run_with_password(self):
with password_file(self._id, self._passwd_file, self._password):
self._run()
@traceback(msg="Error importing vm")
def _run(self):
try:
self._import()
except Exception as ex:
if self._aborted:
logging.debug("Job %r was aborted", self._id)
else:
logging.exception("Job %r failed", self._id)
self._status = STATUS.FAILED
self._description = ex.message
try:
self._abort()
except Exception as e:
logging.exception('Job %r, error trying to abort: %r',
self._id, e)
finally:
self._teardown_volumes()
def _import(self):
# TODO: use the process handling http://gerrit.ovirt.org/#/c/33909/
self._prepare_volumes()
cmd = self._create_command()
logging.info('Job %r starting import', self._id)
# This is the way we run qemu-img convert jobs. virt-v2v is invoking
# qemu-img convert to perform the migration.
self._proc = execCmd(cmd, sync=False, deathSignal=signal.SIGTERM,
nice=NICENESS.HIGH, ioclass=IOCLASS.IDLE,
env=self._execution_environments())
self._proc.blocking = True
self._watch_process_output()
self._wait_for_process()
if self._proc.returncode != 0:
raise V2VProcessError('Job %r process failed exit-code: %r'
', stderr: %s' %
(self._id, self._proc.returncode,
self._proc.stderr.read(1024)))
if self._status != STATUS.ABORTED:
self._status = STATUS.DONE
logging.info('Job %r finished import successfully', self._id)
def _execution_environments(self):
env = {'LIBGUESTFS_BACKEND': 'direct'}
if 'virtio_iso_path' in self._vminfo:
env['VIRTIO_WIN'] = self._vminfo['virtio_iso_path']
return env
def _wait_for_process(self):
if self._proc.returncode is not None:
return
logging.debug("Job %r waiting for virt-v2v process", self._id)
if not self._proc.wait(timeout=self.PROC_WAIT_TIMEOUT):
raise V2VProcessError("Job %r timeout waiting for process pid=%s",
self._id, self._proc.pid)
def _watch_process_output(self):
parser = OutputParser()
for event in parser.parse(self._proc.stdout):
if isinstance(event, ImportProgress):
self._status = STATUS.COPYING_DISK
logging.info("Job %r copying disk %d/%d",
self._id, event.current_disk, event.disk_count)
self._disk_progress = 0
self._current_disk = event.current_disk
self._disk_count = event.disk_count
self._description = event.description
elif isinstance(event, DiskProgress):
self._disk_progress = event.progress
if event.progress % 10 == 0:
logging.info("Job %r copy disk %d progress %d/100",
self._id, self._current_disk, event.progress)
else:
raise RuntimeError("Job %r got unexpected parser event: %s" %
(self._id, event))
def _from_libvirt_command(self):
cmd = [_VIRT_V2V.cmd,
'-ic', self._uri,
'-o', 'vdsm',
'-of', self._get_disk_format(),
'-oa', self._vminfo.get('allocation', 'sparse').lower()]
cmd.extend(self._generate_disk_parameters())
cmd.extend(['--password-file',
self._passwd_file,
'--vdsm-vm-uuid',
self._id,
'--vdsm-ovf-output',
_V2V_DIR,
'--machine-readable',
'-os',
get_storage_domain_path(self._prepared_volumes[0]['path']),
self._vminfo['vmName']])
return cmd
def _from_ova_command(self):
cmd = [_VIRT_V2V.cmd,
'-i', 'ova', self._ova_path,
'-o', 'vdsm',
'-of', self._get_disk_format(),
'-oa', self._vminfo.get('allocation', 'sparse').lower(),
'--vdsm-vm-uuid',
self._id,
'--vdsm-ovf-output',
_V2V_DIR,
'--machine-readable',
'-os',
get_storage_domain_path(self._prepared_volumes[0]['path'])]
cmd.extend(self._generate_disk_parameters())
return cmd
def abort(self):
self._status = STATUS.ABORTED
logging.info('Job %r aborting...', self._id)
self._abort()
def _abort(self):
self._aborted = True
if self._proc.returncode is None:
logging.debug('Job %r killing virt-v2v process', self._id)
try:
self._proc.kill()
except OSError as e:
if e.errno != errno.ESRCH:
raise
logging.debug('Job %r virt-v2v process not running',
self._id)
else:
logging.debug('Job %r virt-v2v process was killed',
self._id)
finally:
zombiereaper.autoReapPID(self._proc.pid)
def _get_disk_format(self):
fmt = self._vminfo.get('format', 'raw').lower()
if fmt == 'cow':
return 'qcow2'
return fmt
def _generate_disk_parameters(self):
parameters = []
for disk in self._vminfo['disks']:
try:
parameters.append('--vdsm-image-uuid')
parameters.append(disk['imageID'])
parameters.append('--vdsm-vol-uuid')
parameters.append(disk['volumeID'])
except KeyError as e:
raise InvalidInputError('Job %r missing required property: %s'
% (self._id, e))
return parameters
def _prepare_volumes(self):
if len(self._vminfo['disks']) < 1:
            raise InvalidInputError('Job %r cannot import vm with no disk'
                                    % self._id)
for disk in self._vminfo['disks']:
drive = {'poolID': self._vminfo['poolID'],
'domainID': self._vminfo['domainID'],
'volumeID': disk['volumeID'],
'imageID': disk['imageID']}
res = self._irs.prepareImage(drive['domainID'],
drive['poolID'],
drive['imageID'],
drive['volumeID'])
if res['status']['code']:
raise VolumeError('Job %r bad volume specification: %s' %
(self._id, drive))
drive['path'] = res['path']
self._prepared_volumes.append(drive)
def _teardown_volumes(self):
for drive in self._prepared_volumes:
try:
self._irs.teardownImage(drive['domainID'],
drive['poolID'],
drive['imageID'])
except Exception as e:
logging.error('Job %r error tearing down drive: %s',
self._id, e)
class OutputParser(object):
COPY_DISK_RE = re.compile(r'.*(Copying disk (\d+)/(\d+)).*')
DISK_PROGRESS_RE = re.compile(r'\s+\((\d+).*')
def parse(self, stream):
for line in stream:
if 'Copying disk' in line:
description, current_disk, disk_count = self._parse_line(line)
yield ImportProgress(int(current_disk), int(disk_count),
description)
for chunk in self._iter_progress(stream):
progress = self._parse_progress(chunk)
yield DiskProgress(progress)
if progress == 100:
break
def _parse_line(self, line):
m = self.COPY_DISK_RE.match(line)
if m is None:
raise OutputParserError('unexpected format in "Copying disk"'
', line: %r' % line)
return m.group(1), m.group(2), m.group(3)
    def _iter_progress(self, stream):
        chunk = ''
        while True:
            c = stream.read(1)
            if not c:
                # Stream exhausted (virt-v2v exited): stop iterating instead
                # of spinning forever on empty reads.
                break
            chunk += c
            if c == '\r':
                yield chunk
                chunk = ''
def _parse_progress(self, chunk):
m = self.DISK_PROGRESS_RE.match(chunk)
if m is None:
raise OutputParserError('error parsing progress, chunk: %r'
% chunk)
try:
return int(m.group(1))
except ValueError:
raise OutputParserError('error parsing progress regex: %r'
                                    % m.groups())
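# Illustrative input (not captured from a real run): the parser consumes
# virt-v2v machine-readable output such as
#
#     Copying disk 1/2
#         (10/100%)\r    (20/100%)\r ... (100/100%)\r
#
# yielding one ImportProgress event per "Copying disk" line and one
# DiskProgress event per carriage-return terminated chunk.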
def _mem_to_mib(size, unit):
lunit = unit.lower()
if lunit in ('bytes', 'b'):
return size / 1024 / 1024
elif lunit in ('kib', 'k'):
return size / 1024
elif lunit in ('mib', 'm'):
return size
elif lunit in ('gib', 'g'):
return size * 1024
elif lunit in ('tib', 't'):
return size * 1024 * 1024
else:
raise InvalidVMConfiguration("Invalid currentMemory unit attribute:"
" %r" % unit)
def _add_vm_info(vm, params):
params['vmName'] = vm.name()
if vm.state()[0] == libvirt.VIR_DOMAIN_SHUTOFF:
params['status'] = "Down"
else:
params['status'] = "Up"
def _add_general_info(root, params):
e = root.find('./uuid')
if e is not None:
params['vmId'] = e.text
e = root.find('./currentMemory')
if e is not None:
try:
size = int(e.text)
except ValueError:
raise InvalidVMConfiguration("Invalid 'currentMemory' value: %r"
% e.text)
unit = e.get('unit', 'KiB')
params['memSize'] = _mem_to_mib(size, unit)
e = root.find('./vcpu')
if e is not None:
try:
params['smp'] = int(e.text)
except ValueError:
raise InvalidVMConfiguration("Invalid 'vcpu' value: %r" % e.text)
e = root.find('./os/type/[@arch]')
if e is not None:
params['arch'] = e.get('arch')
def _add_disk_info(conn, disk):
    if 'alias' in disk.keys():
        try:
            vol = conn.storageVolLookupByPath(disk['alias'])
            _, capacity, alloc = vol.info()
        except libvirt.libvirtError:
            # Keep the disk entry usable when the lookup fails: log and skip
            # the size fields instead of hitting a NameError below.
            logging.exception("Error getting disk size")
        else:
            disk['capacity'] = str(capacity)
            disk['allocation'] = str(alloc)
def _add_disks(root, params):
params['disks'] = []
disks = root.findall('.//disk[@type="file"]')
for disk in disks:
d = {}
device = disk.get('device')
if device is not None:
d['type'] = device
target = disk.find('./target/[@dev]')
if target is not None:
d['dev'] = target.get('dev')
source = disk.find('./source/[@file]')
if source is not None:
d['alias'] = source.get('file')
params['disks'].append(d)
def _add_networks(root, params):
params['networks'] = []
interfaces = root.findall('.//interface')
for iface in interfaces:
i = {}
if 'type' in iface.attrib:
i['type'] = iface.attrib['type']
mac = iface.find('./mac/[@address]')
if mac is not None:
i['macAddr'] = mac.get('address')
source = iface.find('./source/[@bridge]')
if source is not None:
i['bridge'] = source.get('bridge')
target = iface.find('./target/[@dev]')
if target is not None:
i['dev'] = target.get('dev')
model = iface.find('./model/[@type]')
if model is not None:
i['model'] = model.get('type')
params['networks'].append(i)
def _read_ovf_from_ova(ova_path):
# FIXME: change to tarfile package when support --to-stdout
cmd = ['/usr/bin/tar', 'xf', ova_path, '*.ovf', '--to-stdout']
rc, output, error = execCmd(cmd)
if rc:
raise V2VError(error)
return ''.join(output)
def _add_general_ovf_info(vm, node, ns):
vm['status'] = 'Down'
vmName = node.find('./ovf:VirtualSystem/ovf:Name', ns)
if vmName is not None:
vm['vmName'] = vmName.text
else:
raise V2VError('Error parsing ovf information: no ovf:Name')
memSize = node.find('.//ovf:Item[rasd:ResourceType="%d"]/'
'rasd:VirtualQuantity' % _OVF_RESOURCE_MEMORY, ns)
if memSize is not None:
vm['memSize'] = int(memSize.text)
else:
raise V2VError('Error parsing ovf information: no memory size')
smp = node.find('.//ovf:Item[rasd:ResourceType="%d"]/'
'rasd:VirtualQuantity' % _OVF_RESOURCE_CPU, ns)
if smp is not None:
vm['smp'] = int(smp.text)
else:
raise V2VError('Error parsing ovf information: no cpu info')
def _add_disks_ovf_info(vm, node, ns):
vm['disks'] = []
for d in node.findall(".//ovf:DiskSection/ovf:Disk", ns):
disk = {'type': 'disk'}
capacity = d.attrib.get('{%s}capacity' % _OVF_NS)
disk['capacity'] = str(int(capacity) * 1024 * 1024 * 1024)
fileref = d.attrib.get('{%s}fileRef' % _OVF_NS)
alias = node.find('.//ovf:References/ovf:File[@ovf:id="%s"]' %
fileref, ns)
if alias is not None:
disk['alias'] = alias.attrib.get('{%s}href' % _OVF_NS)
disk['allocation'] = str(alias.attrib.get('{%s}size' % _OVF_NS))
else:
raise V2VError('Error parsing ovf information: disk href info')
vm['disks'].append(disk)
def _add_networks_ovf_info(vm, node, ns):
vm['networks'] = []
for n in node.findall('.//ovf:Item[rasd:ResourceType="%d"]'
% _OVF_RESOURCE_NETWORK, ns):
net = {}
dev = n.find('./rasd:ElementName', ns)
if dev is not None:
net['dev'] = dev.text
else:
raise V2VError('Error parsing ovf information: '
'network element name')
model = n.find('./rasd:ResourceSubType', ns)
if model is not None:
net['model'] = model.text
else:
raise V2VError('Error parsing ovf information: network model')
bridge = n.find('./rasd:Connection', ns)
if bridge is not None:
net['bridge'] = bridge.text
net['type'] = 'bridge'
else:
net['type'] = 'interface'
vm['networks'].append(net)
|
gpl-2.0
|
kwilliams-mo/iris
|
lib/iris/tests/test_util.py
|
1
|
13481
|
# (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test iris.util
"""
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import inspect
import os
import StringIO
import unittest
import numpy as np
import iris.analysis
import iris.coords
import iris.tests.stock as stock
import iris.util
class TestMonotonic(unittest.TestCase):
def assertMonotonic(self, array, direction=None, **kwargs):
if direction is not None:
mono, dir = iris.util.monotonic(array, return_direction=True, **kwargs)
if not mono:
                self.fail('Array was not monotonic:\n %r' % array)
if dir != np.sign(direction):
                self.fail('Array was monotonic but not in the direction expected:'
                          '\n  + requested direction: %s\n  + resultant direction: %s' % (direction, dir))
else:
mono = iris.util.monotonic(array, **kwargs)
if not mono:
            self.fail('Array was not monotonic:\n %r' % array)
def assertNotMonotonic(self, array, **kwargs):
mono = iris.util.monotonic(array, **kwargs)
if mono:
self.fail("Array was monotonic when it shouldn't be:/n %r" % array)
def test_monotonic_pve(self):
a = np.array([3, 4, 5.3])
self.assertMonotonic(a)
self.assertMonotonic(a, direction=1)
# test the reverse for negative monotonic.
a = a[::-1]
self.assertMonotonic(a)
self.assertMonotonic(a, direction=-1)
def test_not_monotonic(self):
b = np.array([3, 5.3, 4])
self.assertNotMonotonic(b)
def test_monotonic_strict(self):
b = np.array([3, 5.3, 4])
self.assertNotMonotonic(b, strict=True)
b = np.array([3, 5.3, 5.3])
self.assertNotMonotonic(b, strict=True)
self.assertMonotonic(b)
b = np.array([0.0])
self.assertRaises(ValueError, iris.util.monotonic, b)
self.assertRaises(ValueError, iris.util.monotonic, b, strict=True)
b = np.array([0.0, 0.0])
self.assertNotMonotonic(b, strict=True)
self.assertMonotonic(b)
class TestReverse(unittest.TestCase):
def test_simple(self):
a = np.arange(12).reshape(3, 4)
np.testing.assert_array_equal(a[::-1], iris.util.reverse(a, 0))
np.testing.assert_array_equal(a[::-1, ::-1], iris.util.reverse(a, [0, 1]))
np.testing.assert_array_equal(a[:, ::-1], iris.util.reverse(a, 1))
np.testing.assert_array_equal(a[:, ::-1], iris.util.reverse(a, [1]))
self.assertRaises(ValueError, iris.util.reverse, a, [])
self.assertRaises(ValueError, iris.util.reverse, a, -1)
self.assertRaises(ValueError, iris.util.reverse, a, 10)
self.assertRaises(ValueError, iris.util.reverse, a, [-1])
self.assertRaises(ValueError, iris.util.reverse, a, [0, -1])
def test_single(self):
a = np.arange(36).reshape(3, 4, 3)
np.testing.assert_array_equal(a[::-1], iris.util.reverse(a, 0))
np.testing.assert_array_equal(a[::-1, ::-1], iris.util.reverse(a, [0, 1]))
np.testing.assert_array_equal(a[:, ::-1, ::-1], iris.util.reverse(a, [1, 2]))
np.testing.assert_array_equal(a[..., ::-1], iris.util.reverse(a, 2))
self.assertRaises(ValueError, iris.util.reverse, a, -1)
self.assertRaises(ValueError, iris.util.reverse, a, 10)
self.assertRaises(ValueError, iris.util.reverse, a, [-1])
self.assertRaises(ValueError, iris.util.reverse, a, [0, -1])
class TestClipString(unittest.TestCase):
def setUp(self):
self.test_string = "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
self.rider = "**^^**$$..--__" # A good chance at being unique and not in the string to be tested!
def test_oversize_string(self):
# Test with a clip length that means the string will be clipped
clip_length = 109
result = iris.util.clip_string(self.test_string, clip_length, self.rider)
# Check the length is between what we requested ( + rider length) and the length of the original string
self.assertTrue(clip_length + len(self.rider) <= len(result) < len(self.test_string), "String was not clipped.")
# Also test the rider was added
self.assertTrue(self.rider in result, "Rider was not added to the string when it should have been.")
def test_undersize_string(self):
# Test with a clip length that is longer than the string
clip_length = 10999
result = iris.util.clip_string(self.test_string, clip_length, self.rider)
self.assertEqual(len(result), len(self.test_string), "String was clipped when it should not have been.")
# Also test that no rider was added on the end if the string was not clipped
        self.assertFalse(self.rider in result, "Rider was added to the string when it should not have been.")
def test_invalid_clip_lengths(self):
# Clip values less than or equal to zero are not valid
for clip_length in [0, -100]:
result = iris.util.clip_string(self.test_string, clip_length, self.rider)
self.assertEqual(len(result), len(self.test_string), "String was clipped when it should not have been.")
def test_default_values(self):
# Get the default values specified in the function
argspec = inspect.getargspec(iris.util.clip_string)
arg_dict = dict(zip(argspec.args[-2:], argspec.defaults))
result = iris.util.clip_string(self.test_string, arg_dict["clip_length"], arg_dict["rider"])
self.assertLess(len(result), len(self.test_string), "String was not clipped.")
rider_returned = result[-len(arg_dict["rider"]):]
        self.assertEqual(rider_returned, arg_dict["rider"], "Default rider was not applied.")
def test_trim_string_with_no_spaces(self):
clip_length = 200
no_space_string = "a" * 500
# Since this string has no spaces, clip_string will not be able to gracefully clip it
# but will instead clip it exactly where the user specified
result = iris.util.clip_string(no_space_string, clip_length, self.rider)
expected_length = clip_length + len(self.rider)
# Check the length of the returned string is equal to clip length + length of rider
        self.assertEqual(len(result), expected_length, "Mismatch in expected length of clipped string. Length was %s, expected value is %s" % (len(result), expected_length))
class TestDescribeDiff(iris.tests.IrisTest):
def test_identical(self):
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d()
return_str_IO = StringIO.StringIO()
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_str_IO)
return_str = return_str_IO.getvalue()
self.assertString(return_str, 'compatible_cubes.str.txt')
def test_different(self):
return_str_IO = StringIO.StringIO()
# test incompatible attributes
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d()
test_cube_a.attributes['Conventions'] = 'CF-1.5'
test_cube_b.attributes['Conventions'] = 'CF-1.6'
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_str_IO)
return_str = return_str_IO.getvalue()
self.assertString(return_str, 'incompatible_attr.str.txt')
# test incompatible names
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d()
test_cube_a.standard_name = "relative_humidity"
return_str_IO.truncate(0)
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_str_IO)
return_str = return_str_IO.getvalue()
self.assertString(return_str, 'incompatible_name.str.txt')
# test incompatible unit
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d()
test_cube_a.units = iris.unit.Unit('m')
return_str_IO.truncate(0)
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_str_IO)
return_str = return_str_IO.getvalue()
self.assertString(return_str, 'incompatible_unit.str.txt')
# test incompatible methods
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d().collapsed('model_level_number', iris.analysis.MEAN)
return_str_IO.truncate(0)
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_str_IO)
return_str = return_str_IO.getvalue()
self.assertString(return_str, 'incompatible_meth.str.txt')
def test_output_file(self):
# test incompatible attributes
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d().collapsed('model_level_number', iris.analysis.MEAN)
test_cube_a.attributes['Conventions'] = 'CF-1.5'
test_cube_b.attributes['Conventions'] = 'CF-1.6'
test_cube_a.standard_name = "relative_humidity"
test_cube_a.units = iris.unit.Unit('m')
with self.temp_filename() as filename:
with open(filename, 'w') as f:
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=f)
f.close()
self.assertFilesEqual(filename,
'incompatible_cubes.str.txt')
class TestAsCompatibleShape(tests.IrisTest):
def test_slice(self):
cube = tests.stock.realistic_4d()
sliced = cube[1, :, 2, :-2]
expected = cube[1:2, :, 2:3, :-2]
res = iris.util.as_compatible_shape(sliced, cube)
self.assertEqual(res, expected)
def test_transpose(self):
cube = tests.stock.realistic_4d()
transposed = cube.copy()
transposed.transpose()
expected = cube
res = iris.util.as_compatible_shape(transposed, cube)
self.assertEqual(res, expected)
def test_slice_and_transpose(self):
cube = tests.stock.realistic_4d()
sliced_and_transposed = cube[1, :, 2, :-2]
sliced_and_transposed.transpose()
expected = cube[1:2, :, 2:3, :-2]
res = iris.util.as_compatible_shape(sliced_and_transposed, cube)
self.assertEqual(res, expected)
def test_collapsed(self):
cube = tests.stock.realistic_4d()
collapsed = cube.collapsed('model_level_number', iris.analysis.MEAN)
expected_shape = list(cube.shape)
expected_shape[1] = 1
expected_data = collapsed.data.reshape(expected_shape)
res = iris.util.as_compatible_shape(collapsed, cube)
self.assertCML(res, ('util', 'as_compatible_shape_collapsed.cml'),
checksum=False)
self.assertArrayEqual(expected_data, res.data)
self.assertArrayEqual(expected_data.mask, res.data.mask)
def test_reduce_dimensionality(self):
# Test that as_compatible_shape() can demote
# length one dimensions to scalars.
cube = tests.stock.realistic_4d()
src = cube[:, 2:3]
expected = reduced = cube[:, 2]
res = iris.util.as_compatible_shape(src, reduced)
self.assertEqual(res, expected)
def test_anonymous_dims(self):
cube = tests.stock.realistic_4d()
# Move all coords from dim_coords to aux_coords.
for coord in cube.dim_coords:
dim = cube.coord_dims(coord)
cube.remove_coord(coord)
cube.add_aux_coord(coord, dim)
sliced = cube[1, :, 2, :-2]
expected = cube[1:2, :, 2:3, :-2]
res = iris.util.as_compatible_shape(sliced, cube)
self.assertEqual(res, expected)
def test_scalar_auxcoord(self):
def dim_to_aux(cube, coord_name):
"""Convert coordinate on cube from DimCoord to AuxCoord."""
coord = cube.coord(coord_name)
coord = iris.coords.AuxCoord.from_coord(coord)
cube.replace_coord(coord)
cube = tests.stock.realistic_4d()
src = cube[:, :, 3]
dim_to_aux(src, 'grid_latitude')
expected = cube[:, :, 3:4]
dim_to_aux(expected, 'grid_latitude')
res = iris.util.as_compatible_shape(src, cube)
self.assertEqual(res, expected)
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
|
sodastsai/depot-pm
|
depot_pm/check.py
|
2
|
1433
|
#
# Copyright 2015 sodastsai
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals, division, absolute_import, print_function
from taskr.contrib.system import os_info, has_command
from taskr.contrib.system.osx import has_app
test_names = [
'has_command',
]
if os_info.is_osx:
test_names.append('has_app')
def check(test_name, *args):
if test_name == 'has_command':
if not args:
raise ValueError('At least one argument is required. (command_name)')
if not has_command(args[0]):
raise ValueError('No command named "{}".'.format(args[0]))
elif test_name == 'has_app':
if not args:
raise ValueError('At least one argument is required. (app_name)')
if not has_app(args[0]):
raise ValueError('No app named "{}".'.format(args[0]))
else:
raise ValueError('No such test. ({})'.format(test_name))
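# Usage sketch: check() returns silently on success and raises ValueError
# with a descriptive message on failure.
#
#     check('has_command', 'git')   # fails if git is not on the PATH
#     check('has_app', 'Safari')    # only available on OS X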
|
apache-2.0
|
blooparksystems/odoo
|
openerp/tools/amount_to_text_en.py
|
50
|
4147
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from translate import _
_logger = logging.getLogger(__name__)
#-------------------------------------------------------------
#ENGLISH
#-------------------------------------------------------------
to_19 = ( 'Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six',
'Seven', 'Eight', 'Nine', 'Ten', 'Eleven', 'Twelve', 'Thirteen',
'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen' )
tens = ( 'Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety')
denom = ( '',
'Thousand', 'Million', 'Billion', 'Trillion', 'Quadrillion',
'Quintillion', 'Sextillion', 'Septillion', 'Octillion', 'Nonillion',
'Decillion', 'Undecillion', 'Duodecillion', 'Tredecillion', 'Quattuordecillion',
'Sexdecillion', 'Septendecillion', 'Octodecillion', 'Novemdecillion', 'Vigintillion' )
def _convert_nn(val):
"""convert a value < 100 to English.
"""
if val < 20:
return to_19[val]
for (dcap, dval) in ((k, 20 + (10 * v)) for (v, k) in enumerate(tens)):
if dval + 10 > val:
if val % 10:
return dcap + '-' + to_19[val % 10]
return dcap
def _convert_nnn(val):
"""
convert a value < 1000 to english, special cased because it is the level that kicks
off the < 100 special case. The rest are more general. This also allows you to
get strings in the form of 'forty-five hundred' if called directly.
"""
word = ''
(mod, rem) = (val % 100, val // 100)
if rem > 0:
word = to_19[rem] + ' Hundred'
if mod > 0:
word += ' '
if mod > 0:
word += _convert_nn(mod)
return word
def english_number(val):
if val < 100:
return _convert_nn(val)
if val < 1000:
return _convert_nnn(val)
for (didx, dval) in ((v - 1, 1000 ** v) for v in range(len(denom))):
if dval > val:
mod = 1000 ** didx
l = val // mod
r = val - (l * mod)
ret = _convert_nnn(l) + ' ' + denom[didx]
if r > 0:
ret = ret + ', ' + english_number(r)
return ret
def amount_to_text(number, currency):
number = '%.2f' % number
units_name = currency
list = str(number).split('.')
start_word = english_number(int(list[0]))
end_word = english_number(int(list[1]))
cents_number = int(list[1])
cents_name = (cents_number > 1) and 'Cents' or 'Cent'
return ' '.join(filter(None, [start_word, units_name, (start_word or units_name) and (end_word or cents_name) and 'and', end_word, cents_name]))
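# Worked example (computed with the functions above):
#   amount_to_text(1654.00, 'euro')
#   -> 'One Thousand, Six Hundred Fifty-Four euro and Zero Cent'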
#-------------------------------------------------------------
# Generic functions
#-------------------------------------------------------------
_translate_funcs = {'en' : amount_to_text}
#TODO: we should use the country AND language (ex: septante VS soixante dix)
#TODO: we should use en by default, but the translation func is yet to be implemented
def amount_to_text(nbr, lang='en', currency='euro'):
""" Converts an integer to its textual representation, using the language set in the context if any.
Example::
            1654: one thousand six hundred and fifty-four.
"""
import openerp.loglevels as loglevels
# if nbr > 10000000:
# _logger.warning(_("Number too large '%d', can not translate it"))
# return str(nbr)
    if lang not in _translate_funcs:
_logger.warning(_("no translation function found for lang: '%s'"), lang)
#TODO: (default should be en) same as above
lang = 'en'
return _translate_funcs[lang](abs(nbr), currency)
if __name__ == '__main__':
    from sys import argv
    lang = 'nl'
    if len(argv) < 2:
        for i in range(1, 200):
            print i, ">>", amount_to_text(i, lang)
        for i in range(200, 999999, 139):
            print i, ">>", amount_to_text(i, lang)
    else:
        print amount_to_text(int(argv[1]), lang)
|
gpl-3.0
|
omriabnd/UCCA-App
|
Server/uccaApp/models/Users.py
|
1
|
1828
|
from datetime import datetime
from rest_framework.exceptions import ValidationError
from uccaApp.models import Tabs, Constants, Roles
from django.db import models
from django.contrib.auth.models import User, Group
class Users(models.Model):
id = models.AutoField(primary_key=True)
user_auth = models.OneToOneField(User,null=False, related_name="base_user", default=1, on_delete=models.CASCADE,unique=True)
# user_group = models.OneToOneField(Group,null=False, related_name="base_user", default=1, on_delete=models.CASCADE,unique=True)
first_name = models.CharField(max_length=100, default='')
last_name = models.CharField(max_length=100, default='')
email = models.EmailField(max_length=100,unique=True)
organization = models.CharField(max_length=Constants.ORGANIZATION_MAX_LENGTH)
affiliation = models.CharField(max_length=Constants.ORGANIZATION_MAX_LENGTH)
role = models.ForeignKey(Roles,max_length=256,db_column="role")
created_by = models.ForeignKey(User,null=True,blank=True, related_name="created_by_user",db_column="created_by")
is_active = models.BooleanField(default=True)
created_at = models.DateTimeField(default=datetime.now, blank=True)
updated_at = models.DateTimeField(auto_now=True, blank=True)
def __unicode__(self):
return self.first_name
class Meta:
db_table="users"
def set_group(self,user_id,new_role_name):
        # remove the user's existing group permissions
User.objects.get(pk=user_id).groups.clear()
# grant new group to user
Group.objects.get(name=new_role_name).user_set.add(User.objects.get(pk=user_id))
def validate_email_unique(email):
exists = User.objects.filter(email=email)
if exists:
raise ValidationError("Email address %s already exits, must be unique" % email)
|
gpl-3.0
|
jammerful/buildbot
|
master/buildbot/statistics/__init__.py
|
17
|
1857
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.statistics.capture import CaptureBuildDuration
from buildbot.statistics.capture import CaptureBuildDurationAllBuilders
from buildbot.statistics.capture import CaptureBuildEndTime
from buildbot.statistics.capture import CaptureBuildEndTimeAllBuilders
from buildbot.statistics.capture import CaptureBuildStartTime
from buildbot.statistics.capture import CaptureBuildStartTimeAllBuilders
from buildbot.statistics.capture import CaptureData
from buildbot.statistics.capture import CaptureDataAllBuilders
from buildbot.statistics.capture import CaptureProperty
from buildbot.statistics.capture import CapturePropertyAllBuilders
from buildbot.statistics.stats_service import StatsService
from buildbot.statistics.storage_backends.influxdb_client import InfluxStorageService
__all__ = [
'CaptureBuildDuration',
'CaptureBuildDurationAllBuilders',
'CaptureBuildEndTime',
'CaptureBuildEndTimeAllBuilders',
'CaptureBuildStartTime',
'CaptureBuildStartTimeAllBuilders',
'CaptureData',
'CaptureDataAllBuilders',
'CaptureProperty',
'CapturePropertyAllBuilders',
'InfluxStorageService',
'StatsService'
]
|
gpl-2.0
|
wweiradio/django
|
django/contrib/admin/models.py
|
90
|
3131
|
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.admin.utils import quote
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.translation import ugettext, ugettext_lazy as _
ADDITION = 1
CHANGE = 2
DELETION = 3
class LogEntryManager(models.Manager):
use_in_migrations = True
def log_action(self, user_id, content_type_id, object_id, object_repr, action_flag, change_message=''):
e = self.model(
None, None, user_id, content_type_id, smart_text(object_id),
object_repr[:200], action_flag, change_message
)
e.save()
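# Usage sketch (SomeModel, request and obj are hypothetical):
#
#     LogEntry.objects.log_action(
#         user_id=request.user.pk,
#         content_type_id=ContentType.objects.get_for_model(SomeModel).pk,
#         object_id=obj.pk,
#         object_repr=str(obj),
#         action_flag=CHANGE,
#         change_message='Changed the name field.')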
@python_2_unicode_compatible
class LogEntry(models.Model):
action_time = models.DateTimeField(_('action time'), auto_now=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
models.CASCADE,
verbose_name=_('user'),
)
content_type = models.ForeignKey(
ContentType,
models.SET_NULL,
verbose_name=_('content type'),
blank=True, null=True,
)
object_id = models.TextField(_('object id'), blank=True, null=True)
object_repr = models.CharField(_('object repr'), max_length=200)
action_flag = models.PositiveSmallIntegerField(_('action flag'))
change_message = models.TextField(_('change message'), blank=True)
objects = LogEntryManager()
class Meta:
verbose_name = _('log entry')
verbose_name_plural = _('log entries')
db_table = 'django_admin_log'
ordering = ('-action_time',)
def __repr__(self):
return smart_text(self.action_time)
def __str__(self):
if self.is_addition():
return ugettext('Added "%(object)s".') % {'object': self.object_repr}
elif self.is_change():
return ugettext('Changed "%(object)s" - %(changes)s') % {
'object': self.object_repr,
'changes': self.change_message,
}
elif self.is_deletion():
return ugettext('Deleted "%(object)s."') % {'object': self.object_repr}
return ugettext('LogEntry Object')
def is_addition(self):
return self.action_flag == ADDITION
def is_change(self):
return self.action_flag == CHANGE
def is_deletion(self):
return self.action_flag == DELETION
def get_edited_object(self):
"Returns the edited object represented by this log entry"
return self.content_type.get_object_for_this_type(pk=self.object_id)
def get_admin_url(self):
"""
Returns the admin URL to edit the object represented by this log entry.
"""
if self.content_type and self.object_id:
url_name = 'admin:%s_%s_change' % (self.content_type.app_label, self.content_type.model)
try:
return reverse(url_name, args=(quote(self.object_id),))
except NoReverseMatch:
pass
return None
|
bsd-3-clause
|
skosukhin/spack
|
lib/spack/spack/cmd/compiler.py
|
1
|
7883
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from __future__ import print_function
import argparse
import sys
from six import iteritems
import llnl.util.tty as tty
import spack.compilers
import spack.config
import spack.spec
from llnl.util.lang import index_by
from llnl.util.tty.colify import colify
from llnl.util.tty.color import colorize
from spack.spec import CompilerSpec, ArchSpec
description = "manage compilers"
section = "system"
level = "long"
def setup_parser(subparser):
sp = subparser.add_subparsers(
metavar='SUBCOMMAND', dest='compiler_command')
scopes = spack.config.config_scopes
# Find
find_parser = sp.add_parser(
'find', aliases=['add'],
help='search the system for compilers to add to Spack configuration')
find_parser.add_argument('add_paths', nargs=argparse.REMAINDER)
find_parser.add_argument(
'--scope', choices=scopes, default=spack.cmd.default_modify_scope,
help="configuration scope to modify")
# Remove
remove_parser = sp.add_parser(
'remove', aliases=['rm'], help='remove compiler by spec')
remove_parser.add_argument(
'-a', '--all', action='store_true',
help='remove ALL compilers that match spec')
remove_parser.add_argument('compiler_spec')
remove_parser.add_argument(
'--scope', choices=scopes, default=spack.cmd.default_modify_scope,
help="configuration scope to modify")
# List
list_parser = sp.add_parser('list', help='list available compilers')
list_parser.add_argument(
'--scope', choices=scopes, default=spack.cmd.default_list_scope,
help="configuration scope to read from")
# Info
info_parser = sp.add_parser('info', help='show compiler paths')
info_parser.add_argument('compiler_spec')
info_parser.add_argument(
'--scope', choices=scopes, default=spack.cmd.default_list_scope,
help="configuration scope to read from")
def compiler_find(args):
"""Search either $PATH or a list of paths OR MODULES for compilers and
add them to Spack's configuration.
"""
paths = args.add_paths
    # Don't initialize the compilers config via compilers.get_compiler_config.
    # Just let compiler_find do the entire process and return an empty config
    # from all_compilers. The default for any other process is init_config=True.
compilers = [c for c in spack.compilers.find_compilers(*paths)]
new_compilers = []
for c in compilers:
arch_spec = ArchSpec(None, c.operating_system, c.target)
same_specs = spack.compilers.compilers_for_spec(
c.spec, arch_spec, init_config=False)
if not same_specs:
new_compilers.append(c)
if new_compilers:
spack.compilers.add_compilers_to_config(new_compilers,
scope=args.scope,
init_config=False)
n = len(new_compilers)
s = 's' if n > 1 else ''
filename = spack.config.get_config_filename(args.scope, 'compilers')
tty.msg("Added %d new compiler%s to %s" % (n, s, filename))
colify(reversed(sorted(c.spec for c in new_compilers)), indent=4)
else:
tty.msg("Found no new compilers")
tty.msg("Compilers are defined in the following files:")
colify(spack.compilers.compiler_config_files(), indent=4)
def compiler_remove(args):
cspec = CompilerSpec(args.compiler_spec)
compilers = spack.compilers.compilers_for_spec(cspec, scope=args.scope)
if not compilers:
tty.die("No compilers match spec %s" % cspec)
elif not args.all and len(compilers) > 1:
tty.error("Multiple compilers match spec %s. Choose one:" % cspec)
colify(reversed(sorted([c.spec for c in compilers])), indent=4)
tty.msg("Or, use `spack compiler remove -a` to remove all of them.")
sys.exit(1)
for compiler in compilers:
spack.compilers.remove_compiler_from_config(
compiler.spec, scope=args.scope)
tty.msg("Removed compiler %s" % compiler.spec)
def compiler_info(args):
"""Print info about all compilers matching a spec."""
cspec = CompilerSpec(args.compiler_spec)
compilers = spack.compilers.compilers_for_spec(cspec, scope=args.scope)
if not compilers:
tty.error("No compilers match spec %s" % cspec)
else:
for c in compilers:
print(str(c.spec) + ":")
print("\ttarget: " + c.target)
print("\toperating_system: " + c.operating_system)
print("\tpaths:")
for cpath in ['cc', 'cxx', 'f77', 'fc']:
print("\t\t%s: %s" % (cpath, getattr(c, cpath, None)))
if any(c.flags):
print("\tflags:")
for flag, flag_value in iteritems(c.flags):
print("\t\t%s: %s" % (flag, flag_value))
else:
print("\tflags: " + str(type(c.flags)()))
if any(c.environment):
print("\tenvironment:")
for command in c.environment:
print("\t\t%s" % command)
else:
print("\tenvironment: " + str(type(c.environment)()))
if any(c.extra_rpaths):
print("\tExtra RPATHs:")
for extra_rpath in c.extra_rpaths:
print("\t\t" + extra_rpath)
else:
print("\tExtra RPATHs: " + str(type(c.extra_rpaths)()))
if any(c.modules):
print("\tmodules:")
for module in c.modules:
print("\t\t" + module)
else:
print("\tmodules: " + str(type(c.modules)()))
def compiler_list(args):
tty.msg("Available compilers")
index = index_by(spack.compilers.all_compilers(scope=args.scope),
lambda c: (c.spec.name, c.operating_system, c.target))
ordered_sections = sorted(index.items(), key=lambda item: item[0])
for i, (key, compilers) in enumerate(ordered_sections):
if i >= 1:
print()
name, os, target = key
os_str = os
if target:
os_str += "-%s" % target
cname = "%s{%s} %s" % (spack.spec.compiler_color, name, os_str)
tty.hline(colorize(cname), char='-')
colify(reversed(sorted(c.spec for c in compilers)))
def compiler(parser, args):
action = {'add': compiler_find,
'find': compiler_find,
'remove': compiler_remove,
'rm': compiler_remove,
'info': compiler_info,
'list': compiler_list}
action[args.compiler_command](args)
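# Illustrative usage of the subcommands dispatched above. The compiler spec
# and search path below are hypothetical examples, not output from a real
# install:
#
#     $ spack compiler find /opt/gcc/bin   # search a path, add new compilers
#     $ spack compiler list                # list configured compilers
#     $ spack compiler info gcc@9.3.0      # print paths, flags and modules
#     $ spack compiler remove gcc@9.3.0    # drop a compiler from the config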
|
lgpl-2.1
|
malefice/django-websocket-redis
|
ws4redis/settings.py
|
6
|
2037
|
# -*- coding: utf-8 -*-
from django.conf import settings
WEBSOCKET_URL = getattr(settings, 'WEBSOCKET_URL', '/ws/')
WS4REDIS_CONNECTION = getattr(settings, 'WS4REDIS_CONNECTION', {
'host': 'localhost',
'port': 6379,
'db': 0,
'password': None,
})
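# A project would typically override this dict in its own settings.py; the
# values below are placeholders, not defaults required by ws4redis:
#
#     WS4REDIS_CONNECTION = {
#         'host': 'redis.example.org',
#         'port': 6379,
#         'db': 1,
#         'password': None,
#     }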
"""
A string to prefix elements in the Redis datastore, to avoid naming conflicts with other services.
"""
WS4REDIS_PREFIX = getattr(settings, 'WS4REDIS_PREFIX', None)
"""
The time in seconds that items shall be persisted by the Redis datastore.
"""
WS4REDIS_EXPIRE = getattr(settings, 'WS4REDIS_EXPIRE', 3600)
"""
Replace the subscriber class by a customized version.
"""
WS4REDIS_SUBSCRIBER = getattr(settings, 'WS4REDIS_SUBSCRIBER', 'ws4redis.subscriber.RedisSubscriber')
"""
This sets the magic string used to recognize heartbeat messages. If set, such messages are
ignored by the server and shall also be ignored on the client.
If WS4REDIS_HEARTBEAT is not None, the server sends a heartbeat message at least every
4 seconds. It is then up to the client to decide what to do with these messages.
"""
WS4REDIS_HEARTBEAT = getattr(settings, 'WS4REDIS_HEARTBEAT', None)
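# Example (the value is a hypothetical convention, not mandated by ws4redis):
#
#     WS4REDIS_HEARTBEAT = '--heartbeat--'
#
# The client-side onmessage handler would then drop any message equal to that
# exact string instead of treating it as payload.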
"""
If set, this callback function is called right after the initialization of the Websocket.
This function can be used to restrict the subscription/publishing channels for the current client.
As its first parameter, it takes the current ``request`` object.
The second parameter is a list of desired subscription channels.
This callback function shall return a list of allowed channels or raise a ``PermissionDenied``
exception.
Remember that this function is not allowed to perform any blocking requests, such as accessing the
database!
"""
WS4REDIS_ALLOWED_CHANNELS = getattr(settings, 'WS4REDIS_ALLOWED_CHANNELS', None)
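# A minimal sketch of such a callback; the function name and the 'public'
# channel are hypothetical, not part of ws4redis:
#
#     from django.core.exceptions import PermissionDenied
#
#     def get_allowed_channels(request, channels):
#         # Anonymous clients may only use the public channel.
#         if not request.user.is_authenticated() and set(channels) != {'public'}:
#             raise PermissionDenied('Not allowed to subscribe/publish here')
#         return channels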
"""
If set, this callback function is called instead of the default process_request function in
WebsocketWSGIServer. It can be used to enforce a custom authentication flow, e.g. JWT.
"""
WS4REDIS_PROCESS_REQUEST = getattr(settings, 'WS4REDIS_PROCESS_REQUEST', None)
|
mit
|
jasco/WeasyPrint
|
weasyprint/tests/test_layout.py
|
4
|
177892
|
# coding: utf8
"""
weasyprint.tests.layout
-----------------------
    Tests for layout, i.e. positioning and dimensioning of boxes,
line breaks, page breaks.
:copyright: Copyright 2011-2014 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
import math
import pytest
from .testing_utils import FONTS, assert_no_logs, capture_logs, almost_equal
from ..formatting_structure import boxes
from .test_boxes import render_pages as parse
from .test_draw import requires_cairo, assert_pixels
def body_children(page):
"""Take a ``page`` and return its <body>’s children."""
html, = page.children
assert html.element_tag == 'html'
body, = html.children
assert body.element_tag == 'body'
return body.children
def outer_area(box):
"""Return the (x, y, w, h) rectangle for the outer area of a box."""
return (box.position_x, box.position_y,
box.margin_width(), box.margin_height())
@assert_no_logs
def test_page_size():
"""Test the layout for ``@page`` properties."""
pages = parse('<p>')
page = pages[0]
assert isinstance(page, boxes.PageBox)
assert int(page.margin_width()) == 793 # A4: 210 mm in pixels
assert int(page.margin_height()) == 1122 # A4: 297 mm in pixels
page, = parse('<style>@page { size: 2in 10in; }</style>')
assert page.margin_width() == 192
assert page.margin_height() == 960
page, = parse('<style>@page { size: 242px; }</style>')
assert page.margin_width() == 242
assert page.margin_height() == 242
page, = parse('<style>@page { size: letter; }</style>')
assert page.margin_width() == 816 # 8.5in
assert page.margin_height() == 1056 # 11in
page, = parse('<style>@page { size: letter portrait; }</style>')
assert page.margin_width() == 816 # 8.5in
assert page.margin_height() == 1056 # 11in
page, = parse('<style>@page { size: letter landscape; }</style>')
assert page.margin_width() == 1056 # 11in
assert page.margin_height() == 816 # 8.5in
page, = parse('<style>@page { size: portrait; }</style>')
assert int(page.margin_width()) == 793 # A4: 210 mm
assert int(page.margin_height()) == 1122 # A4: 297 mm
page, = parse('<style>@page { size: landscape; }</style>')
assert int(page.margin_width()) == 1122 # A4: 297 mm
assert int(page.margin_height()) == 793 # A4: 210 mm
page, = parse('''
<style>@page { size: 200px 300px; margin: 10px 10% 20% 1in }
body { margin: 8px }
</style>
<p style="margin: 0">
''')
assert page.margin_width() == 200
assert page.margin_height() == 300
assert page.position_x == 0
assert page.position_y == 0
assert page.width == 84 # 200px - 10% - 1 inch
assert page.height == 230 # 300px - 10px - 20%
html, = page.children
assert html.element_tag == 'html'
assert html.position_x == 96 # 1in
assert html.position_y == 10 # root element’s margins do not collapse
assert html.width == 84
body, = html.children
assert body.element_tag == 'body'
assert body.position_x == 96 # 1in
assert body.position_y == 10
# body has margins in the UA stylesheet
assert body.margin_left == 8
assert body.margin_right == 8
assert body.margin_top == 8
assert body.margin_bottom == 8
assert body.width == 68
paragraph, = body.children
assert paragraph.element_tag == 'p'
assert paragraph.position_x == 104 # 1in + 8px
assert paragraph.position_y == 18 # 10px + 8px
assert paragraph.width == 68
page, = parse('''
<style>
@page { size: 100px; margin: 1px 2px; padding: 4px 8px;
border-width: 16px 32px; border-style: solid }
</style>
<body>
''')
assert page.width == 16 # 100 - 2 * 42
assert page.height == 58 # 100 - 2 * 21
html, = page.children
assert html.element_tag == 'html'
assert html.position_x == 42 # 2 + 8 + 32
assert html.position_y == 21 # 1 + 4 + 16
page, = parse('''<style>@page {
size: 106px 206px; width: 80px; height: 170px;
padding: 1px; border: 2px solid; margin: auto;
}</style>''')
assert page.margin_top == 15 # (206 - 2*1 - 2*2 - 170) / 2
assert page.margin_right == 10 # (106 - 2*1 - 2*2 - 80) / 2
assert page.margin_bottom == 15 # (206 - 2*1 - 2*2 - 170) / 2
assert page.margin_left == 10 # (106 - 2*1 - 2*2 - 80) / 2
page, = parse('''<style>@page {
size: 106px 206px; width: 80px; height: 170px;
padding: 1px; border: 2px solid; margin: 5px 5px auto auto;
}</style>''')
assert page.margin_top == 5
assert page.margin_right == 5
assert page.margin_bottom == 25 # 206 - 2*1 - 2*2 - 170 - 5
assert page.margin_left == 15 # 106 - 2*1 - 2*2 - 80 - 5
# Over-constrained: the containing block is resized
page, = parse('''<style>@page {
size: 4px 10000px; width: 100px; height: 100px;
padding: 1px; border: 2px solid; margin: 3px;
}</style>''')
assert page.margin_width() == 112 # 100 + 2*1 + 2*2 + 2*3
assert page.margin_height() == 112
page, = parse('''<style>@page {
size: 1000px; margin: 100px;
max-width: 500px; min-height: 1500px;
}</style>''')
assert page.margin_width() == 700
assert page.margin_height() == 1700
page, = parse('''<style>@page {
size: 1000px; margin: 100px;
min-width: 1500px; max-height: 500px;
}</style>''')
assert page.margin_width() == 1700
assert page.margin_height() == 700
@assert_no_logs
def test_block_widths():
"""Test the blocks widths."""
page, = parse('''
<style>
@page { margin: 0; size: 120px 2000px }
body { margin: 0 }
div { margin: 10px }
p { padding: 2px; border-width: 1px; border-style: solid }
</style>
<div>
<p></p>
<p style="width: 50px"></p>
</div>
<div style="direction: rtl">
<p style="width: 50px; direction: rtl"></p>
</div>
<div>
<p style="margin: 0 10px 0 20px"></p>
<p style="width: 50px; margin-left: 20px; margin-right: auto"></p>
<p style="width: 50px; margin-left: auto; margin-right: 20px"></p>
<p style="width: 50px; margin: auto"></p>
<p style="margin-left: 20px; margin-right: auto"></p>
<p style="margin-left: auto; margin-right: 20px"></p>
<p style="margin: auto"></p>
<p style="width: 200px; margin: auto"></p>
<p style="min-width: 200px; margin: auto"></p>
<p style="max-width: 50px; margin: auto"></p>
<p style="min-width: 50px; margin: auto"></p>
<p style="width: 70%"></p>
</div>
''')
html, = page.children
assert html.element_tag == 'html'
body, = html.children
assert body.element_tag == 'body'
assert body.width == 120
divs = body.children
paragraphs = []
for div in divs:
assert isinstance(div, boxes.BlockBox)
assert div.element_tag == 'div'
assert div.width == 100
for paragraph in div.children:
assert isinstance(paragraph, boxes.BlockBox)
assert paragraph.element_tag == 'p'
assert paragraph.padding_left == 2
assert paragraph.padding_right == 2
assert paragraph.border_left_width == 1
assert paragraph.border_right_width == 1
paragraphs.append(paragraph)
assert len(paragraphs) == 15
# width is 'auto'
assert paragraphs[0].width == 94
assert paragraphs[0].margin_left == 0
assert paragraphs[0].margin_right == 0
# No 'auto', over-constrained equation with ltr, the initial
# 'margin-right: 0' was ignored.
assert paragraphs[1].width == 50
assert paragraphs[1].margin_left == 0
# No 'auto', over-constrained equation with rtl, the initial
# 'margin-left: 0' was ignored.
assert paragraphs[2].width == 50
assert paragraphs[2].margin_right == 0
# width is 'auto'
assert paragraphs[3].width == 64
assert paragraphs[3].margin_left == 20
# margin-right is 'auto'
assert paragraphs[4].width == 50
assert paragraphs[4].margin_left == 20
# margin-left is 'auto'
assert paragraphs[5].width == 50
assert paragraphs[5].margin_left == 24
# Both margins are 'auto', remaining space is split in half
assert paragraphs[6].width == 50
assert paragraphs[6].margin_left == 22
# width is 'auto', other 'auto' are set to 0
assert paragraphs[7].width == 74
assert paragraphs[7].margin_left == 20
# width is 'auto', other 'auto' are set to 0
assert paragraphs[8].width == 74
assert paragraphs[8].margin_left == 0
# width is 'auto', other 'auto' are set to 0
assert paragraphs[9].width == 94
assert paragraphs[9].margin_left == 0
# sum of non-auto initially is too wide, set auto values to 0
assert paragraphs[10].width == 200
assert paragraphs[10].margin_left == 0
# Constrained by min-width, same as above
assert paragraphs[11].width == 200
assert paragraphs[11].margin_left == 0
# Constrained by max-width, same as paragraphs[6]
assert paragraphs[12].width == 50
assert paragraphs[12].margin_left == 22
# NOT constrained by min-width
assert paragraphs[13].width == 94
assert paragraphs[13].margin_left == 0
# 70%
assert paragraphs[14].width == 70
assert paragraphs[14].margin_left == 0
@assert_no_logs
def test_block_heights():
"""Test the blocks heights."""
page, = parse('''
<style>
@page { margin: 0; size: 100px 20000px }
html, body { margin: 0 }
div { margin: 4px; border-width: 2px; border-style: solid;
padding: 4px }
/* Only use top margins so that margin collapsing does not change
the result: */
p { margin: 16px 0 0; border-width: 4px; border-style: solid;
padding: 8px; height: 50px }
</style>
<div>
<p></p>
<!-- These two are not in normal flow: the do not contribute to
the parent’s height. -->
<p style="position: absolute"></p>
<p style="float: left"></p>
</div>
<div>
<p></p>
<p></p>
<p></p>
</div>
<div style="height: 20px">
<p></p>
</div>
<div style="height: 120px">
<p></p>
</div>
<div style="max-height: 20px">
<p></p>
</div>
<div style="min-height: 120px">
<p></p>
</div>
<div style="min-height: 20px">
<p></p>
</div>
<div style="max-height: 120px">
<p></p>
</div>
''')
heights = [div.height for div in body_children(page)]
assert heights == [90, 90 * 3, 20, 120, 20, 120, 90, 90]
page, = parse('''
<style>
body { height: 200px; font-size: 0; }
</style>
<div>
<img src=pattern.png style="height: 40px">
</div>
<div style="height: 10%">
<img src=pattern.png style="height: 40px">
</div>
<div style="max-height: 20px">
<img src=pattern.png style="height: 40px">
</div>
<div style="max-height: 10%">
<img src=pattern.png style="height: 40px">
</div>
<div style="min-height: 20px"></div>
<div style="min-height: 10%"></div>
''')
heights = [div.height for div in body_children(page)]
assert heights == [40, 20, 20, 20, 20, 20]
# Same but with no height on body: percentage *-height is ignored
page, = parse('''
<style>
body { font-size: 0; }
</style>
<div>
<img src=pattern.png style="height: 40px">
</div>
<div style="height: 10%">
<img src=pattern.png style="height: 40px">
</div>
<div style="max-height: 20px">
<img src=pattern.png style="height: 40px">
</div>
<div style="max-height: 10%">
<img src=pattern.png style="height: 40px">
</div>
<div style="min-height: 20px"></div>
<div style="min-height: 10%"></div>
''')
heights = [div.height for div in body_children(page)]
assert heights == [40, 40, 20, 40, 20, 0]
@assert_no_logs
def test_block_percentage_heights():
"""Test the blocks heights set in percents."""
page, = parse('''
<style>
html, body { margin: 0 }
body { height: 50% }
</style>
<body>
''')
html, = page.children
assert html.element_tag == 'html'
body, = html.children
assert body.element_tag == 'body'
# Since html’s height depend on body’s, body’s 50% means 'auto'
assert body.height == 0
page, = parse('''
<style>
html, body { margin: 0 }
html { height: 300px }
body { height: 50% }
</style>
<body>
''')
html, = page.children
assert html.element_tag == 'html'
body, = html.children
assert body.element_tag == 'body'
# This time the percentage makes sense
assert body.height == 150
@assert_no_logs
def test_inline_block_sizes():
"""Test the inline-block elements sizes."""
page, = parse('''
<style>
@page { margin: 0; size: 200px 2000px }
body { margin: 0 }
div { display: inline-block; }
</style>
<div> </div>
<div>a</div>
<div style="margin: 10px; height: 100px"></div>
<div style="margin-left: 10px; margin-top: -50px;
padding-right: 20px;"></div>
<div>
Ipsum dolor sit amet,
consectetur adipiscing elit.
Sed sollicitudin nibh
et turpis molestie tristique.
</div>
<div style="width: 100px; height: 100px;
padding-left: 10px; margin-right: 10px;
margin-top: -10px; margin-bottom: 50px"></div>
<div style="font-size: 0">
<div style="min-width: 10px; height: 10px"></div>
<div style="width: 10%">
<div style="width: 10px; height: 10px"></div>
</div>
</div>
<div style="min-width: 185px">foo</div>
<div style="max-width: 10px
">Supercalifragilisticexpialidocious</div>''')
html, = page.children
assert html.element_tag == 'html'
body, = html.children
assert body.element_tag == 'body'
assert body.width == 200
line_1, line_2, line_3, line_4 = body.children
# First line:
# White space in-between divs ends up preserved in TextBoxes
div_1, _, div_2, _, div_3, _, div_4, _ = line_1.children
# First div, one ignored space collapsing with next space
assert div_1.element_tag == 'div'
assert div_1.width == 0
# Second div, one letter
assert div_2.element_tag == 'div'
assert 0 < div_2.width < 20
# Third div, empty with margin
assert div_3.element_tag == 'div'
assert div_3.width == 0
assert div_3.margin_width() == 20
assert div_3.height == 100
# Fourth div, empty with margin and padding
assert div_4.element_tag == 'div'
assert div_4.width == 0
assert div_4.margin_width() == 30
# Second line:
div_5, = line_2.children
# Fifth div, long text, full-width div
assert div_5.element_tag == 'div'
assert len(div_5.children) > 1
assert div_5.width == 200
# Third line:
div_6, _, div_7, _ = line_3.children
# Sixth div, empty div with fixed width and height
assert div_6.element_tag == 'div'
assert div_6.width == 100
assert div_6.margin_width() == 120
assert div_6.height == 100
assert div_6.margin_height() == 140
# Seventh div
assert div_7.element_tag == 'div'
assert div_7.width == 20
child_line, = div_7.children
    # Spaces have font-size: 0, so they get removed
child_div_1, child_div_2 = child_line.children
assert child_div_1.element_tag == 'div'
assert child_div_1.width == 10
assert child_div_2.element_tag == 'div'
assert child_div_2.width == 2
grandchild, = child_div_2.children
assert grandchild.element_tag == 'div'
assert grandchild.width == 10
div_8, _, div_9 = line_4.children
assert div_8.width == 185
assert div_9.width == 10
    # Previously, the hinting for shrink-to-fit did not match that
    # of the layout, which often resulted in a line break just before
    # the last word.
page, = parse('''
<p style="display: inline-block">Lorem ipsum dolor sit amet …</p>''')
html, = page.children
body, = html.children
outer_line, = body.children
paragraph, = outer_line.children
inner_lines = paragraph.children
assert len(inner_lines) == 1
text_box, = inner_lines[0].children
assert text_box.text == 'Lorem ipsum dolor sit amet …'
@assert_no_logs
def test_inline_table():
"""Test the inline-table elements sizes."""
page, = parse('''
<table style="display: inline-table; border-spacing: 10px;
margin: 5px">
<tr>
<td><img src=pattern.png style="width: 20px"></td>
<td><img src=pattern.png style="width: 30px"></td>
</tr>
</table>
foo
''')
html, = page.children
body, = html.children
line, = body.children
table_wrapper, text = line.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert table_wrapper.position_x == 0
assert table.position_x == 5 # 0 + margin-left
    assert td_1.position_x == 15  # 5 + border-spacing
assert td_1.width == 20
assert td_2.position_x == 45 # 15 + 20 + border-spacing
assert td_2.width == 30
assert table.width == 80 # 20 + 30 + 3 * border-spacing
assert table_wrapper.margin_width() == 90 # 80 + 2 * margin
assert text.position_x == 90
@assert_no_logs
def test_implicit_width_table():
"""Test table with implicit width."""
# See https://github.com/Kozea/WeasyPrint/issues/169
page, = parse('''
<table>
<col style="width:25%"></col>
<col></col>
<tr>
<td></td>
<td></td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
page, = parse('''
<table>
<tr>
<td style="width:25%"></td>
<td></td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
@assert_no_logs
def test_fixed_layout_table():
"""Test the fixed layout table elements sizes."""
page, = parse('''
<table style="table-layout: fixed; border-spacing: 10px;
margin: 5px">
<colgroup>
<col style="width: 20px" />
</colgroup>
<tr>
<td></td>
<td style="width: 40px">a</td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert table_wrapper.position_x == 0
assert table.position_x == 5 # 0 + margin-left
assert td_1.position_x == 15 # 5 + border-spacing
assert td_1.width == 20
assert td_2.position_x == 45 # 15 + 20 + border-spacing
assert td_2.width == 40
assert table.width == 90 # 20 + 40 + 3 * border-spacing
page, = parse('''
<table style="table-layout: fixed; border-spacing: 10px;
width: 200px; margin: 5px">
<tr>
<td style="width: 20px">a</td>
<td style="width: 40px"></td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert table_wrapper.position_x == 0
assert table.position_x == 5 # 0 + margin-left
assert td_1.position_x == 15 # 5 + border-spacing
assert td_1.width == 75 # 20 + ((200 - 20 - 40 - 3 * border-spacing) / 2)
assert td_2.position_x == 100 # 15 + 75 + border-spacing
assert td_2.width == 95 # 40 + ((200 - 20 - 40 - 3 * border-spacing) / 2)
assert table.width == 200
page, = parse('''
<table style="table-layout: fixed; border-spacing: 10px;
width: 110px; margin: 5px">
<tr>
<td style="width: 40px">a</td>
<td>b</td>
</tr>
<tr>
<td style="width: 50px">a</td>
<td style="width: 30px">b</td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row_1, row_2 = row_group.children
td_1, td_2 = row_1.children
td_3, td_4 = row_2.children
assert table_wrapper.position_x == 0
assert table.position_x == 5 # 0 + margin-left
    assert td_1.position_x == 15  # 5 + border-spacing
assert td_3.position_x == 15
assert td_1.width == 40
assert td_2.width == 40
assert td_2.position_x == 65 # 15 + 40 + border-spacing
assert td_4.position_x == 65
assert td_3.width == 40
assert td_4.width == 40
    assert table.width == 110  # 40 + 40 + 3 * border-spacing
page, = parse('''
<table style="table-layout: fixed; border-spacing: 0;
width: 100px; margin: 10px">
<colgroup>
<col />
<col style="width: 20px" />
</colgroup>
<tr>
<td></td>
<td style="width: 40px">a</td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert table_wrapper.position_x == 0
assert table.position_x == 10 # 0 + margin-left
assert td_1.position_x == 10
assert td_1.width == 80 # 100 - 20
assert td_2.position_x == 90 # 10 + 80
assert td_2.width == 20
assert table.width == 100
# With border-collapse
page, = parse('''
<style>
/* Do not apply: */
colgroup, col, tbody, tr, td { margin: 1000px }
</style>
<table style="table-layout: fixed;
border-collapse: collapse; border: 10px solid;
/* ignored with collapsed borders: */
border-spacing: 10000px; padding: 1000px">
<colgroup>
<col style="width: 30px" />
</colgroup>
<tbody>
<tr>
<td style="padding: 2px"></td>
<td style="width: 34px; padding: 10px; border: 2px solid"></td>
</tr>
</tbody>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert table_wrapper.position_x == 0
assert table.position_x == 0
assert table.border_left_width == 5 # half of the collapsed 10px border
assert td_1.position_x == 5 # border-spacing is ignored
assert td_1.margin_width() == 30 # as <col>
assert td_1.width == 20 # 30 - 5 (border-left) - 1 (border-right) - 2*2
assert td_2.position_x == 35
assert td_2.width == 34
assert td_2.margin_width() == 60 # 34 + 2*10 + 5 + 1
assert table.width == 90 # 30 + 60
assert table.margin_width() == 100 # 90 + 2*5 (border)
@assert_no_logs
def test_auto_layout_table():
"""Test the auto layout table elements sizes."""
page, = parse('''
<body style="width: 100px">
<table style="border-spacing: 10px; margin: auto">
<tr>
<td><img src=pattern.png></td>
<td><img src=pattern.png></td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert table_wrapper.position_x == 0
assert table_wrapper.width == 38 # Same as table, see below
assert table_wrapper.margin_left == 31 # 0 + margin-left = (100 - 38) / 2
assert table_wrapper.margin_right == 31
assert table.position_x == 31
assert td_1.position_x == 41 # 31 + spacing
assert td_1.width == 4
assert td_2.position_x == 55 # 31 + 4 + spacing
assert td_2.width == 4
assert table.width == 38 # 3 * spacing + 2 * 4
page, = parse('''
<body style="width: 50px">
<table style="border-spacing: 1px; margin: 10%">
<tr>
<td style="border: 3px solid black"><img src=pattern.png></td>
<td style="border: 3px solid black">
<img src=pattern.png><img src=pattern.png>
</td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert table_wrapper.position_x == 0
assert table.position_x == 5 # 0 + margin-left
assert td_1.position_x == 6 # 5 + border-spacing
assert td_1.width == 4
assert td_2.position_x == 17 # 6 + 4 + spacing + 2 * border
assert td_2.width == 8
assert table.width == 27 # 3 * spacing + 4 + 8 + 4 * border
page, = parse('''
<table style="border-spacing: 1px; margin: 5px; font-size: 0">
<tr>
<td></td>
<td><img src=pattern.png><img src=pattern.png></td>
</tr>
<tr>
<td>
<img src=pattern.png>
<img src=pattern.png>
<img src=pattern.png>
</td>
<td><img src=pattern.png></td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row1, row2 = row_group.children
td_11, td_12 = row1.children
td_21, td_22 = row2.children
assert table_wrapper.position_x == 0
assert table.position_x == 5 # 0 + margin-left
assert td_11.position_x == td_21.position_x == 6 # 5 + spacing
assert td_11.width == td_21.width == 12
assert td_12.position_x == td_22.position_x == 19 # 6 + 12 + spacing
assert td_12.width == td_22.width == 8
assert table.width == 23 # 3 * spacing + 12 + 8
page, = parse('''
<table style="border-spacing: 1px; margin: 5px">
<tr>
<td style="border: 1px solid black"><img src=pattern.png></td>
<td style="border: 2px solid black; padding: 1px">
<img src=pattern.png>
</td>
</tr>
<tr>
<td style="border: 5px solid black"><img src=pattern.png></td>
<td><img src=pattern.png></td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row1, row2 = row_group.children
td_11, td_12 = row1.children
td_21, td_22 = row2.children
assert table_wrapper.position_x == 0
assert table.position_x == 5 # 0 + margin-left
assert td_11.position_x == td_21.position_x == 6 # 5 + spacing
assert td_11.width == 12 # 4 + 2 * 5 - 2 * 1
assert td_21.width == 4
assert td_12.position_x == td_22.position_x == 21 # 6 + 4 + 2 * b1 + sp
assert td_12.width == 4
assert td_22.width == 10 # 4 + 2 * 3
assert table.width == 27 # 3 * spacing + 4 + 4 + 2 * b1 + 2 * b2
page, = parse('''
<style>
@page { size: 100px 1000px; }
</style>
<table style="border-spacing: 1px; margin-right: 79px; font-size: 0">
<tr>
<td><img src=pattern.png></td>
<td>
<img src=pattern.png> <img src=pattern.png>
<img src=pattern.png> <img src=pattern.png>
<img src=pattern.png> <img src=pattern.png>
<img src=pattern.png> <img src=pattern.png>
<img src=pattern.png>
</td>
</tr>
<tr>
<td></td>
</tr>
</table>
''')
# Preferred minimum width is 2 * 4 + 3 * 1 = 11
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row1, row2 = row_group.children
td_11, td_12 = row1.children
td_21, = row2.children
assert table_wrapper.position_x == 0
assert table.position_x == 0
assert td_11.position_x == td_21.position_x == 1 # spacing
assert td_11.width == td_21.width == 5 # 4 + (width - pmw) * 1 / 10
assert td_12.position_x == 7 # 1 + 5 + sp
assert td_12.width == 13 # 4 + (width - pmw) * 9 / 10
assert table.width == 21
page, = parse('''
<table style="border-spacing: 10px; margin: 5px">
<colgroup>
<col style="width: 20px" />
</colgroup>
<tr>
<td></td>
<td style="width: 40px">a</td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert table_wrapper.position_x == 0
assert table.position_x == 5 # 0 + margin-left
    assert td_1.position_x == 15  # 5 + border-spacing
assert td_1.width == 20
assert td_2.position_x == 45 # 15 + 20 + border-spacing
assert td_2.width == 40
assert table.width == 90 # 20 + 40 + 3 * border-spacing
page, = parse('''
<table style="border-spacing: 10px; width: 120px; margin: 5px;
font-size: 0">
<tr>
<td style="width: 20px"><img src=pattern.png></td>
<td><img src=pattern.png style="width: 40px"></td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert table_wrapper.position_x == 0
assert table.position_x == 5 # 0 + margin-left
assert td_1.position_x == 15 # 5 + border-spacing
assert td_1.width == 30 # 20 + ((120 - 20 - 40 - 3 * sp) * 1 / 3)
assert td_2.position_x == 55 # 15 + 30 + border-spacing
assert td_2.width == 60 # 40 + ((120 - 20 - 40 - 3 * sp) * 2 / 3)
assert table.width == 120
page, = parse('''
<table style="border-spacing: 10px; width: 110px; margin: 5px">
<tr>
<td style="width: 60px"></td>
<td></td>
</tr>
<tr>
<td style="width: 50px"></td>
<td style="width: 30px"></td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row_1, row_2 = row_group.children
td_1, td_2 = row_1.children
td_3, td_4 = row_2.children
assert table_wrapper.position_x == 0
assert table.position_x == 5 # 0 + margin-left
    assert td_1.position_x == 15  # 5 + border-spacing
assert td_3.position_x == 15
assert td_1.width == 60
assert td_2.width == 30
assert td_2.position_x == 85 # 15 + 60 + border-spacing
assert td_4.position_x == 85
assert td_3.width == 60
assert td_4.width == 30
assert table.width == 120 # 60 + 30 + 3 * border-spacing
page, = parse('''
<table style="border-spacing: 0; width: 14px; margin: 10px">
<colgroup>
<col />
<col style="width: 6px" />
</colgroup>
<tr>
<td><img src=pattern.png><img src=pattern.png></td>
<td style="width: 8px"></td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert table_wrapper.position_x == 0
assert table.position_x == 10 # 0 + margin-left
assert td_1.position_x == 10
assert td_1.width == 5 # 4 + ((14 - 4 - 8) * 8 / 16)
assert td_2.position_x == 15 # 10 + 5
assert td_2.width == 9 # 8 + ((14 - 4 - 8) * 8 / 16)
assert table.width == 14
page, = parse('''
<table style="border-spacing: 0">
<tr>
<td style="width: 10px"></td>
<td colspan="3"></td>
</tr>
<tr>
<td colspan="2" style="width: 22px"></td>
<td style="width: 8px"></td>
<td style="width: 8px"></td>
</tr>
<tr>
<td></td>
<td></td>
<td colspan="2"></td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row1, row2, row3 = row_group.children
td_11, td_12 = row1.children
td_21, td_22, td_23 = row2.children
td_31, td_32, td_33 = row3.children
assert table_wrapper.position_x == 0
assert table.position_x == 0
assert td_11.width == 16 # 10 + (22 - 10) / 2
assert td_12.width == 22 # (0 + (22 - 10) / 2) + 8 + 8
assert td_21.width == 22
assert td_22.width == 8
assert td_23.width == 8
assert td_31.width == 16
assert td_32.width == 6
assert td_33.width == 16
assert table.width == 38
page, = parse('''
<table style="border-spacing: 10px">
<tr>
<td style="width: 10px"></td>
<td colspan="3"></td>
</tr>
<tr>
<td colspan="2" style="width: 32px"></td>
<td style="width: 8px"></td>
<td style="width: 8px"></td>
</tr>
<tr>
<td></td>
<td></td>
<td colspan="2"></td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row1, row2, row3 = row_group.children
td_11, td_12 = row1.children
td_21, td_22, td_23 = row2.children
td_31, td_32, td_33 = row3.children
assert table_wrapper.position_x == 0
assert table.position_x == 0
assert td_11.width == 16 # 10 + (22 - 10) / 2
    assert td_12.width == 42  # (0 + (22 - 10) / 2) + 8 + 8 + 2 * border-spacing
assert td_21.width == 32
assert td_22.width == 8
assert td_23.width == 8
assert td_31.width == 16
assert td_32.width == 6
assert td_33.width == 26
assert table.width == 88
# Regression tests: these used to crash
page, = parse('''
<table style="width: 30px">
<tr>
<td colspan=2></td>
<td></td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert td_1.width == 20
assert td_2.width == 10
assert table.width == 30
page, = parse('''
<table style="width: 20px">
<col />
<col />
<tr>
<td></td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, = row.children
assert td_1.width == 10 # TODO: should this be 20?
assert table.width == 20
page, = parse('''
<table style="width: 20px">
<col />
<col />
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
column_group, = table.column_groups
column_1, column_2 = column_group.children
assert column_1.width == 10
assert column_2.width == 10
# Absolute table
page, = parse('''
<table style="width: 30px; position: absolute">
<tr>
<td colspan=2></td>
<td></td>
</tr>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert td_1.width == 20
assert td_2.width == 10
assert table.width == 30
# With border-collapse
page, = parse('''
<style>
/* Do not apply: */
colgroup, col, tbody, tr, td { margin: 1000px }
</style>
<table style="border-collapse: collapse; border: 10px solid;
/* ignored with collapsed borders: */
border-spacing: 10000px; padding: 1000px">
<colgroup>
<col style="width: 30px" />
</colgroup>
<tbody>
<tr>
<td style="padding: 2px"></td>
<td style="width: 34px; padding: 10px; border: 2px solid"></td>
</tr>
</tbody>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert table_wrapper.position_x == 0
assert table.position_x == 0
assert table.border_left_width == 5 # half of the collapsed 10px border
assert td_1.position_x == 5 # border-spacing is ignored
assert td_1.margin_width() == 30 # as <col>
assert td_1.width == 20 # 30 - 5 (border-left) - 1 (border-right) - 2*2
assert td_2.position_x == 35
assert td_2.width == 34
assert td_2.margin_width() == 60 # 34 + 2*10 + 5 + 1
assert table.width == 90 # 30 + 60
assert table.margin_width() == 100 # 90 + 2*5 (border)
# Column widths as percentage
page, = parse('''
<table style="width: 200px">
<colgroup>
<col style="width: 70%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr>
<td>a</td>
<td>abc</td>
</tr>
</tbody>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert td_1.width == 140
assert td_2.width == 60
assert table.width == 200
# Column group width
page, = parse('''
<table style="width: 200px">
<colgroup style="width: 100px">
<col />
<col />
</colgroup>
<col style="width: 100px" />
<tbody>
<tr>
<td>a</td>
<td>a</td>
<td>abc</td>
</tr>
</tbody>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2, td_3 = row.children
assert td_1.width == 50
assert td_2.width == 50
assert td_3.width == 100
assert table.width == 200
# Column group width as percentage
page, = parse('''
<table style="width: 200px">
<colgroup style="width: 100px">
<col />
<col />
</colgroup>
<colgroup style="width: 50%">
<col />
<col />
</colgroup>
<tbody>
<tr>
<td>a</td>
<td>a</td>
<td>abc</td>
<td>abc</td>
</tr>
</tbody>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2, td_3, td_4 = row.children
assert td_1.width == 50
assert td_2.width == 50
assert td_3.width == 50
assert td_4.width == 50
assert table.width == 200
# Wrong column group width
page, = parse('''
<table style="width: 200px">
<colgroup style="width: 80%">
<col />
<col />
</colgroup>
<tbody>
<tr>
<td>a</td>
<td>a</td>
</tr>
</tbody>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert td_1.width == 100
assert td_2.width == 100
assert table.width == 200
# Column width as percentage and cell width in pixels
page, = parse('''
<table style="width: 200px">
<colgroup>
<col style="width: 70%" />
<col />
</colgroup>
<tbody>
<tr>
<td>a</td>
<td style="width: 60px">abc</td>
</tr>
</tbody>
</table>
''')
html, = page.children
body, = html.children
table_wrapper, = body.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert td_1.width == 140
assert td_2.width == 60
assert table.width == 200
# Column width and cell width as percentage
page, = parse('''
<div style="width: 400px">
<table style="width: 50%">
<colgroup>
<col style="width: 70%" />
<col />
</colgroup>
<tbody>
<tr>
<td>a</td>
<td style="width: 30%">abc</td>
</tr>
</tbody>
</table>
</div>
''')
html, = page.children
body, = html.children
div, = body.children
table_wrapper, = div.children
table, = table_wrapper.children
row_group, = table.children
row, = row_group.children
td_1, td_2 = row.children
assert td_1.width == 140
assert td_2.width == 60
assert table.width == 200
# Test regression on a crash: https://github.com/Kozea/WeasyPrint/pull/152
page, = parse('''
<table>
<td style="width: 50%">
</table>
''')
@assert_no_logs
def test_lists():
"""Test the lists."""
page, = parse('''
<style>
body { margin: 0 }
ul { margin-left: 50px; list-style: inside circle }
</style>
<ul>
<li>abc</li>
</ul>
''')
unordered_list, = body_children(page)
list_item, = unordered_list.children
line, = list_item.children
marker, content = line.children
assert marker.text == '◦'
assert marker.margin_left == 0
assert marker.margin_right == 8
assert content.text == 'abc'
page, = parse('''
<style>
body { margin: 0 }
ul { margin-left: 50px; }
</style>
<ul>
<li>abc</li>
</ul>
''')
unordered_list, = body_children(page)
list_item, = unordered_list.children
marker = list_item.outside_list_marker
font_size = marker.style.font_size
assert marker.margin_right == 0.5 * font_size # 0.5em
assert marker.position_x == (
list_item.padding_box_x() - marker.width - marker.margin_right)
assert marker.position_y == list_item.position_y
assert marker.text == '•'
line, = list_item.children
content, = line.children
assert content.text == 'abc'
@assert_no_logs
def test_empty_linebox():
"""Test lineboxes with no content other than space-like characters."""
page, = parse('<p> </p>')
paragraph, = body_children(page)
assert len(paragraph.children) == 0
assert paragraph.height == 0
# Whitespace removed at the beginning of the line => empty line => no line
page, = parse('''
<style>
p { width: 1px }
</style>
<p><br> </p>
''')
paragraph, = body_children(page)
# TODO: The second line should be removed
pytest.xfail()
assert len(paragraph.children) == 1
@assert_no_logs
def test_breaking_linebox():
"""Test lineboxes breaks with a lot of text and deep nesting."""
page, = parse('''
<style>
p { font-size: 13px;
width: 300px;
font-family: %(fonts)s;
background-color: #393939;
color: #FFFFFF;
line-height: 1;
text-decoration: underline overline line-through;}
</style>
<p><em>Lorem<strong> Ipsum <span>is very</span>simply</strong><em>
dummy</em>text of the printing and. naaaa </em> naaaa naaaa naaaa
naaaa naaaa naaaa naaaa naaaa</p>
''' % {'fonts': FONTS})
html, = page.children
body, = html.children
paragraph, = body.children
assert len(list(paragraph.children)) == 3
lines = paragraph.children
for line in lines:
assert line.style.font_size == 13
assert line.element_tag == 'p'
for child in line.children:
assert child.element_tag in ('em', 'p')
assert child.style.font_size == 13
if isinstance(child, boxes.ParentBox):
for child_child in child.children:
                    assert child_child.element_tag in ('em', 'strong', 'span')
                    assert child_child.style.font_size == 13
# See http://unicode.org/reports/tr14/
page, = parse('<pre>a\nb\rc\r\nd\u2029e</pre>')
html, = page.children
body, = html.children
pre, = body.children
lines = pre.children
texts = []
for line in lines:
text_box, = line.children
texts.append(text_box.text)
assert texts == ['a', 'b', 'c', 'd', 'e']
@assert_no_logs
def test_linebox_text():
"""Test the creation of line boxes."""
page, = parse('''
<style>
p { width: 165px; font-family:%(fonts)s;}
</style>
<p><em>Lorem Ipsum</em>is very <strong>coool</strong></p>
''' % {'fonts': FONTS})
paragraph, = body_children(page)
lines = list(paragraph.children)
assert len(lines) == 2
text = ' '.join(
(''.join(box.text for box in line.descendants()
if isinstance(box, boxes.TextBox)))
for line in lines)
assert text == 'Lorem Ipsumis very coool'
@assert_no_logs
def test_linebox_positions():
"""Test the position of line boxes."""
for width, expected_lines in [(165, 2), (1, 5), (0, 5)]:
page = '''
<style>
p { width:%(width)spx; font-family:%(fonts)s;
line-height: 20px }
</style>
<p>this is test for <strong>Weasyprint</strong></p>'''
page, = parse(page % {'fonts': FONTS, 'width': width})
paragraph, = body_children(page)
lines = list(paragraph.children)
assert len(lines) == expected_lines
ref_position_y = lines[0].position_y
ref_position_x = lines[0].position_x
for line in lines:
assert ref_position_y == line.position_y
assert ref_position_x == line.position_x
for box in line.children:
assert ref_position_x == box.position_x
ref_position_x += box.width
assert ref_position_y == box.position_y
assert ref_position_x - line.position_x <= line.width
ref_position_x = line.position_x
ref_position_y += line.height
@assert_no_logs
def test_forced_line_breaks():
"""Test <pre> and <br>."""
# These lines should be small enough to fit on the default A4 page
# with the default 12pt font-size.
page, = parse('''
<style> pre { line-height: 42px }</style>
<pre>Lorem ipsum dolor sit amet,
consectetur adipiscing elit.
Sed sollicitudin nibh
et turpis molestie tristique.</pre>
''')
pre, = body_children(page)
assert pre.element_tag == 'pre'
lines = pre.children
assert all(isinstance(line, boxes.LineBox) for line in lines)
assert len(lines) == 7
assert [line.height for line in lines] == [42] * 7
page, = parse('''
<style> p { line-height: 42px }</style>
<p>Lorem ipsum dolor sit amet,<br>
consectetur adipiscing elit.<br><br><br>
Sed sollicitudin nibh<br>
<br>
et turpis molestie tristique.</p>
''')
pre, = body_children(page)
assert pre.element_tag == 'p'
lines = pre.children
assert all(isinstance(line, boxes.LineBox) for line in lines)
assert len(lines) == 7
assert [line.height for line in lines] == [42] * 7
@assert_no_logs
def test_page_breaks():
"""Test the page breaks."""
pages = parse('''
<style>
@page { size: 100px; margin: 10px }
body { margin: 0 }
div { height: 30px; font-size: 20px; }
</style>
<div>1</div>
<div>2</div>
<div>3</div>
<div>4</div>
<div>5</div>
''')
page_divs = []
for page in pages:
divs = body_children(page)
assert all([div.element_tag == 'div' for div in divs])
assert all([div.position_x == 10 for div in divs])
page_divs.append(divs)
del divs
positions_y = [[div.position_y for div in divs] for divs in page_divs]
assert positions_y == [[10, 40], [10, 40], [10]]
# Same as above, but no content inside each <div>.
# This used to produce no page break.
pages = parse('''
<style>
@page { size: 100px; margin: 10px }
body { margin: 0 }
div { height: 30px }
</style>
<div></div><div></div><div></div><div></div><div></div>
''')
page_divs = []
for page in pages:
divs = body_children(page)
assert all([div.element_tag == 'div' for div in divs])
assert all([div.position_x == 10 for div in divs])
page_divs.append(divs)
del divs
positions_y = [[div.position_y for div in divs] for divs in page_divs]
assert positions_y == [[10, 40], [10, 40], [10]]
pages = parse('''
<style>
@page { size: 100px; margin: 10px }
img { height: 30px; display: block }
</style>
<body>
<img src=pattern.png>
<img src=pattern.png>
<img src=pattern.png>
<img src=pattern.png>
<img src=pattern.png>
''')
page_images = []
for page in pages:
images = body_children(page)
assert all([img.element_tag == 'img' for img in images])
assert all([img.position_x == 10 for img in images])
page_images.append(images)
del images
positions_y = [[img.position_y for img in images]
for images in page_images]
assert positions_y == [[10, 40], [10, 40], [10]]
page_1, page_2, page_3, page_4 = parse('''
<style>
@page { margin: 10px }
@page :left { margin-left: 50px }
@page :right { margin-right: 50px }
html { page-break-before: left }
div { page-break-after: left }
ul { page-break-before: always }
</style>
<div>1</div>
<p>2</p>
<p>3</p>
<article>
<section>
<ul><li>4</li></ul>
</section>
</article>
''')
# The first page is a right page on rtl, but not here because of
# page-break-before on the root element.
assert page_1.margin_left == 50 # left page
assert page_1.margin_right == 10
html, = page_1.children
body, = html.children
div, = body.children
line, = div.children
text, = line.children
assert div.element_tag == 'div'
assert text.text == '1'
html, = page_2.children
assert page_2.margin_left == 10
assert page_2.margin_right == 50 # right page
assert not html.children # empty page to get to a left page
assert page_3.margin_left == 50 # left page
assert page_3.margin_right == 10
html, = page_3.children
body, = html.children
p_1, p_2 = body.children
assert p_1.element_tag == 'p'
assert p_2.element_tag == 'p'
assert page_4.margin_left == 10
assert page_4.margin_right == 50 # right page
html, = page_4.children
body, = html.children
article, = body.children
section, = article.children
ulist, = section.children
assert ulist.element_tag == 'ul'
# Reference for the following test:
# Without any 'avoid', this breaks after the <div>
page_1, page_2 = parse('''
<style>
@page { size: 140px; margin: 0 }
img { height: 25px; vertical-align: top }
p { orphans: 1; widows: 1 }
</style>
<body>
<img src=pattern.png>
<div>
<p><img src=pattern.png><br/><img src=pattern.png><p>
<p><img src=pattern.png><br/><img src=pattern.png><p>
</div><!-- page break here -->
<img src=pattern.png>
''')
html, = page_1.children
body, = html.children
img_1, div = body.children
assert img_1.position_y == 0
assert img_1.height == 25
assert div.position_y == 25
assert div.height == 100
html, = page_2.children
body, = html.children
img_2, = body.children
assert img_2.position_y == 0
assert img_2.height == 25
# Adding a few page-break-*: avoid, the only legal break is
# before the <div>
page_1, page_2 = parse('''
<style>
@page { size: 140px; margin: 0 }
img { height: 25px; vertical-align: top }
p { orphans: 1; widows: 1 }
</style>
<body>
<img src=pattern.png><!-- page break here -->
<div>
<p style="page-break-inside: avoid">
><img src=pattern.png><br/><img src=pattern.png></p>
<p style="page-break-before: avoid; page-break-after: avoid;
widows: 2"
><img src=pattern.png><br/><img src=pattern.png></p>
</div>
<img src=pattern.png>
''')
html, = page_1.children
body, = html.children
img_1, = body.children
assert img_1.position_y == 0
assert img_1.height == 25
html, = page_2.children
body, = html.children
div, img_2 = body.children
assert div.position_y == 0
assert div.height == 100
assert img_2.position_y == 100
assert img_2.height == 25
page_1, page_2 = parse('''
<style>
@page { size: 140px; margin: 0 }
img { height: 25px; vertical-align: top }
p { orphans: 1; widows: 1 }
</style>
<body>
<img src=pattern.png><!-- page break here -->
<div>
<div>
<p style="page-break-inside: avoid">
><img src=pattern.png><br/><img src=pattern.png></p>
<p style="page-break-before: avoid;
page-break-after: avoid;
widows: 2"
><img src=pattern.png><br/><img src=pattern.png></p>
</div>
<img src=pattern.png>
</div>
''')
html, = page_1.children
body, = html.children
img_1, = body.children
assert img_1.position_y == 0
assert img_1.height == 25
html, = page_2.children
body, = html.children
outer_div, = body.children
inner_div, img_2 = outer_div.children
assert inner_div.position_y == 0
assert inner_div.height == 100
assert img_2.position_y == 100
assert img_2.height == 25
# Reference for the next test
page_1, page_2, page_3 = parse('''
<style>
@page { size: 100px; margin: 0 }
img { height: 30px; display: block; }
p { orphans: 1; widows: 1 }
</style>
<body>
<div>
<img src=pattern.png style="page-break-after: always">
<section>
<img src=pattern.png>
<img src=pattern.png>
</section>
</div>
<img src=pattern.png><!-- page break here -->
<img src=pattern.png>
''')
html, = page_1.children
body, = html.children
div, = body.children
assert div.height == 30
html, = page_2.children
body, = html.children
div, img_4 = body.children
assert div.height == 60
assert img_4.height == 30
html, = page_3.children
body, = html.children
img_5, = body.children
assert img_5.height == 30
page_1, page_2, page_3 = parse('''
<style>
@page { size: 100px; margin: 0 }
img { height: 30px; display: block; }
p { orphans: 1; widows: 1 }
</style>
<body>
<div>
<img src=pattern.png style="page-break-after: always">
<section>
<img src=pattern.png><!-- page break here -->
<img src=pattern.png style="page-break-after: avoid">
</section>
</div>
<img src=pattern.png style="page-break-after: avoid">
<img src=pattern.png>
''')
html, = page_1.children
body, = html.children
div, = body.children
assert div.height == 30
html, = page_2.children
body, = html.children
div, = body.children
section, = div.children
img_2, = section.children
assert img_2.height == 30
# TODO: currently this is 60: we do not decrease the used height of
# blocks with 'height: auto' when we remove children from them for
# some page-break-*: avoid.
# assert div.height == 30
html, = page_3.children
body, = html.children
div, img_4, img_5, = body.children
assert div.height == 30
assert img_4.height == 30
assert img_5.height == 30
page_1, page_2, page_3 = parse('''
<style>
@page {
@bottom-center { content: counter(page) }
}
@page:blank {
@bottom-center { content: none }
}
</style>
<p style="page-break-after: right">foo</p>
<p>bar</p>
''')
assert len(page_1.children) == 2 # content and @bottom-center
assert len(page_2.children) == 1 # content only
assert len(page_3.children) == 2 # content and @bottom-center
page_1, page_2 = parse('''
<style>
@page { size: 75px; margin: 0 }
div { height: 20px }
</style>
<body>
<div></div>
<section>
<div></div>
<div style="page-break-after: avoid">
<div style="position: absolute"></div>
<div style="position: fixed"></div>
</div>
</section>
<div></div>
''')
html, = page_1.children
body, _div = html.children
div_1, section = body.children
div_2, = section.children
assert div_1.position_y == 0
assert div_2.position_y == 20
assert div_1.height == 20
assert div_2.height == 20
html, = page_2.children
body, = html.children
section, div_4 = body.children
div_3, = section.children
absolute, fixed = div_3.children
assert div_3.position_y == 0
assert div_4.position_y == 20
assert div_3.height == 20
assert div_4.height == 20
@assert_no_logs
def test_orphans_widows_avoid():
"""Test orphans and widows control."""
def line_distribution(css):
pages = parse('''
<style>
@page { size: 200px }
h1 { height: 120px }
p { line-height: 20px;
width: 1px; /* line break at each word */
%s }
</style>
<h1>Tasty test</h1>
            <!-- There is room for 4 lines after h1 on the first page -->
<p>
one
two
three
four
five
six
seven
</p>
''' % css)
line_counts = []
for i, page in enumerate(pages):
html, = page.children
body, = html.children
if i == 0:
body_children = body.children[1:] # skip h1
else:
body_children = body.children
if body_children:
paragraph, = body_children
line_counts.append(len(paragraph.children))
else:
line_counts.append(0)
return line_counts
assert line_distribution('orphans: 2; widows: 2') == [4, 3]
assert line_distribution('orphans: 5; widows: 2') == [0, 7]
assert line_distribution('orphans: 2; widows: 4') == [3, 4]
assert line_distribution('orphans: 4; widows: 4') == [0, 7]
assert line_distribution(
'orphans: 2; widows: 2; page-break-inside: avoid') == [0, 7]
@assert_no_logs
def test_table_page_breaks():
"""Test the page breaks inside tables."""
def run(html):
pages = parse(html)
rows_per_page = []
rows_position_y = []
for i, page in enumerate(pages):
html, = page.children
body, = html.children
if i == 0:
body_children = body.children[1:] # skip h1
else:
body_children = body.children
if not body_children:
rows_per_page.append(0)
continue
table_wrapper, = body_children
table, = table_wrapper.children
rows_in_this_page = 0
for group in table.children:
assert group.children, 'found an empty table group'
for row in group.children:
rows_in_this_page += 1
rows_position_y.append(row.position_y)
cell, = row.children
line, = cell.children
text, = line.children
assert text.text == 'row %i' % len(rows_position_y)
rows_per_page.append(rows_in_this_page)
return rows_per_page, rows_position_y
rows_per_page, rows_position_y = run('''
<style>
@page { size: 120px }
table { table-layout: fixed; width: 100% }
h1 { height: 30px }
td { height: 40px }
</style>
<h1>Dummy title</h1>
<table>
<tr><td>row 1</td></tr>
<tr><td>row 2</td></tr>
<tr><td>row 3</td></tr>
<tr><td>row 4</td></tr>
<tr><td>row 5</td></tr>
<tr><td style="height: 300px"> <!-- overflow the page -->
row 6</td></tr>
<tr><td>row 7</td></tr>
<tr><td>row 8</td></tr>
</table>
''')
assert rows_per_page == [2, 3, 1, 2]
assert rows_position_y == [30, 70, 0, 40, 80, 0, 0, 40]
rows_per_page, rows_position_y = run('''
<style>
@page { size: 120px }
h1 { height: 30px}
td { height: 40px }
table { table-layout: fixed; width: 100%;
page-break-inside: avoid }
</style>
<h1>Dummy title</h1>
<table>
<tr><td>row 1</td></tr>
<tr><td>row 2</td></tr>
<tr><td>row 3</td></tr>
<tr><td>row 4</td></tr>
</table>
''')
assert rows_per_page == [0, 3, 1]
assert rows_position_y == [0, 40, 80, 0]
rows_per_page, rows_position_y = run('''
<style>
@page { size: 120px }
h1 { height: 30px}
td { height: 40px }
table { table-layout: fixed; width: 100%;
page-break-inside: avoid }
</style>
<h1>Dummy title</h1>
<table>
<tbody>
<tr><td>row 1</td></tr>
<tr><td>row 2</td></tr>
<tr><td>row 3</td></tr>
</tbody>
<tr><td>row 4</td></tr>
</table>
''')
assert rows_per_page == [0, 3, 1]
assert rows_position_y == [0, 40, 80, 0]
rows_per_page, rows_position_y = run('''
<style>
@page { size: 120px }
h1 { height: 30px}
td { height: 40px }
table { table-layout: fixed; width: 100% }
</style>
<h1>Dummy title</h1>
<table>
<tr><td>row 1</td></tr>
<tbody style="page-break-inside: avoid">
<tr><td>row 2</td></tr>
<tr><td>row 3</td></tr>
</tbody>
</table>
''')
assert rows_per_page == [1, 2]
assert rows_position_y == [30, 0, 40]
pages = parse('''
<style>
@page { size: 100px }
</style>
<h1 style="margin: 0; height: 30px">Lipsum</h1>
<!-- Leave 70px on the first page: enough for the header or row1
but not both. -->
<table style="border-spacing: 0; font-size: 5px">
<thead>
<tr><td style="height: 20px">Header</td></tr>
</thead>
<tbody>
<tr><td style="height: 60px">Row 1</td></tr>
<tr><td style="height: 10px">Row 2</td></tr>
<tr><td style="height: 50px">Row 3</td></tr>
<tr><td style="height: 61px">Row 4</td></tr>
<tr><td style="height: 90px">Row 5</td></tr>
</tbody>
<tfoot>
<tr><td style="height: 20px">Footer</td></tr>
</tfoot>
</table>
''')
rows_per_page = []
for i, page in enumerate(pages):
groups = []
html, = page.children
body, = html.children
table_wrapper, = body.children
if i == 0:
assert table_wrapper.element_tag == 'h1'
else:
table, = table_wrapper.children
for group in table.children:
assert group.children, 'found an empty table group'
rows = []
for row in group.children:
cell, = row.children
line, = cell.children
text, = line.children
rows.append(text.text)
groups.append(rows)
rows_per_page.append(groups)
assert rows_per_page == [
[],
[['Header'], ['Row 1'], ['Footer']],
[['Header'], ['Row 2', 'Row 3'], ['Footer']],
[['Header'], ['Row 4']],
[['Row 5']]
]
@assert_no_logs
def test_inlinebox_spliting():
"""Test the inline boxes spliting."""
for width in [10000, 100, 10, 0]:
page, = parse('''
<style>p { font-family:%(fonts)s; width: %(width)spx; }</style>
<p><strong>WeasyPrint is a free software visual rendering engine
for HTML and CSS.</strong></p>
''' % {'fonts': FONTS, 'width': width})
html, = page.children
body, = html.children
paragraph, = body.children
lines = paragraph.children
if width == 10000:
assert len(lines) == 1
else:
assert len(lines) > 1
text_parts = []
for line in lines:
strong, = line.children
text, = strong.children
text_parts.append(text.text)
assert ' '.join(text_parts) == ('WeasyPrint is a free software visual '
'rendering engine for HTML and CSS.')
@assert_no_logs
def test_page_and_linebox_breaking():
"""Test the linebox text after spliting linebox and page."""
# The empty <span/> tests a corner case
# in skip_first_whitespace()
pages = parse('''
<style>
div { font-family:%(fonts)s; font-size:22px}
@page { size: 100px; margin:2px; border:1px solid }
body { margin: 0 }
</style>
<div><span/>1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15</div>
''' % {'fonts': FONTS})
texts = []
for page in pages:
html, = page.children
body, = html.children
div, = body.children
lines = div.children
for line in lines:
line_texts = []
for child in line.descendants():
if isinstance(child, boxes.TextBox):
line_texts.append(child.text)
texts.append(''.join(line_texts))
assert len(pages) == 2
assert ' '.join(texts) == \
'1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15'
@assert_no_logs
def test_whitespace_processing():
"""Test various spaces and tabulations processing."""
for source in ['a', ' a ', ' \n \ta', ' a\t ']:
page, = parse('<p><em>%s</em></p>' % source)
html, = page.children
body, = html.children
p, = body.children
line, = p.children
em, = line.children
text, = em.children
assert text.text == 'a', 'source was %r' % (source,)
        page, = parse('<p style="white-space: pre-line">\n\n<em>%s</em></p>'
                      % source.replace('\n', ' '))
html, = page.children
body, = html.children
p, = body.children
_line1, _line2, line3 = p.children
em, = line3.children
text, = em.children
assert text.text == 'a', 'source was %r' % (source,)
@assert_no_logs
def test_images():
"""Test that width, height and ratio of images are respected."""
def get_img(html):
page, = parse(html)
html, = page.children
body, = html.children
line, = body.children
img, = line.children
return body, img
# Try a few image formats
for html in [
'<img src="%s">' % url for url in [
'pattern.png', 'pattern.gif', 'blue.jpg', 'pattern.svg',
"data:image/svg+xml,<svg width='4' height='4'></svg>",
"DatA:image/svg+xml,<svg width='4px' height='4px'></svg>",
]
] + [
'<embed src=pattern.png>',
'<embed src=pattern.svg>',
'<embed src=really-a-png.svg type=image/png>',
'<embed src=really-a-svg.png type=image/svg+xml>',
'<object data=pattern.png>',
'<object data=pattern.svg>',
'<object data=really-a-png.svg type=image/png>',
'<object data=really-a-svg.png type=image/svg+xml>',
]:
body, img = get_img(html)
assert img.width == 4
assert img.height == 4
# With physical units
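    # (2.54cm is exactly 1 inch, i.e. 96 CSS pixels; 0.5in is 48px.)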
url = "data:image/svg+xml,<svg width='2.54cm' height='0.5in'></svg>"
body, img = get_img('<img src="%s">' % url)
assert img.width == 96
assert img.height == 48
# Invalid images
for url in [
'nonexistent.png',
'unknownprotocol://weasyprint.org/foo.png',
'data:image/unknowntype,Not an image',
# Invalid protocol
'datå:image/svg+xml,<svg width="4" height="4"></svg>',
# zero-byte images
'data:image/png,',
'data:image/jpeg,',
'data:image/svg+xml,',
# Incorrect format
'data:image/png,Not a PNG',
'data:image/jpeg,Not a JPEG',
'data:image/svg+xml,<svg>invalid xml',
'really-a-svg.png',
]:
with capture_logs() as logs:
body, img = get_img("<img src='%s' alt='invalid image'>" % url)
assert len(logs) == 1
assert 'WARNING: Failed to load image' in logs[0]
assert isinstance(img, boxes.InlineBox) # not a replaced box
text, = img.children
assert text.text == 'invalid image', url
with capture_logs() as logs:
parse('<img src=nonexistent.png><img src=nonexistent.png>')
# Failures are cached too: only one warning
assert len(logs) == 1
assert 'WARNING: Failed to load image' in logs[0]
# Layout rules try to preserve the ratio, so the height should be 40px too:
body, img = get_img('''<body style="font-size: 0">
<img src="pattern.png" style="width: 40px">''')
assert body.height == 40
assert img.position_y == 0
assert img.width == 40
assert img.height == 40
body, img = get_img('''<body style="font-size: 0">
<img src="pattern.png" style="height: 40px">''')
assert body.height == 40
assert img.position_y == 0
assert img.width == 40
assert img.height == 40
# Same with percentages
body, img = get_img('''<body style="font-size: 0"><p style="width: 200px">
<img src="pattern.png" style="width: 20%">''')
assert body.height == 40
assert img.position_y == 0
assert img.width == 40
assert img.height == 40
body, img = get_img('''<body style="font-size: 0">
<img src="pattern.png" style="min-width: 40px">''')
assert body.height == 40
assert img.position_y == 0
assert img.width == 40
assert img.height == 40
body, img = get_img('<img src="pattern.png" style="max-width: 2px">')
assert img.width == 2
assert img.height == 2
# display: table-cell is ignored. XXX Should it?
page, = parse('''<body style="font-size: 0">
<img src="pattern.png" style="width: 40px">
<img src="pattern.png" style="width: 60px; display: table-cell">
''')
html, = page.children
body, = html.children
line, = body.children
img_1, img_2 = line.children
assert body.height == 60
assert img_1.width == 40
assert img_1.height == 40
assert img_2.width == 60
assert img_2.height == 60
assert img_1.position_y == 20
assert img_2.position_y == 0
# Block-level image:
page, = parse('''
<style>
@page { size: 100px }
img { width: 40px; margin: 10px auto; display: block }
</style>
<body>
<img src="pattern.png">
''')
html, = page.children
body, = html.children
img, = body.children
assert img.element_tag == 'img'
assert img.position_x == 0
assert img.position_y == 0
assert img.width == 40
assert img.height == 40
assert img.content_box_x() == 30 # (100 - 40) / 2 == 30px for margin-left
assert img.content_box_y() == 10
page, = parse('''
<style>
@page { size: 100px }
img { min-width: 40%; margin: 10px auto; display: block }
</style>
<body>
<img src="pattern.png">
''')
html, = page.children
body, = html.children
img, = body.children
assert img.element_tag == 'img'
assert img.position_x == 0
assert img.position_y == 0
assert img.width == 40
assert img.height == 40
assert img.content_box_x() == 30 # (100 - 40) / 2 == 30px for margin-left
assert img.content_box_y() == 10
page, = parse('''
<style>
@page { size: 100px }
img { min-width: 40px; margin: 10px auto; display: block }
</style>
<body>
<img src="pattern.png">
''')
html, = page.children
body, = html.children
img, = body.children
assert img.element_tag == 'img'
assert img.position_x == 0
assert img.position_y == 0
assert img.width == 40
assert img.height == 40
assert img.content_box_x() == 30 # (100 - 40) / 2 == 30px for margin-left
assert img.content_box_y() == 10
page, = parse('''
<style>
@page { size: 100px }
img { min-height: 30px; max-width: 2px;
margin: 10px auto; display: block }
</style>
<body>
<img src="pattern.png">
''')
html, = page.children
body, = html.children
img, = body.children
assert img.element_tag == 'img'
assert img.position_x == 0
assert img.position_y == 0
assert img.width == 2
assert img.height == 30
assert img.content_box_x() == 49 # (100 - 2) / 2 == 49px for margin-left
assert img.content_box_y() == 10
page, = parse('''
<body style="float: left">
<img style="height: 200px; margin: 10px; display: block" src="
data:image/svg+xml,
<svg width='150' height='100'></svg>
">
''')
html, = page.children
body, = html.children
img, = body.children
assert body.width == 320
assert body.height == 220
assert img.element_tag == 'img'
assert img.width == 300
assert img.height == 200
@assert_no_logs
def test_vertical_align():
"""Test various values of vertical-align."""
"""
+-------+ <- position_y = 0
+-----+ |
40px | | | 60px
| | |
+-----+-------+ <- baseline
"""
page, = parse('''
<span>
<img src="pattern.png" style="width: 40px"
><img src="pattern.png" style="width: 60px"
></span>''')
html, = page.children
body, = html.children
line, = body.children
span, = line.children
img_1, img_2 = span.children
assert img_1.height == 40
assert img_2.height == 60
assert img_1.position_y == 20
assert img_2.position_y == 0
# 60px + the descent of the font below the baseline
assert 60 < line.height < 70
assert body.height == line.height
"""
+-------+ <- position_y = 0
35px | |
+-----+ | 60px
40px | | |
| +-------+ <- baseline
+-----+ 15px
"""
page, = parse('''
<span>
<img src="pattern.png" style="width: 40px; vertical-align: -15px"
><img src="pattern.png" style="width: 60px"></span>''')
html, = page.children
body, = html.children
line, = body.children
span, = line.children
img_1, img_2 = span.children
assert img_1.height == 40
assert img_2.height == 60
assert img_1.position_y == 35
assert img_2.position_y == 0
assert line.height == 75
assert body.height == line.height
# Same as previously, but with percentages
page, = parse('''
<span style="line-height: 10px">
<img src="pattern.png" style="width: 40px; vertical-align: -150%"
><img src="pattern.png" style="width: 60px"></span>''')
html, = page.children
body, = html.children
line, = body.children
span, = line.children
img_1, img_2 = span.children
assert img_1.height == 40
assert img_2.height == 60
assert img_1.position_y == 35
assert img_2.position_y == 0
assert line.height == 75
assert body.height == line.height
# Same again, but have the vertical-align on an inline box.
page, = parse('''
<span style="line-height: 10px">
<span style="line-height: 10px; vertical-align: -15px">
<img src="pattern.png" style="width: 40px"></span>
<img src="pattern.png" style="width: 60px"></span>''')
html, = page.children
body, = html.children
line, = body.children
span_1, = line.children
    span_2, _whitespace, img_2 = span_1.children
img_1, = span_2.children
assert img_1.height == 40
assert img_2.height == 60
assert img_1.position_y == 35
assert img_2.position_y == 0
assert line.height == 75
assert body.height == line.height
    # Same images as previously, but with 'vertical-align: middle' and Ahem
page, = parse('''
<span style="line-height: 12px; font-size: 12px; font-family: 'ahem'">
<img src="pattern.png" style="width: 40px; vertical-align: middle"
><img src="pattern.png" style="width: 60px"></span>''')
html, = page.children
body, = html.children
line, = body.children
span, = line.children
img_1, img_2 = span.children
assert img_1.height == 40
assert img_2.height == 60
# middle of the image (position_y + 20) is at half the ex-height above
# the baseline of the parent. The ex-height of Ahem is something like 0.8em
assert img_1.position_y == 35.2 # 60 - 0.5 * 0.8 * font-size - 40/2
assert img_2.position_y == 0
assert line.height == 75.2
assert body.height == line.height
# sup and sub currently mean +/- 0.5 em
# With the initial 16px font-size, that’s 8px.
page, = parse('''
<span style="line-height: 10px">
<img src="pattern.png" style="width: 60px"
><img src="pattern.png" style="width: 40px; vertical-align: super"
><img src="pattern.png" style="width: 40px; vertical-align: sub"
></span>''')
html, = page.children
body, = html.children
line, = body.children
span, = line.children
img_1, img_2, img_3 = span.children
assert img_1.height == 60
assert img_2.height == 40
assert img_3.height == 40
assert img_1.position_y == 0
assert img_2.position_y == 12 # 20 - 16 * 0.5
assert img_3.position_y == 28 # 20 + 16 * 0.5
assert line.height == 68
assert body.height == line.height
page, = parse('''
<body style="line-height: 10px">
<span>
<img src="pattern.png" style="vertical-align: text-top"
><img src="pattern.png" style="vertical-align: text-bottom"
></span>''')
html, = page.children
body, = html.children
line, = body.children
span, = line.children
img_1, img_2 = span.children
assert img_1.height == 4
assert img_2.height == 4
assert img_1.position_y == 0
assert img_2.position_y == 12 # 16 - 4
assert line.height == 16
assert body.height == line.height
# This case used to cause an exception:
# The second span has no children but should count for line heights
# since it has padding.
page, = parse('''<span style="line-height: 1.5">
<span style="padding: 1px"></span></span>''')
html, = page.children
body, = html.children
line, = body.children
span_1, = line.children
span_2, = span_1.children
assert span_1.height == 16
assert span_2.height == 16
    # The line’s strut has 'line-height: 1.5' rather than 'line-height:
    # normal', so the resulting line height is 1.5 * 16px = 24px.
assert span_1.margin_height() == 24
assert span_2.margin_height() == 24
assert line.height == 24
page, = parse('''
<span>
<img src="pattern.png" style="width: 40px; vertical-align: -15px"
><img src="pattern.png" style="width: 60px"
></span><div style="display: inline-block; vertical-align: 3px">
<div>
<div style="height: 100px">foo</div>
<div>
<img src="pattern.png" style="
width: 40px; vertical-align: -15px"
><img src="pattern.png" style="width: 60px"
></div>
</div>
</div>''')
html, = page.children
body, = html.children
line, = body.children
span, div_1 = line.children
assert line.height == 178
assert body.height == line.height
# Same as earlier
img_1, img_2 = span.children
assert img_1.height == 40
assert img_2.height == 60
assert img_1.position_y == 138
assert img_2.position_y == 103
div_2, = div_1.children
div_3, div_4 = div_2.children
div_line, = div_4.children
div_img_1, div_img_2 = div_line.children
assert div_1.position_y == 0
assert div_1.height == 175
assert div_3.height == 100
assert div_line.height == 75
assert div_img_1.height == 40
assert div_img_2.height == 60
assert div_img_1.position_y == 135
assert div_img_2.position_y == 100
    # The first two images bring the top of the line box 30px above
    # the baseline and 10px below.
    # Each of the inner spans is then aligned against that top or bottom
    # of the line box ('vertical-align: top' and 'bottom').
page, = parse('''
<span style="font-size: 0">
<img src="pattern.png" style="vertical-align: 26px">
<img src="pattern.png" style="vertical-align: -10px">
<span style="vertical-align: top">
<img src="pattern.png" style="vertical-align: -10px">
<span style="vertical-align: -10px">
<img src="pattern.png" style="vertical-align: bottom">
</span>
</span>
<span style="vertical-align: bottom">
<img src="pattern.png" style="vertical-align: 6px">
</span>
</span>''')
html, = page.children
body, = html.children
line, = body.children
span_1, = line.children
img_1, img_2, span_2, span_4 = span_1.children
img_3, span_3 = span_2.children
img_4, = span_3.children
img_5, = span_4.children
assert body.height == line.height
assert line.height == 40
assert img_1.position_y == 0
assert img_2.position_y == 36
assert img_3.position_y == 6
assert img_4.position_y == 36
assert img_5.position_y == 30
page, = parse('''
<span style="font-size: 0">
<img src="pattern.png" style="vertical-align: bottom">
<img src="pattern.png" style="vertical-align: top; height: 100px">
</span>
''')
html, = page.children
body, = html.children
line, = body.children
span, = line.children
img_1, img_2 = span.children
assert img_1.position_y == 96
assert img_2.position_y == 0
# Reference for the next test
page, = parse('''
<span style="font-size: 0; vertical-align: top">
<img src="pattern.png">
</span>
''')
html, = page.children
body, = html.children
line, = body.children
span, = line.children
img_1, = span.children
assert img_1.position_y == 0
# Should be the same as above
page, = parse('''
<span style="font-size: 0; vertical-align: top; display: inline-block">
<img src="pattern.png">
</span>''')
html, = page.children
body, = html.children
line_1, = body.children
span, = line_1.children
line_2, = span.children
img_1, = line_2.children
assert img_1.element_tag == 'img'
assert img_1.position_y == 0
@assert_no_logs
def test_text_align_left():
"""Test the left text alignment."""
"""
<--------------------> page, body
+-----+
+---+ |
| | |
+---+-----+
^ ^ ^ ^
x=0 x=40 x=100 x=200
"""
page, = parse('''
<style>
@page { size: 200px }
</style>
<body>
<img src="pattern.png" style="width: 40px"
><img src="pattern.png" style="width: 60px">''')
html, = page.children
body, = html.children
line, = body.children
img_1, img_2 = line.children
# initial value for text-align: left (in ltr text)
assert img_1.position_x == 0
assert img_2.position_x == 40
@assert_no_logs
def test_text_align_right():
"""Test the right text alignment."""
"""
<--------------------> page, body
+-----+
+---+ |
| | |
+---+-----+
^ ^ ^ ^
x=0 x=100 x=200
x=140
"""
page, = parse('''
<style>
@page { size: 200px }
body { text-align: right }
</style>
<body>
<img src="pattern.png" style="width: 40px"
><img src="pattern.png" style="width: 60px">''')
html, = page.children
body, = html.children
line, = body.children
img_1, img_2 = line.children
assert img_1.position_x == 100 # 200 - 60 - 40
assert img_2.position_x == 140 # 200 - 60
@assert_no_logs
def test_text_align_center():
"""Test the center text alignment."""
"""
<--------------------> page, body
+-----+
+---+ |
| | |
+---+-----+
^ ^ ^ ^
x= x=50 x=150
x=90
"""
page, = parse('''
<style>
@page { size: 200px }
body { text-align: center }
</style>
<body>
<img src="pattern.png" style="width: 40px"
><img src="pattern.png" style="width: 60px">''')
html, = page.children
body, = html.children
line, = body.children
img_1, img_2 = line.children
assert img_1.position_x == 50
assert img_2.position_x == 90
@assert_no_logs
def test_text_align_justify():
"""Test justified text."""
page, = parse('''
<style>
@page { size: 300px 1000px }
body { text-align: justify }
</style>
<p><img src="pattern.png" style="width: 40px">
<strong>
<img src="pattern.png" style="width: 60px">
<img src="pattern.png" style="width: 10px">
<img src="pattern.png" style="width: 100px"
></strong><img src="pattern.png" style="width: 290px"
><!-- Last image will be on its own line. -->''')
html, = page.children
body, = html.children
paragraph, = body.children
line_1, line_2 = paragraph.children
image_1, space_1, strong = line_1.children
image_2, space_2, image_3, space_3, image_4 = strong.children
image_5, = line_2.children
assert space_1.text == ' '
assert space_2.text == ' '
assert space_3.text == ' '
assert image_1.position_x == 0
assert space_1.position_x == 40
assert strong.position_x == 70
assert image_2.position_x == 70
assert space_2.position_x == 130
assert image_3.position_x == 160
assert space_3.position_x == 170
assert image_4.position_x == 200
assert strong.width == 230
assert image_5.position_x == 0
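    # Justification stretches the three spaces of the first line:
    # 300px - (40 + 60 + 10 + 100)px of images = 90px, i.e. 30px per space.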
# single-word line (zero spaces)
page, = parse('''
<style>
body { text-align: justify; width: 50px }
</style>
<p>Supercalifragilisticexpialidocious bar</p>
''')
html, = page.children
body, = html.children
paragraph, = body.children
line_1, line_2 = paragraph.children
text, = line_1.children
assert text.position_x == 0
@assert_no_logs
def test_word_spacing():
"""Test word-spacing."""
# keep the empty <style> as a regression test: element.text is None
# (Not a string.)
page, = parse('''
<style></style>
<body><strong>Lorem ipsum dolor<em>sit amet</em></strong>''')
html, = page.children
body, = html.children
line, = body.children
strong_1, = line.children
# TODO: Pango gives only half of word-spacing to a space at the end
# of a TextBox. Is this what we want?
page, = parse('''
<style>strong { word-spacing: 11px }</style>
<body><strong>Lorem ipsum dolor<em>sit amet</em></strong>''')
html, = page.children
body, = html.children
line, = body.children
strong_2, = line.children
assert strong_2.width - strong_1.width == 33
@assert_no_logs
def test_letter_spacing():
"""Test letter-spacing."""
page, = parse('''
<body><strong>Supercalifragilisticexpialidocious</strong>''')
html, = page.children
body, = html.children
line, = body.children
strong_1, = line.children
page, = parse('''
<style>strong { letter-spacing: 11px }</style>
<body><strong>Supercalifragilisticexpialidocious</strong>''')
html, = page.children
body, = html.children
line, = body.children
strong_2, = line.children
    assert strong_2.width - strong_1.width == 34 * 11 # 34 letters, 11px each
@assert_no_logs
def test_text_indent():
"""Test the text-indent property."""
for indent in ['12px', '6%']: # 6% of 200px is 12px
page, = parse('''
<style>
@page { size: 220px }
body { margin: 10px; text-indent: %(indent)s }
</style>
            <p>Some text that is long enough to take at least three lines,
                but maybe more.
''' % {'indent': indent})
html, = page.children
body, = html.children
paragraph, = body.children
lines = paragraph.children
text_1, = lines[0].children
text_2, = lines[1].children
text_3, = lines[2].children
assert text_1.position_x == 22 # 10px margin-left + 12px indent
assert text_2.position_x == 10 # No indent
assert text_3.position_x == 10 # No indent
@assert_no_logs
def test_inline_replaced_auto_margins():
"""Test that auto margins are ignored for inline replaced boxes."""
page, = parse('''
<style>
@page { size: 200px }
img { display: inline; margin: auto; width: 50px }
</style>
<body><img src="pattern.png" />''')
html, = page.children
body, = html.children
line, = body.children
img, = line.children
assert img.margin_top == 0
assert img.margin_right == 0
assert img.margin_bottom == 0
assert img.margin_left == 0
@assert_no_logs
def test_empty_inline_auto_margins():
"""Test that horizontal auto margins are ignored for empty inline boxes."""
page, = parse('''
<style>
@page { size: 200px }
span { margin: auto }
</style>
<body><span></span>''')
html, = page.children
body, = html.children
block, = body.children
span, = block.children
assert span.margin_top != 0
assert span.margin_right == 0
assert span.margin_bottom != 0
assert span.margin_left == 0
@assert_no_logs
def test_box_sizing():
"""Test the box-sizing property.
http://www.w3.org/TR/css3-ui/#box-sizing
"""
page, = parse('''
<style>
@page { size: 100000px }
body { width: 10000px; margin: 0 }
div { width: 10%; height: 1000px;
margin: 100px; padding: 10px; border: 1px solid }
div:nth-child(2) { box-sizing: content-box }
div:nth-child(3) { box-sizing: padding-box }
div:nth-child(4) { box-sizing: border-box }
</style>
<div></div>
<div></div>
<div></div>
<div></div>
''')
html, = page.children
body, = html.children
div_1, div_2, div_3, div_4 = body.children
for div in div_1, div_2:
assert div.style.box_sizing == 'content-box'
assert div.width == 1000
assert div.height == 1000
assert div.padding_width() == 1020
assert div.padding_height() == 1020
assert div.border_width() == 1022
assert div.border_height() == 1022
assert div.margin_height() == 1222
# margin_width() is the width of the containing block
# padding-box
assert div_3.style.box_sizing == 'padding-box'
assert div_3.width == 980 # 1000 - 20
assert div_3.height == 980
assert div_3.padding_width() == 1000
assert div_3.padding_height() == 1000
assert div_3.border_width() == 1002
assert div_3.border_height() == 1002
assert div_3.margin_height() == 1202
# border-box
assert div_4.style.box_sizing == 'border-box'
assert div_4.width == 978 # 1000 - 20 - 2
assert div_4.height == 978
assert div_4.padding_width() == 998
assert div_4.padding_height() == 998
assert div_4.border_width() == 1000
assert div_4.border_height() == 1000
assert div_4.margin_height() == 1200
@assert_no_logs
def test_table_column_width():
source = '''
<style>
body { width: 20000px; margin: 0 }
table {
width: 10000px; margin: 0 auto; border-spacing: 100px 0;
table-layout: fixed
}
td { border: 10px solid; padding: 1px }
</style>
<table>
<col style="width: 10%">
<tr>
<td style="width: 30%" colspan=3>
<td>
</tr>
<tr>
<td>
<td>
<td>
<td>
</tr>
<tr>
<td>
<td colspan=12>This cell will be truncated to grid width
<td>This cell will be removed as it is beyond the grid width
</tr>
</table>
'''
with capture_logs() as logs:
page, = parse(source)
assert len(logs) == 1
assert logs[0].startswith('WARNING: This table row has more columns than '
'the table, ignored 1 cell')
html, = page.children
body, = html.children
wrapper, = body.children
table, = wrapper.children
row_group, = table.children
first_row, second_row, third_row = row_group.children
cells = [first_row.children, second_row.children, third_row.children]
assert len(first_row.children) == 2
assert len(second_row.children) == 4
    # Third cell here is completely removed
assert len(third_row.children) == 2
assert body.position_x == 0
assert wrapper.position_x == 0
assert wrapper.margin_left == 5000
assert wrapper.content_box_x() == 5000 # auto margin-left
assert wrapper.width == 10000
assert table.position_x == 5000
assert table.width == 10000
assert row_group.position_x == 5100 # 5000 + border_spacing
assert row_group.width == 9800 # 10000 - 2*border-spacing
assert first_row.position_x == row_group.position_x
assert first_row.width == row_group.width
# This cell has colspan=3
assert cells[0][0].position_x == 5100 # 5000 + border-spacing
# `width` on a cell sets the content width
assert cells[0][0].width == 3000 # 30% of 10000px
assert cells[0][0].border_width() == 3022 # 3000 + borders + padding
# Second cell of the first line, but on the fourth and last column
assert cells[0][1].position_x == 8222 # 5100 + 3022 + border-spacing
assert cells[0][1].border_width() == 6678 # 10000 - 3022 - 3*100
assert cells[0][1].width == 6656 # 6678 - borders - padding
assert cells[1][0].position_x == 5100 # 5000 + border-spacing
# `width` on a column sets the border width of cells
assert cells[1][0].border_width() == 1000 # 10% of 10000px
assert cells[1][0].width == 978 # 1000 - borders - padding
assert cells[1][1].position_x == 6200 # 5100 + 1000 + border-spacing
assert cells[1][1].border_width() == 911 # (3022 - 1000 - 2*100) / 2
assert cells[1][1].width == 889 # 911 - borders - padding
assert cells[1][2].position_x == 7211 # 6200 + 911 + border-spacing
assert cells[1][2].border_width() == 911 # (3022 - 1000 - 2*100) / 2
assert cells[1][2].width == 889 # 911 - borders - padding
# Same as cells[0][1]
assert cells[1][3].position_x == 8222 # Also 7211 + 911 + border-spacing
assert cells[1][3].border_width() == 6678
assert cells[1][3].width == 6656
# Same as cells[1][0]
assert cells[2][0].position_x == 5100
assert cells[2][0].border_width() == 1000
assert cells[2][0].width == 978
assert cells[2][1].position_x == 6200 # Same as cells[1][1]
    assert cells[2][1].border_width() == 8700 # 10000 - 1000 - 3*border-spacing
assert cells[2][1].width == 8678 # 8700 - borders - padding
assert cells[2][1].colspan == 3 # truncated to grid width
page, = parse('''
<style>
table { width: 1000px; border-spacing: 100px; table-layout: fixed }
</style>
<table>
<tr>
<td style="width: 50%">
<td style="width: 60%">
<td>
</tr>
</table>
''')
html, = page.children
body, = html.children
wrapper, = body.children
table, = wrapper.children
row_group, = table.children
row, = row_group.children
assert row.children[0].width == 500
assert row.children[1].width == 600
assert row.children[2].width == 0
    assert table.width == 1500 # 500 + 600 + 0 + 4*border-spacing
# Sum of columns width larger that the table width:
# increase the table width
page, = parse('''
<style>
table { width: 1000px; border-spacing: 100px; table-layout: fixed }
td { width: 60% }
</style>
<table>
<tr>
<td>
<td>
</tr>
</table>
''')
html, = page.children
body, = html.children
wrapper, = body.children
table, = wrapper.children
row_group, = table.children
row, = row_group.children
cell_1, cell_2 = row.children
assert cell_1.width == 600 # 60% of 1000px
assert cell_2.width == 600
assert table.width == 1500 # 600 + 600 + 3*border-spacing
assert wrapper.width == table.width
@assert_no_logs
def test_table_row_height():
page, = parse('''
<table style="width: 1000px; border-spacing: 0 100px;
font: 20px/1em serif; margin: 3px; table-layout: fixed">
<tr>
<td rowspan=0 style="height: 420px; vertical-align: top"></td>
<td>X<br>X<br>X</td>
<td><table style="margin-top: 20px; border-spacing: 0">
<tr><td>X</td></tr></table></td>
<td style="vertical-align: top">X</td>
<td style="vertical-align: middle">X</td>
<td style="vertical-align: bottom">X</td>
</tr>
<tr>
            <!-- cells with no text (no line boxes) are a corner case
                 in cell baselines -->
<td style="padding: 15px"></td>
<td><div style="height: 10px"></div></td>
</tr>
<tr></tr>
<tr>
<td style="vertical-align: bottom"></td>
</tr>
</table>
''')
html, = page.children
body, = html.children
wrapper, = body.children
table, = wrapper.children
row_group, = table.children
assert wrapper.position_y == 0
assert table.position_y == 3 # 0 + margin-top
    assert table.height == 620 # sum of row heights + 5*border-spacing
assert wrapper.height == table.height
assert row_group.position_y == 103 # 3 + border-spacing
assert row_group.height == 420 # 620 - 2*border-spacing
assert [row.height for row in row_group.children] == [
80, 30, 0, 10]
assert [row.position_y for row in row_group.children] == [
# cumulative sum of previous row heights and border-spacings
103, 283, 413, 513]
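    # 103 + 80 + 100 = 283; 283 + 30 + 100 = 413; 413 + 0 + 100 = 513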
assert [[cell.height for cell in row.children]
for row in row_group.children] == [
[420, 60, 40, 20, 20, 20],
[0, 10],
[],
[0]
]
assert [[cell.border_height() for cell in row.children]
for row in row_group.children] == [
[420, 80, 80, 80, 80, 80],
[30, 30],
[],
[10]
]
    # The baseline of the first row is at 40px because of the third column.
    # The second cell thus gets a 20px top padding, which pushes its bottom
    # to 80px. The middle is at 40px.
assert [[cell.padding_top for cell in row.children]
for row in row_group.children] == [
[0, 20, 0, 0, 30, 60],
[15, 5],
[],
[10]
]
assert [[cell.padding_bottom for cell in row.children]
for row in row_group.children] == [
[0, 0, 40, 60, 30, 0],
[15, 15],
[],
[0]
]
assert [[cell.position_y for cell in row.children]
for row in row_group.children] == [
[103, 103, 103, 103, 103, 103],
[283, 283],
[],
[513]
]
# A cell box cannot extend beyond the last row box of a table.
page, = parse('''
<table style="border-spacing: 0">
<tr style="height: 10px">
<td rowspan=5></td>
<td></td>
</tr>
<tr style="height: 10px">
<td></td>
</tr>
</table>
''')
html, = page.children
body, = html.children
wrapper, = body.children
table, = wrapper.children
row_group, = table.children
@assert_no_logs
@requires_cairo('1.12')
def test_table_vertical_align():
from .test_draw import _, r, B
assert_pixels('table_vertical_align', 28, 10, [
r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r,
r+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+r,
r+B+r+B+B+_+B+B+_+B+B+_+B+B+_+B+B+r+r+B+B+r+r+B+B+_+B+r,
r+B+r+B+B+_+B+B+_+B+B+r+B+B+r+B+B+r+r+B+B+r+r+B+B+r+B+r,
r+B+_+B+B+r+B+B+_+B+B+r+B+B+r+B+B+r+r+B+B+r+r+B+B+r+B+r,
r+B+_+B+B+r+B+B+_+B+B+_+B+B+_+B+B+r+r+B+B+r+r+B+B+_+B+r,
r+B+_+B+B+_+B+B+r+B+B+_+B+B+_+B+B+_+_+B+B+_+_+B+B+_+B+r,
r+B+_+B+B+_+B+B+r+B+B+_+B+B+_+B+B+_+_+B+B+_+_+B+B+_+B+r,
r+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+B+r,
r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r+r,
], '''
<style>
@page { size: 28px 10px }
html { background: #fff; font-size: 1px; color: red }
body { margin: 0; width: 28px; height: 10px }
td {
width: 1em;
padding: 0 !important;
border: 1px solid blue;
line-height: 1em;
font-family: ahem;
}
</style>
<table style="border: 1px solid red; border-spacing: 0">
<tr>
<!-- Test vertical-align: top, auto height -->
<td style="vertical-align: top">o o</td>
<!-- Test vertical-align: middle, auto height -->
<td style="vertical-align: middle">o o</td>
<!-- Test vertical-align: bottom, fixed useless height -->
<td style="vertical-align: bottom; height: 2em">o o</td>
<!-- Test default vertical-align value (baseline),
fixed useless height -->
<td style="height: 5em">o o</td>
<!-- Test vertical-align: baseline with baseline set by next cell,
auto height -->
<td style="vertical-align: baseline">o o</td>
<!-- Set baseline height to 2px, auto height -->
<td style="vertical-align: baseline; font-size: 2em">o o</td>
<!-- Test padding-bottom, fixed useless height,
set the height of the cells to 2 lines * 2em + 2px = 6px -->
<td style="vertical-align: baseline; height: 1em;
font-size: 2em; padding-bottom: 2px !important">
o o
</td>
<!-- Test padding-top, auto height -->
<td style="vertical-align: top; padding-top: 0.5em !important">
o o
</td>
</tr>
</table>
''')
@assert_no_logs
def test_table_wrapper():
page, = parse('''
<style>
@page { size: 1000px }
table { width: 600px; height: 500px; table-layout: fixed;
padding: 1px; border: 10px solid; margin: 100px; }
</style>
<table></table>
''')
html, = page.children
body, = html.children
wrapper, = body.children
table, = wrapper.children
assert body.width == 1000
assert wrapper.width == 600 # Not counting borders or padding
assert wrapper.margin_left == 100
assert table.margin_width() == 600
assert table.width == 578 # 600 - 2*10 - 2*1, no margin
# box-sizing in the UA stylesheet makes `height: 500px` set this
assert table.border_height() == 500
assert table.height == 478 # 500 - 2*10 - 2*1
assert table.margin_height() == 500 # no margin
assert wrapper.height == 500
assert wrapper.margin_height() == 700 # 500 + 2*100
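    # The anonymous wrapper box carries the table’s computed width and
    # margins, while the padding and border stay on the table box itself.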
# Non-regression test: this used to cause an exception
page, = parse('<html style="display: table">')
@assert_no_logs
def test_margin_boxes_fixed_dimension():
# Corner boxes
page, = parse('''
<style>
@page {
@top-left-corner {
content: 'top_left';
padding: 10px;
}
@top-right-corner {
content: 'top_right';
padding: 10px;
}
@bottom-left-corner {
content: 'bottom_left';
padding: 10px;
}
@bottom-right-corner {
content: 'bottom_right';
padding: 10px;
}
size: 1000px;
margin-top: 10%;
margin-bottom: 40%;
margin-left: 20%;
margin-right: 30%;
}
</style>
''')
html, top_left, top_right, bottom_left, bottom_right = page.children
    for margin_box, expected in zip(
            [top_left, top_right, bottom_left, bottom_right],
            ['top_left', 'top_right', 'bottom_left', 'bottom_right']):
        line, = margin_box.children
        text_box, = line.children
        assert text_box.text == expected
# Check positioning and Rule 1 for fixed dimensions
assert top_left.position_x == 0
assert top_left.position_y == 0
assert top_left.margin_width() == 200 # margin-left
assert top_left.margin_height() == 100 # margin-top
assert top_right.position_x == 700 # size-x - margin-right
assert top_right.position_y == 0
assert top_right.margin_width() == 300 # margin-right
assert top_right.margin_height() == 100 # margin-top
assert bottom_left.position_x == 0
assert bottom_left.position_y == 600 # size-y - margin-bottom
assert bottom_left.margin_width() == 200 # margin-left
assert bottom_left.margin_height() == 400 # margin-bottom
assert bottom_right.position_x == 700 # size-x - margin-right
assert bottom_right.position_y == 600 # size-y - margin-bottom
assert bottom_right.margin_width() == 300 # margin-right
assert bottom_right.margin_height() == 400 # margin-bottom
# Test rules 2 and 3
page, = parse('''
<style>
@page {
margin: 100px 200px;
@bottom-left-corner {
content: "";
margin: 60px
}
}
</style>
''')
html, margin_box = page.children
assert margin_box.margin_width() == 200
assert margin_box.margin_left == 60
assert margin_box.margin_right == 60
assert margin_box.width == 80 # 200 - 60 - 60
assert margin_box.margin_height() == 100
    # The total was too big, so the outer margin was ignored:
assert margin_box.margin_top == 60
assert margin_box.margin_bottom == 40 # Not 60
assert margin_box.height == 0 # But not negative
# Test rule 3 with a non-auto inner dimension
page, = parse('''
<style>
@page {
margin: 100px;
@left-middle {
content: "";
margin: 10px;
width: 130px;
}
}
</style>
''')
html, margin_box = page.children
assert margin_box.margin_width() == 100
assert margin_box.margin_left == -40 # Not 10px
assert margin_box.margin_right == 10
assert margin_box.width == 130 # As specified
# Test rule 4
page, = parse('''
<style>
@page {
margin: 100px;
@left-bottom {
content: "";
margin-left: 10px;
margin-right: auto;
width: 70px;
}
}
</style>
''')
html, margin_box = page.children
assert margin_box.margin_width() == 100
    assert margin_box.margin_left == 10 # 10px this time: not over-constrained
assert margin_box.margin_right == 20
assert margin_box.width == 70 # As specified
# Test rules 2, 3 and 4
page, = parse('''
<style>
@page {
margin: 100px;
@right-top {
content: "";
margin-right: 10px;
margin-left: auto;
width: 130px;
}
}
</style>
''')
html, margin_box = page.children
assert margin_box.margin_width() == 100
assert margin_box.margin_left == 0 # rule 2
assert margin_box.margin_right == -30 # rule 3, after rule 2
assert margin_box.width == 130 # As specified
# Test rule 5
page, = parse('''
<style>
@page {
margin: 100px;
@top-left {
content: "";
margin-top: 10px;
margin-bottom: auto;
}
}
</style>
''')
html, margin_box = page.children
assert margin_box.margin_height() == 100
assert margin_box.margin_top == 10
assert margin_box.margin_bottom == 0
assert margin_box.height == 90
# Test rule 5
page, = parse('''
<style>
@page {
margin: 100px;
@top-center {
content: "";
margin: auto 0;
}
}
</style>
''')
html, margin_box = page.children
assert margin_box.margin_height() == 100
assert margin_box.margin_top == 0
assert margin_box.margin_bottom == 0
assert margin_box.height == 100
# Test rule 6
page, = parse('''
<style>
@page {
margin: 100px;
@bottom-right {
content: "";
margin: auto;
height: 70px;
}
}
</style>
''')
html, margin_box = page.children
assert margin_box.margin_height() == 100
assert margin_box.margin_top == 15
assert margin_box.margin_bottom == 15
assert margin_box.height == 70
# Rule 2 inhibits rule 6
page, = parse('''
<style>
@page {
margin: 100px;
@bottom-center {
content: "";
margin: auto 0;
height: 150px;
}
}
</style>
''')
html, margin_box = page.children
assert margin_box.margin_height() == 100
assert margin_box.margin_top == 0
assert margin_box.margin_bottom == -50 # outside
assert margin_box.height == 150
@assert_no_logs
def test_preferred_widths():
"""Unit tests for preferred widths."""
def get_float_width(body_width):
page, = parse('''
<body style="width: %spx; font-family: ahem">
<p style="white-space: pre-line; float: left">
Lorem ipsum dolor sit amet,
consectetur elit
</p>
<!-- ^ No-break space here -->
''' % body_width)
html, = page.children
body, = html.children
paragraph, = body.children
return paragraph.width
# Preferred minimum width:
assert get_float_width(10) == len('consectetur elit') * 16
# Preferred width:
assert get_float_width(1000000) == len('Lorem ipsum dolor sit amet,') * 16
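    # Ahem glyphs are 16px wide at the default 16px font size: the preferred
    # minimum (min-content) width is the widest unbreakable chunk, here
    # 'consectetur elit' held together by the no-break space, and the
    # preferred (max-content) width is the longest line.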
# Non-regression test:
# Incorrect whitespace handling in preferred width used to cause
# unnecessary line break.
page, = parse('''
<p style="float: left">Lorem <em>ipsum</em> dolor.</p>
''')
html, = page.children
body, = html.children
paragraph, = body.children
assert len(paragraph.children) == 1
assert isinstance(paragraph.children[0], boxes.LineBox)
page, = parse('''
<style>img { width: 20px }</style>
<p style="float: left">
<img src=pattern.png><img src=pattern.png><br>
<img src=pattern.png></p>
''')
html, = page.children
body, = html.children
paragraph, = body.children
assert paragraph.width == 40
page, = parse('''<style>p { font: 20px Ahem }</style>
<p style="float: left">XX<br>XX<br>X</p>''')
html, = page.children
body, = html.children
paragraph, = body.children
assert paragraph.width == 40
    # The space at the start of the line is collapsed.
page, = parse('''<style>p { font: 20px Ahem }</style>
<p style="float: left">XX<br> XX<br>X</p>''')
html, = page.children
body, = html.children
paragraph, = body.children
assert paragraph.width == 40
@assert_no_logs
def test_margin_boxes_variable_dimension():
def get_widths(css):
"""Take some CSS to have inside @page
Return margin-widths of the sub-sequence of the three margin boxes
that are generated.
The containing block’s width is 600px. It starts at x = 100 and ends
at x = 700.
"""
expected_at_keywords = [
at_keyword for at_keyword in [
'@top-left', '@top-center', '@top-right']
if at_keyword + ' { content: ' in css]
page, = parse('''
<style>
@page {
size: 800px;
margin: 100px;
padding: 42px;
border: 7px solid;
%s
}
</style>
''' % css)
assert page.children[0].element_tag == 'html'
margin_boxes = page.children[1:]
assert [box.at_keyword for box in margin_boxes] == expected_at_keywords
offsets = {'@top-left': 0, '@top-center': 0.5, '@top-right': 1}
for box in margin_boxes:
assert box.position_x == 100 + offsets[box.at_keyword] * (
600 - box.margin_width())
return [box.margin_width() for box in margin_boxes]
def images(*widths):
return ' '.join(
'url(\'data:image/svg+xml,<svg width="%i" height="10"></svg>\')'
% width for width in widths)
# Use preferred widths if they fit
css = '''
@top-left { content: %s }
@top-center { content: %s }
@top-right { content: %s }
''' % (images(50, 50), images(50, 50), images(50, 50))
assert get_widths(css) == [100, 100, 100]
# 'auto' margins are set to 0
css = '''
@top-left { content: %s; margin: auto }
@top-center { content: %s }
@top-right { content: %s }
''' % (images(50, 50), images(50, 50), images(50, 50))
assert get_widths(css) == [100, 100, 100]
# Use at least minimum widths, even if boxes overlap
css = '''
@top-left { content: %s }
@top-center { content: %s }
@top-right { content: 'foo'; width: 200px }
''' % (images(100, 50), images(300, 150))
# @top-center is 300px wide and centered: this leaves 150 on either side
# There is 50px of overlap with @top-right
assert get_widths(css) == [150, 300, 200]
# In the intermediate case, distribute the remaining space proportionally
css = '''
@top-left { content: %s }
@top-center { content: %s }
@top-right { content: %s }
''' % (images(150, 150), images(150, 150), images(150, 150))
assert get_widths(css) == [200, 200, 200]
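    # Each box above has a min-content width of 150px (one image) and a
    # preferred width of 300px (two images); 600 - 3 * 150 = 150px remain,
    # split proportionally to each box’s slack: 50px more per box.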
css = '''
@top-left { content: %s }
@top-center { content: %s }
@top-right { content: %s }
''' % (images(100, 100, 100), images(100, 100), images(10))
assert get_widths(css) == [220, 160, 10]
css = '''
@top-left { content: %s; width: 205px }
@top-center { content: %s }
@top-right { content: %s }
''' % (images(100, 100, 100), images(100, 100), images(10))
assert get_widths(css) == [205, 190, 10]
# 'width' and other properties have no effect without 'content'
css = '''
@top-left { width: 1000px; margin: 1000px; padding: 1000px;
border: 1000px solid }
@top-center { content: %s }
@top-right { content: %s }
''' % (images(100, 100), images(10))
assert get_widths(css) == [200, 10]
# This leaves 150px for @top-right’s shrink-to-fit
css = '''
@top-left { content: ''; width: 200px }
@top-center { content: ''; width: 300px }
@top-right { content: %s }
''' % images(50, 50)
assert get_widths(css) == [200, 300, 100]
css = '''
@top-left { content: ''; width: 200px }
@top-center { content: ''; width: 300px }
@top-right { content: %s }
''' % images(100, 100, 100)
assert get_widths(css) == [200, 300, 150]
css = '''
@top-left { content: ''; width: 200px }
@top-center { content: ''; width: 300px }
@top-right { content: %s }
''' % images(170, 175)
assert get_widths(css) == [200, 300, 175]
# Without @top-center
css = '''
@top-left { content: ''; width: 200px }
@top-right { content: ''; width: 500px }
'''
assert get_widths(css) == [200, 500]
css = '''
@top-left { content: ''; width: 200px }
@top-right { content: %s }
''' % images(150, 50, 150)
assert get_widths(css) == [200, 350]
css = '''
@top-left { content: ''; width: 200px }
@top-right { content: %s }
''' % images(150, 50, 150, 200)
assert get_widths(css) == [200, 400]
css = '''
@top-left { content: %s }
@top-right { content: ''; width: 200px }
''' % images(150, 50, 450)
assert get_widths(css) == [450, 200]
css = '''
@top-left { content: %s }
@top-right { content: %s }
''' % (images(150, 100), images(10, 120))
assert get_widths(css) == [250, 130]
css = '''
@top-left { content: %s }
@top-right { content: %s }
''' % (images(550, 100), images(10, 120))
assert get_widths(css) == [550, 120]
css = '''
@top-left { content: %s }
@top-right { content: %s }
''' % (images(250, 60), images(250, 180))
# 250 + (100 * 1 / 4), 250 + (100 * 3 / 4)
assert get_widths(css) == [275, 325]
@assert_no_logs
def test_margin_boxes_vertical_align():
"""
3 px -> +-----+
| 1 |
+-----+
43 px -> +-----+
53 px -> | 2 |
+-----+
83 px -> +-----+
| 3 |
103px -> +-----+
"""
page, = parse('''
<style>
@page {
size: 800px;
margin: 106px; /* margin boxes’ content height is 100px */
@top-left {
content: "foo"; line-height: 20px; border: 3px solid;
vertical-align: top;
}
@top-center {
content: "foo"; line-height: 20px; border: 3px solid;
vertical-align: middle;
}
@top-right {
content: "foo"; line-height: 20px; border: 3px solid;
vertical-align: bottom;
}
}
</style>
''')
html, top_left, top_center, top_right = page.children
line_1, = top_left.children
line_2, = top_center.children
line_3, = top_right.children
assert line_1.position_y == 3
assert line_2.position_y == 43
assert line_3.position_y == 83
@assert_no_logs
def test_margin_collapsing():
"""
    The vertical space between two sibling blocks is the max of their margins,
not the sum. But that’s only the simplest case...
"""
def assert_collapsing(vertical_space):
assert vertical_space('10px', '15px') == 15 # not 25
# "The maximum of the absolute values of the negative adjoining margins
# is deducted from the maximum of the positive adjoining margins"
assert vertical_space('-10px', '15px') == 5
assert vertical_space('10px', '-15px') == -5
assert vertical_space('-10px', '-15px') == -15
assert vertical_space('10px', 'auto') == 10 # 'auto' is 0
return vertical_space
def assert_NOT_collapsing(vertical_space):
assert vertical_space('10px', '15px') == 25
assert vertical_space('-10px', '15px') == 5
assert vertical_space('10px', '-15px') == -5
assert vertical_space('-10px', '-15px') == -25
assert vertical_space('10px', 'auto') == 10 # 'auto' is 0
return vertical_space
# Siblings
@assert_collapsing
def vertical_space_1(p1_margin_bottom, p2_margin_top):
page, = parse('''
<style>
p { font: 20px/1 serif } /* block height == 20px */
#p1 { margin-bottom: %s }
#p2 { margin-top: %s }
</style>
<p id=p1>Lorem ipsum
<p id=p2>dolor sit amet
''' % (p1_margin_bottom, p2_margin_top))
html, = page.children
body, = html.children
p1, p2 = body.children
p1_bottom = p1.content_box_y() + p1.height
p2_top = p2.content_box_y()
return p2_top - p1_bottom
# Not siblings, first is nested
@assert_collapsing
def vertical_space_2(p1_margin_bottom, p2_margin_top):
page, = parse('''
<style>
p { font: 20px/1 serif } /* block height == 20px */
#p1 { margin-bottom: %s }
#p2 { margin-top: %s }
</style>
<div>
<p id=p1>Lorem ipsum
</div>
<p id=p2>dolor sit amet
''' % (p1_margin_bottom, p2_margin_top))
html, = page.children
body, = html.children
div, p2 = body.children
p1, = div.children
p1_bottom = p1.content_box_y() + p1.height
p2_top = p2.content_box_y()
return p2_top - p1_bottom
# Not siblings, second is nested
@assert_collapsing
def vertical_space_3(p1_margin_bottom, p2_margin_top):
page, = parse('''
<style>
p { font: 20px/1 serif } /* block height == 20px */
#p1 { margin-bottom: %s }
#p2 { margin-top: %s }
</style>
<p id=p1>Lorem ipsum
<div>
<p id=p2>dolor sit amet
</div>
''' % (p1_margin_bottom, p2_margin_top))
html, = page.children
body, = html.children
p1, div = body.children
p2, = div.children
p1_bottom = p1.content_box_y() + p1.height
p2_top = p2.content_box_y()
return p2_top - p1_bottom
# Not siblings, second is doubly nested
@assert_collapsing
def vertical_space_4(p1_margin_bottom, p2_margin_top):
page, = parse('''
<style>
p { font: 20px/1 serif } /* block height == 20px */
#p1 { margin-bottom: %s }
#p2 { margin-top: %s }
</style>
<p id=p1>Lorem ipsum
<div>
<div>
<p id=p2>dolor sit amet
</div>
</div>
''' % (p1_margin_bottom, p2_margin_top))
html, = page.children
body, = html.children
p1, div1 = body.children
div2, = div1.children
p2, = div2.children
p1_bottom = p1.content_box_y() + p1.height
p2_top = p2.content_box_y()
return p2_top - p1_bottom
# Collapsing with children
@assert_collapsing
def vertical_space_5(margin_1, margin_2):
page, = parse('''
<style>
p { font: 20px/1 serif } /* block height == 20px */
#div1 { margin-top: %s }
#div2 { margin-top: %s }
</style>
<p>Lorem ipsum
<div id=div1>
<div id=div2>
<p id=p2>dolor sit amet
</div>
</div>
''' % (margin_1, margin_2))
html, = page.children
body, = html.children
p1, div1 = body.children
div2, = div1.children
p2, = div2.children
p1_bottom = p1.content_box_y() + p1.height
p2_top = p2.content_box_y()
# Parent and element edge are the same:
assert div1.border_box_y() == p2.border_box_y()
assert div2.border_box_y() == p2.border_box_y()
return p2_top - p1_bottom
# Block formatting context: Not collapsing with children
@assert_NOT_collapsing
def vertical_space_6(margin_1, margin_2):
page, = parse('''
<style>
p { font: 20px/1 serif } /* block height == 20px */
#div1 { margin-top: %s; overflow: hidden }
#div2 { margin-top: %s }
</style>
<p>Lorem ipsum
<div id=div1>
<div id=div2>
<p id=p2>dolor sit amet
</div>
</div>
''' % (margin_1, margin_2))
html, = page.children
body, = html.children
p1, div1 = body.children
div2, = div1.children
p2, = div2.children
p1_bottom = p1.content_box_y() + p1.height
p2_top = p2.content_box_y()
return p2_top - p1_bottom
# Collapsing through an empty div
@assert_collapsing
def vertical_space_7(p1_margin_bottom, p2_margin_top):
page, = parse('''
<style>
p { font: 20px/1 serif } /* block height == 20px */
#p1 { margin-bottom: %s }
#p2 { margin-top: %s }
div { margin-bottom: %s; margin-top: %s }
</style>
<p id=p1>Lorem ipsum
<div></div>
<p id=p2>dolor sit amet
''' % (2 * (p1_margin_bottom, p2_margin_top)))
html, = page.children
body, = html.children
p1, div, p2 = body.children
p1_bottom = p1.content_box_y() + p1.height
p2_top = p2.content_box_y()
return p2_top - p1_bottom
# The root element does not collapse
@assert_NOT_collapsing
def vertical_space_8(margin_1, margin_2):
page, = parse('''
<html>
<style>
html { margin-top: %s }
body { margin-top: %s }
</style>
<body>
<p>Lorem ipsum
''' % (margin_1, margin_2))
html, = page.children
body, = html.children
p1, = body.children
p1_top = p1.content_box_y()
# Vertical space from y=0
return p1_top
# <body> DOES collapse
@assert_collapsing
def vertical_space_9(margin_1, margin_2):
page, = parse('''
<html>
<style>
body { margin-top: %s }
div { margin-top: %s }
</style>
<body>
<div>
<p>Lorem ipsum
''' % (margin_1, margin_2))
html, = page.children
body, = html.children
div, = body.children
p1, = div.children
p1_top = p1.content_box_y()
# Vertical space from y=0
return p1_top
@assert_no_logs
def test_relative_positioning():
page, = parse('''
<style>
p { height: 20px }
</style>
<p>1</p>
<div style="position: relative; top: 10px">
<p>2</p>
<p style="position: relative; top: -5px; left: 5px">3</p>
<p>4</p>
<p style="position: relative; bottom: 5px; right: 5px">5</p>
<p style="position: relative">6</p>
<p>7</p>
</div>
<p>8</p>
''')
html, = page.children
body, = html.children
p1, div, p8 = body.children
p2, p3, p4, p5, p6, p7 = div.children
assert (p1.position_x, p1.position_y) == (0, 0)
assert (div.position_x, div.position_y) == (0, 30)
assert (p2.position_x, p2.position_y) == (0, 30)
assert (p3.position_x, p3.position_y) == (5, 45) # (0 + 5, 50 - 5)
assert (p4.position_x, p4.position_y) == (0, 70)
assert (p5.position_x, p5.position_y) == (-5, 85) # (0 - 5, 90 - 5)
assert (p6.position_x, p6.position_y) == (0, 110)
assert (p7.position_x, p7.position_y) == (0, 130)
assert (p8.position_x, p8.position_y) == (0, 140)
assert div.height == 120
page, = parse('''
<style>
img { width: 20px }
body { font-size: 0 } /* Remove spaces */
</style>
<body>
<span><img src=pattern.png></span>
<span style="position: relative; left: 10px">
<img src=pattern.png>
<img src=pattern.png
style="position: relative; left: -5px; top: 5px">
<img src=pattern.png>
<img src=pattern.png
style="position: relative; right: 5px; bottom: 5px">
<img src=pattern.png style="position: relative">
<img src=pattern.png>
</span>
<span><img src=pattern.png></span>
''')
html, = page.children
body, = html.children
line, = body.children
span1, span2, span3 = line.children
img1, = span1.children
img2, img3, img4, img5, img6, img7 = span2.children
img8, = span3.children
assert (img1.position_x, img1.position_y) == (0, 0)
# Don't test the span2.position_y because it depends on fonts
assert span2.position_x == 30
assert (img2.position_x, img2.position_y) == (30, 0)
assert (img3.position_x, img3.position_y) == (45, 5) # (50 - 5, y + 5)
assert (img4.position_x, img4.position_y) == (70, 0)
assert (img5.position_x, img5.position_y) == (85, -5) # (90 - 5, y - 5)
assert (img6.position_x, img6.position_y) == (110, 0)
assert (img7.position_x, img7.position_y) == (130, 0)
assert (img8.position_x, img8.position_y) == (140, 0)
assert span2.width == 120
@assert_no_logs
def test_absolute_positioning():
page, = parse('''
<div style="margin: 3px">
<div style="height: 20px; width: 20px; position: absolute"></div>
<div style="height: 20px; width: 20px; position: absolute;
left: 0"></div>
<div style="height: 20px; width: 20px; position: absolute;
top: 0"></div>
</div>
''')
html, = page.children
body, = html.children
div1, = body.children
div2, div3, div4 = div1.children
assert div1.height == 0
assert (div1.position_x, div1.position_y) == (0, 0)
assert (div2.width, div2.height) == (20, 20)
assert (div2.position_x, div2.position_y) == (3, 3)
assert (div3.width, div3.height) == (20, 20)
assert (div3.position_x, div3.position_y) == (0, 3)
assert (div4.width, div4.height) == (20, 20)
assert (div4.position_x, div4.position_y) == (3, 0)
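    # Without 'top' or 'left' an absolute box keeps its static position
    # (inside the 3px margin); with an offset it is placed relative to the
    # initial containing block, since no ancestor is positioned here.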
page, = parse('''
<div style="position: relative; width: 20px">
<div style="height: 20px; width: 20px; position: absolute"></div>
<div style="height: 20px; width: 20px"></div>
</div>
''')
html, = page.children
body, = html.children
div1, = body.children
div2, div3 = div1.children
for div in (div1, div2, div3):
assert (div.position_x, div.position_y) == (0, 0)
assert (div.width, div.height) == (20, 20)
page, = parse('''
<body style="font-size: 0">
<img src=pattern.png>
<span style="position: relative">
<span style="position: absolute">2</span>
<span style="position: absolute">3</span>
<span>4</span>
</span>
''')
html, = page.children
body, = html.children
line, = body.children
img, span1 = line.children
span2, span3, span4 = span1.children
assert span1.position_x == 4
assert (span2.position_x, span2.position_y) == (4, 0)
assert (span3.position_x, span3.position_y) == (4, 0)
assert span4.position_x == 4
page, = parse('''
<style> img { width: 5px; height: 20px} </style>
<body style="font-size: 0">
<img src=pattern.png>
<span style="position: absolute">2</span>
<img src=pattern.png>
''')
html, = page.children
body, = html.children
line, = body.children
img1, span, img2 = line.children
assert (img1.position_x, img1.position_y) == (0, 0)
assert (span.position_x, span.position_y) == (5, 0)
assert (img2.position_x, img2.position_y) == (5, 0)
page, = parse('''
<style> img { width: 5px; height: 20px} </style>
<body style="font-size: 0">
<img src=pattern.png>
<span style="position: absolute; display: block">2</span>
<img src=pattern.png>
''')
html, = page.children
body, = html.children
line, = body.children
img1, span, img2 = line.children
assert (img1.position_x, img1.position_y) == (0, 0)
assert (span.position_x, span.position_y) == (0, 20)
assert (img2.position_x, img2.position_y) == (5, 0)
page, = parse('''
<div style="position: relative; width: 20px; height: 60px;
border: 10px solid; padding-top: 6px; top: 5px; left: 1px">
<div style="height: 20px; width: 20px; position: absolute;
bottom: 50%"></div>
<div style="height: 20px; width: 20px; position: absolute;
top: 13px"></div>
</div>
''')
html, = page.children
body, = html.children
div1, = body.children
div2, div3 = div1.children
assert (div1.position_x, div1.position_y) == (1, 5)
assert (div1.width, div1.height) == (20, 60)
assert (div1.border_width(), div1.border_height()) == (40, 86)
assert (div2.position_x, div2.position_y) == (11, 28)
assert (div2.width, div2.height) == (20, 20)
assert (div3.position_x, div3.position_y) == (11, 28)
assert (div3.width, div3.height) == (20, 20)
page, = parse('''
<style>
@page { size: 1000px 2000px }
html { font-size: 0 }
p { height: 20px }
</style>
<p>1</p>
<div style="width: 100px">
<p>2</p>
<p style="position: absolute; top: -5px; left: 5px">3</p>
<p style="margin: 3px">4</p>
<p style="position: absolute; bottom: 5px; right: 15px;
width: 50px; height: 10%;
padding: 3px; margin: 7px">5
<span>
<img src="pattern.png">
<span style="position: absolute"></span>
<span style="position: absolute; top: -10px; right: 5px;
width: 20px; height: 15px"></span>
</span>
</p>
<p style="margin-top: 8px">6</p>
</div>
<p>7</p>
''')
html, = page.children
body, = html.children
p1, div, p7 = body.children
p2, p3, p4, p5, p6 = div.children
line, = p5.children
span1, = line.children
img, span2, span3 = span1.children
assert (p1.position_x, p1.position_y) == (0, 0)
assert (div.position_x, div.position_y) == (0, 20)
assert (p2.position_x, p2.position_y) == (0, 20)
assert (p3.position_x, p3.position_y) == (5, -5)
assert (p4.position_x, p4.position_y) == (0, 40)
# p5 x = page width - right - margin/padding/border - width
# = 1000 - 15 - 2 * 10 - 50
# = 915
# p5 y = page height - bottom - margin/padding/border - height
# = 2000 - 5 - 2 * 10 - 200
# = 1775
assert (p5.position_x, p5.position_y) == (915, 1775)
assert (img.position_x, img.position_y) == (925, 1785)
assert (span2.position_x, span2.position_y) == (929, 1785)
# span3 x = p5 right - p5 margin - span width - span right
# = 985 - 7 - 20 - 5
# = 953
# span3 y = p5 y + p5 margin top + span top
# = 1775 + 7 + -10
# = 1772
assert (span3.position_x, span3.position_y) == (953, 1772)
# p6 y = p4 y + p4 margin height - margin collapsing
# = 40 + 26 - 3
# = 63
assert (p6.position_x, p6.position_y) == (0, 63)
assert div.height == 71 # 20*3 + 2*3 + 8 - 3
assert (p7.position_x, p7.position_y) == (0, 91)
@assert_no_logs
def test_absolute_images():
page, = parse('''
<style>
img { display: block; position: absolute }
</style>
<div style="margin: 10px">
<img src=pattern.png />
<img src=pattern.png style="left: 15px" />
</div>
''')
html, = page.children
body, = html.children
div, = body.children
img1, img2 = div.children
assert div.height == 0
assert (div.position_x, div.position_y) == (0, 0)
assert (img1.position_x, img1.position_y) == (10, 10)
assert (img1.width, img1.height) == (4, 4)
assert (img2.position_x, img2.position_y) == (15, 10)
assert (img2.width, img2.height) == (4, 4)
# TODO: test the various cases in absolute_replaced()
@assert_no_logs
def test_fixed_positioning():
    # TODO: test page-break-before: left/right
page_1, page_2, page_3 = parse('''
a
<div style="page-break-before: always; page-break-after: always">
<p style="position: fixed">b</p>
</div>
c
''')
html, = page_1.children
assert [c.element_tag for c in html.children] == ['body', 'p']
html, = page_2.children
body, = html.children
div, = body.children
assert [c.element_tag for c in div.children] == ['p']
html, = page_3.children
assert [c.element_tag for c in html.children] == ['p', 'body']
@assert_no_logs
def test_floats():
# adjacent-floats-001
page, = parse('''
<style>
div { float: left }
img { width: 100px; vertical-align: top }
</style>
<div><img src=pattern.png /></div>
<div><img src=pattern.png /></div>''')
html, = page.children
body, = html.children
div_1, div_2 = body.children
assert outer_area(div_1) == (0, 0, 100, 100)
assert outer_area(div_2) == (100, 0, 100, 100)
# c414-flt-fit-000
page, = parse('''
<style>
body { width: 290px }
div { float: left; width: 100px; }
img { width: 60px; vertical-align: top }
</style>
<div><img src=pattern.png /><!-- 1 --></div>
<div><img src=pattern.png /><!-- 2 --></div>
<div><img src=pattern.png /><!-- 4 --></div>
<img src=pattern.png /><!-- 3
--><img src=pattern.png /><!-- 5 -->''')
html, = page.children
body, = html.children
div_1, div_2, div_4, anon_block = body.children
line_3, line_5 = anon_block.children
img_3, = line_3.children
img_5, = line_5.children
assert outer_area(div_1) == (0, 0, 100, 60)
assert outer_area(div_2) == (100, 0, 100, 60)
assert outer_area(img_3) == (200, 0, 60, 60)
assert outer_area(div_4) == (0, 60, 100, 60)
assert outer_area(img_5) == (100, 60, 60, 60)
# c414-flt-fit-002
page, = parse('''
<style type="text/css">
body { width: 200px }
p { width: 70px; height: 20px }
.left { float: left }
.right { float: right }
</style>
<p class="left"> ⇦ A 1 </p>
<p class="left"> ⇦ B 2 </p>
<p class="left"> ⇦ A 3 </p>
<p class="right"> B 4 ⇨ </p>
<p class="left"> ⇦ A 5 </p>
<p class="right"> B 6 ⇨ </p>
<p class="right"> B 8 ⇨ </p>
<p class="left"> ⇦ A 7 </p>
<p class="left"> ⇦ A 9 </p>
<p class="left"> ⇦ B 10 </p>
''')
html, = page.children
body, = html.children
positions = [(paragraph.position_x, paragraph.position_y)
for paragraph in body.children]
assert positions == [
(0, 0), (70, 0), (0, 20), (130, 20), (0, 40), (130, 40),
(130, 60), (0, 60), (0, 80), (70, 80), ]
# c414-flt-wrap-000 ... more or less
page, = parse('''
<style>
body { width: 100px }
p { float: left; height: 100px }
img { width: 60px; vertical-align: top }
</style>
<p style="width: 20px"></p>
<p style="width: 100%"></p>
<img src=pattern.png /><img src=pattern.png />
''')
html, = page.children
body, = html.children
p_1, p_2, anon_block = body.children
line_1, line_2 = anon_block.children
assert anon_block.position_y == 0
assert (line_1.position_x, line_1.position_y) == (20, 0)
assert (line_2.position_x, line_2.position_y) == (0, 200)
# c414-flt-wrap-000 with text ... more or less
page, = parse('''
<style>
body { width: 100px; font: 60px Ahem; }
p { float: left; height: 100px }
img { width: 60px; vertical-align: top }
</style>
<p style="width: 20px"></p>
<p style="width: 100%"></p>
A B
''')
html, = page.children
body, = html.children
p_1, p_2, anon_block = body.children
line_1, line_2 = anon_block.children
assert anon_block.position_y == 0
assert (line_1.position_x, line_1.position_y) == (20, 0)
assert (line_2.position_x, line_2.position_y) == (0, 200)
# floats-placement-vertical-001b
page, = parse('''
<style>
body { width: 90px; font-size: 0 }
img { vertical-align: top }
</style>
<body>
<span>
<img src=pattern.png style="width: 50px" />
<img src=pattern.png style="width: 50px" />
<img src=pattern.png style="float: left; width: 30px" />
</span>
''')
html, = page.children
body, = html.children
line_1, line_2 = body.children
span_1, = line_1.children
span_2, = line_2.children
img_1, = span_1.children
img_2, img_3 = span_2.children
assert outer_area(img_1) == (0, 0, 50, 50)
assert outer_area(img_2) == (30, 50, 50, 50)
assert outer_area(img_3) == (0, 50, 30, 30)
# Variant of the above: no <span>
page, = parse('''
<style>
body { width: 90px; font-size: 0 }
img { vertical-align: top }
</style>
<body>
<img src=pattern.png style="width: 50px" />
<img src=pattern.png style="width: 50px" />
<img src=pattern.png style="float: left; width: 30px" />
''')
html, = page.children
body, = html.children
line_1, line_2 = body.children
img_1, = line_1.children
img_2, img_3 = line_2.children
assert outer_area(img_1) == (0, 0, 50, 50)
assert outer_area(img_2) == (30, 50, 50, 50)
assert outer_area(img_3) == (0, 50, 30, 30)
    # Floats do not affect other pages
page_1, page_2 = parse('''
<style>
body { width: 90px; font-size: 0 }
img { vertical-align: top }
</style>
<body>
<img src=pattern.png style="float: left; width: 30px" />
<img src=pattern.png style="width: 50px" />
<div style="page-break-before: always"></div>
<img src=pattern.png style="width: 50px" />
''')
html, = page_1.children
body, = html.children
float_img, anon_block, = body.children
line, = anon_block.children
img_1, = line.children
assert outer_area(float_img) == (0, 0, 30, 30)
assert outer_area(img_1) == (30, 0, 50, 50)
html, = page_2.children
body, = html.children
div, anon_block = body.children
line, = anon_block.children
img_2, = line.children
assert outer_area(img_2) == (0, 0, 50, 50)
@assert_no_logs
def test_floats_page_breaks():
"""Tests the page breaks when floated boxes
do not fit the page."""
# Tests floated images shorter than the page
pages = parse('''
<style>
@page { size: 100px; margin: 10px }
img { height: 45px; width:70px; float: left;}
</style>
<body>
<img src=pattern.png>
<!-- page break should be here !!! -->
<img src=pattern.png>
''')
assert len(pages) == 2
page_images = []
for page in pages:
images = [d for d in page.descendants() if d.element_tag == 'img']
assert all([img.element_tag == 'img' for img in images])
assert all([img.position_x == 10 for img in images])
page_images.append(images)
del images
positions_y = [[img.position_y for img in images]
for images in page_images]
assert positions_y == [[10], [10]]
# Tests floated images taller than the page
pages = parse('''
<style>
@page { size: 100px; margin: 10px }
img { height: 81px; width:70px; float: left;}
</style>
<body>
<img src=pattern.png>
<!-- page break should be here !!! -->
<img src=pattern.png>
''')
assert len(pages) == 2
page_images = []
for page in pages:
images = [d for d in page.descendants() if d.element_tag == 'img']
assert all([img.element_tag == 'img' for img in images])
assert all([img.position_x == 10 for img in images])
page_images.append(images)
del images
positions_y = [[img.position_y for img in images]
for images in page_images]
assert positions_y == [[10], [10]]
# Tests floated images shorter than the page
pages = parse('''
<style>
@page { size: 100px; margin: 10px }
img { height: 30px; width:70px; float: left;}
</style>
<body>
<img src=pattern.png>
<img src=pattern.png>
<!-- page break should be here !!! -->
<img src=pattern.png>
<img src=pattern.png>
<!-- page break should be here !!! -->
<img src=pattern.png>
''')
assert len(pages) == 3
page_images = []
for page in pages:
images = [d for d in page.descendants() if d.element_tag == 'img']
assert all([img.element_tag == 'img' for img in images])
assert all([img.position_x == 10 for img in images])
page_images.append(images)
del images
positions_y = [[img.position_y for img in images]
for images in page_images]
assert positions_y == [[10, 40], [10, 40], [10]]
    # the last div does not fit and is pushed to the next page
pages = parse('''
<style>
@page{
size: 110px;
margin: 10px;
padding: 0;
}
.large {
width: 10px;
height: 60px;
}
.small {
width: 10px;
height: 20px;
}
</style>
<body>
<div class="large"></div>
<div class="small"></div>
<div class="large"></div>
''')
assert len(pages) == 2
page_divs = []
for page in pages:
divs = [div for div in page.descendants() if div.element_tag == 'div']
assert all([div.element_tag == 'div' for div in divs])
page_divs.append(divs)
del divs
positions_y = [[div.position_y for div in divs] for divs in page_divs]
assert positions_y == [[10, 70], [10]]
    # the last div does not fit and is pushed to the next page;
    # the center div has page-break-after: avoid, so it must not be
    # separated from the following div and moves to the next page with it
pages = parse('''
<style>
@page{
size: 110px;
margin: 10px;
padding: 0;
}
.large {
width: 10px;
height: 60px;
}
.small {
width: 10px;
height: 20px;
page-break-after: avoid;
}
</style>
<body>
<div class="large"></div>
<div class="small"></div>
<div class="large"></div>
''')
assert len(pages) == 2
page_divs = []
for page in pages:
divs = [div for div in page.descendants() if div.element_tag == 'div']
assert all([div.element_tag == 'div' for div in divs])
page_divs.append(divs)
del divs
positions_y = [[div.position_y for div in divs] for divs in page_divs]
assert positions_y == [[10], [10, 30]]
    # the center div asks not to be followed by a page break
    # (page-break-after: avoid), but the following div cannot fit on the
    # same page as it, so it gets pushed to its own page anyway
pages = parse('''
<style>
@page{
size: 110px;
margin: 10px;
padding: 0;
}
.large {
width: 10px;
height: 80px;
}
.small {
width: 10px;
height: 20px;
page-break-after: avoid;
}
</style>
<body>
<div class="large"></div>
<div class="small"></div>
<div class="large"></div>
''')
assert len(pages) == 3
page_divs = []
for page in pages:
divs = [div for div in page.descendants() if div.element_tag == 'div']
assert all([div.element_tag == 'div' for div in divs])
page_divs.append(divs)
del divs
positions_y = [[div.position_y for div in divs] for divs in page_divs]
assert positions_y == [[10], [10], [10]]
@assert_no_logs
def test_font_stretch():
page, = parse('''
<style>p { float: left }</style>
<p>Hello, world!</p>
<p style="font-stretch: semi-condensed">Hello, world!</p>
<p style="font-stretch: semi-expanded">Hello, world!</p>
''')
html, = page.children
body, = html.children
p_1, p_2, p_3 = body.children
normal = p_1.width
condensed = p_2.width
assert condensed < normal
# TODO: when @font-face is supported use a font with an expanded variant.
# expanded = p_3.width
# assert normal < expanded
@assert_no_logs
def test_box_decoration_break():
# http://www.w3.org/TR/css3-background/#the-box-decoration-break
# Property not implemented yet, always "slice".
page_1, page_2 = parse('''
<style>
@page { size: 100px }
p { padding: 2px; border: 3px solid; margin: 5px }
img { height: 40px; vertical-align: top }
</style>
<p>
<img src=pattern.png><br>
<img src=pattern.png><br>
<img src=pattern.png><br>
<img src=pattern.png><br>''')
html, = page_1.children
body, = html.children
paragraph, = body.children
line_1, line_2 = paragraph.children
assert paragraph.position_y == 0
assert paragraph.margin_top == 5
assert paragraph.border_top_width == 3
assert paragraph.padding_top == 2
assert paragraph.content_box_y() == 10
assert line_1.position_y == 10
assert line_2.position_y == 50
assert paragraph.height == 80
assert paragraph.margin_bottom == 0
assert paragraph.border_bottom_width == 0
assert paragraph.padding_bottom == 0
assert paragraph.margin_height() == 90
html, = page_2.children
body, = html.children
paragraph, = body.children
line_1, line_2 = paragraph.children
assert paragraph.position_y == 0
assert paragraph.margin_top == 0
assert paragraph.border_top_width == 0
assert paragraph.padding_top == 0
assert paragraph.content_box_y() == 0
assert line_1.position_y == 0
assert line_2.position_y == 40
assert paragraph.height == 80
assert paragraph.padding_bottom == 2
assert paragraph.border_bottom_width == 3
assert paragraph.margin_bottom == 5
assert paragraph.margin_height() == 90
@assert_no_logs
def test_hyphenation():
def line_count(source):
page, = parse('<html style="width: 5em; font-family: ahem">' + source)
html, = page.children
body, = html.children
lines = body.children
return len(lines)
# Default: no hyphenation
assert line_count('<body>hyphenation') == 1
# lang only: no hyphenation
assert line_count(
'<body lang=en>hyphenation') == 1
# `hyphens: auto` only: no hyphenation
assert line_count(
'<body style="-weasy-hyphens: auto">hyphenation') == 1
# lang + `hyphens: auto`: hyphenation
assert line_count(
'<body style="-weasy-hyphens: auto" lang=en>hyphenation') > 1
# Hyphenation with soft hyphens
assert line_count('<body>hyp­henation') == 2
# … unless disabled
assert line_count(
'<body style="-weasy-hyphens: none">hyp­henation') == 1
@assert_no_logs
def test_hyphenate_character():
page, = parse(
'<html style="width: 5em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-character: \'!\'" lang=en>'
'hyphenation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) > 1
assert lines[0].children[0].text.endswith('!')
full_text = ''.join(line.children[0].text for line in lines)
assert full_text.replace('!', '') == 'hyphenation'
page, = parse(
'<html style="width: 5em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-character: \'é\'" lang=en>'
'hyphenation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) > 1
assert lines[0].children[0].text.endswith('é')
full_text = ''.join(line.children[0].text for line in lines)
assert full_text.replace('é', '') == 'hyphenation'
page, = parse(
'<html style="width: 5em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-character: \'ù ù\'" lang=en>'
'hyphenation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) > 1
assert lines[0].children[0].text.endswith('ù ù')
full_text = ''.join(line.children[0].text for line in lines)
assert full_text.replace(' ', '').replace('ù', '') == 'hyphenation'
page, = parse(
'<html style="width: 5em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-character: \'\'" lang=en>'
'hyphenation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) > 1
full_text = ''.join(line.children[0].text for line in lines)
assert full_text == 'hyphenation'
# TODO: strange error with some characters
# page, = parse(
# '<html style="width: 5em">'
# '<body style="-weasy-hyphens: auto;'
# '-weasy-hyphenate-character: \'———\'" lang=en>'
# 'hyphenation')
# html, = page.children
# body, = html.children
# lines = body.children
# assert len(lines) > 1
# assert lines[0].children[0].text.endswith('———')
# full_text = ''.join(line.children[0].text for line in lines)
# assert full_text.replace('—', '') == 'hyphenation'
@assert_no_logs
def test_hyphenate_limit_zone():
page, = parse(
'<html style="width: 12em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-limit-zone: 0" lang=en>'
'mmmmm hyphenation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) == 2
assert lines[0].children[0].text.endswith('‐')
full_text = ''.join(line.children[0].text for line in lines)
assert full_text.replace('‐', '') == 'mmmmm hyphenation'
page, = parse(
'<html style="width: 12em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-limit-zone: 9em" lang=en>'
'mmmmm hyphenation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) > 1
assert lines[0].children[0].text.endswith('mm')
full_text = ''.join(line.children[0].text for line in lines)
assert full_text == 'mmmmmhyphenation'
page, = parse(
'<html style="width: 12em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-limit-zone: 5%" lang=en>'
'mmmmm hyphenation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) == 2
assert lines[0].children[0].text.endswith('‐')
full_text = ''.join(line.children[0].text for line in lines)
assert full_text.replace('‐', '') == 'mmmmm hyphenation'
page, = parse(
'<html style="width: 12em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-limit-zone: 95%" lang=en>'
'mmmmm hyphenation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) > 1
assert lines[0].children[0].text.endswith('mm')
full_text = ''.join(line.children[0].text for line in lines)
assert full_text == 'mmmmmhyphenation'
@assert_no_logs
def test_hyphenate_limit_chars():
def line_count(limit_chars):
page, = parse((
'<html style="width: 1em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-limit-chars: %s" lang=en>'
'hyphen') % limit_chars)
html, = page.children
body, = html.children
lines = body.children
return len(lines)
assert line_count('auto') == 2
assert line_count('auto auto 0') == 2
assert line_count('0 0 0') == 2
assert line_count('4 4 auto') == 1
assert line_count('6 2 4') == 2
assert line_count('auto 1 auto') == 2
assert line_count('7 auto auto') == 1
assert line_count('6 auto auto') == 2
assert line_count('5 2') == 2
assert line_count('3') == 2
assert line_count('2 4 6') == 1
assert line_count('auto 4') == 1
assert line_count('auto 2') == 2
@assert_no_logs
def test_overflow_wrap():
def get_lines(wrap, text):
page, = parse('''
<style>
body {width: 80px; overflow: hidden; font-family: ahem; }
span {overflow-wrap: %s; white-space: normal; }
</style>
<body style="-weasy-hyphens: auto;" lang="en">
<span>%s
''' % (wrap, text))
html, = page.children
body, = html.children
body_lines = []
for line in body.children:
box, = line.children
            text_box, = box.children
            body_lines.append(text_box.text)
return body_lines
# break-word
lines = get_lines('break-word', 'aaaaaaaa')
assert len(lines) > 1
full_text = ''.join(line for line in lines)
assert full_text == 'aaaaaaaa'
# normal
lines = get_lines('normal', 'aaaaaaaa')
assert len(lines) == 1
full_text = ''.join(line for line in lines)
assert full_text == 'aaaaaaaa'
# break-word after hyphenation
lines = get_lines('break-word', 'hyphenations')
assert len(lines) > 3
full_text = ''.join(line for line in lines)
assert full_text == "hy\u2010phen\u2010ations"
# break word after normal white-space wrap and hyphenation
lines = get_lines(
'break-word', "A splitted word. An hyphenated word.")
assert len(lines) > 8
full_text = ''.join(line for line in lines)
assert full_text == "Asplittedword.Anhy\u2010phen\u2010atedword."
@assert_no_logs
def test_white_space():
"""Test the white-space property."""
def lines(width, space):
page, = parse('''
<style>
body { font-size: 100px; width: %ipx }
span { white-space: %s }
</style>
<body><span>This \n is text''' % (width, space))
html, = page.children
body, = html.children
return body.children
line1, line2, line3 = lines(1, 'normal')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This'
box2, = line2.children
text2, = box2.children
assert text2.text == 'is'
box3, = line3.children
text3, = box3.children
assert text3.text == 'text'
line1, line2 = lines(1, 'pre')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This\xA0\xA0\xA0\xA0'
box2, = line2.children
text2, = box2.children
assert text2.text == '\xA0\xA0\xA0\xA0is\xA0text'
line1, = lines(1, 'nowrap')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This\xA0is\xA0text'
line1, line2, line3, line4 = lines(1, 'pre-wrap')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This\xA0\xA0\xA0\xA0\u200b'
box2, = line2.children
text2, = box2.children
assert text2.text == '\xA0\xA0\xA0\xA0\u200b'
box3, = line3.children
text3, = box3.children
assert text3.text == 'is\xA0\u200b'
box4, = line4.children
text4, = box4.children
assert text4.text == 'text'
line1, line2, line3 = lines(1, 'pre-line')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This'
box2, = line2.children
text2, = box2.children
assert text2.text == 'is'
box3, = line3.children
text3, = box3.children
assert text3.text == 'text'
line1, = lines(1000000, 'normal')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This is text'
line1, line2 = lines(1000000, 'pre')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This\xA0\xA0\xA0\xA0'
box2, = line2.children
text2, = box2.children
assert text2.text == '\xA0\xA0\xA0\xA0is\xA0text'
line1, = lines(1000000, 'nowrap')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This\xA0is\xA0text'
line1, line2 = lines(1000000, 'pre-wrap')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This\xA0\xA0\xA0\xA0\u200b'
box2, = line2.children
text2, = box2.children
assert text2.text == '\xA0\xA0\xA0\xA0\u200bis\xA0\u200btext'
line1, line2 = lines(1000000, 'pre-line')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This'
box2, = line2.children
text2, = box2.children
assert text2.text == 'is text'
@assert_no_logs
def test_linear_gradient():
red = (1, 0, 0, 1)
lime = (0, 1, 0, 1)
blue = (0, 0, 1, 1)
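    # Helper: render a one-page document whose background is `gradient_css`
    # and compare the gradient description computed by layer.image.layout()
    # against (scale, type, init geometry, stop positions, stop colors).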
def layout(gradient_css, type_='linear', init=(),
positions=[0, 1], colors=[blue, lime], scale=(1, 1)):
page, = parse('<style>@page { background: ' + gradient_css)
layer, = page.background.layers
scale_x, scale_y = scale
result = layer.image.layout(
400, 300, lambda dx, dy: (dx * scale_x, dy * scale_y))
expected = 1, type_, init, positions, colors
assert almost_equal(result, expected), (result, expected)
layout('linear-gradient(blue)', 'solid', blue, [], [])
layout('repeating-linear-gradient(blue)', 'solid', blue, [], [])
layout('repeating-linear-gradient(blue, lime 1.5px)',
'solid', (0, .5, .5, 1), [], [])
layout('linear-gradient(blue, lime)', init=(200, 0, 200, 300))
layout('repeating-linear-gradient(blue, lime)', init=(200, 0, 200, 300))
layout('repeating-linear-gradient(blue, lime 20px)',
init=(200, 0, 200, 20))
layout('repeating-linear-gradient(blue, lime 20px)',
'solid', (0, .5, .5, 1), [], [], scale=(1/20, 1/20))
layout('linear-gradient(to bottom, blue, lime)', init=(200, 0, 200, 300))
layout('linear-gradient(to top, blue, lime)', init=(200, 300, 200, 0))
layout('linear-gradient(to right, blue, lime)', init=(0, 150, 400, 150))
layout('linear-gradient(to left, blue, lime)', init=(400, 150, 0, 150))
layout('linear-gradient(to top left, blue, lime)',
init=(344, 342, 56, -42))
layout('linear-gradient(to top right, blue, lime)',
init=(56, 342, 344, -42))
layout('linear-gradient(to bottom left, blue, lime)',
init=(344, -42, 56, 342))
layout('linear-gradient(to bottom right, blue, lime)',
init=(56, -42, 344, 342))
layout('linear-gradient(270deg, blue, lime)', init=(400, 150, 0, 150))
layout('linear-gradient(.75turn, blue, lime)', init=(400, 150, 0, 150))
layout('linear-gradient(45deg, blue, lime)', init=(25, 325, 375, -25))
layout('linear-gradient(.125turn, blue, lime)', init=(25, 325, 375, -25))
layout('linear-gradient(.375turn, blue, lime)', init=(25, -25, 375, 325))
layout('linear-gradient(.625turn, blue, lime)', init=(375, -25, 25, 325))
layout('linear-gradient(.875turn, blue, lime)', init=(375, 325, 25, -25))
layout('linear-gradient(blue 2em, lime 20%)', init=(200, 32, 200, 60))
layout('linear-gradient(blue 100px, red, blue, red 160px, lime)',
init=(200, 100, 200, 300), colors=[blue, red, blue, red, lime],
positions=[0, .1, .2, .3, 1])
layout('linear-gradient(blue -100px, blue 0, red -12px, lime 50%)',
init=(200, -100, 200, 150), colors=[blue, blue, red, lime],
positions=[0, .4, .4, 1])
layout('linear-gradient(blue, blue, red, lime -7px)',
init=(200, 0, 200, 100), colors=[blue, blue, red, lime],
positions=[0, 0, 0, 0])
layout('repeating-linear-gradient(blue, blue, lime, lime -7px)',
'solid', (0, .5, .5, 1), [], [])
@assert_no_logs
def test_radial_gradient():
red = (1, 0, 0, 1)
lime = (0, 1, 0, 1)
blue = (0, 0, 1, 1)
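    # Helper: like the linear case, but an elliptical gradient is encoded as
    # a circle plus a vertical scale factor `scale_y`, so the expected center
    # y coordinate is pre-divided by scale_y before comparing.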
def layout(gradient_css, type_='radial', init=(),
positions=[0, 1], colors=[blue, lime], scale_y=1,
ctm_scale=(1, 1)):
if type_ == 'radial':
center_x, center_y, radius0, radius1 = init
init = (center_x, center_y / scale_y, radius0,
center_x, center_y / scale_y, radius1)
page, = parse('<style>@page { background: ' + gradient_css)
layer, = page.background.layers
ctm_scale_x, ctm_scale_y = ctm_scale
result = layer.image.layout(
400, 300, lambda dx, dy: (dx * ctm_scale_x, dy * ctm_scale_y))
expected = scale_y, type_, init, positions, colors
assert almost_equal(result, expected), (result, expected)
layout('radial-gradient(blue)', 'solid', blue, [], [])
layout('repeating-radial-gradient(blue)', 'solid', blue, [], [])
layout('radial-gradient(100px, blue, lime)',
init=(200, 150, 0, 100))
layout('radial-gradient(100px at right 20px bottom 30px, lime, red)',
init=(380, 270, 0, 100), colors=[lime, red])
layout('radial-gradient(0 0, blue, lime)',
init=(200, 150, 0, 1e-7))
layout('radial-gradient(1px 0, blue, lime)',
init=(200, 150, 0, 1e7), scale_y=1e-14)
layout('radial-gradient(0 1px, blue, lime)',
init=(200, 150, 0, 1e-7), scale_y=1e14)
layout('repeating-radial-gradient(20px 40px, blue, lime)',
init=(200, 150, 0, 20), scale_y=40/20)
layout('repeating-radial-gradient(20px 40px, blue, lime)',
init=(200, 150, 0, 20), scale_y=40/20, ctm_scale=(1/9, 1))
layout('repeating-radial-gradient(20px 40px, blue, lime)',
init=(200, 150, 0, 20), scale_y=40/20, ctm_scale=(1, 1/19))
layout('repeating-radial-gradient(20px 40px, blue, lime)',
'solid', (0, .5, .5, 1), [], [], ctm_scale=(1/11, 1))
layout('repeating-radial-gradient(20px 40px, blue, lime)',
'solid', (0, .5, .5, 1), [], [], ctm_scale=(1, 1/21))
layout('repeating-radial-gradient(42px, blue -20px, lime 10px)',
init=(200, 150, 10, 40))
layout('repeating-radial-gradient(42px, blue -140px, lime -110px)',
init=(200, 150, 10, 40))
layout('radial-gradient(42px, blue -20px, lime -1px)',
'solid', lime, [], [])
layout('radial-gradient(42px, blue -20px, lime 0)',
'solid', lime, [], [])
layout('radial-gradient(42px, blue -20px, lime 20px)',
init=(200, 150, 0, 20), colors=[(0, .5, .5, 1), lime])
layout('radial-gradient(100px 120px, blue, lime)',
init=(200, 150, 0, 100), scale_y=120/100)
layout('radial-gradient(25% 40%, blue, lime)',
init=(200, 150, 0, 100), scale_y=120/100)
layout('radial-gradient(circle closest-side, blue, lime)',
init=(200, 150, 0, 150))
layout('radial-gradient(circle closest-side at 150px 50px, blue, lime)',
init=(150, 50, 0, 50))
layout('radial-gradient(circle closest-side at 45px 50px, blue, lime)',
init=(45, 50, 0, 45))
layout('radial-gradient(circle closest-side at 420px 50px, blue, lime)',
init=(420, 50, 0, 20))
layout('radial-gradient(circle closest-side at 420px 281px, blue, lime)',
init=(420, 281, 0, 19))
layout('radial-gradient(closest-side, blue 20%, lime)',
init=(200, 150, 40, 200), scale_y=150/200)
layout('radial-gradient(closest-side at 300px 20%, blue, lime)',
init=(300, 60, 0, 100), scale_y=60/100)
layout('radial-gradient(closest-side at 10% 230px, blue, lime)',
init=(40, 230, 0, 40), scale_y=70/40)
layout('radial-gradient(circle farthest-side, blue, lime)',
init=(200, 150, 0, 200))
layout('radial-gradient(circle farthest-side at 150px 50px, blue, lime)',
init=(150, 50, 0, 250))
layout('radial-gradient(circle farthest-side at 45px 50px, blue, lime)',
init=(45, 50, 0, 355))
layout('radial-gradient(circle farthest-side at 420px 50px, blue, lime)',
init=(420, 50, 0, 420))
layout('radial-gradient(circle farthest-side at 220px 310px, blue, lime)',
init=(220, 310, 0, 310))
layout('radial-gradient(farthest-side, blue, lime)',
init=(200, 150, 0, 200), scale_y=150/200)
layout('radial-gradient(farthest-side at 300px 20%, blue, lime)',
init=(300, 60, 0, 300), scale_y=240/300)
layout('radial-gradient(farthest-side at 10% 230px, blue, lime)',
init=(40, 230, 0, 360), scale_y=230/360)
layout('radial-gradient(circle closest-corner, blue, lime)',
init=(200, 150, 0, 250))
layout('radial-gradient(circle closest-corner at 340px 80px, blue, lime)',
init=(340, 80, 0, 100))
layout('radial-gradient(circle closest-corner at 0 342px, blue, lime)',
init=(0, 342, 0, 42))
sqrt2 = math.sqrt(2)
layout('radial-gradient(closest-corner, blue, lime)',
init=(200, 150, 0, 200 * sqrt2), scale_y=150/200)
layout('radial-gradient(closest-corner at 450px 100px, blue, lime)',
init=(450, 100, 0, 50 * sqrt2), scale_y=100/50)
layout('radial-gradient(closest-corner at 40px 210px, blue, lime)',
init=(40, 210, 0, 40 * sqrt2), scale_y=90/40)
layout('radial-gradient(circle farthest-corner, blue, lime)',
init=(200, 150, 0, 250))
layout('radial-gradient(circle farthest-corner'
' at 300px -100px, blue, lime)',
init=(300, -100, 0, 500))
layout('radial-gradient(circle farthest-corner at 400px 0, blue, lime)',
init=(400, 0, 0, 500))
layout('radial-gradient(farthest-corner, blue, lime)',
init=(200, 150, 0, 200 * sqrt2), scale_y=150/200)
layout('radial-gradient(farthest-corner at 450px 100px, blue, lime)',
init=(450, 100, 0, 450 * sqrt2), scale_y=200/450)
layout('radial-gradient(farthest-corner at 40px 210px, blue, lime)',
init=(40, 210, 0, 360 * sqrt2), scale_y=210/360)
|
bsd-3-clause
|
yunque/sms-tools
|
lectures/03-Fourier-properties/plots-code/symmetry-real-even.py
|
26
|
1150
|
import matplotlib.pyplot as plt
import numpy as np
import sys
import math
from scipy.signal import triang
from scipy.fftpack import fft, fftshift
M = 127
N = 128
hM1 = int(math.floor((M+1)/2))
hM2 = int(math.floor(M/2))
x = triang(M)
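# Zero-phase windowing: put the second half of the odd-length window at the
# start of the FFT buffer and the first half at its end, so the window is
# centered at n = 0 and its DFT is real (the signal is real and even).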
fftbuffer = np.zeros(N)
fftbuffer[:hM1] = x[hM2:]
fftbuffer[N-hM2:] = x[:hM2]
X = fftshift(fft(fftbuffer))
mX = abs(X)
pX = np.unwrap(np.angle(X))
plt.figure(1, figsize=(9.5, 4))
plt.subplot(311)
plt.title('x[n]')
plt.plot(np.arange(-hM2, hM1, 1.0), x, 'b', lw=1.5)
plt.axis([-hM2, hM1, 0, 1])
plt.subplot(323)
plt.title('real(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), np.real(X), 'r', lw=1.5)
plt.axis([-N/2, N/2, min(np.real(X)), max(np.real(X))])
plt.subplot(324)
plt.title('im(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), np.imag(X), 'c', lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.subplot(325)
plt.title('abs(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), mX, 'r', lw=1.5)
plt.axis([-N/2, N/2, min(mX), max(mX)])
plt.subplot(326)
plt.title('angle(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), pX, 'c', lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.tight_layout()
plt.savefig('symmetry-real-even.png')
plt.show()
|
agpl-3.0
|
brianwrf/mongo-python-driver
|
tools/benchmark.py
|
18
|
5936
|
# Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MongoDB benchmarking suite."""
import time
import sys
sys.path[0:0] = [""]
import datetime
from pymongo import mongo_client
from pymongo import ASCENDING
trials = 2
per_trial = 5000
batch_size = 100
small = {}
medium = {"integer": 5,
"number": 5.05,
"boolean": False,
"array": ["test", "benchmark"]
}
# this is similar to the benchmark data posted to the user list
large = {"base_url": "http://www.example.com/test-me",
"total_word_count": 6743,
"access_time": datetime.datetime.utcnow(),
"meta_tags": {"description": "i am a long description string",
"author": "Holly Man",
"dynamically_created_meta_tag": "who know\n what"
},
"page_structure": {"counted_tags": 3450,
"no_of_js_attached": 10,
"no_of_images": 6
},
"harvested_words": ["10gen", "web", "open", "source", "application",
"paas", "platform-as-a-service", "technology",
"helps", "developers", "focus", "building",
"mongodb", "mongo"] * 20
}
def setup_insert(db, collection, object):
db.drop_collection(collection)
def insert(db, collection, object):
for i in range(per_trial):
to_insert = object.copy()
to_insert["x"] = i
db[collection].insert(to_insert)
def insert_batch(db, collection, object):
for i in range(per_trial / batch_size):
db[collection].insert([object] * batch_size)
def find_one(db, collection, x):
for _ in range(per_trial):
db[collection].find_one({"x": x})
def find(db, collection, x):
for _ in range(per_trial):
for _ in db[collection].find({"x": x}):
pass
def timed(name, function, args=[], setup=None):
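    # Run `function` `trials` times (re-running `setup` before each trial if
    # given), keep the best wall-clock time, and print throughput in
    # operations per second (per_trial / best_time).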
times = []
for _ in range(trials):
if setup:
setup(*args)
start = time.time()
function(*args)
times.append(time.time() - start)
best_time = min(times)
print "%s%d" % (name + (60 - len(name)) * ".", per_trial / best_time)
return best_time
def main():
c = mongo_client.MongoClient(connectTimeoutMS=60*1000) # jack up timeout
c.drop_database("benchmark")
db = c.benchmark
timed("insert (small, no index)", insert,
[db, 'small_none', small], setup_insert)
timed("insert (medium, no index)", insert,
[db, 'medium_none', medium], setup_insert)
timed("insert (large, no index)", insert,
[db, 'large_none', large], setup_insert)
db.small_index.create_index("x", ASCENDING)
timed("insert (small, indexed)", insert, [db, 'small_index', small])
db.medium_index.create_index("x", ASCENDING)
timed("insert (medium, indexed)", insert, [db, 'medium_index', medium])
db.large_index.create_index("x", ASCENDING)
timed("insert (large, indexed)", insert, [db, 'large_index', large])
timed("batch insert (small, no index)", insert_batch,
[db, 'small_bulk', small], setup_insert)
timed("batch insert (medium, no index)", insert_batch,
[db, 'medium_bulk', medium], setup_insert)
timed("batch insert (large, no index)", insert_batch,
[db, 'large_bulk', large], setup_insert)
timed("find_one (small, no index)", find_one,
[db, 'small_none', per_trial / 2])
timed("find_one (medium, no index)", find_one,
[db, 'medium_none', per_trial / 2])
timed("find_one (large, no index)", find_one,
[db, 'large_none', per_trial / 2])
timed("find_one (small, indexed)", find_one,
[db, 'small_index', per_trial / 2])
timed("find_one (medium, indexed)", find_one,
[db, 'medium_index', per_trial / 2])
timed("find_one (large, indexed)", find_one,
[db, 'large_index', per_trial / 2])
timed("find (small, no index)", find, [db, 'small_none', per_trial / 2])
timed("find (medium, no index)", find, [db, 'medium_none', per_trial / 2])
timed("find (large, no index)", find, [db, 'large_none', per_trial / 2])
timed("find (small, indexed)", find, [db, 'small_index', per_trial / 2])
timed("find (medium, indexed)", find, [db, 'medium_index', per_trial / 2])
timed("find (large, indexed)", find, [db, 'large_index', per_trial / 2])
# timed("find range (small, no index)", find,
# [db, 'small_none',
# {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}])
# timed("find range (medium, no index)", find,
# [db, 'medium_none',
# {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}])
# timed("find range (large, no index)", find,
# [db, 'large_none',
# {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}])
timed("find range (small, indexed)", find,
[db, 'small_index',
{"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}])
timed("find range (medium, indexed)", find,
[db, 'medium_index',
{"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}])
timed("find range (large, indexed)", find,
[db, 'large_index',
{"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}])
if __name__ == "__main__":
# cProfile.run("main()")
main()
|
apache-2.0
|
Amechi101/concepteur-market-app
|
venv/lib/python2.7/site-packages/django/contrib/admindocs/utils.py
|
216
|
3801
|
"Misc. utility functions/classes for admin documentation generator."
import re
from email.parser import HeaderParser
from email.errors import HeaderParseError
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from django.utils.encoding import force_bytes
try:
import docutils.core
import docutils.nodes
import docutils.parsers.rst.roles
except ImportError:
docutils_is_available = False
else:
docutils_is_available = True
def trim_docstring(docstring):
"""
Uniformly trims leading/trailing whitespace from docstrings.
Based on http://www.python.org/peps/pep-0257.html#handling-docstring-indentation
"""
if not docstring or not docstring.strip():
return ''
# Convert tabs to spaces and split into lines
lines = docstring.expandtabs().splitlines()
indent = min([len(line) - len(line.lstrip()) for line in lines if line.lstrip()])
trimmed = [lines[0].lstrip()] + [line[indent:].rstrip() for line in lines[1:]]
return "\n".join(trimmed).strip()
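# Illustrative example (not part of the original module):
# trim_docstring("\n First line\n\n   deeper body\n") returns
# 'First line\n\n  deeper body'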
def parse_docstring(docstring):
"""
Parse out the parts of a docstring. Returns (title, body, metadata).
"""
docstring = trim_docstring(docstring)
parts = re.split(r'\n{2,}', docstring)
title = parts[0]
if len(parts) == 1:
body = ''
metadata = {}
else:
parser = HeaderParser()
try:
metadata = parser.parsestr(parts[-1])
except HeaderParseError:
metadata = {}
body = "\n\n".join(parts[1:])
else:
metadata = dict(metadata.items())
if metadata:
body = "\n\n".join(parts[1:-1])
else:
body = "\n\n".join(parts[1:])
return title, body, metadata
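# Illustrative example (not part of the original module):
# parse_docstring('Title\n\nBody text.\n\nversion: 1.0') returns
# ('Title', 'Body text.', {'version': '1.0'})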
def parse_rst(text, default_reference_context, thing_being_parsed=None):
"""
Convert the string from reST to an XHTML fragment.
"""
overrides = {
'doctitle_xform' : True,
        'initial_header_level' : 3,
"default_reference_context" : default_reference_context,
"link_base" : reverse('django-admindocs-docroot').rstrip('/')
}
if thing_being_parsed:
thing_being_parsed = force_bytes("<%s>" % thing_being_parsed)
parts = docutils.core.publish_parts(text, source_path=thing_being_parsed,
destination_path=None, writer_name='html',
settings_overrides=overrides)
return mark_safe(parts['fragment'])
#
# reST roles
#
ROLES = {
'model' : '%s/models/%s/',
'view' : '%s/views/%s/',
'template' : '%s/templates/%s/',
'filter' : '%s/filters/#%s',
'tag' : '%s/tags/#%s',
}
def create_reference_role(rolename, urlbase):
def _role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None: options = {}
if content is None: content = []
node = docutils.nodes.reference(rawtext, text, refuri=(urlbase % (inliner.document.settings.link_base, text.lower())), **options)
return [node], []
docutils.parsers.rst.roles.register_canonical_role(rolename, _role)
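# For instance, once a role is registered, :model:`auth.User` in a docstring
# renders as a link to <link_base>/models/auth.user/ (note the lowercasing).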
def default_reference_role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None: options = {}
if content is None: content = []
context = inliner.document.settings.default_reference_context
node = docutils.nodes.reference(rawtext, text, refuri=(ROLES[context] % (inliner.document.settings.link_base, text.lower())), **options)
return [node], []
if docutils_is_available:
docutils.parsers.rst.roles.register_canonical_role('cmsreference', default_reference_role)
docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE = 'cmsreference'
for name, urlbase in ROLES.items():
create_reference_role(name, urlbase)
|
mit
|
michaelld/gnuradio
|
gr-blocks/python/blocks/qa_multiply_conjugate.py
|
7
|
1914
|
#!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
class test_multiply_conjugate (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_000 (self):
src_data0 = (-2-2j, -1-1j, -2+2j, -1+1j,
2-2j, 1-1j, 2+2j, 1+1j,
0+0j)
src_data1 = (-3-3j, -4-4j, -3+3j, -4+4j,
3-3j, 4-4j, 3+3j, 4+4j,
0+0j)
exp_data = (12+0j, 8+0j, 12+0j, 8+0j,
12+0j, 8+0j, 12+0j, 8+0j,
0+0j)
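        # expected output is src0 * conj(src1); e.g. the first sample is
        # (-2-2j) * conj(-3-3j) = (-2-2j) * (-3+3j) = 12+0j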
src0 = blocks.vector_source_c(src_data0)
src1 = blocks.vector_source_c(src_data1)
op = blocks.multiply_conjugate_cc ()
dst = blocks.vector_sink_c ()
self.tb.connect(src0, (op,0))
self.tb.connect(src1, (op,1))
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data ()
self.assertEqual (exp_data, result_data)
if __name__ == '__main__':
gr_unittest.run(test_multiply_conjugate, "test_multiply_conjugate.xml")
|
gpl-3.0
|
DG-i/openshift-ansible
|
playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
|
35
|
5312
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Ansible module for modifying OpenShift configs during an upgrade"""
import os
import yaml
DOCUMENTATION = '''
---
module: openshift_upgrade_config
short_description: OpenShift Upgrade Config
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''
def modify_api_levels(level_list, remove, ensure, msg_prepend='',
msg_append=''):
""" modify_api_levels """
changed = False
changes = []
if not isinstance(remove, list):
remove = []
if not isinstance(ensure, list):
ensure = []
if not isinstance(level_list, list):
new_list = []
changed = True
changes.append("%s created missing %s" % (msg_prepend, msg_append))
else:
new_list = level_list
for level in remove:
if level in new_list:
new_list.remove(level)
changed = True
changes.append("%s removed %s %s" % (msg_prepend, level, msg_append))
for level in ensure:
if level not in new_list:
new_list.append(level)
changed = True
changes.append("%s added %s %s" % (msg_prepend, level, msg_append))
return {'new_list': new_list, 'changed': changed, 'changes': changes}
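# Illustrative example (not part of the original module):
# modify_api_levels(['v1beta3', 'v1'], ['v1beta3'], ['v1'], 'cfg:', 'from x')
# returns {'new_list': ['v1'], 'changed': True,
# 'changes': ['cfg: removed v1beta3 from x']}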
def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup):
"""Main upgrade method for 3.0 to 3.1."""
changes = []
# Facts do not get transferred to the hosts where custom modules run,
# need to make some assumptions here.
master_config = os.path.join(config_base, 'master/master-config.yaml')
master_cfg_file = open(master_config, 'r')
config = yaml.safe_load(master_cfg_file.read())
master_cfg_file.close()
# Remove unsupported api versions and ensure supported api versions from
# master config
unsupported_levels = ['v1beta1', 'v1beta2', 'v1beta3']
supported_levels = ['v1']
result = modify_api_levels(config.get('apiLevels'), unsupported_levels,
supported_levels, 'master-config.yaml:', 'from apiLevels')
if result['changed']:
config['apiLevels'] = result['new_list']
changes.append(result['changes'])
if 'kubernetesMasterConfig' in config and 'apiLevels' in config['kubernetesMasterConfig']:
config['kubernetesMasterConfig'].pop('apiLevels')
changes.append('master-config.yaml: removed kubernetesMasterConfig.apiLevels')
# Add masterCA to serviceAccountConfig
if 'serviceAccountConfig' in config and 'masterCA' not in config['serviceAccountConfig']:
config['serviceAccountConfig']['masterCA'] = config['oauthConfig'].get('masterCA', 'ca.crt')
# Add proxyClientInfo to master-config
if 'proxyClientInfo' not in config['kubernetesMasterConfig']:
config['kubernetesMasterConfig']['proxyClientInfo'] = {
'certFile': 'master.proxy-client.crt',
'keyFile': 'master.proxy-client.key'
}
changes.append("master-config.yaml: added proxyClientInfo")
if len(changes) > 0:
if backup:
# TODO: Check success:
ansible_module.backup_local(master_config)
# Write the modified config:
out_file = open(master_config, 'w')
out_file.write(yaml.safe_dump(config, default_flow_style=False))
out_file.close()
return changes
def upgrade_master(ansible_module, config_base, from_version, to_version, backup):
"""Upgrade entry point."""
if from_version == '3.0':
if to_version == '3.1':
return upgrade_master_3_0_to_3_1(ansible_module, config_base, backup)
def main():
""" main """
# disabling pylint errors for global-variable-undefined and invalid-name
# for 'global module' usage, since it is required to use ansible_facts
# pylint: disable=global-variable-undefined, invalid-name,
# redefined-outer-name
global module
module = AnsibleModule( # noqa: F405
argument_spec=dict(
config_base=dict(required=True),
from_version=dict(required=True, choices=['3.0']),
to_version=dict(required=True, choices=['3.1']),
role=dict(required=True, choices=['master']),
backup=dict(required=False, default=True, type='bool')
),
supports_check_mode=True,
)
from_version = module.params['from_version']
to_version = module.params['to_version']
role = module.params['role']
backup = module.params['backup']
config_base = module.params['config_base']
try:
changes = []
if role == 'master':
changes = upgrade_master(module, config_base, from_version,
to_version, backup)
changed = len(changes) > 0
return module.exit_json(changed=changed, changes=changes)
# ignore broad-except error to avoid stack trace to ansible user
# pylint: disable=broad-except
except Exception as e:
return module.fail_json(msg=str(e))
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-position
# import module snippets
from ansible.module_utils.basic import * # noqa: E402,F403
if __name__ == '__main__':
main()
|
apache-2.0
|
380wmda999/sphinx2.2.11-string-4G
|
api/sphinxapi.py
|
1
|
35093
|
#
# $Id$
#
# Python version of Sphinx searchd client (Python API)
#
# Copyright (c) 2006, Mike Osadnik
# Copyright (c) 2006-2016, Andrew Aksyonoff
# Copyright (c) 2008-2016, Sphinx Technologies Inc
# All rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License. You should
# have received a copy of the LGPL license along with this program; if you
# did not, you can find it at http://www.gnu.org/
#
# WARNING!!!
#
# As of 2015, we strongly recommend to use either SphinxQL or REST APIs
# rather than the native SphinxAPI.
#
# While both the native SphinxAPI protocol and the existing APIs will
# continue to exist, and perhaps should not even break (too much), exposing
# all the new features via multiple different native API implementations
# is too much of a support complication for us.
#
# That said, you're welcome to overtake the maintenance of any given
# official API, and remove this warning ;)
#
import sys
import select
import socket
import re
from struct import *
# known searchd commands
SEARCHD_COMMAND_SEARCH = 0
SEARCHD_COMMAND_EXCERPT = 1
SEARCHD_COMMAND_UPDATE = 2
SEARCHD_COMMAND_KEYWORDS = 3
SEARCHD_COMMAND_PERSIST = 4
SEARCHD_COMMAND_STATUS = 5
SEARCHD_COMMAND_FLUSHATTRS = 7
# current client-side command implementation versions
VER_COMMAND_SEARCH = 0x11E
VER_COMMAND_EXCERPT = 0x104
VER_COMMAND_UPDATE = 0x103
VER_COMMAND_KEYWORDS = 0x100
VER_COMMAND_STATUS = 0x101
VER_COMMAND_FLUSHATTRS = 0x100
# known searchd status codes
SEARCHD_OK = 0
SEARCHD_ERROR = 1
SEARCHD_RETRY = 2
SEARCHD_WARNING = 3
# known match modes
SPH_MATCH_ALL = 0
SPH_MATCH_ANY = 1
SPH_MATCH_PHRASE = 2
SPH_MATCH_BOOLEAN = 3
SPH_MATCH_EXTENDED = 4
SPH_MATCH_FULLSCAN = 5
SPH_MATCH_EXTENDED2 = 6
# known ranking modes (extended2 mode only)
SPH_RANK_PROXIMITY_BM25 = 0 # default mode, phrase proximity major factor and BM25 minor one
SPH_RANK_BM25 = 1 # statistical mode, BM25 ranking only (faster but worse quality)
SPH_RANK_NONE = 2 # no ranking, all matches get a weight of 1
SPH_RANK_WORDCOUNT = 3 # simple word-count weighting, rank is a weighted sum of per-field keyword occurence counts
SPH_RANK_PROXIMITY = 4 # raw phrase proximity value only
SPH_RANK_MATCHANY = 5 # rank as computed by the legacy SPH_MATCH_ANY mode
SPH_RANK_FIELDMASK = 6 # rank is a 32-bit mask of matched fields
SPH_RANK_SPH04 = 7 # proximity+BM25, boosts matches at field start/end
SPH_RANK_EXPR = 8 # custom ranking expression (see SetRankingMode)
SPH_RANK_TOTAL = 9
# known sort modes
SPH_SORT_RELEVANCE = 0
SPH_SORT_ATTR_DESC = 1
SPH_SORT_ATTR_ASC = 2
SPH_SORT_TIME_SEGMENTS = 3
SPH_SORT_EXTENDED = 4
SPH_SORT_EXPR = 5
# known filter types
SPH_FILTER_VALUES = 0
SPH_FILTER_RANGE = 1
SPH_FILTER_FLOATRANGE = 2
SPH_FILTER_STRING = 3
# known attribute types
SPH_ATTR_NONE = 0
SPH_ATTR_INTEGER = 1
SPH_ATTR_TIMESTAMP = 2
SPH_ATTR_ORDINAL = 3
SPH_ATTR_BOOL = 4
SPH_ATTR_FLOAT = 5
SPH_ATTR_BIGINT = 6
SPH_ATTR_STRING = 7
SPH_ATTR_FACTORS = 1001
SPH_ATTR_MULTI = 0X40000001L
SPH_ATTR_MULTI64 = 0X40000002L
SPH_ATTR_TYPES = (SPH_ATTR_NONE,
SPH_ATTR_INTEGER,
SPH_ATTR_TIMESTAMP,
SPH_ATTR_ORDINAL,
SPH_ATTR_BOOL,
SPH_ATTR_FLOAT,
SPH_ATTR_BIGINT,
SPH_ATTR_STRING,
SPH_ATTR_MULTI,
SPH_ATTR_MULTI64)
# known grouping functions
SPH_GROUPBY_DAY = 0
SPH_GROUPBY_WEEK = 1
SPH_GROUPBY_MONTH = 2
SPH_GROUPBY_YEAR = 3
SPH_GROUPBY_ATTR = 4
SPH_GROUPBY_ATTRPAIR = 5
class SphinxClient:
def __init__ (self):
"""
Create a new client object, and fill defaults.
"""
self._host = 'localhost' # searchd host (default is "localhost")
self._port = 9312 # searchd port (default is 9312)
self._path = None # searchd unix-domain socket path
self._socket = None
self._offset = 0 # how much records to seek from result-set start (default is 0)
self._limit = 20 # how much records to return from result-set starting at offset (default is 20)
self._mode = SPH_MATCH_EXTENDED2 # query matching mode (default is SPH_MATCH_EXTENDED2)
self._weights = [] # per-field weights (default is 1 for all fields)
self._sort = SPH_SORT_RELEVANCE # match sorting mode (default is SPH_SORT_RELEVANCE)
        self._sortby = '' # attribute to sort by (default is "")
self._min_id = 0 # min ID to match (default is 0)
self._max_id = 0 # max ID to match (default is UINT_MAX)
self._filters = [] # search filters
self._groupby = '' # group-by attribute name
self._groupfunc = SPH_GROUPBY_DAY # group-by function (to pre-process group-by attribute value with)
self._groupsort = '@group desc' # group-by sorting clause (to sort groups in result set with)
self._groupdistinct = '' # group-by count-distinct attribute
self._maxmatches = 1000 # max matches to retrieve
self._cutoff = 0 # cutoff to stop searching at
self._retrycount = 0 # distributed retry count
self._retrydelay = 0 # distributed retry delay
self._anchor = {} # geographical anchor point
self._indexweights = {} # per-index weights
self._ranker = SPH_RANK_PROXIMITY_BM25 # ranking mode
self._rankexpr = '' # ranking expression for SPH_RANK_EXPR
self._maxquerytime = 0 # max query time, milliseconds (default is 0, do not limit)
self._timeout = 1.0 # connection timeout
self._fieldweights = {} # per-field-name weights
self._overrides = {} # per-query attribute values overrides
self._select = '*' # select-list (attributes or expressions, with optional aliases)
self._query_flags = SetBit ( 0, 6, True ) # default idf=tfidf_normalized
self._predictedtime = 0 # per-query max_predicted_time
self._outerorderby = '' # outer match sort by
self._outeroffset = 0 # outer offset
self._outerlimit = 0 # outer limit
self._hasouter = False # sub-select enabled
self._error = '' # last error message
self._warning = '' # last warning message
self._reqs = [] # requests array for multi-query
def __del__ (self):
if self._socket:
self._socket.close()
def GetLastError (self):
"""
Get last error message (string).
"""
return self._error
def GetLastWarning (self):
"""
Get last warning message (string).
"""
return self._warning
def SetServer (self, host, port = None):
"""
Set searchd server host and port.
"""
assert(isinstance(host, str))
if host.startswith('/'):
self._path = host
return
elif host.startswith('unix://'):
self._path = host[7:]
return
self._host = host
if isinstance(port, int):
assert(port>0 and port<65536)
self._port = port
self._path = None
def SetConnectTimeout ( self, timeout ):
"""
Set connection timeout ( float second )
"""
assert (isinstance(timeout, float))
        # a timeout of 0 would make the connection non-blocking, which is not
        # what we want, so clip the timeout to a reasonable minimum
self._timeout = max ( 0.001, timeout )
def _Connect (self):
"""
INTERNAL METHOD, DO NOT CALL. Connects to searchd server.
"""
if self._socket:
# we have a socket, but is it still alive?
sr, sw, _ = select.select ( [self._socket], [self._socket], [], 0 )
# this is how alive socket should look
if len(sr)==0 and len(sw)==1:
return self._socket
# oops, looks like it was closed, lets reopen
self._socket.close()
self._socket = None
        sock = None
        try:
if self._path:
af = socket.AF_UNIX
addr = self._path
desc = self._path
else:
af = socket.AF_INET
addr = ( self._host, self._port )
desc = '%s;%s' % addr
sock = socket.socket ( af, socket.SOCK_STREAM )
sock.settimeout ( self._timeout )
sock.connect ( addr )
except socket.error, msg:
if sock:
sock.close()
self._error = 'connection to %s failed (%s)' % ( desc, msg )
return
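        # handshake: searchd first sends its protocol version as a big-endian
        # uint32; after checking it we reply with our own version (1)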
        v = unpack('>L', sock.recv(4))[0]
        if v<1:
sock.close()
self._error = 'expected searchd protocol version, got %s' % v
return
# all ok, send my version
sock.send(pack('>L', 1))
return sock
def _GetResponse (self, sock, client_ver):
"""
INTERNAL METHOD, DO NOT CALL. Gets and checks response packet from searchd server.
"""
(status, ver, length) = unpack('>2HL', sock.recv(8))
response = ''
left = length
while left>0:
chunk = sock.recv(left)
if chunk:
response += chunk
left -= len(chunk)
else:
break
if not self._socket:
sock.close()
# check response
read = len(response)
if not response or read!=length:
if length:
self._error = 'failed to read searchd response (status=%s, ver=%s, len=%s, read=%s)' \
% (status, ver, length, read)
else:
self._error = 'received zero-sized searchd response'
return None
# check status
if status==SEARCHD_WARNING:
wend = 4 + unpack ( '>L', response[0:4] )[0]
self._warning = response[4:wend]
return response[wend:]
if status==SEARCHD_ERROR:
self._error = 'searchd error: '+response[4:]
return None
if status==SEARCHD_RETRY:
self._error = 'temporary searchd error: '+response[4:]
return None
if status!=SEARCHD_OK:
self._error = 'unknown status code %d' % status
return None
# check version
if ver<client_ver:
self._warning = 'searchd command v.%d.%d older than client\'s v.%d.%d, some options might not work' \
% (ver>>8, ver&0xff, client_ver>>8, client_ver&0xff)
return response
def _Send ( self, sock, req ):
"""
INTERNAL METHOD, DO NOT CALL. send request to searchd server.
"""
total = 0
while True:
sent = sock.send ( req[total:] )
if sent<=0:
break
total = total + sent
return total
def SetLimits (self, offset, limit, maxmatches=0, cutoff=0):
"""
Set offset and count into result set, and optionally set max-matches and cutoff limits.
"""
assert ( type(offset) in [int,long] and 0<=offset<16777216 )
assert ( type(limit) in [int,long] and 0<limit<16777216 )
assert(maxmatches>=0)
self._offset = offset
self._limit = limit
if maxmatches>0:
self._maxmatches = maxmatches
if cutoff>=0:
self._cutoff = cutoff
def SetMaxQueryTime (self, maxquerytime):
"""
Set maximum query time, in milliseconds, per-index. 0 means 'do not limit'.
"""
assert(isinstance(maxquerytime,int) and maxquerytime>0)
self._maxquerytime = maxquerytime
def SetMatchMode (self, mode):
"""
Set matching mode.
"""
print >> sys.stderr, 'DEPRECATED: Do not call this method or, even better, use SphinxQL instead of an API'
assert(mode in [SPH_MATCH_ALL, SPH_MATCH_ANY, SPH_MATCH_PHRASE, SPH_MATCH_BOOLEAN, SPH_MATCH_EXTENDED, SPH_MATCH_FULLSCAN, SPH_MATCH_EXTENDED2])
self._mode = mode
def SetRankingMode ( self, ranker, rankexpr='' ):
"""
Set ranking mode.
"""
assert(ranker>=0 and ranker<SPH_RANK_TOTAL)
self._ranker = ranker
self._rankexpr = rankexpr
def SetSortMode ( self, mode, clause='' ):
"""
Set sorting mode.
"""
assert ( mode in [SPH_SORT_RELEVANCE, SPH_SORT_ATTR_DESC, SPH_SORT_ATTR_ASC, SPH_SORT_TIME_SEGMENTS, SPH_SORT_EXTENDED, SPH_SORT_EXPR] )
assert ( isinstance ( clause, str ) )
self._sort = mode
self._sortby = clause
def SetFieldWeights (self, weights):
"""
Bind per-field weights by name; expects (name,field_weight) dictionary as argument.
"""
assert(isinstance(weights,dict))
for key,val in weights.items():
assert(isinstance(key,str))
AssertUInt32 ( val )
self._fieldweights = weights
def SetIndexWeights (self, weights):
"""
Bind per-index weights by name; expects (name,index_weight) dictionary as argument.
"""
assert(isinstance(weights,dict))
for key,val in weights.items():
assert(isinstance(key,str))
AssertUInt32(val)
self._indexweights = weights
def SetIDRange (self, minid, maxid):
"""
Set IDs range to match.
        Only match records whose document ID is between minid and maxid (inclusive).
"""
assert(isinstance(minid, (int, long)))
assert(isinstance(maxid, (int, long)))
assert(minid<=maxid)
self._min_id = minid
self._max_id = maxid
def SetFilter ( self, attribute, values, exclude=0 ):
"""
Set values set filter.
Only match records where 'attribute' value is in given 'values' set.
"""
assert(isinstance(attribute, str))
assert iter(values)
for value in values:
AssertInt32 ( value )
self._filters.append ( { 'type':SPH_FILTER_VALUES, 'attr':attribute, 'exclude':exclude, 'values':values } )
def SetFilterString ( self, attribute, value, exclude=0 ):
"""
Set string filter.
Only match records where 'attribute' value is equal to given 'value'.
"""
assert(isinstance(attribute, str))
assert(isinstance(value, str))
print ( "attr='%s' val='%s' " % ( attribute, value ) )
self._filters.append ( { 'type':SPH_FILTER_STRING, 'attr':attribute, 'exclude':exclude, 'value':value } )
def SetFilterRange (self, attribute, min_, max_, exclude=0 ):
"""
Set range filter.
Only match records if 'attribute' value is between 'min_' and 'max_' (inclusive).
"""
assert(isinstance(attribute, str))
AssertInt32(min_)
AssertInt32(max_)
assert(min_<=max_)
self._filters.append ( { 'type':SPH_FILTER_RANGE, 'attr':attribute, 'exclude':exclude, 'min':min_, 'max':max_ } )
def SetFilterFloatRange (self, attribute, min_, max_, exclude=0 ):
assert(isinstance(attribute,str))
assert(isinstance(min_,float))
assert(isinstance(max_,float))
assert(min_ <= max_)
self._filters.append ( {'type':SPH_FILTER_FLOATRANGE, 'attr':attribute, 'exclude':exclude, 'min':min_, 'max':max_} )
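# A hedged sketch of the filter family above (attribute names are
# hypothetical):
#
#   cl.SetFilter('group_id', [1, 2, 3])             # value-set filter
#   cl.SetFilterRange('date_added', 0, 1234567890)  # inclusive int range
#   cl.SetFilterFloatRange('price', 0.0, 99.9)      # inclusive float range
#   cl.SetFilterString('tag', 'news', exclude=1)    # drop exact string match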
def SetGeoAnchor (self, attrlat, attrlong, latitude, longitude):
assert(isinstance(attrlat,str))
assert(isinstance(attrlong,str))
assert(isinstance(latitude,float))
assert(isinstance(longitude,float))
self._anchor['attrlat'] = attrlat
self._anchor['attrlong'] = attrlong
self._anchor['lat'] = latitude
self._anchor['long'] = longitude
def SetGroupBy ( self, attribute, func, groupsort='@group desc' ):
"""
Set grouping attribute and function.
"""
assert(isinstance(attribute, str))
assert(func in [SPH_GROUPBY_DAY, SPH_GROUPBY_WEEK, SPH_GROUPBY_MONTH, SPH_GROUPBY_YEAR, SPH_GROUPBY_ATTR, SPH_GROUPBY_ATTRPAIR] )
assert(isinstance(groupsort, str))
self._groupby = attribute
self._groupfunc = func
self._groupsort = groupsort
def SetGroupDistinct (self, attribute):
assert(isinstance(attribute,str))
self._groupdistinct = attribute
def SetRetries (self, count, delay=0):
assert(isinstance(count,int) and count>=0)
assert(isinstance(delay,int) and delay>=0)
self._retrycount = count
self._retrydelay = delay
def SetOverride (self, name, type, values):
print >> sys.stderr, 'DEPRECATED: Do not call this method. Use SphinxQL REMAP() function instead.'
assert(isinstance(name, str))
assert(type in SPH_ATTR_TYPES)
assert(isinstance(values, dict))
self._overrides[name] = {'name': name, 'type': type, 'values': values}
def SetSelect (self, select):
assert(isinstance(select, str))
self._select = select
def SetQueryFlag ( self, name, value ):
known_names = [ "reverse_scan", "sort_method", "max_predicted_time", "boolean_simplify", "idf", "global_idf" ]
flags = { "reverse_scan":[0, 1], "sort_method":["pq", "kbuffer"],"max_predicted_time":[0], "boolean_simplify":[True, False], "idf":["normalized", "plain", "tfidf_normalized", "tfidf_unnormalized"], "global_idf":[True, False] }
assert ( name in known_names )
assert ( value in flags[name] or ( name=="max_predicted_time" and isinstance(value, (int, long)) and value>=0))
if name=="reverse_scan":
self._query_flags = SetBit ( self._query_flags, 0, value==1 )
if name=="sort_method":
self._query_flags = SetBit ( self._query_flags, 1, value=="kbuffer" )
if name=="max_predicted_time":
self._query_flags = SetBit ( self._query_flags, 2, value>0 )
self._predictedtime = int(value)
if name=="boolean_simplify":
self._query_flags= SetBit ( self._query_flags, 3, value )
if name=="idf" and ( value=="plain" or value=="normalized" ) :
self._query_flags = SetBit ( self._query_flags, 4, value=="plain" )
if name=="global_idf":
self._query_flags= SetBit ( self._query_flags, 5, value )
if name=="idf" and ( value=="tfidf_normalized" or value=="tfidf_unnormalized" ) :
self._query_flags = SetBit ( self._query_flags, 6, value=="tfidf_normalized" )
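# Flag usage sketch, derived from the known_names/flags tables above
# (values outside those tables fail the asserts):
#
#   cl.SetQueryFlag('sort_method', 'kbuffer')     # sets bit 1
#   cl.SetQueryFlag('max_predicted_time', 500)    # ms; sets bit 2 and _predictedtime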
def SetOuterSelect ( self, orderby, offset, limit ):
assert(isinstance(orderby, str))
assert(isinstance(offset, (int, long)))
assert(isinstance(limit, (int, long)))
assert ( offset>=0 )
assert ( limit>0 )
self._outerorderby = orderby
self._outeroffset = offset
self._outerlimit = limit
self._hasouter = True
def ResetOverrides (self):
self._overrides = {}
def ResetFilters (self):
"""
Clear all filters (for multi-queries).
"""
self._filters = []
self._anchor = {}
def ResetGroupBy (self):
"""
Clear groupby settings (for multi-queries).
"""
self._groupby = ''
self._groupfunc = SPH_GROUPBY_DAY
self._groupsort = '@group desc'
self._groupdistinct = ''
def ResetQueryFlag (self):
self._query_flags = SetBit ( 0, 6, True ) # default idf=tfidf_normalized
self._predictedtime = 0
def ResetOuterSelect (self):
self._outerorderby = ''
self._outeroffset = 0
self._outerlimit = 0
self._hasouter = False
def Query (self, query, index='*', comment=''):
"""
Connect to searchd server and run given search query.
Returns None on failure; result set hash on success (see documentation for details).
"""
assert(len(self._reqs)==0)
self.AddQuery(query,index,comment)
results = self.RunQueries()
self._reqs = [] # we won't re-run erroneous batch
if not results or len(results)==0:
return None
self._error = results[0]['error']
self._warning = results[0]['warning']
if results[0]['status'] == SEARCHD_ERROR:
return None
return results[0]
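# Result-shape sketch: on success Query() returns the first hash built by
# RunQueries(), so these keys mirror the parsing code there (index name
# and query text are hypothetical):
#
#   res = cl.Query('hello world', 'myindex')
#   if res:
#       print res['total_found'], res['time']
#       for m in res['matches']:
#           print m['id'], m['weight'], m['attrs']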
def AddQuery (self, query, index='*', comment=''):
"""
Add query to batch.
"""
# build request
req = []
req.append(pack('>5L', self._query_flags, self._offset, self._limit, self._mode, self._ranker))
if self._ranker==SPH_RANK_EXPR:
req.append(pack('>L', len(self._rankexpr)))
req.append(self._rankexpr)
req.append(pack('>L', self._sort))
req.append(pack('>L', len(self._sortby)))
req.append(self._sortby)
if isinstance(query,unicode):
query = query.encode('utf-8')
assert(isinstance(query,str))
req.append(pack('>L', len(query)))
req.append(query)
req.append(pack('>L', len(self._weights)))
for w in self._weights:
req.append(pack('>L', w))
assert(isinstance(index,str))
req.append(pack('>L', len(index)))
req.append(index)
req.append(pack('>L',1)) # id64 range marker
req.append(pack('>Q', self._min_id))
req.append(pack('>Q', self._max_id))
# filters
req.append ( pack ( '>L', len(self._filters) ) )
for f in self._filters:
req.append ( pack ( '>L', len(f['attr'])) + f['attr'])
filtertype = f['type']
req.append ( pack ( '>L', filtertype))
if filtertype == SPH_FILTER_VALUES:
req.append ( pack ('>L', len(f['values'])))
for val in f['values']:
req.append ( pack ('>q', val))
elif filtertype == SPH_FILTER_RANGE:
req.append ( pack ('>2q', f['min'], f['max']))
elif filtertype == SPH_FILTER_FLOATRANGE:
req.append ( pack ('>2f', f['min'], f['max']))
elif filtertype == SPH_FILTER_STRING:
req.append ( pack ( '>L', len(f['value']) ) )
req.append ( f['value'] )
req.append ( pack ( '>L', f['exclude'] ) )
# group-by, max-matches, group-sort
req.append ( pack ( '>2L', self._groupfunc, len(self._groupby) ) )
req.append ( self._groupby )
req.append ( pack ( '>2L', self._maxmatches, len(self._groupsort) ) )
req.append ( self._groupsort )
req.append ( pack ( '>LLL', self._cutoff, self._retrycount, self._retrydelay))
req.append ( pack ( '>L', len(self._groupdistinct)))
req.append ( self._groupdistinct)
# anchor point
if len(self._anchor) == 0:
req.append ( pack ('>L', 0))
else:
attrlat, attrlong = self._anchor['attrlat'], self._anchor['attrlong']
latitude, longitude = self._anchor['lat'], self._anchor['long']
req.append ( pack ('>L', 1))
req.append ( pack ('>L', len(attrlat)) + attrlat)
req.append ( pack ('>L', len(attrlong)) + attrlong)
req.append ( pack ('>f', latitude) + pack ('>f', longitude))
# per-index weights
req.append ( pack ('>L',len(self._indexweights)))
for indx,weight in self._indexweights.items():
req.append ( pack ('>L',len(indx)) + indx + pack ('>L',weight))
# max query time
req.append ( pack ('>L', self._maxquerytime) )
# per-field weights
req.append ( pack ('>L',len(self._fieldweights) ) )
for field,weight in self._fieldweights.items():
req.append ( pack ('>L',len(field)) + field + pack ('>L',weight) )
# comment
comment = str(comment)
req.append ( pack('>L',len(comment)) + comment )
# attribute overrides
req.append ( pack('>L', len(self._overrides)) )
for v in self._overrides.values():
req.extend ( ( pack('>L', len(v['name'])), v['name'] ) )
req.append ( pack('>LL', v['type'], len(v['values'])) )
for id, value in v['values'].iteritems():
req.append ( pack('>Q', id) )
if v['type'] == SPH_ATTR_FLOAT:
req.append ( pack('>f', value) )
elif v['type'] == SPH_ATTR_BIGINT:
req.append ( pack('>q', value) )
else:
req.append ( pack('>l', value) )
# select-list
req.append ( pack('>L', len(self._select)) )
req.append ( self._select )
if self._predictedtime>0:
req.append ( pack('>L', self._predictedtime ) )
# outer
req.append ( pack('>L',len(self._outerorderby)) + self._outerorderby )
req.append ( pack ( '>2L', self._outeroffset, self._outerlimit ) )
if self._hasouter:
req.append ( pack('>L', 1) )
else:
req.append ( pack('>L', 0) )
# store request into the batch; it is actually sent by RunQueries()
req = ''.join(req)
self._reqs.append(req)
return
def RunQueries (self):
"""
Run queries batch.
Returns None on network IO failure; or an array of result set hashes on success.
"""
if len(self._reqs)==0:
self._error = 'no queries defined, issue AddQuery() first'
return None
sock = self._Connect()
if not sock:
return None
req = ''.join(self._reqs)
length = len(req)+8
req = pack('>HHLLL', SEARCHD_COMMAND_SEARCH, VER_COMMAND_SEARCH, length, 0, len(self._reqs))+req
self._Send ( sock, req )
response = self._GetResponse(sock, VER_COMMAND_SEARCH)
if not response:
return None
nreqs = len(self._reqs)
# parse response
max_ = len(response)
p = 0
results = []
for i in range(0,nreqs,1):
result = {}
results.append(result)
result['error'] = ''
result['warning'] = ''
status = unpack('>L', response[p:p+4])[0]
p += 4
result['status'] = status
if status != SEARCHD_OK:
length = unpack('>L', response[p:p+4])[0]
p += 4
message = response[p:p+length]
p += length
if status == SEARCHD_WARNING:
result['warning'] = message
else:
result['error'] = message
continue
# read schema
fields = []
attrs = []
nfields = unpack('>L', response[p:p+4])[0]
p += 4
while nfields>0 and p<max_:
nfields -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
fields.append(response[p:p+length])
p += length
result['fields'] = fields
nattrs = unpack('>L', response[p:p+4])[0]
p += 4
while nattrs>0 and p<max_:
nattrs -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
attr = response[p:p+length]
p += length
type_ = unpack('>L', response[p:p+4])[0]
p += 4
attrs.append([attr,type_])
result['attrs'] = attrs
# read match count
count = unpack('>L', response[p:p+4])[0]
p += 4
id64 = unpack('>L', response[p:p+4])[0]
p += 4
# read matches
result['matches'] = []
while count>0 and p<max_:
count -= 1
if id64:
doc, weight = unpack('>QL', response[p:p+12])
p += 12
else:
doc, weight = unpack('>2L', response[p:p+8])
p += 8
match = { 'id':doc, 'weight':weight, 'attrs':{} }
for i in range(len(attrs)):
if attrs[i][1] == SPH_ATTR_FLOAT:
match['attrs'][attrs[i][0]] = unpack('>f', response[p:p+4])[0]
elif attrs[i][1] == SPH_ATTR_BIGINT:
match['attrs'][attrs[i][0]] = unpack('>q', response[p:p+8])[0]
p += 4
elif attrs[i][1] == SPH_ATTR_STRING:
slen = unpack('>L', response[p:p+4])[0]
p += 4
match['attrs'][attrs[i][0]] = ''
if slen>0:
match['attrs'][attrs[i][0]] = response[p:p+slen]
p += slen-4
elif attrs[i][1] == SPH_ATTR_FACTORS:
slen = unpack('>L', response[p:p+4])[0]
p += 4
match['attrs'][attrs[i][0]] = ''
if slen>0:
match['attrs'][attrs[i][0]] = response[p:p+slen-4]
p += slen-4
p -= 4
elif attrs[i][1] == SPH_ATTR_MULTI:
match['attrs'][attrs[i][0]] = []
nvals = unpack('>L', response[p:p+4])[0]
p += 4
for n in range(0,nvals,1):
match['attrs'][attrs[i][0]].append(unpack('>L', response[p:p+4])[0])
p += 4
p -= 4
elif attrs[i][1] == SPH_ATTR_MULTI64:
match['attrs'][attrs[i][0]] = []
nvals = unpack('>L', response[p:p+4])[0]
nvals = nvals/2
p += 4
for n in range(0,nvals,1):
match['attrs'][attrs[i][0]].append(unpack('>q', response[p:p+8])[0])
p += 8
p -= 4
else:
match['attrs'][attrs[i][0]] = unpack('>L', response[p:p+4])[0]
p += 4
result['matches'].append ( match )
result['total'], result['total_found'], result['time'], words = unpack('>4L', response[p:p+16])
result['time'] = '%.3f' % (result['time']/1000.0)
p += 16
result['words'] = []
while words>0:
words -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
word = response[p:p+length]
p += length
docs, hits = unpack('>2L', response[p:p+8])
p += 8
result['words'].append({'word':word, 'docs':docs, 'hits':hits})
self._reqs = []
return results
def BuildExcerpts (self, docs, index, words, opts=None):
"""
Connect to searchd server and generate excerpts from given documents.
"""
if not opts:
opts = {}
if isinstance(words,unicode):
words = words.encode('utf-8')
assert(isinstance(docs, list))
assert(isinstance(index, str))
assert(isinstance(words, str))
assert(isinstance(opts, dict))
sock = self._Connect()
if not sock:
return None
# fixup options
opts.setdefault('before_match', '<b>')
opts.setdefault('after_match', '</b>')
opts.setdefault('chunk_separator', ' ... ')
opts.setdefault('html_strip_mode', 'index')
opts.setdefault('limit', 256)
opts.setdefault('limit_passages', 0)
opts.setdefault('limit_words', 0)
opts.setdefault('around', 5)
opts.setdefault('start_passage_id', 1)
opts.setdefault('passage_boundary', 'none')
# build request
# v.1.0 req
flags = 1 # (remove spaces)
if opts.get('exact_phrase'): flags |= 2
if opts.get('single_passage'): flags |= 4
if opts.get('use_boundaries'): flags |= 8
if opts.get('weight_order'): flags |= 16
if opts.get('query_mode'): flags |= 32
if opts.get('force_all_words'): flags |= 64
if opts.get('load_files'): flags |= 128
if opts.get('allow_empty'): flags |= 256
if opts.get('emit_zones'): flags |= 512
if opts.get('load_files_scattered'): flags |= 1024
# mode=0, flags
req = [pack('>2L', 0, flags)]
# req index
req.append(pack('>L', len(index)))
req.append(index)
# req words
req.append(pack('>L', len(words)))
req.append(words)
# options
req.append(pack('>L', len(opts['before_match'])))
req.append(opts['before_match'])
req.append(pack('>L', len(opts['after_match'])))
req.append(opts['after_match'])
req.append(pack('>L', len(opts['chunk_separator'])))
req.append(opts['chunk_separator'])
req.append(pack('>L', int(opts['limit'])))
req.append(pack('>L', int(opts['around'])))
req.append(pack('>L', int(opts['limit_passages'])))
req.append(pack('>L', int(opts['limit_words'])))
req.append(pack('>L', int(opts['start_passage_id'])))
req.append(pack('>L', len(opts['html_strip_mode'])))
req.append((opts['html_strip_mode']))
req.append(pack('>L', len(opts['passage_boundary'])))
req.append((opts['passage_boundary']))
# documents
req.append(pack('>L', len(docs)))
for doc in docs:
if isinstance(doc,unicode):
doc = doc.encode('utf-8')
assert(isinstance(doc, str))
req.append(pack('>L', len(doc)))
req.append(doc)
req = ''.join(req)
# send query, get response
length = len(req)
# add header
req = pack('>2HL', SEARCHD_COMMAND_EXCERPT, VER_COMMAND_EXCERPT, length)+req
self._Send ( sock, req )
response = self._GetResponse(sock, VER_COMMAND_EXCERPT )
if not response:
return []
# parse response
pos = 0
res = []
rlen = len(response)
for i in range(len(docs)):
length = unpack('>L', response[pos:pos+4])[0]
pos += 4
if pos+length > rlen:
self._error = 'incomplete reply'
return []
res.append(response[pos:pos+length])
pos += length
return res
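# Excerpt-building sketch (index and option values hypothetical; defaults
# for unspecified options are filled in by the fixup block above):
#
#   docs = ['this is my test text to be highlighted']
#   opts = {'before_match': '<b>', 'after_match': '</b>', 'limit': 60}
#   excerpts = cl.BuildExcerpts(docs, 'myindex', 'test text', opts)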
def UpdateAttributes ( self, index, attrs, values, mva=False, ignorenonexistent=False ):
"""
Update given attribute values on given documents in given indexes.
Returns amount of updated documents (0 or more) on success, or -1 on failure.
'attrs' must be a list of strings.
'values' must be a dict with int key (document ID) and list of int values (new attribute values).
Optional boolean parameter 'mva' indicates that MVA attributes are being updated.
In this case the 'values' must be a dict with int key (document ID) and list of lists of int values
(new MVA attribute values).
Optional boolean parameter 'ignorenonexistent' indicates that the update should silently ignore any warnings about
trying to update a column which does not exist in the current index schema.
Example:
res = cl.UpdateAttributes ( 'test1', [ 'group_id', 'date_added' ], { 2:[123,1000000000], 4:[456,1234567890] } )
"""
assert ( isinstance ( index, str ) )
assert ( isinstance ( attrs, list ) )
assert ( isinstance ( values, dict ) )
for attr in attrs:
assert ( isinstance ( attr, str ) )
for docid, entry in values.items():
AssertUInt32(docid)
assert ( isinstance ( entry, list ) )
assert ( len(attrs)==len(entry) )
for val in entry:
if mva:
assert ( isinstance ( val, list ) )
for vals in val:
AssertInt32(vals)
else:
AssertInt32(val)
# build request
req = [ pack('>L',len(index)), index ]
req.append ( pack('>L',len(attrs)) )
ignore_absent = 0
if ignorenonexistent: ignore_absent = 1
req.append ( pack('>L', ignore_absent ) )
mva_attr = 0
if mva: mva_attr = 1
for attr in attrs:
req.append ( pack('>L',len(attr)) + attr )
req.append ( pack('>L', mva_attr ) )
req.append ( pack('>L',len(values)) )
for docid, entry in values.items():
req.append ( pack('>Q',docid) )
for val in entry:
val_len = val
if mva: val_len = len ( val )
req.append ( pack('>L',val_len ) )
if mva:
for vals in val:
req.append ( pack ('>L',vals) )
# connect, send query, get response
sock = self._Connect()
if not sock:
return None
req = ''.join(req)
length = len(req)
req = pack ( '>2HL', SEARCHD_COMMAND_UPDATE, VER_COMMAND_UPDATE, length ) + req
self._Send ( sock, req )
response = self._GetResponse ( sock, VER_COMMAND_UPDATE )
if not response:
return -1
# parse response
updated = unpack ( '>L', response[0:4] )[0]
return updated
def BuildKeywords ( self, query, index, hits ):
"""
Connect to searchd server, and generate keywords list for a given query.
Returns None on failure, or a list of keywords on success.
"""
assert ( isinstance ( query, str ) )
assert ( isinstance ( index, str ) )
assert ( isinstance ( hits, int ) )
# build request
req = [ pack ( '>L', len(query) ) + query ]
req.append ( pack ( '>L', len(index) ) + index )
req.append ( pack ( '>L', hits ) )
# connect, send query, get response
sock = self._Connect()
if not sock:
return None
req = ''.join(req)
length = len(req)
req = pack ( '>2HL', SEARCHD_COMMAND_KEYWORDS, VER_COMMAND_KEYWORDS, length ) + req
self._Send ( sock, req )
response = self._GetResponse ( sock, VER_COMMAND_KEYWORDS )
if not response:
return None
# parse response
res = []
nwords = unpack ( '>L', response[0:4] )[0]
p = 4
max_ = len(response)
while nwords>0 and p<max_:
nwords -= 1
length = unpack ( '>L', response[p:p+4] )[0]
p += 4
tokenized = response[p:p+length]
p += length
length = unpack ( '>L', response[p:p+4] )[0]
p += 4
normalized = response[p:p+length]
p += length
entry = { 'tokenized':tokenized, 'normalized':normalized }
if hits:
entry['docs'], entry['hits'] = unpack ( '>2L', response[p:p+8] )
p += 8
res.append ( entry )
if nwords>0 or p>max_:
self._error = 'incomplete reply'
return None
return res
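# Keyword-extraction sketch; the entry keys mirror the parsing loop above:
#
#   for kw in cl.BuildKeywords('running shoes', 'myindex', 1) or []:
#       print kw['tokenized'], kw['normalized'], kw.get('docs'), kw.get('hits')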
def Status ( self, session=False ):
"""
Get searchd status variables as a list of [name, value] pairs.
"""
# connect, send query, get response
sock = self._Connect()
if not sock:
return None
sess = 1
if session:
sess = 0
req = pack ( '>2HLL', SEARCHD_COMMAND_STATUS, VER_COMMAND_STATUS, 4, sess )
self._Send ( sock, req )
response = self._GetResponse ( sock, VER_COMMAND_STATUS )
if not response:
return None
# parse response
res = []
p = 8
max_ = len(response)
while p<max_:
length = unpack ( '>L', response[p:p+4] )[0]
k = response[p+4:p+length+4]
p += 4+length
length = unpack ( '>L', response[p:p+4] )[0]
v = response[p+4:p+length+4]
p += 4+length
res += [[k, v]]
return res
### persistent connections
def Open(self):
if self._socket:
self._error = 'already connected'
return None
server = self._Connect()
if not server:
return None
# command, command version = 0, body length = 4, body = 1
request = pack ( '>hhII', SEARCHD_COMMAND_PERSIST, 0, 4, 1 )
self._Send ( server, request )
self._socket = server
return True
def Close(self):
if not self._socket:
self._error = 'not connected'
return
self._socket.close()
self._socket = None
def EscapeString(self, string):
return re.sub(r"([=\(\)|\-!@~\"&/\\\^\$\=\<])", r"\\\1", string)
def FlushAttributes(self):
sock = self._Connect()
if not sock:
return -1
request = pack ( '>hhI', SEARCHD_COMMAND_FLUSHATTRS, VER_COMMAND_FLUSHATTRS, 0 ) # cmd, ver, bodylen
self._Send ( sock, request )
response = self._GetResponse ( sock, VER_COMMAND_FLUSHATTRS )
if not response or len(response)!=4:
self._error = 'unexpected response length'
return -1
tag = unpack ( '>L', response[0:4] )[0]
return tag
def AssertInt32 ( value ):
assert(isinstance(value, (int, long)))
assert(value>=-2**32-1 and value<=2**32-1)
def AssertUInt32 ( value ):
assert(isinstance(value, (int, long)))
assert(value>=0 and value<=2**32-1)
def SetBit ( flag, bit, on ):
if on:
flag += ( 1<<bit )
else:
reset = 255 ^ ( 1<<bit )
flag = flag & reset
return flag
#
# $Id$
#
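# --------------------------------------------------------------------------
# A minimal end-to-end usage sketch, NOT part of the original client. It
# assumes a searchd instance on localhost:9312, a hypothetical index named
# 'myindex', and the SetServer()/GetLastError() helpers defined earlier in
# this module.
# --------------------------------------------------------------------------
if __name__ == '__main__':
	cl = SphinxClient()
	cl.SetServer('localhost', 9312)
	cl.SetLimits(0, 10)
	res = cl.Query('hello world', 'myindex')
	if res is None:
		print 'query failed: %s' % cl.GetLastError()
	else:
		print '%d of %d matches in %s sec' % (len(res['matches']), res['total_found'], res['time'])
		for match in res['matches']:
			print 'id=%d weight=%d' % (match['id'], match['weight'])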
|
gpl-2.0
|
masamichi/bite-project
|
deps/mrtaskman/server/mapreduce/lib/graphy/common.py
|
77
|
14637
|
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code common to all chart types."""
import copy
import warnings
from mapreduce.lib.graphy import formatters
from mapreduce.lib.graphy import util
class Marker(object):
"""Represents an abstract marker, without position. You can attach these to
a DataSeries.
Object attributes:
shape: One of the shape codes (Marker.arrow, Marker.diamond, etc.)
color: color (as hex string, f.ex. '0000ff' for blue)
size: size of the marker
"""
# TODO: Write an example using markers.
# Shapes:
arrow = 'a'
cross = 'c'
diamond = 'd'
circle = 'o'
square = 's'
x = 'x'
# Note: The Google Chart API also knows some other markers ('v', 'V', 'r',
# 'b') that I think would fit better into a grid API.
# TODO: Make such a grid API
def __init__(self, shape, color, size):
"""Construct a Marker. See class docstring for details on args."""
# TODO: Shapes 'r' and 'b' would be much easier to use if they had a
# special-purpose API (instead of trying to fake it with markers)
self.shape = shape
self.color = color
self.size = size
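# Example using markers (addresses the TODO above; the series object is a
# hypothetical DataSeries, defined later in this module):
#
#   marker = Marker(Marker.diamond, '0000ff', 10)
#   series.markers.append((3, marker))   # diamond on the 4th data point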
class _BasicStyle(object):
"""Basic style object. Used internally."""
def __init__(self, color):
self.color = color
class DataSeries(object):
"""Represents one data series for a chart (both data & presentation
information).
Object attributes:
points: List of numbers representing y-values (x-values are not specified
because the Google Chart API expects even x-value spacing).
label: String with the series' label in the legend. The chart will only
have a legend if at least one series has a label. If some series
do not have a label then they will have an empty description in
the legend. This is currently a limitation in the Google Chart
API.
style: A chart-type-specific style object. (LineStyle for LineChart,
BarsStyle for BarChart, etc.)
markers: List of (x, m) tuples where m is a Marker object and x is the
x-axis value to place it at.
The "fill" markers ('r' & 'b') are a little weird because they
aren't a point on a line. For these, you can fake it by
passing slightly weird data (I'd like a better API for them at
some point):
For 'b', you attach the marker to the starting series, and set x
to the index of the ending line. Size is ignored, I think.
For 'r', you can attach to any line, specify the starting
y-value for x and the ending y-value for size. Y, in this case,
is between 0.0 (bottom) and 1.0 (top).
color: DEPRECATED
"""
# TODO: Should we require the points list to be non-empty ?
# TODO: Do markers belong here? They are really only used for LineCharts
def __init__(self, points, label=None, style=None, markers=None, color=None):
"""Construct a DataSeries. See class docstring for details on args."""
if label is not None and util._IsColor(label):
warnings.warn('Your code may be broken! Label is a hex triplet. Maybe '
'it is a color? The old argument order (color & style '
'before label) is deprecated.', DeprecationWarning,
stacklevel=2)
if color is not None:
warnings.warn('Passing color is deprecated. Pass a style object '
'instead.', DeprecationWarning, stacklevel=2)
# Attempt to fix it for them. If they also passed a style, honor it.
if style is None:
style = _BasicStyle(color)
if style is not None and isinstance(style, basestring):
warnings.warn('Your code is broken! Style is a string, not an object. '
'Maybe you are passing a color? Passing color is '
'deprecated; pass a style object instead.',
DeprecationWarning, stacklevel=2)
if style is None:
style = _BasicStyle(None)
self.data = points
self.style = style
self.markers = markers or []
self.label = label
def _GetColor(self):
warnings.warn('DataSeries.color is deprecated, use '
'DataSeries.style.color instead.', DeprecationWarning,
stacklevel=2)
return self.style.color
def _SetColor(self, color):
warnings.warn('DataSeries.color is deprecated, use '
'DataSeries.style.color instead.', DeprecationWarning,
stacklevel=2)
self.style.color = color
color = property(_GetColor, _SetColor)
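# DataSeries construction sketch. The concrete style class depends on the
# chart type; _BasicStyle is the internal stand-in used above:
#
#   series = DataSeries([1, 4, 9, 16], label='squares',
#                       style=_BasicStyle('0000ff'))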
class AxisPosition(object):
"""Represents all the available axis positions.
The available positions are as follows:
AxisPosition.TOP
AxisPosition.BOTTOM
AxisPosition.LEFT
AxisPosition.RIGHT
"""
LEFT = 'y'
RIGHT = 'r'
BOTTOM = 'x'
TOP = 't'
class Axis(object):
"""Represents one axis.
Object settings:
min: Minimum value for the bottom or left end of the axis
max: Max value.
labels: List of labels to show along the axis.
label_positions: List of positions to show the labels at. Uses the scale
set by min & max, so if you set min = 0 and max = 10, then
label positions [0, 5, 10] would be at the bottom,
middle, and top of the axis, respectively.
grid_spacing: Amount of space between gridlines (in min/max scale).
A value of 0 disables gridlines.
label_gridlines: If True, draw a line extending from each label
on the axis all the way across the chart.
"""
def __init__(self, axis_min=None, axis_max=None):
"""Construct a new Axis.
Args:
axis_min: smallest value on the axis
axis_max: largest value on the axis
"""
self.min = axis_min
self.max = axis_max
self.labels = []
self.label_positions = []
self.grid_spacing = 0
self.label_gridlines = False
# TODO: Add other chart types. Order of preference:
# - scatter plots
# - us/world maps
class BaseChart(object):
"""Base chart object with standard behavior for all other charts.
Object attributes:
data: List of DataSeries objects. Chart subtypes provide convenience
functions (like AddLine, AddBars, AddSegment) to add more series
later.
left/right/bottom/top: Axis objects for the 4 different axes.
formatters: A list of callables which will be used to format this chart for
display. TODO: Need better documentation for how these
work.
auto_scale, auto_color, auto_legend:
These aliases let users access the default formatters without poking
around in self.formatters. If the user removes them from
self.formatters then they will no longer be enabled, even though they'll
still be accessible through the aliases. Similarly, re-assigning the
aliases has no effect on the contents of self.formatters.
display: This variable is reserved for backends to populate with a display
object. The intention is that the display object would be used to
render this chart. The details of what gets put here depends on
the specific backend you are using.
"""
# Canonical ordering of position keys
_POSITION_CODES = 'yrxt'
# TODO: Add more inline args to __init__ (esp. labels).
# TODO: Support multiple series in the constructor, if given.
def __init__(self):
"""Construct a BaseChart object."""
self.data = []
self._axes = {}
for code in self._POSITION_CODES:
self._axes[code] = [Axis()]
self._legend_labels = [] # AutoLegend fills this out
self._show_legend = False # AutoLegend fills this out
# Aliases for default formatters
self.auto_color = formatters.AutoColor()
self.auto_scale = formatters.AutoScale()
self.auto_legend = formatters.AutoLegend
self.formatters = [self.auto_color, self.auto_scale, self.auto_legend]
# display is used to convert the chart into something displayable (like a
# url or img tag).
self.display = None
def AddFormatter(self, formatter):
"""Add a new formatter to the chart (convenience method)."""
self.formatters.append(formatter)
def AddSeries(self, points, color=None, style=None, markers=None,
label=None):
"""DEPRECATED
Add a new series of data to the chart; return the DataSeries object."""
warnings.warn('AddSeries is deprecated. Instead, call AddLine for '
'LineCharts, AddBars for BarCharts, AddSegment for '
'PieCharts ', DeprecationWarning, stacklevel=2)
series = DataSeries(points, color=color, style=style, markers=markers,
label=label)
self.data.append(series)
return series
def GetDependentAxes(self):
"""Return any dependent axes ('left' and 'right' by default for LineCharts,
although bar charts would use 'bottom' and 'top').
"""
return self._axes[AxisPosition.LEFT] + self._axes[AxisPosition.RIGHT]
def GetIndependentAxes(self):
"""Return any independent axes (normally top & bottom, although horizontal
bar charts use left & right by default).
"""
return self._axes[AxisPosition.TOP] + self._axes[AxisPosition.BOTTOM]
def GetDependentAxis(self):
"""Return this chart's main dependent axis (often 'left', but
horizontal bar-charts use 'bottom').
"""
return self.left
def GetIndependentAxis(self):
"""Return this chart's main independent axis (often 'bottom', but
horizontal bar-charts use 'left').
"""
return self.bottom
def _Clone(self):
"""Make a deep copy this chart.
Formatters & display will be missing from the copy, due to limitations in
deepcopy.
"""
orig_values = {}
# Things which deepcopy will likely choke on if it tries to copy.
uncopyables = ['formatters', 'display', 'auto_color', 'auto_scale',
'auto_legend']
for name in uncopyables:
orig_values[name] = getattr(self, name)
setattr(self, name, None)
clone = copy.deepcopy(self)
for name, orig_value in orig_values.iteritems():
setattr(self, name, orig_value)
return clone
def GetFormattedChart(self):
"""Get a copy of the chart with formatting applied."""
# Formatters need to mutate the chart, but we don't want to change it out
# from under the user. So, we work on a copy of the chart.
scratchpad = self._Clone()
for formatter in self.formatters:
formatter(scratchpad)
return scratchpad
def GetMinMaxValues(self):
"""Get the largest & smallest values in this chart, returned as
(min_value, max_value). Takes into account complications like stacked data
series.
For example, with non-stacked series, a chart with [1, 2, 3] and [4, 5, 6]
would return (1, 6). If the same chart was stacking the data series, it
would return (5, 9).
"""
MinPoint = lambda data: min(x for x in data if x is not None)
MaxPoint = lambda data: max(x for x in data if x is not None)
mins = [MinPoint(series.data) for series in self.data if series.data]
maxes = [MaxPoint(series.data) for series in self.data if series.data]
if not mins or not maxes:
return None, None # No data, just bail.
return min(mins), max(maxes)
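# Worked example for the docstring above (non-stacked case):
#
#   chart.data = [DataSeries([1, 2, 3]), DataSeries([4, 5, 6])]
#   chart.GetMinMaxValues()   # -> (1, 6)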
def AddAxis(self, position, axis):
"""Add an axis to this chart in the given position.
Args:
position: an AxisPosition object specifying the axis's position
axis: The axis to add, an Axis object
Returns:
the value of the axis parameter
"""
self._axes.setdefault(position, []).append(axis)
return axis
def GetAxis(self, position):
"""Get or create the first available axis in the given position.
This is a helper method for the left, right, top, and bottom properties.
If the specified axis does not exist, it will be created.
Args:
position: the position to search for
Returns:
The first axis in the given position
"""
# Not using setdefault here just in case, to avoid calling the Axis()
# constructor needlessly
if position in self._axes:
return self._axes[position][0]
else:
axis = Axis()
self._axes[position] = [axis]
return axis
def SetAxis(self, position, axis):
"""Set the first axis in the given position to the given value.
This is a helper method for the left, right, top, and bottom properties.
Args:
position: an AxisPosition object specifying the axis's position
axis: The axis to set, an Axis object
Returns:
the value of the axis parameter
"""
self._axes.setdefault(position, [None])[0] = axis
return axis
def _GetAxes(self):
"""Return a generator of (position_code, Axis) tuples for this chart's axes.
The axes will be sorted by position using the canonical ordering sequence,
_POSITION_CODES.
"""
for code in self._POSITION_CODES:
for axis in self._axes.get(code, []):
yield (code, axis)
def _GetBottom(self):
return self.GetAxis(AxisPosition.BOTTOM)
def _SetBottom(self, value):
self.SetAxis(AxisPosition.BOTTOM, value)
bottom = property(_GetBottom, _SetBottom,
doc="""Get or set the bottom axis""")
def _GetLeft(self):
return self.GetAxis(AxisPosition.LEFT)
def _SetLeft(self, value):
self.SetAxis(AxisPosition.LEFT, value)
left = property(_GetLeft, _SetLeft,
doc="""Get or set the left axis""")
def _GetRight(self):
return self.GetAxis(AxisPosition.RIGHT)
def _SetRight(self, value):
self.SetAxis(AxisPosition.RIGHT, value)
right = property(_GetRight, _SetRight,
doc="""Get or set the right axis""")
def _GetTop(self):
return self.GetAxis(AxisPosition.TOP)
def _SetTop(self, value):
self.SetAxis(AxisPosition.TOP, value)
top = property(_GetTop, _SetTop,
doc="""Get or set the top axis""")
|
apache-2.0
|
andrewmoses/ssquiz
|
flask/lib/python2.7/site-packages/openid/store/interface.py
|
180
|
7083
|
"""
This module contains the definition of the C{L{OpenIDStore}}
interface.
"""
class OpenIDStore(object):
"""
This is the interface for the store objects the OpenID library
uses. It is a single class that provides all of the persistence
mechanisms that the OpenID library needs, for both servers and
consumers.
@change: Version 2.0 removed the C{storeNonce}, C{getAuthKey}, and C{isDumb}
methods, and changed the behavior of the C{L{useNonce}} method
to support one-way nonces. It added C{L{cleanupNonces}},
C{L{cleanupAssociations}}, and C{L{cleanup}}.
@sort: storeAssociation, getAssociation, removeAssociation,
useNonce
"""
def storeAssociation(self, server_url, association):
"""
This method puts a C{L{Association
<openid.association.Association>}} object into storage,
retrievable by server URL and handle.
@param server_url: The URL of the identity server that this
association is with. Because of the way the server
portion of the library uses this interface, don't assume
there are any limitations on the character set of the
input string. In particular, expect to see unescaped
non-url-safe characters in the server_url field.
@type server_url: C{str}
@param association: The C{L{Association
<openid.association.Association>}} to store.
@type association: C{L{Association
<openid.association.Association>}}
@return: C{None}
@rtype: C{NoneType}
"""
raise NotImplementedError
def getAssociation(self, server_url, handle=None):
"""
This method returns an C{L{Association
<openid.association.Association>}} object from storage that
matches the server URL and, if specified, handle. It returns
C{None} if no such association is found or if the matching
association is expired.
If no handle is specified, the store may return any
association which matches the server URL. If multiple
associations are valid, the recommended return value for this
method is the one most recently issued.
This method is allowed (and encouraged) to garbage collect
expired associations when found. This method must not return
expired associations.
@param server_url: The URL of the identity server to get the
association for. Because of the way the server portion of
the library uses this interface, don't assume there are
any limitations on the character set of the input string.
In particular, expect to see unescaped non-url-safe
characters in the server_url field.
@type server_url: C{str}
@param handle: This optional parameter is the handle of the
specific association to get. If no specific handle is
provided, any valid association matching the server URL is
returned.
@type handle: C{str} or C{NoneType}
@return: The C{L{Association
<openid.association.Association>}} for the given identity
server.
@rtype: C{L{Association <openid.association.Association>}} or
C{NoneType}
"""
raise NotImplementedError
def removeAssociation(self, server_url, handle):
"""
This method removes the matching association if it's found,
and returns whether the association was removed or not.
@param server_url: The URL of the identity server the
association to remove belongs to. Because of the way the
server portion of the library uses this interface, don't
assume there are any limitations on the character set of
the input string. In particular, expect to see unescaped
non-url-safe characters in the server_url field.
@type server_url: C{str}
@param handle: This is the handle of the association to
remove. If there isn't an association found that matches
both the given URL and handle, then there was no matching
handle found.
@type handle: C{str}
@return: Returns whether or not the given association existed.
@rtype: C{bool} or C{int}
"""
raise NotImplementedError
def useNonce(self, server_url, timestamp, salt):
"""Called when using a nonce.
This method should return C{True} if the nonce has not been
used before, and store it for a while to make sure nobody
tries to use the same value again. If the nonce has already
been used or the timestamp is not current, return C{False}.
You may use L{openid.store.nonce.SKEW} for your timestamp window.
@change: In earlier versions, round-trip nonces were used and
a nonce was only valid if it had been previously stored
with C{storeNonce}. Version 2.0 uses one-way nonces,
requiring a different implementation here that does not
depend on a C{storeNonce} call. (C{storeNonce} is no
longer part of the interface.)
@param server_url: The URL of the server from which the nonce
originated.
@type server_url: C{str}
@param timestamp: The time that the nonce was created (to the
nearest second), in seconds since January 1 1970 UTC.
@type timestamp: C{int}
@param salt: A random string that makes two nonces from the
same server issued during the same second unique.
@type salt: str
@return: Whether or not the nonce was valid.
@rtype: C{bool}
"""
raise NotImplementedError
def cleanupNonces(self):
"""Remove expired nonces from the store.
Discards any nonce from storage that is old enough that its
timestamp would not pass L{useNonce}.
This method is not called in the normal operation of the
library. It provides a way for store admins to keep
their storage from filling up with expired data.
@return: the number of nonces expired.
@returntype: int
"""
raise NotImplementedError
def cleanupAssociations(self):
"""Remove expired associations from the store.
This method is not called in the normal operation of the
library. It provides a way for store admins to keep
their storage from filling up with expired data.
@return: the number of associations expired.
@returntype: int
"""
raise NotImplementedError
def cleanup(self):
"""Shortcut for C{L{cleanupNonces}()}, C{L{cleanupAssociations}()}.
This method is not called in the normal operation of the
library. It provides a way for store admins to keep
their storage from filling up with expired data.
"""
return self.cleanupNonces(), self.cleanupAssociations()
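# --------------------------------------------------------------------------
# A minimal in-memory implementation sketch, not part of the interface
# module. It is only suitable for single-process testing: expiry checking
# of associations and the cleanup* methods are omitted for brevity, and
# NONCE_SKEW is a local stand-in for openid.store.nonce.SKEW.
# --------------------------------------------------------------------------
import time

class _MemoryStore(OpenIDStore):
    """Toy store keeping associations and nonces in dictionaries."""
    NONCE_SKEW = 60 * 60  # seconds; hypothetical allowed clock skew

    def __init__(self):
        self.assocs = {}  # (server_url, handle) -> Association
        self.nonces = {}  # (server_url, timestamp, salt) -> True

    def storeAssociation(self, server_url, association):
        # Association objects are assumed to expose .handle and .issued.
        self.assocs[(server_url, association.handle)] = association

    def getAssociation(self, server_url, handle=None):
        if handle is not None:
            return self.assocs.get((server_url, handle))
        # No handle given: return the most recently issued match.
        matches = [a for (url, h), a in self.assocs.items()
                   if url == server_url]
        if not matches:
            return None
        return max(matches, key=lambda a: a.issued)

    def removeAssociation(self, server_url, handle):
        return self.assocs.pop((server_url, handle), None) is not None

    def useNonce(self, server_url, timestamp, salt):
        # One-way nonce check: reject stale timestamps and replays.
        if abs(timestamp - time.time()) > self.NONCE_SKEW:
            return False
        key = (server_url, timestamp, salt)
        if key in self.nonces:
            return False
        self.nonces[key] = True
        return True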
|
bsd-3-clause
|
ZhangXinNan/tensorflow
|
tensorflow/contrib/distributions/python/ops/conditional_transformed_distribution.py
|
39
|
11189
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Conditional Transformed Distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import conditional_distribution
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.distributions import util as distribution_util
# pylint: disable=protected-access
_concat_vectors = transformed_distribution._concat_vectors
# pylint: enable=protected-access
__all__ = [
"ConditionalTransformedDistribution",
]
_condition_kwargs_dict = {
"bijector_kwargs": ("Python dictionary of arg names/values "
"forwarded to the bijector."),
"distribution_kwargs": ("Python dictionary of arg names/values "
"forwarded to the distribution."),
}
class ConditionalTransformedDistribution(
conditional_distribution.ConditionalDistribution,
transformed_distribution.TransformedDistribution):
"""A TransformedDistribution that allows intrinsic conditioning."""
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _sample_n(self, n, seed=None,
bijector_kwargs=None,
distribution_kwargs=None):
sample_shape = _concat_vectors(
distribution_util.pick_vector(self._needs_rotation, self._empty, [n]),
self._override_batch_shape,
self._override_event_shape,
distribution_util.pick_vector(self._needs_rotation, [n], self._empty))
distribution_kwargs = distribution_kwargs or {}
x = self.distribution.sample(sample_shape=sample_shape,
seed=seed,
**distribution_kwargs)
x = self._maybe_rotate_dims(x)
# We'll apply the bijector in the `_call_sample_n` function.
return x
def _call_sample_n(self, sample_shape, seed, name,
bijector_kwargs=None,
distribution_kwargs=None):
# We override `_call_sample_n` rather than `_sample_n` so we can ensure that
# the result of `self.bijector.forward` is not modified (and thus caching
# works).
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
# First, generate samples. We will possibly generate extra samples in the
# event that we need to reinterpret the samples as part of the
# event_shape.
x = self._sample_n(n, seed, bijector_kwargs, distribution_kwargs)
# Next, we reshape `x` into its final form. We do this prior to the call
# to the bijector to ensure that the bijector caching works.
batch_event_shape = array_ops.shape(x)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
x = array_ops.reshape(x, final_shape)
# Finally, we apply the bijector's forward transformation. For caching to
# work, it is imperative that this is the last modification to the
# returned result.
bijector_kwargs = bijector_kwargs or {}
y = self.bijector.forward(x, **bijector_kwargs)
y = self._set_sample_static_shape(y, sample_shape)
return y
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _log_prob(self, y, bijector_kwargs=None, distribution_kwargs=None):
# For caching to work, it is imperative that the bijector is the first to
# modify the input.
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
event_ndims = self._maybe_get_static_event_ndims()
ildj = self.bijector.inverse_log_det_jacobian(
y, event_ndims=event_ndims, **bijector_kwargs)
if self.bijector._is_injective: # pylint: disable=protected-access
return self._finish_log_prob_for_one_fiber(y, x, ildj,
distribution_kwargs)
lp_on_fibers = [
self._finish_log_prob_for_one_fiber(y, x_i, ildj_i, distribution_kwargs)
for x_i, ildj_i in zip(x, ildj)]
return math_ops.reduce_logsumexp(array_ops.stack(lp_on_fibers), axis=0)
def _finish_log_prob_for_one_fiber(self, y, x, ildj, distribution_kwargs):
"""Finish computation of log_prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
log_prob = self.distribution.log_prob(x, **distribution_kwargs)
if self._is_maybe_event_override:
log_prob = math_ops.reduce_sum(log_prob, self._reduce_event_indices)
return math_ops.cast(ildj, log_prob.dtype) + log_prob
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _prob(self, y, bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
event_ndims = self._maybe_get_static_event_ndims()
ildj = self.bijector.inverse_log_det_jacobian(
y, event_ndims=event_ndims, **bijector_kwargs)
if self.bijector._is_injective: # pylint: disable=protected-access
return self._finish_prob_for_one_fiber(y, x, ildj, distribution_kwargs)
prob_on_fibers = [
self._finish_prob_for_one_fiber(y, x_i, ildj_i, distribution_kwargs)
for x_i, ildj_i in zip(x, ildj)]
return sum(prob_on_fibers)
def _finish_prob_for_one_fiber(self, y, x, ildj, distribution_kwargs):
"""Finish computation of prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
prob = self.distribution.prob(x, **distribution_kwargs)
if self._is_maybe_event_override:
prob = math_ops.reduce_prod(prob, self._reduce_event_indices)
return math_ops.exp(math_ops.cast(ildj, prob.dtype)) * prob
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _log_cdf(self, y, bijector_kwargs=None, distribution_kwargs=None):
if self._is_maybe_event_override:
raise NotImplementedError("log_cdf is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("log_cdf is not implemented when "
"bijector is not injective.")
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.log_cdf(x, **distribution_kwargs)
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _cdf(self, y, bijector_kwargs=None, distribution_kwargs=None):
if self._is_maybe_event_override:
raise NotImplementedError("cdf is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("cdf is not implemented when "
"bijector is not injective.")
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.cdf(x, **distribution_kwargs)
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _log_survival_function(self, y,
bijector_kwargs=None, distribution_kwargs=None):
if self._is_maybe_event_override:
raise NotImplementedError("log_survival_function is not implemented when "
"overriding event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("log_survival_function is not implemented when "
"bijector is not injective.")
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.log_survival_function(x, **distribution_kwargs)
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _survival_function(self, y,
bijector_kwargs=None, distribution_kwargs=None):
if self._is_maybe_event_override:
raise NotImplementedError("survival_function is not implemented when "
"overriding event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("survival_function is not implemented when "
"bijector is not injective.")
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.survival_function(x, **distribution_kwargs)
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _quantile(self, value, bijector_kwargs=None, distribution_kwargs=None):
if self._is_maybe_event_override:
raise NotImplementedError("quantile is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("quantile is not implemented when "
"bijector is not injective.")
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
# x_q is the "qth quantile" of X iff q = P[X <= x_q]. Now, since X =
# g^{-1}(Y), q = P[X <= x_q] = P[g^{-1}(Y) <= x_q] = P[Y <= g(x_q)],
# implies the qth quantile of Y is g(x_q).
inv_cdf = self.distribution.quantile(value, **distribution_kwargs)
return self.bijector.forward(inv_cdf, **bijector_kwargs)
def _maybe_get_static_event_ndims(self):
if self.event_shape.ndims is not None:
return self.event_shape.ndims
event_ndims = array_ops.size(self.event_shape_tensor())
event_ndims_ = distribution_util.maybe_get_static_value(event_ndims)
if event_ndims_ is not None:
return event_ndims_
return event_ndims
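# Usage sketch, as comments only (the exact public signatures live in the
# parent ConditionalDistribution class, so treat this as an assumption):
#
#   ctd = ConditionalTransformedDistribution(
#       distribution=my_base_distribution,
#       bijector=my_conditional_bijector)
#   y = ctd.sample(10, bijector_kwargs={"conditioning": z})
#   lp = ctd.log_prob(y, bijector_kwargs={"conditioning": z})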
|
apache-2.0
|
nurnbeck/project-2-CMPUT-291
|
ret_DATA.py
|
1
|
1814
|
import os
import time
import bsddb3 as bsddb
'''
Retrieve records with a given data value
- Assumes that the database is closed before calling ret_DATA();
- Writes (appends) the result to the file 'answers'.
For now I assume that indexfile = btree; further tests are necessary.
Tested under DB_SIZE = 10
'''
DB_FILE = "/tmp/yishuo_db/sample_db"
SDB_FILE = "/tmp/yishuo_db/IndexFile"
def ret_DATA(filetype):
if filetype == 'btree':
db = bsddb.btopen(DB_FILE, 'r')
elif filetype == 'hash':
db = bsddb.hashopen(DB_FILE, 'r')
elif filetype == 'indexfile':
db = bsddb.btopen(DB_FILE, 'r')
indexfile = bsddb.hashopen(SDB_FILE, 'r')
else:
print("Unknown type, function terminated\n")
return
# open answers for writing, appending to the end of the file
answers = open('answers', 'a')
result_lst = []
data = input("Enter the data you want to search > ")
data = data.encode(encoding = 'UTF-8')
start_time = time.time()
for key in db.keys():
if db[key] == data:
result_lst.append(key.decode(encoding = 'UTF-8'))
end_time = time.time()
elapse_time = (end_time - start_time) * 1000000
print("Result:")
data = data.decode(encoding = 'UTF-8')
if result_lst:
for key in result_lst:
print('Key:', key)
answers.write(key)
answers.write('\n')
print('Data:', data)
answers.write(data)
answers.write('\n')
answers.write('\n')
else:
print("Data not found")
print()
print(len(result_lst), "record(s) received")
print("Used", elapse_time, "micro seconds")
print()
answers.close()
db.close()
if filetype == 'indexfile':
indexfile.close()
return
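# Minimal driver sketch (not in the original file): run one interactive
# search against the btree database defined by DB_FILE above.
if __name__ == '__main__':
    ret_DATA('btree')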
|
mit
|
mspark93/VTK
|
Rendering/Core/Testing/Python/TestCameraInterpolator.py
|
20
|
3316
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.6, 0)
lut.SetSaturationRange(1.0, 0)
lut.SetValueRange(0.5, 1.0)
# Read the data: a height field results
demReader = vtk.vtkDEMReader()
demReader.SetFileName(VTK_DATA_ROOT + "/Data/SainteHelens.dem")
demReader.Update()
lo = demReader.GetOutput().GetScalarRange()[0]
hi = demReader.GetOutput().GetScalarRange()[1]
surface = vtk.vtkImageDataGeometryFilter()
surface.SetInputConnection(demReader.GetOutputPort())
warp = vtk.vtkWarpScalar()
warp.SetInputConnection(surface.GetOutputPort())
warp.SetScaleFactor(1)
warp.UseNormalOn()
warp.SetNormal(0, 0, 1)
warp.Update()
normals = vtk.vtkPolyDataNormals()
normals.SetInputData(warp.GetPolyDataOutput())
normals.SetFeatureAngle(60)
normals.SplittingOff()
demMapper = vtk.vtkPolyDataMapper()
demMapper.SetInputConnection(normals.GetOutputPort())
demMapper.SetScalarRange(lo, hi)
demMapper.SetLookupTable(lut)
demActor = vtk.vtkLODActor()
demActor.SetMapper(demMapper)
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(demActor)
ren1.SetBackground(0, 0, 0)
renWin.SetSize(300, 300)
ren1.SetBackground(0.1, 0.2, 0.4)
# render the image
#
renWin.Render()
view1 = vtk.vtkCamera()
view1.SetClippingRange(30972.2, 35983.7)
view1.SetFocalPoint(562835, 5.11498e+006, 2294.5)
view1.SetPosition(562835, 5.11498e+006, 35449.9)
view1.SetViewAngle(30)
view1.SetViewUp(0, 1, 0)
view2 = vtk.vtkCamera()
view2.SetClippingRange(9013.43, 13470.4)
view2.SetFocalPoint(562835, 5.11498e+006, 2294.5)
view2.SetPosition(562835, 5.11498e+006, 13269.4)
view2.SetViewAngle(30)
view2.SetViewUp(0, 1, 0)
view3 = vtk.vtkCamera()
view3.SetClippingRange(4081.2, 13866.4)
view3.SetFocalPoint(562853, 5.11586e+006, 2450.05)
view3.SetPosition(562853, 5.1144e+006, 10726.6)
view3.SetViewAngle(30)
view3.SetViewUp(0, 0.984808, 0.173648)
view4 = vtk.vtkCamera()
view4.SetClippingRange(14.0481, 14048.1)
view4.SetFocalPoint(562880, 5.11652e+006, 2733.15)
view4.SetPosition(562974, 5.11462e+006, 6419.98)
view4.SetViewAngle(30)
view4.SetViewUp(0.0047047, 0.888364, 0.459116)
view5 = vtk.vtkCamera()
view5.SetClippingRange(14.411, 14411)
view5.SetFocalPoint(562910, 5.11674e+006, 3027.15)
view5.SetPosition(562414, 5.11568e+006, 3419.87)
view5.SetViewAngle(30)
view5.SetViewUp(-0.0301976, 0.359864, 0.932516)
interpolator = vtk.vtkCameraInterpolator()
interpolator.SetInterpolationTypeToSpline()
interpolator.AddCamera(0, view1)
interpolator.AddCamera(5, view2)
interpolator.AddCamera(7.5, view3)
interpolator.AddCamera(9.0, view4)
interpolator.AddCamera(11.0, view5)
camera = vtk.vtkCamera()
ren1.SetActiveCamera(camera)
def animate():
numSteps = 500
tmin = interpolator.GetMinimumT()
tmax = interpolator.GetMaximumT()
i = 0
while i <= numSteps:
t = float(i) * (tmax - tmin) / float(numSteps)
interpolator.InterpolateCamera(t, camera)
renWin.Render()
i += 1
interpolator.InterpolateCamera(8.2, camera)
# animate()
#iren.Start()
|
bsd-3-clause
|
liberorbis/libernext
|
apps/frappe/frappe/website/context.py
|
17
|
2811
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.website.doctype.website_settings.website_settings import get_website_settings
from frappe.website.template import render_blocks
from frappe.website.router import get_route_info
from frappe.website.utils import can_cache
from frappe.website.permissions import get_access
def get_context(path):
context = None
cache_key = "page_context:{}".format(path)
def add_data_path(context):
if not context.data:
context.data = {}
context.data["path"] = path
# try from memcache
if can_cache():
context = frappe.cache().get_value(cache_key)
if not context:
context = get_route_info(path)
# permission may be required for rendering
if context.doc and context.doc.doctype=="Website Group":
context["access"] = get_access(context.doc, context.pathname)
else:
context["access"] = frappe._dict({"public_read":1, "public_write":1})
context = build_context(context)
add_data_path(context)
if can_cache(context.no_cache):
frappe.cache().set_value(cache_key, context)
else:
context["access"] = frappe._dict({"public_read":1, "public_write":1})
add_data_path(context)
context.update(context.data or {})
return context
def build_context(sitemap_options):
"""get_context method of doc or module is supposed to render content templates and push it into context"""
context = frappe._dict(sitemap_options)
context.update(get_website_settings())
# provide doc
if context.doc:
context.update(context.doc.as_dict())
if hasattr(context.doc, "get_context"):
ret = context.doc.get_context(context)
if ret:
context.update(ret)
for prop in ("no_cache", "no_sitemap"):
if prop not in context:
context[prop] = getattr(context.doc, prop, False)
elif context.controller:
module = frappe.get_module(context.controller)
if module:
if hasattr(module, "get_context"):
ret = module.get_context(context)
if ret:
context.update(ret)
if hasattr(module, "get_children"):
context.get_children = module.get_children
add_metatags(context)
if context.get("base_template_path") != context.get("template") and not context.get("rendered"):
context.data = render_blocks(context)
return context
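# A hedged sketch of the controller contract consumed above: the module named
# by context.controller only needs an optional get_context(context) and,
# optionally, get_children. Module path and keys below are hypothetical.
#
#   # my_app/templates/pages/about.py
#   def get_context(context):
#       return {"team": ["alice", "bob"], "no_cache": True}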
def add_metatags(context):
tags = context.get("metatags")
if tags:
if not "twitter:card" in tags:
tags["twitter:card"] = "summary"
if not "og:type" in tags:
tags["og:type"] = "article"
if tags.get("name"):
tags["og:title"] = tags["twitter:title"] = tags["name"]
if tags.get("description"):
tags["og:description"] = tags["twitter:description"] = tags["description"]
if tags.get("image"):
tags["og:image"] = tags["twitter:image:src"] = tags["image"]
|
gpl-2.0
|
TheTypoMaster/asuswrt
|
release/src/router/samba-3.5.8/source4/scripting/python/samba/idmap.py
|
23
|
2430
|
#!/usr/bin/python
# Unix SMB/CIFS implementation.
# Copyright (C) 2008 Kai Blin <kai@samba.org>
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Convenience functions for using the idmap database."""
__docformat__ = "restructuredText"
import samba
class IDmapDB(samba.Ldb):
"""The IDmap database."""
# Mappings for ID_TYPE_UID, ID_TYPE_GID and ID_TYPE_BOTH
TYPE_UID = 1
TYPE_GID = 2
TYPE_BOTH = 3
def __init__(self, url=None, lp=None, modules_dir=None, session_info=None,
credentials=None, flags=0, options=None):
"""Opens the IDMap Database
For parameter meanings see the super class (samba.Ldb)
"""
self.lp = lp
if url is None:
url = lp.get("idmap database")
super(IDmapDB, self).__init__(url=url, lp=lp, modules_dir=modules_dir,
session_info=session_info, credentials=credentials, flags=flags,
options=options)
def connect(self, url=None, flags=0, options=None):
super(IDmapDB, self).connect(url=self.lp.private_path(url), flags=flags,
options=options)
def setup_name_mapping(self, sid, type, unixid):
"""Setup a mapping between a sam name and a unix name.
:param sid: SID of the NT-side of the mapping.
:param unixname: Unix name to map to.
"""
type_string = ""
if type == self.TYPE_UID:
type_string = "ID_TYPE_UID"
elif type == self.TYPE_GID:
type_string = "ID_TYPE_GID"
elif type == self.TYPE_BOTH:
type_string = "ID_TYPE_BOTH"
else:
return
mod = """
dn: CN=%s
xidNumber: %s
objectSid: %s
objectClass: sidMap
type: %s
cn: %s
""" % (sid, unixid, sid, type_string, sid)
self.add(self.parse_ldif(mod).next()[1])
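# Hedged usage sketch (not part of this module): map a SID to a unix uid.
# Assumes a loadparm context with an "idmap database" configured; the SID
# and id values are illustrative.
#
#     from samba.param import LoadParm
#     lp = LoadParm()
#     lp.load_default()
#     db = IDmapDB(lp=lp)
#     db.setup_name_mapping("S-1-5-21-1-2-3-1000", IDmapDB.TYPE_UID, 3000000)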
|
gpl-2.0
|
crosswalk-project/chromium-crosswalk-efl
|
tools/perf/benchmarks/media.py
|
32
|
3214
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import media
import page_sets
from telemetry import benchmark
from telemetry.page import page_test
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
class _MSEMeasurement(page_test.PageTest):
def ValidateAndMeasurePage(self, page, tab, results):
media_metric = tab.EvaluateJavaScript('window.__testMetrics')
trace = media_metric['id'] if 'id' in media_metric else None
metrics = media_metric['metrics'] if 'metrics' in media_metric else {}
for m in metrics:
trace_name = '%s.%s' % (m, trace)
if isinstance(metrics[m], list):
results.AddValue(list_of_scalar_values.ListOfScalarValues(
results.current_page, trace_name, units='ms',
values=[float(v) for v in metrics[m]],
important=True))
else:
results.AddValue(scalar.ScalarValue(
results.current_page, trace_name, units='ms',
value=float(metrics[m]), important=True))
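# Illustrative shape of the JS-side object consumed above (values made up):
#
#     window.__testMetrics = {
#         'id': 'video_1',
#         'metrics': {'decode': 33.1, 'appendBuffer': [12.0, 15.5]},
#     }
#
# List-valued entries become ListOfScalarValues; scalars become ScalarValue,
# each reported under '<metric>.<id>' in milliseconds.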
@benchmark.Disabled('android')
class Media(benchmark.Benchmark):
"""Obtains media metrics for key user scenarios."""
test = media.Media
page_set = page_sets.ToughVideoCasesPageSet
@benchmark.Disabled
class MediaNetworkSimulation(benchmark.Benchmark):
"""Obtains media metrics under different network simulations."""
test = media.Media
page_set = page_sets.MediaCnsCasesPageSet
@benchmark.Enabled('android')
@benchmark.Disabled('l')
class MediaAndroid(benchmark.Benchmark):
"""Obtains media metrics for key user scenarios on Android."""
test = media.Media
tag = 'android'
page_set = page_sets.ToughVideoCasesPageSet
# Exclude is_4k and 50 fps media files (garden* & crowd*).
options = {'page_label_filter_exclude': 'is_4k,is_50fps'}
@benchmark.Enabled('chromeos')
class MediaChromeOS4kOnly(benchmark.Benchmark):
"""Benchmark for media performance on ChromeOS using only is_4k test content.
"""
test = media.Media
tag = 'chromeOS4kOnly'
page_set = page_sets.ToughVideoCasesPageSet
options = {
'page_label_filter': 'is_4k',
# Exclude is_50fps test files: crbug/331816
'page_label_filter_exclude': 'is_50fps'
}
@benchmark.Enabled('chromeos')
class MediaChromeOS(benchmark.Benchmark):
"""Benchmark for media performance on all ChromeOS platforms.
This benchmark does not run is_4k content; there's a separate benchmark for
that.
"""
test = media.Media
tag = 'chromeOS'
page_set = page_sets.ToughVideoCasesPageSet
# Exclude is_50fps test files: crbug/331816
options = {'page_label_filter_exclude': 'is_4k,is_50fps'}
class MediaSourceExtensions(benchmark.Benchmark):
"""Obtains media metrics for key media source extensions functions."""
test = _MSEMeasurement
page_set = page_sets.MseCasesPageSet
def CustomizeBrowserOptions(self, options):
# Needed to allow XHR requests to return stream objects.
options.AppendExtraBrowserArgs(
['--enable-experimental-web-platform-features',
'--disable-gesture-requirement-for-media-playback'])
|
bsd-3-clause
|
Godiyos/python-for-android
|
python3-alpha/python3-src/Lib/lib2to3/tests/test_parser.py
|
47
|
6147
|
"""Test suite for 2to3's parser and grammar files.
This is the place to add tests for changes to 2to3's grammar, such as those
merging the grammars for Python 2 and 3. In addition to specific tests for
parts of the grammar we've changed, we also make sure we can parse the
test_grammar.py files from both Python 2 and Python 3.
"""
from __future__ import with_statement
# Testing imports
from . import support
from .support import driver, test_dir
# Python imports
import os
# Local imports
from lib2to3.pgen2 import tokenize
from ..pgen2.parse import ParseError
class GrammarTest(support.TestCase):
def validate(self, code):
support.parse_string(code)
def invalid_syntax(self, code):
try:
self.validate(code)
except ParseError:
pass
else:
raise AssertionError("Syntax shouldn't have been valid")
class TestRaiseChanges(GrammarTest):
def test_2x_style_1(self):
self.validate("raise")
def test_2x_style_2(self):
self.validate("raise E, V")
def test_2x_style_3(self):
self.validate("raise E, V, T")
def test_2x_style_invalid_1(self):
self.invalid_syntax("raise E, V, T, Z")
def test_3x_style(self):
self.validate("raise E1 from E2")
def test_3x_style_invalid_1(self):
self.invalid_syntax("raise E, V from E1")
def test_3x_style_invalid_2(self):
self.invalid_syntax("raise E from E1, E2")
def test_3x_style_invalid_3(self):
self.invalid_syntax("raise from E1, E2")
def test_3x_style_invalid_4(self):
self.invalid_syntax("raise E from")
# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef
class TestFunctionAnnotations(GrammarTest):
def test_1(self):
self.validate("""def f(x) -> list: pass""")
def test_2(self):
self.validate("""def f(x:int): pass""")
def test_3(self):
self.validate("""def f(*x:str): pass""")
def test_4(self):
self.validate("""def f(**x:float): pass""")
def test_5(self):
self.validate("""def f(x, y:1+2): pass""")
def test_6(self):
self.validate("""def f(a, (b:1, c:2, d)): pass""")
def test_7(self):
self.validate("""def f(a, (b:1, c:2, d), e:3=4, f=5, *g:6): pass""")
def test_8(self):
s = """def f(a, (b:1, c:2, d), e:3=4, f=5,
*g:6, h:7, i=8, j:9=10, **k:11) -> 12: pass"""
self.validate(s)
class TestExcept(GrammarTest):
def test_new(self):
s = """
try:
x
except E as N:
y"""
self.validate(s)
def test_old(self):
s = """
try:
x
except E, N:
y"""
self.validate(s)
# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testAtoms
class TestSetLiteral(GrammarTest):
def test_1(self):
self.validate("""x = {'one'}""")
def test_2(self):
self.validate("""x = {'one', 1,}""")
def test_3(self):
self.validate("""x = {'one', 'two', 'three'}""")
def test_4(self):
self.validate("""x = {2, 3, 4,}""")
class TestNumericLiterals(GrammarTest):
def test_new_octal_notation(self):
self.validate("""0o7777777777777""")
self.invalid_syntax("""0o7324528887""")
def test_new_binary_notation(self):
self.validate("""0b101010""")
self.invalid_syntax("""0b0101021""")
class TestClassDef(GrammarTest):
def test_new_syntax(self):
self.validate("class B(t=7): pass")
self.validate("class B(t, *args): pass")
self.validate("class B(t, **kwargs): pass")
self.validate("class B(t, *args, **kwargs): pass")
self.validate("class B(t, y=9, *args, **kwargs): pass")
class TestParserIdempotency(support.TestCase):
"""A cut-down version of pytree_idempotency.py."""
def test_all_project_files(self):
for filepath in support.all_project_files():
with open(filepath, "rb") as fp:
encoding = tokenize.detect_encoding(fp.readline)[0]
self.assertTrue(encoding is not None,
"can't detect encoding for %s" % filepath)
with open(filepath, "r") as fp:
source = fp.read()
source = source.decode(encoding)
tree = driver.parse_string(source)
new = str(tree)
if diff(filepath, new, encoding):
self.fail("Idempotency failed: %s" % filepath)
def test_extended_unpacking(self):
driver.parse_string("a, *b, c = x\n")
driver.parse_string("[*a, b] = x\n")
driver.parse_string("(z, *y, w) = m\n")
driver.parse_string("for *z, m in d: pass\n")
class TestLiterals(GrammarTest):
def validate(self, s):
driver.parse_string(support.dedent(s) + "\n\n")
def test_multiline_bytes_literals(self):
s = """
md5test(b"\xaa" * 80,
(b"Test Using Larger Than Block-Size Key "
b"and Larger Than One Block-Size Data"),
"6f630fad67cda0ee1fb1f562db3aa53e")
"""
self.validate(s)
def test_multiline_bytes_tripquote_literals(self):
s = '''
b"""
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN">
"""
'''
self.validate(s)
def test_multiline_str_literals(self):
s = """
md5test("\xaa" * 80,
("Test Using Larger Than Block-Size Key "
"and Larger Than One Block-Size Data"),
"6f630fad67cda0ee1fb1f562db3aa53e")
"""
self.validate(s)
def diff(fn, result, encoding):
f = open("@", "w")
try:
f.write(result.encode(encoding))
finally:
f.close()
try:
fn = fn.replace('"', '\\"')
return os.system('diff -u "%s" @' % fn)
finally:
os.remove("@")
|
apache-2.0
|
cjgunase/ThinkStats2
|
code/relay_soln.py
|
70
|
1675
|
"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import thinkstats2
import thinkplot
import relay
def ObservedPmf(pmf, speed, label=None):
"""Returns a new Pmf representing speeds observed at a given speed.
The chance of observing a runner is proportional to the difference
in speed.
Args:
pmf: distribution of actual speeds
speed: speed of the observing runner
label: string label for the new dist
Returns:
Pmf object
"""
new = pmf.Copy(label=label)
for val in new.Values():
diff = abs(val - speed)
new.Mult(val, diff)
new.Normalize()
return new
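# Tiny worked example of the length-biased weighting above (made-up speeds):
# a 7.5 mph observer never sees other 7.5 mph runners (weight 0) and sees
# 6 mph and 9 mph runners in proportion to |v - 7.5|.
#
#     pmf = thinkstats2.Pmf([6, 7.5, 9])
#     biased = ObservedPmf(pmf, 7.5)
#     # biased.Prob(7.5) == 0.0; biased.Prob(6) == biased.Prob(9) == 0.5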
def main():
results = relay.ReadResults()
speeds = relay.GetSpeeds(results)
speeds = relay.BinData(speeds, 3, 12, 100)
# plot the distribution of actual speeds
pmf = thinkstats2.Pmf(speeds, 'actual speeds')
# plot the biased distribution seen by the observer
biased = ObservedPmf(pmf, 7.5, label='observed speeds')
thinkplot.Pmf(biased)
thinkplot.Save(root='observed_speeds',
title='PMF of running speed',
xlabel='speed (mph)',
ylabel='PMF')
cdf = thinkstats2.Cdf(pmf)
cdf_biased = thinkstats2.Cdf(biased)
thinkplot.PrePlot(2)
thinkplot.Cdfs([cdf, cdf_biased])
thinkplot.Save(root='observed_speeds_cdf',
title='CDF of running speed',
xlabel='speed (mph)',
ylabel='CDF')
if __name__ == '__main__':
main()
|
gpl-3.0
|
fillycheezstake/MissionPlanner
|
Lib/_abcoll.py
|
56
|
15273
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
DON'T USE THIS MODULE DIRECTLY! The classes here should be imported
via collections; they are defined here only to alleviate certain
bootstrapping issues. Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
]
### ONE-TRICK PONIES ###
def _hasattr(C, attr):
try:
return any(attr in B.__dict__ for B in C.__mro__)
except AttributeError:
# Old-style class
return hasattr(C, attr)
class Hashable:
__metaclass__ = ABCMeta
@abstractmethod
def __hash__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Hashable:
try:
for B in C.__mro__:
if "__hash__" in B.__dict__:
if B.__dict__["__hash__"]:
return True
break
except AttributeError:
# Old-style class
if getattr(C, "__hash__", None):
return True
return NotImplemented
class Iterable:
__metaclass__ = ABCMeta
@abstractmethod
def __iter__(self):
while False:
yield None
@classmethod
def __subclasshook__(cls, C):
if cls is Iterable:
if _hasattr(C, "__iter__"):
return True
return NotImplemented
Iterable.register(str)
class Iterator(Iterable):
@abstractmethod
def next(self):
raise StopIteration
def __iter__(self):
return self
@classmethod
def __subclasshook__(cls, C):
if cls is Iterator:
if _hasattr(C, "next") and _hasattr(C, "__iter__"):
return True
return NotImplemented
class Sized:
__metaclass__ = ABCMeta
@abstractmethod
def __len__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Sized:
if _hasattr(C, "__len__"):
return True
return NotImplemented
class Container:
__metaclass__ = ABCMeta
@abstractmethod
def __contains__(self, x):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Container:
if _hasattr(C, "__contains__"):
return True
return NotImplemented
class Callable:
__metaclass__ = ABCMeta
@abstractmethod
def __call__(self, *args, **kwds):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Callable:
if _hasattr(C, "__call__"):
return True
return NotImplemented
### SETS ###
class Set(Sized, Iterable, Container):
"""A set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__ and __len__.
To override the comparisons (presumably for speed, as the
semantics are fixed), all you have to do is redefine __le__ and
then the other operations will automatically follow suit.
"""
def __le__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) > len(other):
return False
for elem in self:
if elem not in other:
return False
return True
def __lt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) < len(other) and self.__le__(other)
def __gt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other < self
def __ge__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other <= self
def __eq__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) == len(other) and self.__le__(other)
def __ne__(self, other):
return not (self == other)
@classmethod
def _from_iterable(cls, it):
'''Construct an instance of the class from any iterable input.
Must override this method if the class constructor signature
does not accept an iterable for an input.
'''
return cls(it)
def __and__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
return self._from_iterable(value for value in other if value in self)
def isdisjoint(self, other):
for value in other:
if value in self:
return False
return True
def __or__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
chain = (e for s in (self, other) for e in s)
return self._from_iterable(chain)
def __sub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in self
if value not in other)
def __xor__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return (self - other) | (other - self)
# Sets are not hashable by default, but subclasses can change this
__hash__ = None
def _hash(self):
"""Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
This must be compatible with __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type.
"""
MAX = sys.maxint
MASK = 2 * MAX + 1
n = len(self)
h = 1927868237 * (n + 1)
h &= MASK
for x in self:
hx = hash(x)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
h = h * 69069 + 907133923
h &= MASK
if h > MAX:
h -= MASK + 1
if h == -1:
h = 590923713
return h
Set.register(frozenset)
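# Sketch of the pattern the _hash docstring describes: a hashable set type
# implementing the three abstract methods and reusing Set._hash (the class
# name is illustrative, not part of this module).
#
#     class FrozenishSet(Set):
#         def __init__(self, iterable):
#             self._data = frozenset(iterable)
#         def __contains__(self, value):
#             return value in self._data
#         def __iter__(self):
#             return iter(self._data)
#         def __len__(self):
#             return len(self._data)
#         __hash__ = Set._hash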
class MutableSet(Set):
@abstractmethod
def add(self, value):
"""Add an element."""
raise NotImplementedError
@abstractmethod
def discard(self, value):
"""Remove an element. Do not raise an exception if absent."""
raise NotImplementedError
def remove(self, value):
"""Remove an element. If not a member, raise a KeyError."""
if value not in self:
raise KeyError(value)
self.discard(value)
def pop(self):
"""Return the popped value. Raise KeyError if empty."""
it = iter(self)
try:
value = next(it)
except StopIteration:
raise KeyError
self.discard(value)
return value
def clear(self):
"""This is slow (creates N new iterators!) but effective."""
try:
while True:
self.pop()
except KeyError:
pass
def __ior__(self, it):
for value in it:
self.add(value)
return self
def __iand__(self, it):
for value in (self - it):
self.discard(value)
return self
def __ixor__(self, it):
if it is self:
self.clear()
else:
if not isinstance(it, Set):
it = self._from_iterable(it)
for value in it:
if value in self:
self.discard(value)
else:
self.add(value)
return self
def __isub__(self, it):
if it is self:
self.clear()
else:
for value in it:
self.discard(value)
return self
MutableSet.register(set)
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
@abstractmethod
def __getitem__(self, key):
raise KeyError
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def iterkeys(self):
return iter(self)
def itervalues(self):
for key in self:
yield self[key]
def iteritems(self):
for key in self:
yield (key, self[key])
def keys(self):
return list(self)
def items(self):
return [(key, self[key]) for key in self]
def values(self):
return [self[key] for key in self]
# Mappings are not hashable by default, but subclasses can change this
__hash__ = None
def __eq__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
return dict(self.items()) == dict(other.items())
def __ne__(self, other):
return not (self == other)
class MappingView(Sized):
def __init__(self, mapping):
self._mapping = mapping
def __len__(self):
return len(self._mapping)
def __repr__(self):
return '{0.__class__.__name__}({0._mapping!r})'.format(self)
class KeysView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, key):
return key in self._mapping
def __iter__(self):
for key in self._mapping:
yield key
class ItemsView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, item):
key, value = item
try:
v = self._mapping[key]
except KeyError:
return False
else:
return v == value
def __iter__(self):
for key in self._mapping:
yield (key, self._mapping[key])
class ValuesView(MappingView):
def __contains__(self, value):
for key in self._mapping:
if value == self._mapping[key]:
return True
return False
def __iter__(self):
for key in self._mapping:
yield self._mapping[key]
class MutableMapping(Mapping):
@abstractmethod
def __setitem__(self, key, value):
raise KeyError
@abstractmethod
def __delitem__(self, key):
raise KeyError
__marker = object()
def pop(self, key, default=__marker):
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def popitem(self):
try:
key = next(iter(self))
except StopIteration:
raise KeyError
value = self[key]
del self[key]
return key, value
def clear(self):
try:
while True:
self.popitem()
except KeyError:
pass
def update(*args, **kwds):
if len(args) > 2:
raise TypeError("update() takes at most 2 positional "
"arguments ({} given)".format(len(args)))
elif not args:
raise TypeError("update() takes at least 1 argument (0 given)")
self = args[0]
other = args[1] if len(args) >= 2 else ()
if isinstance(other, Mapping):
for key in other:
self[key] = other[key]
elif hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
MutableMapping.register(dict)
### SEQUENCES ###
class Sequence(Sized, Iterable, Container):
"""All the operations on a read-only sequence.
Concrete subclasses must override __new__ or __init__,
__getitem__, and __len__.
"""
@abstractmethod
def __getitem__(self, index):
raise IndexError
def __iter__(self):
i = 0
try:
while True:
v = self[i]
yield v
i += 1
except IndexError:
return
def __contains__(self, value):
for v in self:
if v == value:
return True
return False
def __reversed__(self):
for i in reversed(range(len(self))):
yield self[i]
def index(self, value):
for i, v in enumerate(self):
if v == value:
return i
raise ValueError
def count(self, value):
return sum(1 for v in self if v == value)
Sequence.register(tuple)
Sequence.register(basestring)
Sequence.register(buffer)
Sequence.register(xrange)
class MutableSequence(Sequence):
@abstractmethod
def __setitem__(self, index, value):
raise IndexError
@abstractmethod
def __delitem__(self, index):
raise IndexError
@abstractmethod
def insert(self, index, value):
raise IndexError
def append(self, value):
self.insert(len(self), value)
def reverse(self):
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i]
def extend(self, values):
for v in values:
self.append(v)
def pop(self, index=-1):
v = self[index]
del self[index]
return v
def remove(self, value):
del self[self.index(value)]
def __iadd__(self, values):
self.extend(values)
return self
MutableSequence.register(list)
|
gpl-3.0
|
tiqiheng/pyspider
|
pyspider/scheduler/task_queue.py
|
57
|
7035
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-02-07 13:12:10
import time
import heapq
import logging
import threading
from six.moves import queue as Queue
try:
from UserDict import DictMixin
except ImportError:
from collections import Mapping as DictMixin
from .token_bucket import Bucket
logger = logging.getLogger('scheduler')
try:
cmp
except NameError:
cmp = lambda x, y: (x > y) - (x < y)
class InQueueTask(DictMixin):
__slots__ = ('taskid', 'priority', 'exetime')
__getitem__ = lambda *x: getattr(*x)
__setitem__ = lambda *x: setattr(*x)
__iter__ = lambda self: iter(self.__slots__)
__len__ = lambda self: len(self.__slots__)
keys = lambda self: self.__slots__
def __init__(self, taskid, priority=0, exetime=0):
self.taskid = taskid
self.priority = priority
self.exetime = exetime
def __cmp__(self, other):
if self.exetime == 0 and other.exetime == 0:
return -cmp(self.priority, other.priority)
else:
return cmp(self.exetime, other.exetime)
def __lt__(self, other):
return self.__cmp__(other) < 0
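# Ordering sketch: tasks with exetime 0 sort by descending priority, while
# delayed tasks sort by ascending exetime, so ready high-priority work is
# popped from the heap first.
#
#     assert InQueueTask('a', priority=5) < InQueueTask('b', priority=1)
#     assert InQueueTask('c', exetime=1.0) < InQueueTask('d', exetime=2.0)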
class PriorityTaskQueue(Queue.Queue):
'''
TaskQueue
Items with the same taskid will be merged
'''
def _init(self, maxsize):
self.queue = []
self.queue_dict = dict()
def _qsize(self, len=len):
return len(self.queue_dict)
def _put(self, item, heappush=heapq.heappush):
if item.taskid in self.queue_dict:
task = self.queue_dict[item.taskid]
changed = False
if item.priority > task.priority:
task.priority = item.priority
changed = True
if item.exetime < task.exetime:
task.exetime = item.exetime
changed = True
if changed:
self._resort()
else:
heappush(self.queue, item)
self.queue_dict[item.taskid] = item
def _get(self, heappop=heapq.heappop):
while self.queue:
item = heappop(self.queue)
if item.taskid is None:
continue
self.queue_dict.pop(item.taskid, None)
return item
return None
@property
def top(self):
while self.queue and self.queue[0].taskid is None:
heapq.heappop(self.queue)
if self.queue:
return self.queue[0]
return None
def _resort(self):
heapq.heapify(self.queue)
def __contains__(self, taskid):
return taskid in self.queue_dict
def __getitem__(self, taskid):
return self.queue_dict[taskid]
def __setitem__(self, taskid, item):
assert item.taskid == taskid
self.put(item)
def __delitem__(self, taskid):
self.queue_dict.pop(taskid).taskid = None
class TaskQueue(object):
'''
Task queue for the scheduler, with a priority queue and a time queue for delayed tasks.
'''
processing_timeout = 10 * 60
def __init__(self, rate=0, burst=0):
self.mutex = threading.RLock()
self.priority_queue = PriorityTaskQueue()
self.time_queue = PriorityTaskQueue()
self.processing = PriorityTaskQueue()
self.bucket = Bucket(rate=rate, burst=burst)
@property
def rate(self):
return self.bucket.rate
@rate.setter
def rate(self, value):
self.bucket.rate = value
@property
def burst(self):
return self.bucket.burst
@burst.setter
def burst(self, value):
self.bucket.burst = value
def check_update(self):
'''
Check time queue and processing queue
put tasks to priority queue when execute time arrived or process timeout
'''
self._check_time_queue()
self._check_processing()
def _check_time_queue(self):
now = time.time()
self.mutex.acquire()
while self.time_queue.qsize() and self.time_queue.top.exetime < now:
task = self.time_queue.get_nowait()
task.exetime = 0
self.priority_queue.put(task)
self.mutex.release()
def _check_processing(self):
now = time.time()
self.mutex.acquire()
while self.processing.qsize() and self.processing.top.exetime < now:
task = self.processing.get_nowait()
if task.taskid is None:
continue
task.exetime = 0
self.priority_queue.put(task)
logger.info("processing: retry %s", task.taskid)
self.mutex.release()
def put(self, taskid, priority=0, exetime=0):
'''Put a task into task queue'''
now = time.time()
task = InQueueTask(taskid, priority, exetime)
self.mutex.acquire()
if taskid in self.priority_queue:
self.priority_queue.put(task)
elif taskid in self.time_queue:
self.time_queue.put(task)
elif taskid in self.processing and self.processing[taskid].taskid:
# force-updating a processing task is not allowed, as too many
# problems may happen
pass
else:
if exetime and exetime > now:
self.time_queue.put(task)
else:
self.priority_queue.put(task)
self.mutex.release()
def get(self):
'''Get a task from queue when bucket available'''
if self.bucket.get() < 1:
return None
now = time.time()
self.mutex.acquire()
try:
task = self.priority_queue.get_nowait()
self.bucket.desc()
except Queue.Empty:
self.mutex.release()
return None
task.exetime = now + self.processing_timeout
self.processing.put(task)
self.mutex.release()
return task.taskid
def done(self, taskid):
'''Mark task done'''
if taskid in self.processing:
del self.processing[taskid]
return True
return False
def size(self):
return self.priority_queue.qsize() + self.time_queue.qsize() + self.processing.qsize()
def __len__(self):
return self.size()
def __contains__(self, taskid):
if taskid in self.priority_queue or taskid in self.time_queue:
return True
if taskid in self.processing and self.processing[taskid].taskid:
return True
return False
if __name__ == '__main__':
task_queue = TaskQueue()
task_queue.processing_timeout = 0.1
task_queue.put('a3', 3, time.time() + 0.1)
task_queue.put('a1', 1)
task_queue.put('a2', 2)
assert task_queue.get() == 'a2'
time.sleep(0.1)
task_queue._check_time_queue()
assert task_queue.get() == 'a3'
assert task_queue.get() == 'a1'
task_queue._check_processing()
assert task_queue.get() == 'a2'
assert len(task_queue) == 0
|
apache-2.0
|
alexbruy/QGIS
|
python/plugins/processing/algs/lidar/lastools/las2dem.py
|
3
|
3592
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
las2dem.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from .LAStoolsUtils import LAStoolsUtils
from .LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterBoolean
class las2dem(LAStoolsAlgorithm):
ATTRIBUTE = "ATTRIBUTE"
PRODUCT = "PRODUCT"
ATTRIBUTES = ["elevation", "slope", "intensity", "rgb", "edge_longest", "edge_shortest"]
PRODUCTS = ["actual values", "hillshade", "gray", "false"]
USE_TILE_BB = "USE_TILE_BB"
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('las2dem')
self.group, self.i18n_group = self.trAlgorithm('LAStools')
self.addParametersVerboseGUI()
self.addParametersPointInputGUI()
self.addParametersFilter1ReturnClassFlagsGUI()
self.addParametersStepGUI()
self.addParameter(ParameterSelection(las2dem.ATTRIBUTE,
self.tr("Attribute"), las2dem.ATTRIBUTES, 0))
self.addParameter(ParameterSelection(las2dem.PRODUCT,
self.tr("Product"), las2dem.PRODUCTS, 0))
self.addParameter(ParameterBoolean(las2dem.USE_TILE_BB,
self.tr("use tile bounding box (after tiling with buffer)"), False))
self.addParametersRasterOutputGUI()
self.addParametersAdditionalGUI()
def processAlgorithm(self, progress):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "las2dem")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputCommands(commands)
self.addParametersFilter1ReturnClassFlagsCommands(commands)
self.addParametersStepCommands(commands)
attribute = self.getParameterValue(las2dem.ATTRIBUTE)
if attribute != 0:
commands.append("-" + las2dem.ATTRIBUTES[attribute])
product = self.getParameterValue(las2dem.PRODUCT)
if product != 0:
commands.append("-" + las2dem.PRODUCTS[product])
if (self.getParameterValue(las2dem.USE_TILE_BB)):
commands.append("-use_tile_bb")
self.addParametersRasterOutputCommands(commands)
self.addParametersAdditionalCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
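# For reference, a command list assembled by processAlgorithm above might
# look like this (paths and values are illustrative only):
#
#     ['/opt/LAStools/bin/las2dem', '-i', 'tile.laz', '-step', '2.0',
#      '-hillshade', '-use_tile_bb', '-o', 'dem.tif']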
|
gpl-2.0
|
lihui7115/ChromiumGStreamerBackend
|
tools/telemetry/telemetry/value/skip_unittest.py
|
13
|
1626
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import story
from telemetry import page as page_module
from telemetry import value
from telemetry.value import skip
class TestBase(unittest.TestCase):
def setUp(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(
page_module.Page('http://www.bar.com/', story_set, story_set.base_dir))
self.story_set = story_set
@property
def pages(self):
return self.story_set.stories
class ValueTest(TestBase):
def testBuildbotAndRepresentativeValue(self):
v = skip.SkipValue(self.pages[0], 'page skipped for testing reason')
self.assertIsNone(v.GetBuildbotValue())
self.assertIsNone(v.GetBuildbotDataType(
value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
self.assertIsNone(v.GetChartAndTraceNameForPerPageResult())
self.assertIsNone(v.GetRepresentativeNumber())
self.assertIsNone(v.GetRepresentativeString())
def testAsDict(self):
v = skip.SkipValue(self.pages[0], 'page skipped for testing reason')
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d['reason'], 'page skipped for testing reason')
def testFromDict(self):
d = {
'type': 'skip',
'name': 'skip',
'units': '',
'reason': 'page skipped for testing reason'
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, skip.SkipValue))
self.assertEquals(v.reason, 'page skipped for testing reason')
|
bsd-3-clause
|
larusx/yunmark
|
site-packages/markdown/extensions/abbr.py
|
13
|
2786
|
'''
Abbreviation Extension for Python-Markdown
==========================================
This extension adds abbreviation handling to Python-Markdown.
See <https://pythonhosted.org/Markdown/extensions/abbreviations.html>
for documentation.
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/) and
[Seemant Kulleen](http://www.kulleen.org/)
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
'''
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
from ..inlinepatterns import Pattern
from ..util import etree, AtomicString
import re
# Global Vars
ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
class AbbrExtension(Extension):
""" Abbreviation Extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Insert AbbrPreprocessor before ReferencePreprocessor. """
md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference')
class AbbrPreprocessor(Preprocessor):
""" Abbreviation Preprocessor - parse text for abbr references. """
def run(self, lines):
'''
Find and remove all Abbreviation references from the text.
Each reference is set as a new AbbrPattern in the markdown instance.
'''
new_text = []
for line in lines:
m = ABBR_REF_RE.match(line)
if m:
abbr = m.group('abbr').strip()
title = m.group('title').strip()
self.markdown.inlinePatterns['abbr-%s'%abbr] = \
AbbrPattern(self._generate_pattern(abbr), title)
else:
new_text.append(line)
return new_text
def _generate_pattern(self, text):
'''
Given a string, returns a regex pattern to match that string.
'HTML' -> r'(?P<abbr>[H][T][M][L])'
Note: we force each char as a literal match (in brackets) as we don't
know what they will be beforehand.
'''
chars = list(text)
for i in range(len(chars)):
chars[i] = r'[%s]' % chars[i]
return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
class AbbrPattern(Pattern):
""" Abbreviation inline pattern. """
def __init__(self, pattern, title):
super(AbbrPattern, self).__init__(pattern)
self.title = title
def handleMatch(self, m):
abbr = etree.Element('abbr')
abbr.text = AtomicString(m.group('abbr'))
abbr.set('title', self.title)
return abbr
def makeExtension(*args, **kwargs):
return AbbrExtension(*args, **kwargs)
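# Usage sketch with the standard Python-Markdown entry point (output
# abbreviated):
#
#     import markdown
#     text = "The HTML spec\n\n*[HTML]: Hyper Text Markup Language"
#     html = markdown.markdown(text, extensions=[makeExtension()])
#     # '<p>The <abbr title="Hyper Text Markup Language">HTML</abbr> spec</p>'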
|
mit
|
benjamindeleener/odoo
|
addons/l10n_ca/__openerp__.py
|
1
|
1831
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Canada - Accounting',
'version': '1.0',
'author': 'Savoir-faire Linux',
'website': 'https://www.savoirfairelinux.com',
'category': 'Localization/Account Charts',
'description': """
This is the module to manage the Canadian accounting chart in Odoo.
===========================================================================================
Canadian accounting charts and localizations.
Fiscal positions
----------------
When considering taxes to be applied, it is the province where the delivery occurs that matters.
Therefore we decided to implement the most common case in the fiscal positions: delivery is the
responsibility of the vendor and done at the customer location.
Some examples:
1) You have a customer from another province and you deliver to his location.
On the customer, set the fiscal position to his province.
2) You have a customer from another province. However this customer comes to your location
with their truck to pick up products. On the customer, do not set any fiscal position.
3) An international vendor doesn't charge you any tax. Taxes are charged at customs
by the customs broker. On the vendor, set the fiscal position to International.
4) An international vendor charges you your provincial tax. They are registered with your
position.
""",
'depends': [
'account',
'base_iban',
'base_vat',
'l10n_multilang',
],
'data': [
'account_chart_template.xml',
'account_chart.xml',
'account_chart_template_after.xml',
'account_tax.xml',
'fiscal_templates.xml',
'account_chart_template.yml',
],
'installable': True,
'post_init_hook': 'load_translations',
}
|
gpl-3.0
|
py-geek/City-Air
|
venv/lib/python2.7/site-packages/psycopg2/tests/testutils.py
|
14
|
10390
|
# testutils.py - utility module for psycopg2 testing.
#
# Copyright (C) 2010-2011 Daniele Varrazzo <daniele.varrazzo@gmail.com>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
# Use unittest2 if available. Otherwise mock a skip facility with warnings.
import os
import platform
import sys
from functools import wraps
from testconfig import dsn
try:
import unittest2
unittest = unittest2
except ImportError:
import unittest
unittest2 = None
if hasattr(unittest, 'skipIf'):
skip = unittest.skip
skipIf = unittest.skipIf
else:
import warnings
def skipIf(cond, msg):
def skipIf_(f):
@wraps(f)
def skipIf__(self):
if cond:
warnings.warn(msg)
return
else:
return f(self)
return skipIf__
return skipIf_
def skip(msg):
return skipIf(True, msg)
def skipTest(self, msg):
warnings.warn(msg)
return
unittest.TestCase.skipTest = skipTest
# Silence warnings caused by the stubbornness of the Python unittest maintainers
# http://bugs.python.org/issue9424
if not hasattr(unittest.TestCase, 'assert_') \
or unittest.TestCase.assert_ is not unittest.TestCase.assertTrue:
# mavaff...
unittest.TestCase.assert_ = unittest.TestCase.assertTrue
unittest.TestCase.failUnless = unittest.TestCase.assertTrue
unittest.TestCase.assertEquals = unittest.TestCase.assertEqual
unittest.TestCase.failUnlessEqual = unittest.TestCase.assertEqual
class ConnectingTestCase(unittest.TestCase):
"""A test case providing connections for tests.
A connection for the test is always available as `self.conn`. Others can be
created with `self.connect()`. All are closed on tearDown.
Subclasses needing to customize setUp and tearDown should remember to call
the base class implementations.
"""
def setUp(self):
self._conns = []
def tearDown(self):
# close the connections used in the test
for conn in self._conns:
if not conn.closed:
conn.close()
def connect(self, **kwargs):
try:
self._conns
except AttributeError, e:
raise AttributeError(
"%s (did you remember calling ConnectingTestCase.setUp()?)"
% e)
import psycopg2
conn = psycopg2.connect(dsn, **kwargs)
self._conns.append(conn)
return conn
def _get_conn(self):
if not hasattr(self, '_the_conn'):
self._the_conn = self.connect()
return self._the_conn
def _set_conn(self, conn):
self._the_conn = conn
conn = property(_get_conn, _set_conn)
def decorate_all_tests(cls, *decorators):
"""
Apply all the *decorators* to all the tests defined in the TestCase *cls*.
"""
for n in dir(cls):
if n.startswith('test'):
for d in decorators:
setattr(cls, n, d(getattr(cls, n)))
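# Usage sketch: apply a module decorator to every test of a TestCase, e.g.
# skipping all of them when uuid support is missing (test body hypothetical).
#
#     class UUIDTests(ConnectingTestCase):
#         def test_roundtrip(self):
#             pass
#     decorate_all_tests(UUIDTests, skip_if_no_uuid)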
def skip_if_no_uuid(f):
"""Decorator to skip a test if uuid is not supported by Py/PG."""
@wraps(f)
def skip_if_no_uuid_(self):
try:
import uuid
except ImportError:
return self.skipTest("uuid not available in this Python version")
try:
cur = self.conn.cursor()
cur.execute("select typname from pg_type where typname = 'uuid'")
has = cur.fetchone()
finally:
self.conn.rollback()
if has:
return f(self)
else:
return self.skipTest("uuid type not available on the server")
return skip_if_no_uuid_
def skip_if_tpc_disabled(f):
"""Skip a test if the server has tpc support disabled."""
@wraps(f)
def skip_if_tpc_disabled_(self):
from psycopg2 import ProgrammingError
cnn = self.connect()
cur = cnn.cursor()
try:
cur.execute("SHOW max_prepared_transactions;")
except ProgrammingError:
return self.skipTest(
"server too old: two phase transactions not supported.")
else:
mtp = int(cur.fetchone()[0])
cnn.close()
if not mtp:
return self.skipTest(
"server not configured for two phase transactions. "
"set max_prepared_transactions to > 0 to run the test")
return f(self)
return skip_if_tpc_disabled_
def skip_if_no_namedtuple(f):
@wraps(f)
def skip_if_no_namedtuple_(self):
try:
from collections import namedtuple
except ImportError:
return self.skipTest("collections.namedtuple not available")
else:
return f(self)
return skip_if_no_namedtuple_
def skip_if_no_iobase(f):
"""Skip a test if io.TextIOBase is not available."""
@wraps(f)
def skip_if_no_iobase_(self):
try:
from io import TextIOBase
except ImportError:
return self.skipTest("io.TextIOBase not found.")
else:
return f(self)
return skip_if_no_iobase_
def skip_before_postgres(*ver):
"""Skip a test on PostgreSQL before a certain version."""
ver = ver + (0,) * (3 - len(ver))
def skip_before_postgres_(f):
@wraps(f)
def skip_before_postgres__(self):
if self.conn.server_version < int("%d%02d%02d" % ver):
return self.skipTest("skipped because PostgreSQL %s"
% self.conn.server_version)
else:
return f(self)
return skip_before_postgres__
return skip_before_postgres_
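# Example: the version tuple (9, 2) is padded to (9, 2, 0) and encoded as the
# integer 90200, matching conn.server_version, so this test would be skipped
# on any server older than PostgreSQL 9.2 (test name hypothetical):
#
#     @skip_before_postgres(9, 2)
#     def test_range_types(self):
#         pass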
def skip_after_postgres(*ver):
"""Skip a test on PostgreSQL after (including) a certain version."""
ver = ver + (0,) * (3 - len(ver))
def skip_after_postgres_(f):
@wraps(f)
def skip_after_postgres__(self):
if self.conn.server_version >= int("%d%02d%02d" % ver):
return self.skipTest("skipped because PostgreSQL %s"
% self.conn.server_version)
else:
return f(self)
return skip_after_postgres__
return skip_after_postgres_
def skip_before_python(*ver):
"""Skip a test on Python before a certain version."""
def skip_before_python_(f):
@wraps(f)
def skip_before_python__(self):
if sys.version_info[:len(ver)] < ver:
return self.skipTest("skipped because Python %s"
% ".".join(map(str, sys.version_info[:len(ver)])))
else:
return f(self)
return skip_before_python__
return skip_before_python_
def skip_from_python(*ver):
"""Skip a test on Python after (including) a certain version."""
def skip_from_python_(f):
@wraps(f)
def skip_from_python__(self):
if sys.version_info[:len(ver)] >= ver:
return self.skipTest("skipped because Python %s"
% ".".join(map(str, sys.version_info[:len(ver)])))
else:
return f(self)
return skip_from_python__
return skip_from_python_
def skip_if_no_superuser(f):
"""Skip a test if the database user running the test is not a superuser"""
@wraps(f)
def skip_if_no_superuser_(self):
from psycopg2 import ProgrammingError
try:
return f(self)
except ProgrammingError, e:
import psycopg2.errorcodes
if e.pgcode == psycopg2.errorcodes.INSUFFICIENT_PRIVILEGE:
self.skipTest("skipped because not superuser")
else:
raise
return skip_if_no_superuser_
def skip_if_green(reason):
def skip_if_green_(f):
@wraps(f)
def skip_if_green__(self):
from testconfig import green
if green:
return self.skipTest(reason)
else:
return f(self)
return skip_if_green__
return skip_if_green_
skip_copy_if_green = skip_if_green("copy in async mode currently not supported")
def skip_if_no_getrefcount(f):
@wraps(f)
def skip_if_no_getrefcount_(self):
if not hasattr(sys, 'getrefcount'):
return self.skipTest('skipped, no sys.getrefcount()')
else:
return f(self)
return skip_if_no_getrefcount_
def skip_if_windows(f):
"""Skip a test if run on windows"""
@wraps(f)
def skip_if_windows_(self):
if platform.system() == 'Windows':
return self.skipTest("Not supported on Windows")
else:
return f(self)
return skip_if_windows_
def script_to_py3(script):
"""Convert a script to Python3 syntax if required."""
if sys.version_info[0] < 3:
return script
import tempfile
f = tempfile.NamedTemporaryFile(suffix=".py", delete=False)
f.write(script.encode())
f.flush()
filename = f.name
f.close()
# 2to3 is way too chatty
import logging
logging.basicConfig(filename=os.devnull)
from lib2to3.main import main
if main("lib2to3.fixes", ['--no-diffs', '-w', '-n', filename]):
raise Exception('py3 conversion failed')
f2 = open(filename)
try:
return f2.read()
finally:
f2.close()
os.remove(filename)
class py3_raises_typeerror(object):
def __enter__(self):
pass
def __exit__(self, type, exc, tb):
if sys.version_info[0] >= 3:
assert type is TypeError
return True
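# Usage sketch: assert that an operation raises TypeError on Python 3 while
# passing silently on Python 2.
#
#     with py3_raises_typeerror():
#         b"bytes" + "str"  # TypeError on py3, plain concatenation on py2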
|
mit
|
AdrienMQ/parameter-framework
|
tools/coverage/coverage.py
|
2
|
34920
|
#!/usr/bin/env python3
# Copyright (c) 2011-2014, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Generate a coverage report by parsing the parameter framework log.
The coverage report contains basic coverage statistics for:
- domains
- configurations
- rules
- criteria
"""
import xml.dom.minidom
import sys
import re
import logging
FORMAT = '%(levelname)s: %(message)s'
logging.basicConfig(stream=sys.stderr, level=logging.WARNING, format=FORMAT)
logger = logging.getLogger("Coverage")
class CustomError(Exception):
pass
class ChildError(CustomError):
def __init__(self, parent, child):
self.parent = parent
self.child = child
class ChildNotFoundError(ChildError):
def __str__(self):
return "Unable to find the child %s in %s" % (self.child, self.parent)
class DuplicatedChildError(ChildError):
def __str__(self):
return "Add existing child %s in %s." % (self.child, self.parent)
class Element():
"""Root class for all coverage elements"""
tag = "element"
def __init__(self, name):
self.parent = None
self.children = []
self.nbUse = 0
self.name = name
self.debug("New element")
def __str__(self):
return "%s (%s)" % (self.name, self.tag)
def __eq__(self, compared):
return (self.name == compared.name) and (self.children == compared.children)
def getName(self, default=""):
return self.name or default
def hasChildren(self):
return bool(self.children)
def getChildren(self):
return self.children
def _getDescendants(self):
for child in self.children:
yield child
for descendant in child._getDescendants() :
yield descendant
def getChildFromName(self, childName):
for child in self.children :
if child.getName() == childName :
return child
self.debug("Child %s not found" % childName, logging.ERROR)
self.debug("Child list :")
for child in self.children :
self.debug(" - %s" % child)
raise ChildNotFoundError(self, childName)
def addChild(self, child):
self.debug("new child: " + child.name)
self.children.append(child)
child._adoptedBy(self)
def _adoptedBy(self, parent):
assert(not self.parent)
self.parent = parent
def _getElementNames(self, elementList):
return (substate.name for substate in elementList)
def _description(self, withCoverage, withNbUse):
description = self.name
if withNbUse or withCoverage :
description += " has been used " + str(self.nbUse) + " time"
if withCoverage :
description += self._coverageFormating(self._getCoverage())
return description
def _getCoverage(self):
"""Return the coverage of the element between 0 and 1
If the element has no coverage dependency (usually child) return 0 or 1.
otherwise the element coverage is the dependency coverage average"""
coverageDependanceElements = list(self._getCoverageDependanceElements())
nbcoverageDependence = len(coverageDependanceElements)
if nbcoverageDependence == 0:
if self.nbUse == 0:
return 0
else:
return 1
coverageDependenceValues = (depElement._getCoverage()
for depElement in coverageDependanceElements)
return sum(coverageDependenceValues) / nbcoverageDependence
def _getCoverageDependanceElements(self):
return self.children
def _coverageFormating(self, coverage):
# If no coverage provided
if not coverage :
return ""
# Calculate coverage
return " (%s coverage)" % self._number2percent(coverage)
@staticmethod
def _number2percent(number):
"""Format a number to a integer % string
example: _number2percent(0.6666) -> "67%"
"""
return "{0:.0f}%".format(100 * number)
def _dumpDescription(self, withCoverage, withNbUse):
self.debug("yelding description")
yield RankedLine(self._description(withCoverage, withNbUse), lineSuffix="")
for dumped in self._dumpPropagate(withCoverage, withNbUse) :
yield dumped
def _dumpPropagate(self, withCoverage, withNbUse):
for child in self.children :
for dumpedDescription in child._dumpDescription(withCoverage, withNbUse) :
yield dumpedDescription.increasedRank()
def dump(self, withCoverage=False, withNbUse=True):
return "\n".join(
str(dumpedDescription) for dumpedDescription in
self._dumpDescription(withCoverage, withNbUse))
def exportToXML(self, document, domElement=None):
if domElement is None:
domElement = document.createElement(self.tag)
self._XMLaddAttributes(domElement)
for child in self.children :
domElement.appendChild(child.exportToXML(document))
return domElement
def _XMLaddAttributes(self, domElement):
attributes = self._getXMLAttributes()
coverage = self._getCoverage()
if coverage is not None:
attributes["Coverage"] = self._number2percent(coverage)
for key, value in attributes.items():
domElement.setAttribute(key, value)
def _getXMLAttributes(self):
return {
"Name": self.name,
"NbUse": str(self.nbUse)
}
def _incNbUse(self):
self.nbUse += 1
def childUsed(self, child):
self._incNbUse()
# Propagate to parent
self._tellParentThatChildUsed()
def _tellParentThatChildUsed(self):
if self.parent :
self.parent.childUsed(self)
def parentUsed(self):
self._incNbUse()
# Propagate to children
for child in self.children :
child.parentUsed()
def hasBeenUsed(self):
return self.nbUse > 0
def operationOnChild(self, path, operation):
if path:
return self._operationPropagate(path, operation)
else :
self.debug("operating on self")
return operation(self)
def _operationPropagate(self, path, operation):
childName = path.pop(0)
child = self.getChildFromName(childName)
return child.operationOnChild(path, operation)
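# Sketch: operationOnChild walks a name path down the tree and applies
# ``operation`` to the element it reaches (element names hypothetical):
#
#     domains.operationOnChild(["MyDomain", "MyConf"], lambda e: e.nbUse)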
def debug(self, stringOrFunction, level=logging.DEBUG):
"""Print a debug line on stderr in tree form
If the debug line is expensive to generate, provide callable
object, it will be called if log is enable for this level.
This callable object should return the logline string.
"""
if logger.isEnabledFor(level):
# TODO: use builtin callable() if python >= 3.2
if hasattr(stringOrFunction, "__call__"):
string = stringOrFunction()
else:
string = stringOrFunction
rankedLine = DebugRankedLine("%s: %s" % (self, string))
self._logDebug(rankedLine, level)
def _logDebug(self, rankedLine, level):
if self.parent:
self.parent._logDebug(rankedLine.increasedRank(), level)
else :
logger.log(level, str(rankedLine))
class FromDomElement(Element):
def __init__(self, DomElement):
self._initFromDom(DomElement)
super().__init__(self.name)
def _initFromDom(self, DomElement):
self.name = DomElement.getAttribute("Name")
class DomElementLocation():
def __init__(self, classConstructor, path=None):
self.classConstructor = classConstructor
if path :
self.path = path
else :
self.path = []
self.path.append(classConstructor.tag)
class DomPopulatedElement(Element):
"""Default child populate
Look for each dom element with tag specified in self.tag
and instantiate it with the dom element
"""
childClasses = []
def populate(self, dom):
for childDomElementLocation in self.childClasses :
self.debug("Looking for child %s in path %s" % (
childDomElementLocation.path[-1], childDomElementLocation.path))
for childDomElement in self._findChildFromTagPath(dom, childDomElementLocation.path) :
childElement = childDomElementLocation.classConstructor(childDomElement)
self.addChild(childElement)
childElement.populate(childDomElement)
def _findChildFromTagPath(self, dom, path):
if not path :
yield dom
else :
# Copy list
path = list(path)
tag = path.pop(0)
# Find element with tag
self.debug("Going to find elements with tag %s in %s" % (tag, dom))
self.debug(lambda: "Nb of solutions: %s" % len(dom.getElementsByTagName(tag)))
for elementByTag in dom.getElementsByTagName(tag) :
self.debug("Found element: %s" % elementByTag)
# If the same tag is found
if elementByTag in dom.childNodes :
# Yield next level
for element in self._findChildFromTagPath(elementByTag, path) :
yield element
class Rule(Element):
def usedIfApplicable(self, criteria):
childApplicability = (child.usedIfApplicable(criteria)
for child in self.children)
isApplicable = self._isApplicable(criteria, childApplicability)
if isApplicable :
self._incNbUse()
self.debug("Rule applicability: %s" % isApplicable)
assert(isApplicable == True or isApplicable == False)
return isApplicable
def _isApplicable(self, criteria, childApplicability):
"""Return the rule applicability depending on children applicability.
Only applicable if all children are applicable."""
# Lazy evaluation as in the PFW
return all(childApplicability)
class CriterionRule(FromDomElement, DomPopulatedElement, Rule):
tag = "SelectionCriterionRule"
childClasses = []
isApplicableOperations = {
"Includes" : lambda criterion, value: criterion.stateIncludes(value),
"Excludes" : lambda criterion, value: not criterion.stateIncludes(value),
"Is" : lambda criterion, value: criterion.stateIs(value),
"IsNot" : lambda criterion, value: not criterion.stateIs(value)
}
def _initFromDom(self, DomElement):
self.selectionCriterion = DomElement.getAttribute("SelectionCriterion")
self.matchesWhen = DomElement.getAttribute("MatchesWhen")
self.value = DomElement.getAttribute("Value")
self.name = "%s %s %s" % (self.selectionCriterion, self.matchesWhen, self.value)
applicableOperationWithoutValue = self.isApplicableOperations[self.matchesWhen]
self.isApplicableOperation = lambda criterion: applicableOperationWithoutValue(criterion, self.value)
def _isApplicable(self, criteria, childApplicability):
return criteria.operationOnChild([self.selectionCriterion],
self.isApplicableOperation)
class CompoundRule(FromDomElement, DomPopulatedElement, Rule):
"""CompoundRule can be of type ALL or ANY"""
tag = "CompoundRule"
# Declare childClasses but define it at first class instantiation
childClasses = None
def __init__(self, dom):
# Define childClasses at first class instantiation
        if self.childClasses is None :
self.childClasses = [DomElementLocation(CriterionRule),
DomElementLocation(CompoundRule)]
super().__init__(dom)
def _initFromDom(self, DomElement):
type = DomElement.getAttribute("Type")
self.ofTypeAll = {"All" : True, "Any" : False}[type]
self.name = type
def _isApplicable(self, criteria, childApplicability):
if self.ofTypeAll :
applicability = super()._isApplicable(criteria, childApplicability)
else:
# Lazy evaluation as in the PFW
applicability = any(childApplicability)
return applicability
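    # Illustrative note (a sketch, not from the PFW sources): with this
    # semantics All(A, Any(B, C)) is applicable when A and (B or C) hold;
    # note that Python's all([]) is True and any([]) is False, so an empty
    # All matches while an empty Any does not.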
class RootRule(DomPopulatedElement, Rule):
tag = "RootRule"
childClasses = [DomElementLocation(CompoundRule)]
def populate(self, dom):
super().populate(dom)
self.debug("Children: %s" % self.children)
# A configuration can only have one or no rule
assert(len(self.children) <= 1)
def _getCoverageDependanceElements(self):
return self._getDescendants()
class CriteronStates(Element):
"""Root of configuration application criterion state"""
tag = "CriterionStates"
def parentUsed(self, criteria):
"""Add criteria to child if not exist, if exist increase it's nbUse"""
self._incNbUse()
matches = [child for child in self.children if child == criteria]
assert(len(matches) <= 1)
if matches :
self.debug("Criteria state has already been encounter")
currentcriteria = matches[0]
else :
self.debug("Criteria state has never been encounter, saving it")
currentcriteria = criteria
self.addChild(criteria)
currentcriteria.parentUsed()
class Configuration(FromDomElement, DomPopulatedElement):
tag = "Configuration"
childClasses = []
class IneligibleConfigurationAppliedError(CustomError):
def __init__(self, configuration, criteria):
self.configuration = configuration
self.criteria = criteria
def __str__(self):
return ("Applying ineligible %s, "
"rule:\n%s\n"
"Criteria current state:\n%s" % (
self.configuration,
self.configuration.rootRule.dump(withCoverage=False, withNbUse=False),
self.criteria.dump(withCoverage=False, withNbUse=False)
))
def __init__(self, DomElement):
super().__init__(DomElement)
self.rootRule = RootRule("RootRule")
self.addChild(self.rootRule)
self.criteronStates = CriteronStates("CriterionStates")
self.addChild(self.criteronStates)
def populate(self, dom):
# Delegate to rootRule
self.rootRule.populate(dom)
def _getCoverage(self):
# Delegate to rootRule
return self.rootRule._getCoverage()
def used(self, criteria):
self._incNbUse()
# Propagate use to parents
self._tellParentThatChildUsed()
# Propagate to criterion coverage
self.criteronStates.parentUsed(criteria.export())
# Propagate to rules
if not self.rootRule.usedIfApplicable(criteria) :
self.debug("Applied but rule does not match current "
"criteria (parent: %s) " % self.parent.name,
logging.ERROR)
raise self.IneligibleConfigurationAppliedError(self, criteria.export())
def _dumpPropagate(self, withCoverage, withNbUse):
self.debug("Going to ask %s for description" % self.rootRule)
for dumpedDescription in self.rootRule._dumpDescription(
withCoverage=withCoverage,
withNbUse=withNbUse) :
yield dumpedDescription.increasedRank()
self.debug("Going to ask %s for description" % self.criteronStates)
for dumpedDescription in self.criteronStates._dumpDescription(
withCoverage=False,
withNbUse=withNbUse) :
yield dumpedDescription.increasedRank()
class Domain(FromDomElement, DomPopulatedElement):
tag = "ConfigurableDomain"
childClasses = [DomElementLocation(Configuration, ["Configurations"])]
class Domains(DomPopulatedElement):
tag = "Domains"
childClasses = [DomElementLocation(Domain, ["ConfigurableDomains"])]
class RankedLine():
def __init__(self, string,
stringPrefix="|-- ",
rankString="| ",
linePrefix="",
lineSuffix="\n"):
self.string = string
self.rank = 0
self.stringPrefix = stringPrefix
self.rankString = rankString
self.linePrefix = linePrefix
self.lineSuffix = lineSuffix
def increasedRank(self):
self.rank += 1
return self
def __str__(self):
return self.linePrefix + \
self.rank * self.rankString + \
self.stringPrefix + \
self.string + \
self.lineSuffix
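# Illustrative sketch (not part of the original tool): each increasedRank()
# call adds one "| " level in front of the "|-- " item marker.
def _rankedLineExample():
    line = RankedLine("leaf")
    line.increasedRank()
    assert str(line) == "| |-- leaf\n"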
class DebugRankedLine(RankedLine):
def __init__(self, string, lineSuffix=""):
super().__init__(string,
stringPrefix="",
rankString=" ",
linePrefix="",
lineSuffix=lineSuffix)
class CriterionState(Element):
tag = "CriterionState"
def used(self):
self._incNbUse()
class Criterion(Element):
tag = "Criterion"
inclusivenessTranslate = {True: "Inclusive", False: "Exclusive"}
class ChangeRequestToNonAccessibleState(CustomError):
def __init__(self, requestedState, detail):
self.requestedState = requestedState
self.detail = detail
def __str__(self):
return ("Change request to non accessible state %s. Detail: %s" %
(self.requestedState, self.detail))
def __init__(self, name, isInclusif,
stateNamesList, currentStateNamesList,
ignoreIntegrity=False):
super().__init__(name)
self.isInclusif = isInclusif
for state in stateNamesList :
self.addChild(CriterionState(state))
self.currentState = []
self.initStateNamesList = list(currentStateNamesList)
self.changeState(self.initStateNamesList, ignoreIntegrity)
def reset(self):
# Set current state as provided at initialisation
self.changeState(self.initStateNamesList, ignoreIntegrity=True)
def changeState(self, subStateNames, ignoreIntegrity=False):
self.debug("Changing state from: %s to: %s" % (
list(self._getElementNames(self.currentState)),
subStateNames))
if not ignoreIntegrity and not self.isIntegre(subStateNames):
raise self.ChangeRequestToNonAccessibleState(subStateNames,
"An exclusive criterion must have a non empty state")
newCurrentState = []
for subStateName in subStateNames :
subState = self.getChildFromName(subStateName)
subState.used()
newCurrentState.append(subState)
self.currentState = newCurrentState
self._incNbUse()
self._tellParentThatChildUsed()
def isIntegre(self, subStateNames):
return self.isInclusif or len(subStateNames) == 1
def childUsed(self, child):
self.currentState = child
super().childUsed(child)
def export(self):
subStateNames = self._getElementNames(self.currentState)
return Criterion(self.name, self.isInclusif, subStateNames, subStateNames,
ignoreIntegrity=True)
def stateIncludes(self, subStateName):
subStateCurrentNames = list(self._getElementNames(self.currentState))
self.debug("Testing if %s is included in %s" % (subStateName, subStateCurrentNames))
isIncluded = subStateName in subStateCurrentNames
self.debug("IsIncluded: %s" % isIncluded)
return isIncluded
    def stateIs(self, subStateName):
        if len(self.currentState) != 1 :
            return False
        else :
            return self.stateIncludes(subStateName)
def _getXMLAttributes(self):
attributes = super()._getXMLAttributes()
attributes["Type"] = self.inclusivenessTranslate[self.isInclusif]
return attributes
class Criteria(Element):
tag = "Criteria"
class DuplicatedCriterionError(DuplicatedChildError):
pass
def export(self):
self.debug("Exporting criteria")
assert(self.children)
exported = Criteria(self.name)
for child in self.children :
exported.addChild(child.export())
return exported
def addChild(self, child):
if child in self.children:
raise self.DuplicatedCriterionError(self, child)
super().addChild(child)
class ConfigAppliedWithoutCriteriaError(CustomError):
def __init__(self, configurationName, domainName):
self.configurationName = configurationName
self.domainName = domainName
def __str__(self):
return ('Applying configuration "%s" from domain "%s" before declaring criteria' %
(self.configurationName, self.domainName))
class ParsePFWlog():
MATCH = "match"
ACTION = "action"
def __init__(self, domains, criteria, ErrorsToIgnore=()):
        self.domains = domains
        self.criteria = criteria
self.ErrorsToIgnore = ErrorsToIgnore
        configApplicationRegex = r""".*Applying configuration "(.*)" from domain "([^"]*)"""
        matchConfigApplicationLine = re.compile(configApplicationRegex).match
criterionCreationRegext = ", ".join([
r""".*Criterion name: (.*)""",
r"""type kind: (.*)""",
r"""current state: (.*)""",
r"""states: {(.*)}"""
])
        matchCriterionCreationLine = re.compile(criterionCreationRegex).match
        changingCriterionRegex = r""".*Selection criterion changed event: Criterion name: (.*), current state: ([^\n\r]*)"""
        matchChangingCriterionLine = re.compile(changingCriterionRegex).match
self.lineLogTypes = [
{
self.MATCH: matchConfigApplicationLine,
self.ACTION: self._configApplication
}, {
self.MATCH: matchCriterionCreationLine,
self.ACTION: self._criterionCreation
}, {
self.MATCH: matchChangingCriterionLine,
self.ACTION: self._changingCriterion
}
]
@staticmethod
def _formatCriterionList(liststring, separator):
        stateList = liststring.split(separator)
        if len(stateList) == 1 and stateList[0] == "<none>":
            stateList = []
        return stateList
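    # Illustrative examples (sketch): the PFW prints "<none>" for an empty
    # state list, so:
    #     _formatCriterionList("State1|State2", "|") -> ["State1", "State2"]
    #     _formatCriterionList("<none>", "|")        -> []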
def _criterionCreation(self, matchCriterionCreation):
# Unpack
criterionName, criterionType, currentCriterionStates, criterionStates = matchCriterionCreation.group(1, 2, 3, 4)
criterionStateList = self._formatCriterionList(criterionStates, ", ")
criterionIsInclusif = {"exclusive" : False, "inclusive" : True}[criterionType]
currentcriterionStateList = self._formatCriterionList(currentCriterionStates, "|")
logger.info("Creating criterion: " + criterionName +
" (" + criterionType + ") " +
" with current state: " + str(currentcriterionStateList) +
", possible states:" + str(criterionStateList))
try:
self.criteria.addChild(Criterion(
criterionName,
criterionIsInclusif,
criterionStateList,
currentcriterionStateList
))
except self.criteria.DuplicatedCriterionError as ex:
logger.debug(ex)
logger.warning("Reseting criterion %s. Did you reset the PFW ?" % criterionName)
self.criteria.operationOnChild(
[criterionName],
lambda criterion: criterion.reset()
)
def _changingCriterion(self, matchChangingCriterion):
# Unpack
criterionName, newCriterionSubStateNames = matchChangingCriterion.group(1, 2)
newCriterionState = self._formatCriterionList(newCriterionSubStateNames, "|")
logger.info("Changing criterion %s to %s" % (criterionName , newCriterionState))
path = [criterionName]
changeCriterionOperation = lambda criterion : criterion.changeState(newCriterionState)
self.criteria.operationOnChild(path, changeCriterionOperation)
def _configApplication(self, matchConfig):
# Unpack
configurationName, domainName = matchConfig.group(1, 2)
# Check that at least one criterion exist
if not self.criteria.hasChildren() :
logger.error("Applying configuration before declaring criteria")
logger.info("Is the log starting at PFW boot ?")
raise ConfigAppliedWithoutCriteriaError(configurationName, domainName)
# Change criterion state
path = [domainName, configurationName]
usedOperation = lambda element : element.used(self.criteria)
logger.info("Applying configuration %s from domain %s" % (
configurationName, domainName))
self.domains.operationOnChild(path, usedOperation)
def _digest(self, lineLogType, lineLog):
match = lineLogType[self.MATCH](lineLog)
if match :
lineLogType[self.ACTION](match)
return True
return False
def parsePFWlog(self, lines):
for lineNb, lineLog in enumerate(lines):
logger.debug("Parsing line :%s" % lineLog.rstrip())
digested = (self._digest(lineLogType, lineLog)
for lineLogType in self.lineLogTypes)
try:
success = any(digested)
            # Catch CustomError in order to log the current parsing line,
            # then re-raise it unless it is one of the errors to ignore
except CustomError as ex:
logger.error('Error raised while parsing line %s: "%s"' %
(lineNb, repr(lineLog)))
# If exception is a subclass of ErrorsToIgnore, log it and continue
# otherwise raise it again.
if not issubclass(type(ex), self.ErrorsToIgnore):
raise ex
else:
                    logger.error('Ignoring exception: "%s", '
                                 'cannot guarantee database integrity' % ex)
else:
if not success:
logger.debug("Line does not match, dropped")
class Root(Element):
tag = "CoverageReport"
def __init__(self, name, dom):
super().__init__(name)
# Create domain tree
self.domains = Domains("Domains")
self.domains.populate(dom)
self.addChild(self.domains)
# Create criterion list
self.criteria = Criteria("CriterionRoot")
self.addChild(self.criteria)
def exportToXML(self):
"""Export tree to an xml document"""
impl = xml.dom.minidom.getDOMImplementation()
document = impl.createDocument(namespaceURI=None, qualifiedName=self.tag, doctype=None)
super().exportToXML(document, document.documentElement)
return document
# ============================
# Command line argument parser
# ============================
class ArgumentParser:
"""class that parse command line arguments with argparse library
Result of parsing are the class attributes.
"""
levelTranslate = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
def __init__(self):
try:
# As argparse is only in the stdlib since python 3.2,
# testing its availability
import argparse
except ImportError:
logger.warning("Unable to import argparse "
"(parser for command-line options and arguments), "
"using default argument values:")
logger.warning(" - InputFile: stdin")
self.inputFile = sys.stdin
logger.warning(" - OutputFile: stdout")
self.outputFile = sys.stdout
try:
self.domainsFile = sys.argv[1]
except IndexError as ex:
logger.fatal("No domain file provided (first argument)")
raise ex
else:
logger.warning(" - Domain file: " + self.domainsFile)
logger.warning(" - Output format: xml")
self.XMLreport = True
logger.warning(" - Debug level: error")
self.debugLevel = logging.ERROR
else :
myArgParser = argparse.ArgumentParser(description='Generate PFW report')
myArgParser.add_argument(
'domainsFile',
type=argparse.FileType('r'),
help="the PFW domain XML file"
)
myArgParser.add_argument(
'pfwlog', nargs='?',
type=argparse.FileType('r'), default=sys.stdin,
help="the PFW log file, default stdin"
)
myArgParser.add_argument(
'-o', '--output',
dest="outputFile",
type=argparse.FileType('w'), default=sys.stdout,
help="the coverage report output file, default stdout"
)
myArgParser.add_argument(
'-v', '--verbose',
dest="debugLevel", default=0,
action='count',
help="print debug warnings from warning (default) to debug (-vv)"
)
            outputFormatGroup = myArgParser.add_mutually_exclusive_group(required=False)
            outputFormatGroup.add_argument(
                '--xml',
                dest="xmlFlag",
                action='store_true',
                help="XML coverage output report"
            )
            outputFormatGroup.add_argument(
                '--raw',
                dest="rawFlag",
                action='store_true',
                help="raw coverage output report"
            )
)
myArgParser.add_argument(
'--ignore-incoherent-criterion-state',
dest="incoherentCriterionFlag",
action='store_true',
help="ignore criterion transition to incoherent state"
)
myArgParser.add_argument(
'--ignore-ineligible-configuration-application',
dest="ineligibleConfigurationApplicationFlag",
action='store_true',
help="ignore application of configuration with a false rule "
"(not applicable configuration)"
)
# Process command line arguments
options = myArgParser.parse_args()
# Mapping to attributes
self.inputFile = options.pfwlog
self.outputFile = options.outputFile
self.domainsFile = options.domainsFile
# Output report in xml if flag not set
self.XMLreport = not options.rawFlag
# Setting logger level
levelCapped = min(options.debugLevel, len(self.levelTranslate) - 1)
self.debugLevel = self.levelTranslate[levelCapped]
# Setting ignore options
errorToIgnore = []
if options.ineligibleConfigurationApplicationFlag :
errorToIgnore.append(Configuration.IneligibleConfigurationAppliedError)
if options.incoherentCriterionFlag:
errorToIgnore.append(Criterion.ChangeRequestToNonAccessibleState)
self.errorToIgnore = tuple(errorToIgnore)
def main():
errorDuringLogParsing = -1
errorDuringArgumentParsing = 1
try:
commandLineArguments = ArgumentParser()
except LookupError as ex:
logger.error("Error during argument parsing")
logger.debug(str(ex))
sys.exit(errorDuringArgumentParsing)
# Setting logger level
logger.setLevel(commandLineArguments.debugLevel)
logger.info("Log level set to: %s" %
logging.getLevelName(commandLineArguments.debugLevel))
# Create tree from XML
dom = xml.dom.minidom.parse(commandLineArguments.domainsFile)
# Create element tree
root = Root("DomainCoverage", dom)
# Parse PFW events
parser = ParsePFWlog(root.domains, root.criteria, commandLineArguments.errorToIgnore)
try:
parser.parsePFWlog(commandLineArguments.inputFile.readlines())
except CustomError as ex:
logger.fatal("Error during parsing log file %s: %s" %
(commandLineArguments.inputFile, ex))
sys.exit(errorDuringLogParsing)
# Output report
outputFile = commandLineArguments.outputFile
if not commandLineArguments.XMLreport :
outputFile.write("%s\n" % root.dump(withCoverage=True, withNbUse=True))
else :
outputFile.write(root.exportToXML().toprettyxml())
if __name__ == "__main__" :
""" Execute main if the python interpreter is running this module as the main program """
main()
|
bsd-3-clause
|
Softmotions/edx-platform
|
lms/djangoapps/instructor_task/api_helper.py
|
102
|
15417
|
"""
Helper lib for instructor_tasks API.
Includes methods to check args for rescoring task, encoding student input,
and task submission logic, including handling the Celery backend.
"""
import hashlib
import json
import logging
from django.utils.translation import ugettext as _
from celery.result import AsyncResult
from celery.states import READY_STATES, SUCCESS, FAILURE, REVOKED
from courseware.module_render import get_xqueue_callback_url_prefix
from courseware.courses import get_problems_in_section
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import UsageKey
from instructor_task.models import InstructorTask, PROGRESS
log = logging.getLogger(__name__)
class AlreadyRunningError(Exception):
"""Exception indicating that a background task is already running"""
pass
def _task_is_running(course_id, task_type, task_key):
"""Checks if a particular task is already running"""
running_tasks = InstructorTask.objects.filter(
course_id=course_id, task_type=task_type, task_key=task_key
)
# exclude states that are "ready" (i.e. not "running", e.g. failure, success, revoked):
for state in READY_STATES:
running_tasks = running_tasks.exclude(task_state=state)
return len(running_tasks) > 0
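# Illustrative note (an assumption about celery internals): READY_STATES
# contains SUCCESS, FAILURE and REVOKED, so the exclusion loop above keeps
# only tasks that are still pending or running.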
def _reserve_task(course_id, task_type, task_key, task_input, requester):
"""
Creates a database entry to indicate that a task is in progress.
Throws AlreadyRunningError if the task is already in progress.
Includes the creation of an arbitrary value for task_id, to be
submitted with the task call to celery.
The InstructorTask.create method makes sure the InstructorTask entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
Note that there is a chance of a race condition here, when two users
try to run the same task at almost exactly the same time. One user
could be after the check and before the create when the second user
gets to the check. At that point, both users are able to run their
tasks simultaneously. This is deemed a small enough risk to not
put in further safeguards.
"""
if _task_is_running(course_id, task_type, task_key):
log.warning("Duplicate task found for task_type %s and task_key %s", task_type, task_key)
raise AlreadyRunningError("requested task is already running")
try:
most_recent_id = InstructorTask.objects.latest('id').id
except InstructorTask.DoesNotExist:
most_recent_id = "None found"
finally:
log.warning(
"No duplicate tasks found: task_type %s, task_key %s, and most recent task_id = %s",
task_type,
task_key,
most_recent_id
)
# Create log entry now, so that future requests will know it's running.
return InstructorTask.create(course_id, task_type, task_key, task_input, requester)
def _get_xmodule_instance_args(request, task_id):
"""
Calculate parameters needed for instantiating xmodule instances.
The `request_info` will be passed to a tracking log function, to provide information
about the source of the task request. The `xqueue_callback_url_prefix` is used to
permit old-style xqueue callbacks directly to the appropriate module in the LMS.
The `task_id` is also passed to the tracking log function.
"""
request_info = {'username': request.user.username,
'ip': request.META['REMOTE_ADDR'],
'agent': request.META.get('HTTP_USER_AGENT', ''),
'host': request.META['SERVER_NAME'],
}
xmodule_instance_args = {'xqueue_callback_url_prefix': get_xqueue_callback_url_prefix(request),
'request_info': request_info,
'task_id': task_id,
}
return xmodule_instance_args
def _update_instructor_task(instructor_task, task_result):
"""
    Updates and possibly saves an InstructorTask entry based on a task Result.
Used when updated status is requested.
The `instructor_task` that is passed in is updated in-place, but
is usually not saved. In general, tasks that have finished (either with
success or failure) should have their entries updated by the task itself,
so are not updated here. Tasks that are still running are not updated
    and saved while they run. The one exception to the no-save rule is tasks that
are in a "revoked" state. This may mean that the task never had the
opportunity to update the InstructorTask entry.
Tasks that are in progress and have subtasks doing the processing do not look
to the task's AsyncResult object. When subtasks are running, the
InstructorTask object itself is updated with the subtasks' progress,
not any AsyncResult object. In this case, the InstructorTask is
not updated at all.
Calculates json to store in "task_output" field of the `instructor_task`,
as well as updating the task_state.
For a successful task, the json contains the output of the task result.
For a failed task, the json contains "exception", "message", and "traceback"
keys. A revoked task just has a "message" stating it was revoked.
"""
# Pull values out of the result object as close to each other as possible.
# If we wait and check the values later, the values for the state and result
# are more likely to have changed. Pull the state out first, and
# then code assuming that the result may not exactly match the state.
task_id = task_result.task_id
result_state = task_result.state
returned_result = task_result.result
result_traceback = task_result.traceback
# Assume we don't always save the InstructorTask entry if we don't have to,
# but that in most cases we will update the InstructorTask in-place with its
# current progress.
entry_needs_updating = True
entry_needs_saving = False
task_output = None
if instructor_task.task_state == PROGRESS and len(instructor_task.subtasks) > 0:
# This happens when running subtasks: the result object is marked with SUCCESS,
# meaning that the subtasks have successfully been defined. However, the InstructorTask
# will be marked as in PROGRESS, until the last subtask completes and marks it as SUCCESS.
# We want to ignore the parent SUCCESS if subtasks are still running, and just trust the
# contents of the InstructorTask.
entry_needs_updating = False
elif result_state in [PROGRESS, SUCCESS]:
# construct a status message directly from the task result's result:
# it needs to go back with the entry passed in.
log.info("background task (%s), state %s: result: %s", task_id, result_state, returned_result)
task_output = InstructorTask.create_output_for_success(returned_result)
elif result_state == FAILURE:
# on failure, the result's result contains the exception that caused the failure
exception = returned_result
traceback = result_traceback if result_traceback is not None else ''
log.warning("background task (%s) failed: %s %s", task_id, returned_result, traceback)
task_output = InstructorTask.create_output_for_failure(exception, result_traceback)
elif result_state == REVOKED:
# on revocation, the result's result doesn't contain anything
# but we cannot rely on the worker thread to set this status,
# so we set it here.
entry_needs_saving = True
log.warning("background task (%s) revoked.", task_id)
task_output = InstructorTask.create_output_for_revoked()
# save progress and state into the entry, even if it's not being saved:
# when celery is run in "ALWAYS_EAGER" mode, progress needs to go back
# with the entry passed in.
if entry_needs_updating:
instructor_task.task_state = result_state
if task_output is not None:
instructor_task.task_output = task_output
if entry_needs_saving:
instructor_task.save()
def get_updated_instructor_task(task_id):
"""
Returns InstructorTask object corresponding to a given `task_id`.
If the InstructorTask thinks the task is still running, then
the task's result is checked to return an updated state and output.
"""
# First check if the task_id is known
try:
instructor_task = InstructorTask.objects.get(task_id=task_id)
except InstructorTask.DoesNotExist:
log.warning("query for InstructorTask status failed: task_id=(%s) not found", task_id)
return None
# if the task is not already known to be done, then we need to query
# the underlying task's result object:
if instructor_task.task_state not in READY_STATES:
result = AsyncResult(task_id)
_update_instructor_task(instructor_task, result)
return instructor_task
def get_status_from_instructor_task(instructor_task):
"""
Get the status for a given InstructorTask entry.
Returns a dict, with the following keys:
'task_id': id assigned by LMS and used by celery.
'task_state': state of task as stored in celery's result store.
'in_progress': boolean indicating if task is still running.
'task_progress': dict containing progress information. This includes:
'attempted': number of attempts made
'succeeded': number of attempts that "succeeded"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
'duration_ms': how long the task has (or had) been running.
'exception': name of exception class raised in failed tasks.
'message': returned for failed and revoked tasks.
'traceback': optional, returned if task failed and produced a traceback.
"""
status = {}
if instructor_task is not None:
# status basic information matching what's stored in InstructorTask:
status['task_id'] = instructor_task.task_id
status['task_state'] = instructor_task.task_state
status['in_progress'] = instructor_task.task_state not in READY_STATES
if instructor_task.task_output is not None:
status['task_progress'] = json.loads(instructor_task.task_output)
return status
def check_arguments_for_rescoring(usage_key):
"""
Do simple checks on the descriptor to confirm that it supports rescoring.
Confirms first that the usage_key is defined (since that's currently typed
in). An ItemNotFoundException is raised if the corresponding module
descriptor doesn't exist. NotImplementedError is raised if the
corresponding module doesn't support rescoring calls.
"""
descriptor = modulestore().get_item(usage_key)
if not hasattr(descriptor, 'module_class') or not hasattr(descriptor.module_class, 'rescore_problem'):
msg = "Specified module does not support rescoring."
raise NotImplementedError(msg)
def check_entrance_exam_problems_for_rescoring(exam_key): # pylint: disable=invalid-name
"""
Grabs all problem descriptors in exam and checks each descriptor to
confirm that it supports re-scoring.
An ItemNotFoundException is raised if the corresponding module
descriptor doesn't exist for exam_key. NotImplementedError is raised if
    any of the problems in the entrance exam don't support re-scoring calls.
"""
problems = get_problems_in_section(exam_key).values()
if any(not hasattr(problem, 'module_class') or not hasattr(problem.module_class, 'rescore_problem')
for problem in problems):
msg = _("Not all problems in entrance exam support re-scoring.")
raise NotImplementedError(msg)
def encode_problem_and_student_input(usage_key, student=None): # pylint: disable=invalid-name
"""
Encode optional usage_key and optional student into task_key and task_input values.
Args:
usage_key (Location): The usage_key identifying the problem.
student (User): the student affected
"""
assert isinstance(usage_key, UsageKey)
if student is not None:
task_input = {'problem_url': usage_key.to_deprecated_string(), 'student': student.username}
task_key_stub = "{student}_{problem}".format(student=student.id, problem=usage_key.to_deprecated_string())
else:
task_input = {'problem_url': usage_key.to_deprecated_string()}
task_key_stub = "_{problem}".format(problem=usage_key.to_deprecated_string())
# create the key value by using MD5 hash:
task_key = hashlib.md5(task_key_stub).hexdigest()
return task_input, task_key
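# Illustrative example (hypothetical values): for student id 42 and problem
# url "i4x://Org/Course/problem/p1", the stub is
# "42_i4x://Org/Course/problem/p1" and task_key is its fixed-length,
# 32-character hex MD5 digest.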
def encode_entrance_exam_and_student_input(usage_key, student=None): # pylint: disable=invalid-name
"""
Encode usage_key and optional student into task_key and task_input values.
Args:
usage_key (Location): The usage_key identifying the entrance exam.
student (User): the student affected
"""
assert isinstance(usage_key, UsageKey)
if student is not None:
task_input = {'entrance_exam_url': unicode(usage_key), 'student': student.username}
task_key_stub = "{student}_{entranceexam}".format(student=student.id, entranceexam=unicode(usage_key))
else:
task_input = {'entrance_exam_url': unicode(usage_key)}
task_key_stub = "_{entranceexam}".format(entranceexam=unicode(usage_key))
# create the key value by using MD5 hash:
task_key = hashlib.md5(task_key_stub).hexdigest()
return task_input, task_key
def submit_task(request, task_type, task_class, course_key, task_input, task_key):
"""
Helper method to submit a task.
Reserves the requested task, based on the `course_key`, `task_type`, and `task_key`,
checking to see if the task is already running. The `task_input` is also passed so that
it can be stored in the resulting InstructorTask entry. Arguments are extracted from
the `request` provided by the originating server request. Then the task is submitted to run
asynchronously, using the specified `task_class` and using the task_id constructed for it.
`AlreadyRunningError` is raised if the task is already running.
The _reserve_task method makes sure the InstructorTask entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# check to see if task is already running, and reserve it otherwise:
instructor_task = _reserve_task(course_key, task_type, task_key, task_input, request.user)
# submit task:
task_id = instructor_task.task_id
task_args = [instructor_task.id, _get_xmodule_instance_args(request, task_id)] # pylint: disable=no-member
task_class.apply_async(task_args, task_id=task_id)
return instructor_task
|
agpl-3.0
|
AnotherBobSmith/CLUZ
|
cluz_dialog3.py
|
1
|
15904
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
A QGIS plugin
CLUZ for QGIS
-------------------
begin : 2016-23-02
copyright : (C) 2016 by Bob Smith, DICE
email : r.j.smith@kent.ac.uk
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
import qgis
import os
import csv
import cluz_setup
import cluz_functions1
import cluz_functions3
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/forms")
from cluz_form_target import Ui_targetDialog
from cluz_form_abund_select import Ui_abundSelectDialog
from cluz_form_abund import Ui_abundDialog
from cluz_form_change import Ui_ChangeStatusDialog
from cluz_form_identify import Ui_identifyDialog
from cluz_form_met import Ui_metDialog
class targetDialog(QDialog, Ui_targetDialog):
def __init__(self, iface, setupObject):
QDialog.__init__(self)
self.iface = iface
self.setupUi(self)
self.clip = QApplication.clipboard()
targetDict = cluz_setup.makeTargetDict(setupObject)
if targetDict != "blank":
setupObject.targetDict = targetDict
self.loadTargetDictData(setupObject)
def loadTargetDictData(self, setupObject):
decPrec = setupObject.decimalPlaces
targetCSVFilePath = setupObject.targetPath
decPrecHeaderNameList = ["target", "spf", "conserved", "total"] # List of columns that will be changed to decimal precision
pcValueUpdate = False
with open(targetCSVFilePath, 'rb') as f:
targetReader = csv.reader(f)
targetHeaderList = targetReader.next()
lowerHeaderList = []
for aHeader in targetHeaderList:
lowerHeaderList.append(aHeader.lower())
self.targetTableWidget.clear()
self.targetTableWidget.setColumnCount(len(targetHeaderList))
insertRowNumber = 0
for aRow in targetReader:
pcValue = aRow[lowerHeaderList.index("pc_target")]
targetValue = float(aRow[lowerHeaderList.index("target")])
consValue = float(aRow[lowerHeaderList.index("conserved")])
if targetValue <= 0:
limboPCValue = "-1"
else:
limboPCValue = consValue / targetValue
limboPCValue *= 100
limboPCValue = cluz_setup.returnRoundedValue(setupObject, limboPCValue)
if float(limboPCValue) != float(pcValue):
pcValueUpdate = True
aRow[lowerHeaderList.index("pc_target")] = limboPCValue
addTargetTableRow(self, aRow, targetHeaderList, decPrecHeaderNameList, insertRowNumber, decPrec)
insertRowNumber += 1
self.targetTableWidget.setHorizontalHeaderLabels(targetHeaderList)
for aColValue in range(len(targetHeaderList)):
self.targetTableWidget.resizeColumnToContents(aColValue)
if pcValueUpdate == True:
cluz_setup.updateTargetCSVFromTargetDict(setupObject, setupObject.targetDict)
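    # Illustrative note (sketch): pc_target is recomputed above as
    # 100 * conserved / target (or -1 when the target is 0 or negative), and
    # the target CSV is rewritten only when a stored value was out of date.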
# http://stackoverflow.com/questions/24971305/copy-pyqt-table-selection-including-column-and-row-headers
def keyPressEvent(self, e):
if (e.modifiers() & Qt.ControlModifier):
selected = self.targetTableWidget.selectedRanges()
if e.key() == Qt.Key_C: #copy
s = ""
for r in xrange(selected[0].topRow(), selected[0].bottomRow() + 1):
for c in xrange(selected[0].leftColumn(), selected[0].rightColumn()+1):
try:
s += str(self.targetTableWidget.item(r, c).text()) + "\t"
except AttributeError:
s += "\t"
s = s[:-1] + "\n" #eliminate last '\t'
self.clip.setText(s)
def addTargetTableRow(self, aRow, targetHeaderList, decPrecHeaderNameList, insertRowNumber, decPrec):
self.targetTableWidget.insertRow(insertRowNumber)
for aColValue in range(len(targetHeaderList)):
headerName = targetHeaderList[aColValue].lower()
tableValue = aRow[aColValue]
if headerName in decPrecHeaderNameList:
tableValue = round(float(tableValue), decPrec)
tableValue = format(tableValue, "." + str(decPrec) + "f")
targTableItem = QTableWidgetItem(str(tableValue))
if headerName == "target":
targetValue = tableValue
elif headerName == "conserved":
conservedValue = tableValue
if headerName == "pc_target" and str(tableValue) == "-1":
targTableItem.setTextColor(QColor.fromRgb(128, 128, 128))
elif headerName == "pc_target" and float(tableValue) >= 0:
if float(conservedValue) < float(targetValue):
targTableItem.setTextColor(QColor.fromRgb(255, 0, 0))
else:
targTableItem.setTextColor(QColor.fromRgb(0, 102, 51))
self.targetTableWidget.setItem(insertRowNumber, aColValue, targTableItem)
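    # Illustrative summary (not from the CLUZ docs): pc_target cells are
    # coloured grey for features with no target (-1), red while the conserved
    # amount is below the target, and green once the target has been met.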
class abundSelectDialog(QDialog, Ui_abundSelectDialog):
def __init__(self, iface, setupObject):
QDialog.__init__(self)
self.iface = iface
self.setupUi(self)
featStringDict = self.loadAbundSelectFeatureList(setupObject)
QObject.connect(self.okButton, SIGNAL("clicked()"), lambda: self.displayAbundValues(setupObject, featStringDict))
def loadAbundSelectFeatureList(self, setupObject):
featIDList = setupObject.targetDict.keys()
featIDList.sort()
featStringList = []
featStringDict = {}
for aFeat in featIDList:
aString = str(aFeat) + " - " + setupObject.targetDict[aFeat][0]
featStringList.append(aString)
featStringDict[aString] = aFeat
self.featListWidget.addItems(featStringList)
return featStringDict
def displayAbundValues(self, setupObject, featStringDict):
selectedFeatIDList = [featStringDict[item.text()] for item in self.featListWidget.selectedItems()]
if len(selectedFeatIDList) == 0:
selectedFeatIDList = setupObject.targetDict.keys()
self.close()
self.abundDialog = abundDialog(self, setupObject, selectedFeatIDList)
# show the dialog
self.abundDialog.show()
# Run the dialog event loop
result = self.abundDialog.exec_()
class abundDialog(QDialog, Ui_abundDialog):
def __init__(self, iface, setupObject, selectedFeatIDList):
QDialog.__init__(self)
self.iface = iface
self.setupUi(self)
self.clip = QApplication.clipboard()
self.loadAbundDictData(setupObject, selectedFeatIDList)
def loadAbundDictData(self, setupObject, selectedFeatIDList):
decPrec = setupObject.decimalPlaces
abundPUKeyDict = setupObject.abundPUKeyDict
featSet = set(selectedFeatIDList)
abundHeaderList = ["PU_ID"]
for aFeatID in featSet:
abundHeaderList.append("F_" + str(aFeatID))
self.abundTableWidget.clear()
self.abundTableWidget.setColumnCount(len(abundHeaderList))
insertRowNumber = 0
for puID in abundPUKeyDict:
self.abundTableWidget.insertRow(insertRowNumber)
zeroValue = round(0.0, decPrec)
zeroValue = format(zeroValue, "." + str(decPrec) + "f")
blankString = str(zeroValue)
puStringList = [blankString] * len(featSet)
puAbundDict = abundPUKeyDict[puID]
for featID in puAbundDict:
if featID in featSet:
featAmount = puAbundDict[featID]
featAmount = round(float(featAmount), decPrec)
featAmount = format(featAmount, "." + str(decPrec) + "f")
featIndex = list(featSet).index(featID)
puStringList[featIndex] = str(featAmount)
puStringList.insert(0, str(puID))
for aColValue in range(len(puStringList)):
featValue = puStringList[aColValue]
abundTableItem = QTableWidgetItem(str(featValue))
self.abundTableWidget.setItem(insertRowNumber, aColValue, abundTableItem)
insertRowNumber += 1
self.abundTableWidget.setHorizontalHeaderLabels(abundHeaderList)
for aColValue in range(len(abundHeaderList)):
self.abundTableWidget.resizeColumnToContents(aColValue)
# http://stackoverflow.com/questions/24971305/copy-pyqt-table-selection-including-column-and-row-headers
def keyPressEvent(self, e):
if (e.modifiers() & Qt.ControlModifier):
selected = self.abundTableWidget.selectedRanges()
if e.key() == Qt.Key_C: #copy
s = ""
for r in xrange(selected[0].topRow(), selected[0].bottomRow() + 1):
for c in xrange(selected[0].leftColumn(), selected[0].rightColumn()+1):
try:
s += str(self.abundTableWidget.item(r, c).text()) + "\t"
except AttributeError:
s += "\t"
s = s[:-1] + "\n" #eliminate last '\t'
self.clip.setText(s)
class changeStatusDialog(QDialog, Ui_ChangeStatusDialog):
def __init__(self, iface, setupObject):
QDialog.__init__(self, None, Qt.WindowStaysOnTopHint)
self.iface = iface
self.setupUi(self)
(targetsMetCount, targetCount) = cluz_functions3.returnTargetsMetTuple(setupObject)
self.targetsMetLabel.setText("Targets met: " + str(targetsMetCount) + " of " + str(targetCount))
self.undoButton.setEnabled(False)
QObject.connect(self.changeButton, SIGNAL("clicked()"), lambda: self.changeStatus(setupObject))
QObject.connect(self.undoButton, SIGNAL("clicked()"), lambda: self.undoStatusChange(setupObject))
QObject.connect(self.closeButton, SIGNAL("clicked()"), lambda: self.closeStatusDialog(setupObject))
def changeStatus(self, setupObject):
if self.availableButton.isChecked():
statusType = "Available"
elif self.earmarkedButton.isChecked():
statusType = "Earmarked"
elif self.conservedButton.isChecked():
statusType = "Conserved"
elif self.excludedButton.isChecked():
statusType = "Excluded"
changeLockedPUsBool = self.changeCheckBox.isChecked()
selectedPUIDStatusDict = cluz_functions3.changeStatusPuLayer(setupObject, statusType, changeLockedPUsBool)
changeAbundDict = cluz_functions3.calcChangeAbundDict(setupObject, selectedPUIDStatusDict, statusType)
targetDict = cluz_functions3.updateTargetDictWithChanges(setupObject, changeAbundDict)
setupObject.targetDict = targetDict
cluz_setup.updateTargetCSVFromTargetDict(setupObject, targetDict)
(targetsMetCount, targetCount) = cluz_functions3.returnTargetsMetTuple(setupObject)
self.targetsMetLabel.setText("Targets met: " + str(targetsMetCount) + " of " + str(targetCount))
setupObject.selectedPUIDStatusDict = selectedPUIDStatusDict
self.undoButton.setEnabled(True)
def undoStatusChange(self, setupObject):
canvas = qgis.utils.iface.mapCanvas()
cluz_functions3.undoStatusChangeInPuLayer(setupObject)
newConTotDict = cluz_functions1.returnConTotDict(setupObject)
targetDict = cluz_functions1.updateConTotFieldsTargetDict(setupObject, newConTotDict)
cluz_setup.updateTargetCSVFromTargetDict(setupObject, targetDict)
setupObject.targetDict = targetDict
(targetsMetCount, targetCount) = cluz_functions3.returnTargetsMetTuple(setupObject)
self.targetsMetLabel.setText("Targets met: " + str(targetsMetCount) + " of " + str(targetCount))
setupObject.selectedPUIDStatusDict = "blank"
self.undoButton.setEnabled(False)
canvas.refresh()
def closeStatusDialog(self, setupObject):
self.close()
class identifyDialog(QDialog, Ui_identifyDialog):
def __init__(self, iface, setupObject, point):
QDialog.__init__(self)
self.iface = iface
self.setupUi(self)
selectedPUIDList = cluz_functions3.returnPointPUIDList(setupObject, point)
identDict, targetMetDict = cluz_functions3.makeIdentifyData(setupObject, selectedPUIDList)
titleString = cluz_functions3.setIdentifyDialogWindowTitle(selectedPUIDList, identDict)
if len(identDict.keys()) > 0:
self.identDict = identDict
self.targetMetDict = targetMetDict
self.showIdentifyData()
            self.setWindowTitle(titleString)
def showIdentifyData(self):
self.identifyTableWidget.clear()
self.identifyTableWidget.setColumnCount(7)
cluz_functions3.addIdenitfyDataToTableWidget(self.identifyTableWidget, self.targetMetDict, self.identDict)
headerList = ["ID ", "Name ", "Amount ", "As % of total ", "Target ", "As % of target ", "% of target currently met "]
self.identifyTableWidget.setHorizontalHeaderLabels(headerList)
for aColValue in range(len(headerList)):
self.identifyTableWidget.resizeColumnToContents(aColValue)
class metDialog(QDialog, Ui_metDialog):
def __init__(self, iface, setupObject):
QDialog.__init__(self)
outputPath = setupObject.outputPath
outputName = setupObject.outputName + "_mvbest.txt"
self.metTargetFile = outputPath + os.sep + outputName
self.iface = iface
self.setupUi(self)
self.metLoadTargetDictData()
self.setWindowTitle("Marxan Targets Met table for analysis " + setupObject.outputName)
def metLoadTargetDictData(self):
targetMetDict = {}
with open(self.metTargetFile, 'rb') as f:
targetMetReader = csv.reader(f)
targetMetHeaderList = next(targetMetReader, None)
for row in targetMetReader:
puID = int(row.pop(0))
targetMetDict[puID] = row
targetIDList = targetMetDict.keys()
targetIDList.sort()
self.metTableWidget.clear()
self.metTableWidget.setColumnCount(len(targetMetHeaderList))
insertRowNumber = 0
for aFeat in targetIDList:
self.metTableWidget.insertRow(insertRowNumber)
aRowList = targetMetDict[aFeat]
aRowList.insert(0, aFeat)
for aColValue in range(len(targetMetHeaderList)):
featValue = aRowList[aColValue]
metTableItem = QTableWidgetItem(str(featValue))
                self.metTableWidget.setItem(insertRowNumber, aColValue, metTableItem)
insertRowNumber += 1
self.metTableWidget.setHorizontalHeaderLabels(targetMetHeaderList)
for aColValue in range(len(targetMetHeaderList)):
self.metTableWidget.resizeColumnToContents(aColValue)
|
gpl-2.0
|
cisco-openstack/networking-cisco
|
networking_cisco/plugins/ml2/drivers/cisco/ucsm/config.py
|
2
|
3192
|
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from networking_cisco.plugins.ml2.drivers.cisco.ucsm import constants as const
""" Cisco UCS Manager ML2 Mechanism driver specific configuration.
Following are user configurable options for UCS Manager ML2 Mechanism
driver. The ucsm_username, ucsm_password, and ucsm_ip are
required options. Additional configuration knobs are provided to
pre-create UCS Manager port profiles.
"""
ml2_cisco_ucsm_opts = [
cfg.StrOpt('ucsm_ip',
help=_('Cisco UCS Manager IP address. This is a required field '
'to communicate with a Cisco UCS Manager.')),
cfg.StrOpt('ucsm_username',
help=_('Username for UCS Manager. This is a required field '
'to communicate with a Cisco UCS Manager.')),
cfg.StrOpt('ucsm_password',
secret=True, # do not expose value in the logs
help=_('Password for UCS Manager. This is a required field '
'to communicate with a Cisco UCS Manager.')),
cfg.ListOpt('supported_pci_devs',
default=[const.PCI_INFO_CISCO_VIC_1240,
const.PCI_INFO_INTEL_82599],
                help=_('List of comma separated vendor_id:product_id of '
                       'SR-IOV capable devices supported by this MD. This MD '
                       'supports both VM-FEX and SR-IOV devices.')),
cfg.ListOpt('ucsm_host_list',
help=_('List of comma separated Host:Service Profile tuples '
'providing the Service Profile associated with each '
'Host to be supported by this MD.')),
]
cfg.CONF.register_opts(ml2_cisco_ucsm_opts, "ml2_cisco_ucsm")
def parse_pci_vendor_config():
vendor_list = []
vendor_config_list = cfg.CONF.ml2_cisco_ucsm.supported_pci_devs
for vendor in vendor_config_list:
vendor_product = vendor.split(':')
if len(vendor_product) != 2:
raise cfg.Error(_("UCS Mech Driver: Invalid PCI device "
"config: %s") % vendor)
vendor_list.append(vendor)
return vendor_list
def parse_ucsm_host_config():
host_dict = {}
host_config_list = cfg.CONF.ml2_cisco_ucsm.ucsm_host_list
for host in host_config_list:
host_sp = host.split(':')
if len(host_sp) != 2:
raise cfg.Error(_("UCS Mech Driver: Invalid Host Service "
"Profile config: %s") % host)
key = host_sp[0]
host_dict[key] = host_sp[1]
return host_dict
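# Illustrative configuration sketch (hypothetical values) accepted by the
# two parsers above, as it would appear in the [ml2_cisco_ucsm] section:
#
#     supported_pci_devs = 8086:10ed, 1137:0071
#     ucsm_host_list = compute1:SP1, compute2:SP2
#
# yielding ['8086:10ed', '1137:0071'] and {'compute1': 'SP1',
# 'compute2': 'SP2'} respectively.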
|
apache-2.0
|
mosesfistos1/beetbox
|
beetsplug/filefilter.py
|
13
|
3038
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Malte Ried.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Filter imported files using a regular expression.
"""
from __future__ import division, absolute_import, print_function
import re
from beets import config
from beets.util import bytestring_path
from beets.plugins import BeetsPlugin
from beets.importer import SingletonImportTask
class FileFilterPlugin(BeetsPlugin):
def __init__(self):
super(FileFilterPlugin, self).__init__()
self.register_listener('import_task_created',
self.import_task_created_event)
self.config.add({
'path': '.*'
})
self.path_album_regex = \
self.path_singleton_regex = \
re.compile(bytestring_path(self.config['path'].get()))
if 'album_path' in self.config:
self.path_album_regex = re.compile(
bytestring_path(self.config['album_path'].get()))
if 'singleton_path' in self.config:
self.path_singleton_regex = re.compile(
bytestring_path(self.config['singleton_path'].get()))
def import_task_created_event(self, session, task):
        if task.items:
items_to_import = []
for item in task.items:
if self.file_filter(item['path']):
items_to_import.append(item)
if len(items_to_import) > 0:
task.items = items_to_import
else:
# Returning an empty list of tasks from the handler
# drops the task from the rest of the importer pipeline.
return []
elif isinstance(task, SingletonImportTask):
if not self.file_filter(task.item['path']):
return []
# If not filtered, return the original task unchanged.
return [task]
def file_filter(self, full_path):
"""Checks if the configured regular expressions allow the import
of the file given in full_path.
"""
import_config = dict(config['import'])
full_path = bytestring_path(full_path)
if 'singletons' not in import_config or not import_config[
'singletons']:
# Album
return self.path_album_regex.match(full_path) is not None
else:
# Singleton
return self.path_singleton_regex.match(full_path) is not None
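# Illustrative configuration sketch (hypothetical regexes, not from the
# beets docs):
#
#     filefilter:
#         album_path: .*\.flac$
#         singleton_path: .*/singles/.*
#
# With this config, album imports only keep files whose paths end in
# ".flac", while singleton imports must come from a "singles" folder.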
|
mit
|
40223134/40223134w17
|
static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/sprite.py
|
603
|
55779
|
## pygame - Python Game Library
## Copyright (C) 2000-2003, 2007 Pete Shinners
## (C) 2004 Joe Wreschnig
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## pete@shinners.org
"""pygame module with basic game object classes
This module contains several simple classes to be used within games. There
are the main Sprite class and several Group classes that contain Sprites.
The use of these classes is entirely optional when using Pygame. The classes
are fairly lightweight and only provide a starting place for the code
that is common to most games.
The Sprite class is intended to be used as a base class for the different
types of objects in the game. There is also a base Group class that simply
stores sprites. A game could create new types of Group classes that operate
on specially customized Sprite instances they contain.
The basic Sprite class can draw the Sprites it contains to a Surface. The
Group.draw() method requires that each Sprite have a Surface.image attribute
and a Surface.rect. The Group.clear() method requires these same attributes
and can be used to erase all the Sprites with background. There are also
more advanced Groups: pygame.sprite.RenderUpdates() and
pygame.sprite.OrderedUpdates().
Lastly, this module contains several collision functions. These help find
sprites inside multiple groups that have intersecting bounding rectangles.
To find the collisions, the Sprites are required to have a Surface.rect
attribute assigned.
The groups are designed for high efficiency in removing and adding Sprites
to them. They also allow cheap testing to see if a Sprite already exists in
a Group. A given Sprite can exist in any number of groups. A game could use
some groups to control object rendering, and a completely separate set of
groups to control interaction or player movement. Instead of adding type
attributes or bools to a derived Sprite class, consider keeping the
Sprites inside organized Groups. This will allow for easier lookup later
in the game.
Sprites and Groups manage their relationships with the add() and remove()
methods. These methods can accept a single or multiple group arguments for
membership. The default initializers for these classes also take a
single group or list of groups as arguments for initial membership. It is safe
to repeatedly add and remove the same Sprite from a Group.
While it is possible to design sprite and group classes that don't derive
from the Sprite and AbstractGroup classes below, it is strongly recommended
that you extend those when you create a new Sprite or Group class.
Sprites are not thread safe, so lock them yourself if using threads.
"""
##todo
## a group that holds only the 'n' most recent elements.
## sort of like the GroupSingle class, but holding more
## than one sprite
##
## drawing groups that can 'automatically' store the area
## underneath so they can "clear" without needing a background
## function. obviously a little slower than normal, but nice
## to use in many situations. (also remember it must "clear"
## in the reverse order that it draws :])
##
## the drawing groups should also be able to take a background
## function, instead of just a background surface. the function
## would take a surface and a rectangle on that surface to erase.
##
## perhaps more types of collision functions? the current two
## should handle just about every need, but perhaps more optimized
## specific ones that aren't quite so general but fit into common
## specialized cases.
import pygame
from pygame.rect import Rect
from pygame.time import get_ticks
from operator import truth
# Python 3 does not have the callable function, but an equivalent can be made
# with the hasattr function.
#if 'callable' not in dir(__builtins__):
callable = lambda obj: hasattr(obj, '__call__')
# Don't depend on pygame.mask if it's not there...
try:
from pygame.mask import from_surface
except ImportError:
pass
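# Illustrative usage sketch (an assumption, not part of pygame itself): a
# minimal Sprite subclass providing the image and rect attributes that
# Group.draw() expects. Defined here for documentation; never called.
def _sprite_usage_example():
    class Block(Sprite):
        def __init__(self, color, width, height):
            Sprite.__init__(self)
            self.image = pygame.Surface([width, height])
            self.image.fill(color)
            self.rect = self.image.get_rect()
    return Block((255, 0, 0), 20, 20)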
class Sprite(object):
"""simple base class for visible game objects
pygame.sprite.Sprite(*groups): return Sprite
The base class for visible game objects. Derived classes will want to
override the Sprite.update() method and assign Sprite.image and Sprite.rect
attributes. The initializer can accept any number of Group instances that
the Sprite will become a member of.
When subclassing the Sprite class, be sure to call the base initializer
before adding the Sprite to Groups.
"""
def __init__(self, *groups):
self.__g = {} # The groups the sprite is in
if groups:
self.add(*groups)
def add(self, *groups):
"""add the sprite to groups
Sprite.add(*groups): return None
Any number of Group instances can be passed as arguments. The
Sprite will be added to the Groups it is not already a member of.
"""
has = self.__g.__contains__
for group in groups:
if hasattr(group, '_spritegroup'):
if not has(group):
group.add_internal(self)
self.add_internal(group)
else:
self.add(*group)
def remove(self, *groups):
"""remove the sprite from groups
Sprite.remove(*groups): return None
Any number of Group instances can be passed as arguments. The Sprite
will be removed from the Groups it is currently a member of.
"""
has = self.__g.__contains__
for group in groups:
if hasattr(group, '_spritegroup'):
if has(group):
group.remove_internal(self)
self.remove_internal(group)
else:
self.remove(*group)
def add_internal(self, group):
self.__g[group] = 0
def remove_internal(self, group):
del self.__g[group]
def update(self, *args):
"""method to control sprite behavior
Sprite.update(*args):
The default implementation of this method does nothing; it's just a
convenient "hook" that you can override. This method is called by
Group.update() with whatever arguments you give it.
There is no need to use this method if not using the convenience
method by the same name in the Group class.
"""
pass
def kill(self):
"""remove the Sprite from all Groups
Sprite.kill(): return None
The Sprite is removed from all the Groups that contain it. This won't
change anything about the state of the Sprite. It is possible to
continue to use the Sprite after this method has been called, including
adding it to Groups.
"""
for c in self.__g:
c.remove_internal(self)
self.__g.clear()
def groups(self):
"""list of Groups that contain this Sprite
Sprite.groups(): return group_list
Returns a list of all the Groups that contain this Sprite.
"""
return list(self.__g)
def alive(self):
"""does the sprite belong to any groups
Sprite.alive(): return bool
Returns True when the Sprite belongs to one or more Groups.
"""
return truth(self.__g)
def __repr__(self):
return "<%s sprite(in %d groups)>" % (self.__class__.__name__, len(self.__g))
class DirtySprite(Sprite):
"""a more featureful subclass of Sprite with more attributes
pygame.sprite.DirtySprite(*groups): return DirtySprite
Extra DirtySprite attributes with their default values:
dirty = 1
If set to 1, it is repainted and then set to 0 again.
If set to 2, it is always dirty (repainted each frame;
flag is not reset).
If set to 0, it is not dirty and therefore not repainted again.
blendmode = 0
It's the special_flags argument of Surface.blit; see the blendmodes in
the Surface.blit documentation
source_rect = None
This is the source rect to use. Remember that it is relative to the top
left corner (0, 0) of self.image.
visible = 1
        Normally this is 1. If set to 0, it will not be repainted. (If you
        change visible to 0, you must also set dirty to 1 for the sprite to
        be erased from the screen.)
_layer = 0
A READ ONLY value, it is read when adding it to the LayeredUpdates
group. For details see documentation of sprite.LayeredUpdates.
"""
def __init__(self, *groups):
self.dirty = 1
self.blendmode = 0 # pygame 1.8, referred to as special_flags in
# the documentation of Surface.blit
self._visible = 1
self._layer = 0 # READ ONLY by LayeredUpdates or LayeredDirty
self.source_rect = None
Sprite.__init__(self, *groups)
def _set_visible(self, val):
"""set the visible value (0 or 1) and makes the sprite dirty"""
self._visible = val
if self.dirty < 2:
self.dirty = 1
def _get_visible(self):
"""return the visible value of that sprite"""
return self._visible
visible = property(lambda self: self._get_visible(),
lambda self, value: self._set_visible(value),
doc="you can make this sprite disappear without "
"removing it from the group,\n"
"assign 0 for invisible and 1 for visible")
def __repr__(self):
return "<%s DirtySprite(in %d groups)>" % \
(self.__class__.__name__, len(self.groups()))
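# Usage sketch (added for illustration; not part of the original module).
# After moving a DirtySprite, flag it dirty so a LayeredDirty group will
# repaint it on the next draw; sprites with dirty == 2 are always repainted
# and are left alone here.
def _example_move_dirty_sprite(spr, dx, dy):
    spr.rect.move_ip(dx, dy)
    if spr.dirty < 2:
        spr.dirty = 1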
class AbstractGroup(object):
"""base class for containers of sprites
AbstractGroup does everything needed to behave as a normal group. You can
easily subclass a new group class from this or the other groups below if
you want to add more features.
Any AbstractGroup-derived sprite groups act like sequences and support
iteration, len, and so on.
"""
# dummy val to identify sprite groups, and avoid infinite recursion
_spritegroup = True
def __init__(self):
self.spritedict = {}
self.lostsprites = []
def sprites(self):
"""get a list of sprites in the group
        Group.sprites(): return list
Returns an object that can be looped over with a 'for' loop. (For now,
it is always a list, but this could change in a future version of
pygame.) Alternatively, you can get the same information by iterating
directly over the sprite group, e.g. 'for sprite in group'.
"""
return list(self.spritedict)
def add_internal(self, sprite):
self.spritedict[sprite] = 0
def remove_internal(self, sprite):
r = self.spritedict[sprite]
if r:
self.lostsprites.append(r)
del self.spritedict[sprite]
def has_internal(self, sprite):
return sprite in self.spritedict
def copy(self):
"""copy a group with all the same sprites
Group.copy(): return Group
Returns a copy of the group that is an instance of the same class
and has the same sprites in it.
"""
return self.__class__(self.sprites())
def __iter__(self):
return iter(self.sprites())
def __contains__(self, sprite):
return self.has(sprite)
def add(self, *sprites):
"""add sprite(s) to group
Group.add(sprite, list, group, ...): return None
Adds a sprite or sequence of sprites to a group.
"""
for sprite in sprites:
# It's possible that some sprite is also an iterator.
# If this is the case, we should add the sprite itself,
# and not the iterator object.
if isinstance(sprite, Sprite):
if not self.has_internal(sprite):
self.add_internal(sprite)
sprite.add_internal(self)
else:
try:
# See if sprite is an iterator, like a list or sprite
# group.
self.add(*sprite)
except (TypeError, AttributeError):
# Not iterable. This is probably a sprite that is not an
# instance of the Sprite class or is not an instance of a
# subclass of the Sprite class. Alternately, it could be an
# old-style sprite group.
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if not self.has_internal(spr):
self.add_internal(spr)
spr.add_internal(self)
elif not self.has_internal(sprite):
self.add_internal(sprite)
sprite.add_internal(self)
def remove(self, *sprites):
"""remove sprite(s) from group
Group.remove(sprite, list, or group, ...): return None
Removes a sprite or sequence of sprites from a group.
"""
# This function behaves essentially the same as Group.add. It first
# tries to handle each argument as an instance of the Sprite class. If
        # that fails, then it tries to handle the argument as an iterable
        # object. If that fails, then it tries to handle the argument as an
# old-style sprite group. Lastly, if that fails, it assumes that the
# normal Sprite methods should be used.
for sprite in sprites:
if isinstance(sprite, Sprite):
if self.has_internal(sprite):
self.remove_internal(sprite)
sprite.remove_internal(self)
else:
try:
self.remove(*sprite)
except (TypeError, AttributeError):
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if self.has_internal(spr):
self.remove_internal(spr)
spr.remove_internal(self)
elif self.has_internal(sprite):
self.remove_internal(sprite)
sprite.remove_internal(self)
def has(self, *sprites):
"""ask if group has a sprite or sprites
Group.has(sprite or group, ...): return bool
Returns True if the given sprite or sprites are contained in the
group. Alternatively, you can get the same information using the
'in' operator, e.g. 'sprite in group', 'subgroup in group'.
"""
return_value = False
for sprite in sprites:
if isinstance(sprite, Sprite):
# Check for Sprite instance's membership in this group
if self.has_internal(sprite):
return_value = True
else:
return False
else:
try:
if self.has(*sprite):
return_value = True
else:
return False
except (TypeError, AttributeError):
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if self.has_internal(spr):
return_value = True
else:
return False
else:
if self.has_internal(sprite):
return_value = True
else:
return False
return return_value
def update(self, *args):
"""call the update method of every member sprite
Group.update(*args): return None
Calls the update method of every member sprite. All arguments that
were passed to this method are passed to the Sprite update function.
"""
for s in self.sprites():
s.update(*args)
def draw(self, surface):
"""draw all sprites onto the surface
Group.draw(surface): return None
Draws all of the member sprites onto the given surface.
"""
sprites = self.sprites()
surface_blit = surface.blit
for spr in sprites:
self.spritedict[spr] = surface_blit(spr.image, spr.rect)
self.lostsprites = []
def clear(self, surface, bgd):
"""erase the previous position of all sprites
Group.clear(surface, bgd): return None
Clears the area under every drawn sprite in the group. The bgd
argument should be Surface which is the same dimensions as the
screen surface. The bgd could also be a function which accepts
the given surface and the area to be cleared as arguments.
"""
if callable(bgd):
for r in self.lostsprites:
bgd(surface, r)
for r in self.spritedict.values():
if r:
bgd(surface, r)
else:
surface_blit = surface.blit
for r in self.lostsprites:
surface_blit(bgd, r, r)
for r in self.spritedict.values():
if r:
surface_blit(bgd, r, r)
def empty(self):
"""remove all sprites
Group.empty(): return None
Removes all the sprites from the group.
"""
for s in self.sprites():
self.remove_internal(s)
s.remove_internal(self)
def __nonzero__(self):
return truth(self.sprites())
def __len__(self):
"""return number of sprites in group
Group.len(group): return int
Returns the number of sprites contained in the group.
"""
return len(self.sprites())
def __repr__(self):
return "<%s(%d sprites)>" % (self.__class__.__name__, len(self))
class Group(AbstractGroup):
"""container class for many Sprites
pygame.sprite.Group(*sprites): return Group
A simple container for Sprite objects. This class can be subclassed to
create containers with more specific behaviors. The constructor takes any
number of Sprite arguments to add to the Group. The group supports the
following standard Python operations:
in test if a Sprite is contained
len the number of Sprites contained
bool test if any Sprites are contained
iter iterate through all the Sprites
The Sprites in the Group are not ordered, so the Sprites are drawn and
iterated over in no particular order.
"""
def __init__(self, *sprites):
AbstractGroup.__init__(self)
self.add(*sprites)
RenderPlain = Group
RenderClear = Group
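# Usage sketch (added for illustration; not part of the original module).
# The typical per-frame pattern with a Group: erase the old positions,
# update every member, then draw. 'screen' and 'background' are assumed to
# be Surfaces of the same size.
def _example_group_frame(group, screen, background):
    group.clear(screen, background)  # erase previous positions
    group.update()                   # calls Sprite.update() on each member
    group.draw(screen)               # blit each sprite's image at its rect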
class RenderUpdates(Group):
"""Group class that tracks dirty updates
pygame.sprite.RenderUpdates(*sprites): return RenderUpdates
This class is derived from pygame.sprite.Group(). It has an enhanced draw
method that tracks the changed areas of the screen.
"""
def draw(self, surface):
spritedict = self.spritedict
surface_blit = surface.blit
dirty = self.lostsprites
self.lostsprites = []
dirty_append = dirty.append
for s in self.sprites():
r = spritedict[s]
newrect = surface_blit(s.image, s.rect)
if r:
if newrect.colliderect(r):
dirty_append(newrect.union(r))
else:
dirty_append(newrect)
dirty_append(r)
else:
dirty_append(newrect)
spritedict[s] = newrect
return dirty
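# Usage sketch (added for illustration; not part of the original module).
# RenderUpdates.draw returns the changed rectangles; handing them to
# pygame.display.update repaints only the dirty parts of the screen
# (assuming this port exposes pygame.display.update).
def _example_render_updates_frame(group, screen, background):
    group.clear(screen, background)
    group.update()
    dirty_rects = group.draw(screen)
    pygame.display.update(dirty_rects)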
class OrderedUpdates(RenderUpdates):
"""RenderUpdates class that draws Sprites in order of addition
    pygame.sprite.OrderedUpdates(*sprites): return OrderedUpdates
This class derives from pygame.sprite.RenderUpdates(). It maintains
the order in which the Sprites were added to the Group for rendering.
This makes adding and removing Sprites from the Group a little
slower than regular Groups.
"""
def __init__(self, *sprites):
self._spritelist = []
RenderUpdates.__init__(self, *sprites)
def sprites(self):
return list(self._spritelist)
def add_internal(self, sprite):
RenderUpdates.add_internal(self, sprite)
self._spritelist.append(sprite)
def remove_internal(self, sprite):
RenderUpdates.remove_internal(self, sprite)
self._spritelist.remove(sprite)
class LayeredUpdates(AbstractGroup):
"""LayeredUpdates Group handles layers, which are drawn like OrderedUpdates
    pygame.sprite.LayeredUpdates(*sprites, **kwargs): return LayeredUpdates
This group is fully compatible with pygame.sprite.Sprite.
New in pygame 1.8.0
"""
_init_rect = Rect(0, 0, 0, 0)
def __init__(self, *sprites, **kwargs):
"""initialize an instance of LayeredUpdates with the given attributes
You can set the default layer through kwargs using 'default_layer'
and an integer for the layer. The default layer is 0.
If the sprite you add has an attribute _layer, then that layer will be
used. If **kwarg contains 'layer', then the passed sprites will be
added to that layer (overriding the sprite._layer attribute). If
neither the sprite nor **kwarg has a 'layer', then the default layer is
used to add the sprites.
"""
self._spritelayers = {}
self._spritelist = []
AbstractGroup.__init__(self)
self._default_layer = kwargs.get('default_layer', 0)
self.add(*sprites, **kwargs)
def add_internal(self, sprite, layer=None):
"""Do not use this method directly.
It is used by the group to add a sprite internally.
"""
self.spritedict[sprite] = self._init_rect
if layer is None:
try:
layer = sprite._layer
except AttributeError:
layer = sprite._layer = self._default_layer
elif hasattr(sprite, '_layer'):
sprite._layer = layer
sprites = self._spritelist # speedup
sprites_layers = self._spritelayers
sprites_layers[sprite] = layer
# add the sprite at the right position
        # bisect algorithm
leng = len(sprites)
low = mid = 0
high = leng - 1
while low <= high:
mid = low + (high - low) // 2
if sprites_layers[sprites[mid]] <= layer:
low = mid + 1
else:
high = mid - 1
# linear search to find final position
while mid < leng and sprites_layers[sprites[mid]] <= layer:
mid += 1
sprites.insert(mid, sprite)
def add(self, *sprites, **kwargs):
"""add a sprite or sequence of sprites to a group
LayeredUpdates.add(*sprites, **kwargs): return None
If the sprite you add has an attribute _layer, then that layer will be
used. If **kwarg contains 'layer', then the passed sprites will be
added to that layer (overriding the sprite._layer attribute). If
neither the sprite nor **kwarg has a 'layer', then the default layer is
used to add the sprites.
"""
if not sprites:
return
if 'layer' in kwargs:
layer = kwargs['layer']
else:
layer = None
for sprite in sprites:
# It's possible that some sprite is also an iterator.
# If this is the case, we should add the sprite itself,
# and not the iterator object.
if isinstance(sprite, Sprite):
if not self.has_internal(sprite):
self.add_internal(sprite, layer)
sprite.add_internal(self)
else:
try:
# See if sprite is an iterator, like a list or sprite
# group.
self.add(*sprite, **kwargs)
except (TypeError, AttributeError):
# Not iterable. This is probably a sprite that is not an
# instance of the Sprite class or is not an instance of a
# subclass of the Sprite class. Alternately, it could be an
# old-style sprite group.
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if not self.has_internal(spr):
self.add_internal(spr, layer)
spr.add_internal(self)
elif not self.has_internal(sprite):
self.add_internal(sprite, layer)
sprite.add_internal(self)
def remove_internal(self, sprite):
"""Do not use this method directly.
        The group uses it to remove a sprite internally.
"""
self._spritelist.remove(sprite)
# these dirty rects are suboptimal for one frame
r = self.spritedict[sprite]
if r is not self._init_rect:
self.lostsprites.append(r) # dirty rect
if hasattr(sprite, 'rect'):
self.lostsprites.append(sprite.rect) # dirty rect
del self.spritedict[sprite]
del self._spritelayers[sprite]
def sprites(self):
"""return a ordered list of sprites (first back, last top).
LayeredUpdates.sprites(): return sprites
"""
return list(self._spritelist)
def draw(self, surface):
"""draw all sprites in the right order onto the passed surface
LayeredUpdates.draw(surface): return Rect_list
"""
spritedict = self.spritedict
surface_blit = surface.blit
dirty = self.lostsprites
self.lostsprites = []
dirty_append = dirty.append
init_rect = self._init_rect
for spr in self.sprites():
rec = spritedict[spr]
newrect = surface_blit(spr.image, spr.rect)
if rec is init_rect:
dirty_append(newrect)
else:
if newrect.colliderect(rec):
dirty_append(newrect.union(rec))
else:
dirty_append(newrect)
dirty_append(rec)
spritedict[spr] = newrect
return dirty
def get_sprites_at(self, pos):
"""return a list with all sprites at that position
LayeredUpdates.get_sprites_at(pos): return colliding_sprites
Bottom sprites are listed first; the top ones are listed last.
"""
_sprites = self._spritelist
rect = Rect(pos, (0, 0))
colliding_idx = rect.collidelistall(_sprites)
colliding = [_sprites[i] for i in colliding_idx]
return colliding
def get_sprite(self, idx):
"""return the sprite at the index idx from the groups sprites
LayeredUpdates.get_sprite(idx): return sprite
Raises IndexOutOfBounds if the idx is not within range.
"""
return self._spritelist[idx]
def remove_sprites_of_layer(self, layer_nr):
"""remove all sprites from a layer and return them as a list
LayeredUpdates.remove_sprites_of_layer(layer_nr): return sprites
"""
sprites = self.get_sprites_from_layer(layer_nr)
self.remove(*sprites)
return sprites
#---# layer methods
def layers(self):
"""return a list of unique defined layers defined.
LayeredUpdates.layers(): return layers
"""
return sorted(set(self._spritelayers.values()))
def change_layer(self, sprite, new_layer):
"""change the layer of the sprite
LayeredUpdates.change_layer(sprite, new_layer): return None
The sprite must have been added to the renderer already. This is not
checked.
"""
sprites = self._spritelist # speedup
sprites_layers = self._spritelayers # speedup
sprites.remove(sprite)
sprites_layers.pop(sprite)
# add the sprite at the right position
        # bisect algorithm
leng = len(sprites)
low = mid = 0
high = leng - 1
while low <= high:
mid = low + (high - low) // 2
if sprites_layers[sprites[mid]] <= new_layer:
low = mid + 1
else:
high = mid - 1
# linear search to find final position
while mid < leng and sprites_layers[sprites[mid]] <= new_layer:
mid += 1
sprites.insert(mid, sprite)
if hasattr(sprite, 'layer'):
sprite.layer = new_layer
# add layer info
sprites_layers[sprite] = new_layer
def get_layer_of_sprite(self, sprite):
"""return the layer that sprite is currently in
If the sprite is not found, then it will return the default layer.
"""
return self._spritelayers.get(sprite, self._default_layer)
def get_top_layer(self):
"""return the top layer
LayeredUpdates.get_top_layer(): return layer
"""
return self._spritelayers[self._spritelist[-1]]
def get_bottom_layer(self):
"""return the bottom layer
LayeredUpdates.get_bottom_layer(): return layer
"""
return self._spritelayers[self._spritelist[0]]
def move_to_front(self, sprite):
"""bring the sprite to front layer
LayeredUpdates.move_to_front(sprite): return None
Brings the sprite to front by changing the sprite layer to the top-most
layer. The sprite is added at the end of the list of sprites in that
top-most layer.
"""
self.change_layer(sprite, self.get_top_layer())
def move_to_back(self, sprite):
"""move the sprite to the bottom layer
LayeredUpdates.move_to_back(sprite): return None
Moves the sprite to the bottom layer by moving it to a new layer below
the current bottom layer.
"""
self.change_layer(sprite, self.get_bottom_layer() - 1)
def get_top_sprite(self):
"""return the topmost sprite
LayeredUpdates.get_top_sprite(): return Sprite
"""
return self._spritelist[-1]
def get_sprites_from_layer(self, layer):
"""return all sprites from a layer ordered as they where added
LayeredUpdates.get_sprites_from_layer(layer): return sprites
Returns all sprites from a layer. The sprites are ordered in the
sequence that they where added. (The sprites are not removed from the
layer.
"""
sprites = []
sprites_append = sprites.append
sprite_layers = self._spritelayers
for spr in self._spritelist:
if sprite_layers[spr] == layer:
sprites_append(spr)
elif sprite_layers[spr] > layer:# break after because no other will
# follow with same layer
break
return sprites
def switch_layer(self, layer1_nr, layer2_nr):
"""switch the sprites from layer1_nr to layer2_nr
LayeredUpdates.switch_layer(layer1_nr, layer2_nr): return None
        The layer numbers must exist. This method does not check for the
existence of the given layers.
"""
sprites1 = self.remove_sprites_of_layer(layer1_nr)
for spr in self.get_sprites_from_layer(layer2_nr):
self.change_layer(spr, layer1_nr)
self.add(layer=layer2_nr, *sprites1)
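# Usage sketch (added for illustration; not part of the original module).
# Layers are plain integers drawn from lowest to highest; a sprite's
# '_layer' attribute is read once, when the sprite is added to the group.
def _example_layered_updates(background_spr, player_spr, screen):
    background_spr._layer = 0            # drawn first (behind)
    player_spr._layer = 1                # drawn last (in front)
    group = LayeredUpdates(background_spr, player_spr)
    group.move_to_front(background_spr)  # background now covers the player
    return group.draw(screen)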
class LayeredDirty(LayeredUpdates):
"""LayeredDirty Group is for DirtySprites; subclasses LayeredUpdates
    pygame.sprite.LayeredDirty(*sprites, **kwargs): return LayeredDirty
This group requires pygame.sprite.DirtySprite or any sprite that
has the following attributes:
image, rect, dirty, visible, blendmode (see doc of DirtySprite).
It uses the dirty flag technique and is therefore faster than
pygame.sprite.RenderUpdates if you have many static sprites. It
also switches automatically between dirty rect updating and full
    screen drawing, so you do not have to worry about which would be faster.
As with the pygame.sprite.Group, you can specify some additional attributes
through kwargs:
_use_update: True/False (default is False)
_default_layer: default layer where the sprites without a layer are
added
    _time_threshold: threshold time for switching between dirty rect mode
and fullscreen mode; defaults to updating at 80 frames per second,
which is equal to 1000.0 / 80.0
New in pygame 1.8.0
"""
def __init__(self, *sprites, **kwargs):
"""initialize group.
        pygame.sprite.LayeredDirty(*sprites, **kwargs): return LayeredDirty
You can specify some additional attributes through kwargs:
_use_update: True/False (default is False)
_default_layer: default layer where the sprites without a layer are
added
        _time_threshold: threshold time for switching between dirty rect
mode and fullscreen mode; defaults to updating at 80 frames per
second, which is equal to 1000.0 / 80.0
"""
LayeredUpdates.__init__(self, *sprites, **kwargs)
self._clip = None
self._use_update = False
self._time_threshold = 1000.0 / 80.0 # 1000.0 / fps
self._bgd = None
for key, val in kwargs.items():
if key in ['_use_update', '_time_threshold', '_default_layer']:
if hasattr(self, key):
setattr(self, key, val)
def add_internal(self, sprite, layer=None):
"""Do not use this method directly.
It is used by the group to add a sprite internally.
"""
# check if all needed attributes are set
if not hasattr(sprite, 'dirty'):
raise AttributeError()
if not hasattr(sprite, 'visible'):
raise AttributeError()
if not hasattr(sprite, 'blendmode'):
raise AttributeError()
if not isinstance(sprite, DirtySprite):
raise TypeError()
if sprite.dirty == 0: # set it dirty if it is not
sprite.dirty = 1
LayeredUpdates.add_internal(self, sprite, layer)
def draw(self, surface, bgd=None):
"""draw all sprites in the right order onto the given surface
LayeredDirty.draw(surface, bgd=None): return Rect_list
        You can pass the background too; a non-None bgd argument replaces
        any background stored earlier (e.g. through clear()).
"""
# speedups
_orig_clip = surface.get_clip()
_clip = self._clip
if _clip is None:
_clip = _orig_clip
_surf = surface
_sprites = self._spritelist
_old_rect = self.spritedict
_update = self.lostsprites
_update_append = _update.append
_ret = None
_surf_blit = _surf.blit
_rect = Rect
if bgd is not None:
self._bgd = bgd
_bgd = self._bgd
init_rect = self._init_rect
_surf.set_clip(_clip)
# -------
# 0. decide whether to render with update or flip
start_time = get_ticks()
if self._use_update: # dirty rects mode
# 1. find dirty area on screen and put the rects into _update
# still not happy with that part
for spr in _sprites:
if 0 < spr.dirty:
                    # choose the right rect
if spr.source_rect:
_union_rect = _rect(spr.rect.topleft,
spr.source_rect.size)
else:
_union_rect = _rect(spr.rect)
_union_rect_collidelist = _union_rect.collidelist
_union_rect_union_ip = _union_rect.union_ip
i = _union_rect_collidelist(_update)
while -1 < i:
_union_rect_union_ip(_update[i])
del _update[i]
i = _union_rect_collidelist(_update)
_update_append(_union_rect.clip(_clip))
if _old_rect[spr] is not init_rect:
_union_rect = _rect(_old_rect[spr])
_union_rect_collidelist = _union_rect.collidelist
_union_rect_union_ip = _union_rect.union_ip
i = _union_rect_collidelist(_update)
while -1 < i:
_union_rect_union_ip(_update[i])
del _update[i]
i = _union_rect_collidelist(_update)
_update_append(_union_rect.clip(_clip))
            # can this be done better? it is an O(n**2) algorithm in the
            # worst case
# clear using background
if _bgd is not None:
for rec in _update:
_surf_blit(_bgd, rec, rec)
# 2. draw
for spr in _sprites:
if 1 > spr.dirty:
if spr._visible:
# sprite not dirty; blit only the intersecting part
_spr_rect = spr.rect
if spr.source_rect is not None:
_spr_rect = Rect(spr.rect.topleft,
spr.source_rect.size)
_spr_rect_clip = _spr_rect.clip
for idx in _spr_rect.collidelistall(_update):
# clip
clip = _spr_rect_clip(_update[idx])
_surf_blit(spr.image,
clip,
(clip[0] - _spr_rect[0],
clip[1] - _spr_rect[1],
clip[2],
clip[3]),
spr.blendmode)
else: # dirty sprite
if spr._visible:
_old_rect[spr] = _surf_blit(spr.image,
spr.rect,
spr.source_rect,
spr.blendmode)
if spr.dirty == 1:
spr.dirty = 0
_ret = list(_update)
else: # flip, full screen mode
if _bgd is not None:
_surf_blit(_bgd, (0, 0))
for spr in _sprites:
if spr._visible:
_old_rect[spr] = _surf_blit(spr.image,
spr.rect,
spr.source_rect,
spr.blendmode)
_ret = [_rect(_clip)] # return only the part of the screen changed
# timing for switching modes
# How may a good threshold be found? It depends on the hardware.
end_time = get_ticks()
if end_time-start_time > self._time_threshold:
self._use_update = False
else:
self._use_update = True
## # debug
## print " check: using dirty rects:", self._use_update
        # empty the dirty rects list
_update[:] = []
# -------
# restore original clip
_surf.set_clip(_orig_clip)
return _ret
def clear(self, surface, bgd):
"""use to set background
Group.clear(surface, bgd): return None
"""
self._bgd = bgd
def repaint_rect(self, screen_rect):
"""repaint the given area
LayeredDirty.repaint_rect(screen_rect): return None
screen_rect is in screen coordinates.
"""
if self._clip:
self.lostsprites.append(screen_rect.clip(self._clip))
else:
self.lostsprites.append(Rect(screen_rect))
def set_clip(self, screen_rect=None):
"""clip the area where to draw; pass None (default) to reset the clip
LayeredDirty.set_clip(screen_rect=None): return None
"""
if screen_rect is None:
self._clip = pygame.display.get_surface().get_rect()
else:
self._clip = screen_rect
self._use_update = False
def get_clip(self):
"""get the area where drawing will occur
LayeredDirty.get_clip(): return Rect
"""
return self._clip
def change_layer(self, sprite, new_layer):
"""change the layer of the sprite
LayeredUpdates.change_layer(sprite, new_layer): return None
The sprite must have been added to the renderer already. This is not
checked.
"""
LayeredUpdates.change_layer(self, sprite, new_layer)
if sprite.dirty == 0:
sprite.dirty = 1
def set_timing_treshold(self, time_ms):
"""set the treshold in milliseconds
set_timing_treshold(time_ms): return None
Defaults to 1000.0 / 80.0. This means that the screen will be painted
using the flip method rather than the update method if the update
method is taking so long to update the screen that the frame rate falls
below 80 frames per second.
"""
self._time_threshold = time_ms
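# Usage sketch (added for illustration; not part of the original module).
# LayeredDirty expects DirtySprite members; its clear() merely stores the
# background, and draw() decides by itself between dirty-rect updates and a
# full repaint. One frame looks like this:
def _example_layered_dirty_frame(group, screen, background):
    group.clear(screen, background)  # remember the background surface
    group.update()
    dirty = group.draw(screen)       # list of rects that changed
    pygame.display.update(dirty)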
class GroupSingle(AbstractGroup):
"""A group container that holds a single most recent item.
This class works just like a regular group, but it only keeps a single
sprite in the group. Whatever sprite has been added to the group last will
be the only sprite in the group.
You can access its one sprite as the .sprite attribute. Assigning to this
attribute will properly remove the old sprite and then add the new one.
"""
def __init__(self, sprite=None):
AbstractGroup.__init__(self)
self.__sprite = None
if sprite is not None:
self.add(sprite)
def copy(self):
return GroupSingle(self.__sprite)
def sprites(self):
if self.__sprite is not None:
return [self.__sprite]
else:
return []
def add_internal(self, sprite):
if self.__sprite is not None:
self.__sprite.remove_internal(self)
self.remove_internal(self.__sprite)
self.__sprite = sprite
def __nonzero__(self):
return self.__sprite is not None
def _get_sprite(self):
return self.__sprite
def _set_sprite(self, sprite):
self.add_internal(sprite)
sprite.add_internal(self)
return sprite
sprite = property(_get_sprite,
_set_sprite,
None,
"The sprite contained in this group")
def remove_internal(self, sprite):
if sprite is self.__sprite:
self.__sprite = None
if sprite in self.spritedict:
AbstractGroup.remove_internal(self, sprite)
def has_internal(self, sprite):
return self.__sprite is sprite
# Optimizations...
def __contains__(self, sprite):
return self.__sprite is sprite
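# Usage sketch (added for illustration; not part of the original module).
# GroupSingle keeps only the most recently added sprite; assigning to the
# .sprite property swaps the old member out for the new one.
def _example_group_single(first, second):
    single = GroupSingle(first)
    single.sprite = second  # 'first' is removed, 'second' takes its place
    # True, assuming 'first' belonged to no other group:
    return single.sprite is second and not first.alive()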
# Some different collision detection functions that could be used.
def collide_rect(left, right):
"""collision detection between two sprites, using rects.
pygame.sprite.collide_rect(left, right): return bool
Tests for collision between two sprites. Uses the pygame.Rect colliderect
function to calculate the collision. It is intended to be passed as a
collided callback function to the *collide functions. Sprites must have
"rect" attributes.
New in pygame 1.8.0
"""
return left.rect.colliderect(right.rect)
class collide_rect_ratio:
"""A callable class that checks for collisions using scaled rects
The class checks for collisions between two sprites using a scaled version
    of the sprites' rects. It is created with a ratio; the instance is then
intended to be passed as a collided callback function to the *collide
functions.
New in pygame 1.8.1
"""
def __init__(self, ratio):
"""create a new collide_rect_ratio callable
Ratio is expected to be a floating point value used to scale
the underlying sprite rect before checking for collisions.
"""
self.ratio = ratio
def __call__(self, left, right):
"""detect collision between two sprites using scaled rects
pygame.sprite.collide_rect_ratio(ratio)(left, right): return bool
Tests for collision between two sprites. Uses the pygame.Rect
colliderect function to calculate the collision after scaling the rects
by the stored ratio. Sprites must have "rect" attributes.
"""
ratio = self.ratio
leftrect = left.rect
width = leftrect.width
height = leftrect.height
leftrect = leftrect.inflate(width * ratio - width,
height * ratio - height)
rightrect = right.rect
width = rightrect.width
height = rightrect.height
rightrect = rightrect.inflate(width * ratio - width,
height * ratio - height)
return leftrect.colliderect(rightrect)
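# Usage sketch (added for illustration; not part of the original module).
# The ratio classes are callables meant to be passed as the 'collided'
# argument of the *collide helpers defined below; a ratio of 0.8 shrinks
# both rects by 20% first, so only deeper overlaps count as hits.
def _example_strict_hits(player, enemies):
    return spritecollide(player, enemies, False, collide_rect_ratio(0.8))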
def collide_circle(left, right):
"""detect collision between two sprites using circles
pygame.sprite.collide_circle(left, right): return bool
Tests for collision between two sprites by testing whether two circles
centered on the sprites overlap. If the sprites have a "radius" attribute,
then that radius is used to create the circle; otherwise, a circle is
created that is big enough to completely enclose the sprite's rect as
given by the "rect" attribute. This function is intended to be passed as
a collided callback function to the *collide functions. Sprites must have a
"rect" and an optional "radius" attribute.
New in pygame 1.8.0
"""
xdistance = left.rect.centerx - right.rect.centerx
ydistance = left.rect.centery - right.rect.centery
distancesquared = xdistance ** 2 + ydistance ** 2
if hasattr(left, 'radius'):
leftradius = left.radius
else:
leftrect = left.rect
        # approximating the radius of the rect by using half of the diagonal;
        # this might give false positives (especially for long, thin rects)
leftradius = 0.5 * ((leftrect.width ** 2 + leftrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(left, 'radius', leftradius)
if hasattr(right, 'radius'):
rightradius = right.radius
else:
rightrect = right.rect
        # approximating the radius of the rect by using half of the diagonal;
        # this might give false positives (especially for long, thin rects)
rightradius = 0.5 * ((rightrect.width ** 2 + rightrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(right, 'radius', rightradius)
return distancesquared <= (leftradius + rightradius) ** 2
class collide_circle_ratio(object):
"""detect collision between two sprites using scaled circles
This callable class checks for collisions between two sprites using a
scaled version of a sprite's radius. It is created with a ratio as the
argument to the constructor. The instance is then intended to be passed as
a collided callback function to the *collide functions.
New in pygame 1.8.1
"""
def __init__(self, ratio):
"""creates a new collide_circle_ratio callable instance
The given ratio is expected to be a floating point value used to scale
the underlying sprite radius before checking for collisions.
        With a ratio of 1.0, it behaves exactly like the plain
        collide_circle function.
"""
self.ratio = ratio
def __call__(self, left, right):
"""detect collision between two sprites using scaled circles
        pygame.sprite.collide_circle_ratio(ratio)(left, right): return bool
Tests for collision between two sprites by testing whether two circles
centered on the sprites overlap after scaling the circle's radius by
the stored ratio. If the sprites have a "radius" attribute, that is
used to create the circle; otherwise, a circle is created that is big
enough to completely enclose the sprite's rect as given by the "rect"
attribute. Intended to be passed as a collided callback function to the
*collide functions. Sprites must have a "rect" and an optional "radius"
attribute.
"""
ratio = self.ratio
xdistance = left.rect.centerx - right.rect.centerx
ydistance = left.rect.centery - right.rect.centery
distancesquared = xdistance ** 2 + ydistance ** 2
if hasattr(left, "radius"):
leftradius = left.radius * ratio
else:
leftrect = left.rect
leftradius = ratio * 0.5 * ((leftrect.width ** 2 + leftrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(left, 'radius', leftradius)
if hasattr(right, "radius"):
rightradius = right.radius * ratio
else:
rightrect = right.rect
rightradius = ratio * 0.5 * ((rightrect.width ** 2 + rightrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(right, 'radius', rightradius)
return distancesquared <= (leftradius + rightradius) ** 2
def collide_mask(left, right):
"""collision detection between two sprites, using masks.
pygame.sprite.collide_mask(SpriteLeft, SpriteRight): bool
Tests for collision between two sprites by testing if their bitmasks
overlap. If the sprites have a "mask" attribute, that is used as the mask;
otherwise, a mask is created from the sprite image. Intended to be passed
as a collided callback function to the *collide functions. Sprites must
have a "rect" and an optional "mask" attribute.
New in pygame 1.8.0
"""
xoffset = right.rect[0] - left.rect[0]
yoffset = right.rect[1] - left.rect[1]
try:
leftmask = left.mask
except AttributeError:
leftmask = from_surface(left.image)
try:
rightmask = right.mask
except AttributeError:
rightmask = from_surface(right.image)
return leftmask.overlap(rightmask, (xoffset, yoffset))
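# Usage sketch (added for illustration; not part of the original module).
# Precomputing a sprite's mask once avoids rebuilding it from the image on
# every collide_mask call; this requires pygame.mask to have been
# importable at the top of this file.
def _example_precompute_mask(spr):
    spr.mask = from_surface(spr.image)
    return spr.mask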
def spritecollide(sprite, group, dokill, collided=None):
"""find Sprites in a Group that intersect another Sprite
pygame.sprite.spritecollide(sprite, group, dokill, collided=None):
return Sprite_list
Return a list containing all Sprites in a Group that intersect with another
Sprite. Intersection is determined by comparing the Sprite.rect attribute
of each Sprite.
The dokill argument is a bool. If set to True, all Sprites that collide
will be removed from the Group.
The collided argument is a callback function used to calculate if two
    sprites are colliding. It should take two sprites as values, and return a
bool value indicating if they are colliding. If collided is not passed, all
sprites must have a "rect" value, which is a rectangle of the sprite area,
which will be used to calculate the collision.
"""
if dokill:
crashed = []
append = crashed.append
if collided:
for s in group.sprites():
if collided(sprite, s):
s.kill()
append(s)
else:
spritecollide = sprite.rect.colliderect
for s in group.sprites():
if spritecollide(s.rect):
s.kill()
append(s)
return crashed
elif collided:
return [s for s in group if collided(sprite, s)]
else:
spritecollide = sprite.rect.colliderect
return [s for s in group if spritecollide(s.rect)]
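# Usage sketch (added for illustration; not part of the original module).
# Default rect-based collision that kills every bullet hitting the player;
# the returned list holds the (now group-less) colliding bullets.
def _example_player_hit_by(player, bullets):
    return spritecollide(player, bullets, True)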
def groupcollide(groupa, groupb, dokilla, dokillb, collided=None):
"""detect collision between a group and another group
pygame.sprite.groupcollide(groupa, groupb, dokilla, dokillb):
return dict
Given two groups, this will find the intersections between all sprites in
each group. It returns a dictionary of all sprites in the first group that
collide. The value for each item in the dictionary is a list of the sprites
in the second group it collides with. The two dokill arguments control if
the sprites from either group will be automatically removed from all
groups. Collided is a callback function used to calculate if two sprites
    are colliding. It should take two sprites as values, and return a bool
value indicating if they are colliding. If collided is not passed, all
sprites must have a "rect" value, which is a rectangle of the sprite area
that will be used to calculate the collision.
"""
crashed = {}
SC = spritecollide
if dokilla:
for s in groupa.sprites():
c = SC(s, groupb, dokillb, collided)
if c:
crashed[s] = c
s.kill()
else:
for s in groupa:
c = SC(s, groupb, dokillb, collided)
if c:
crashed[s] = c
return crashed
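# Usage sketch (added for illustration; not part of the original module).
# groupcollide maps each colliding sprite of the first group to the list of
# sprites it hit in the second; here bullets die on impact while the
# enemies they hit survive.
def _example_bullets_vs_enemies(bullets, enemies):
    hits = groupcollide(bullets, enemies, True, False)
    return sum(len(hit_list) for hit_list in hits.values())  # total hits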
def spritecollideany(sprite, group, collided=None):
"""finds any sprites in a group that collide with the given sprite
pygame.sprite.spritecollideany(sprite, group): return sprite
    Given a sprite and a group of sprites, this will return any single
    sprite that collides with the given sprite. If there are no
collisions, then this returns None.
If you don't need all the features of the spritecollide function, this
function will be a bit quicker.
Collided is a callback function used to calculate if two sprites are
colliding. It should take two sprites as values and return a bool value
indicating if they are colliding. If collided is not passed, then all
sprites must have a "rect" value, which is a rectangle of the sprite area,
which will be used to calculate the collision.
"""
if collided:
for s in group:
if collided(sprite, s):
return s
else:
# Special case old behaviour for speed.
spritecollide = sprite.rect.colliderect
for s in group:
if spritecollide(s.rect):
return s
return None
|
gpl-3.0
|
yakky/django-cms
|
cms/models/static_placeholder.py
|
1
|
3681
|
import uuid
from django.contrib.auth import get_permission_codename
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from six import text_type, python_2_unicode_compatible
from cms.models.fields import PlaceholderField
from cms.utils.copy_plugins import copy_plugins_to
def static_slotname(instance):
"""
Returns a string to be used as the slot
for the static placeholder field.
"""
return instance.code
@python_2_unicode_compatible
class StaticPlaceholder(models.Model):
CREATION_BY_TEMPLATE = 'template'
CREATION_BY_CODE = 'code'
CREATION_METHODS = (
(CREATION_BY_TEMPLATE, _('by template')),
(CREATION_BY_CODE, _('by code')),
)
name = models.CharField(
verbose_name=_(u'static placeholder name'), max_length=255, blank=True, default='',
help_text=_(u'Descriptive name to identify this static placeholder. Not displayed to users.'))
code = models.CharField(
verbose_name=_(u'placeholder code'), max_length=255, blank=True,
help_text=_(u'To render the static placeholder in templates.'))
draft = PlaceholderField(static_slotname, verbose_name=_(u'placeholder content'), related_name='static_draft')
public = PlaceholderField(static_slotname, editable=False, related_name='static_public')
dirty = models.BooleanField(default=False, editable=False)
creation_method = models.CharField(
verbose_name=_('creation_method'), choices=CREATION_METHODS,
default=CREATION_BY_CODE, max_length=20, blank=True,
)
site = models.ForeignKey(Site, on_delete=models.CASCADE, null=True, blank=True)
class Meta:
verbose_name = _(u'static placeholder')
verbose_name_plural = _(u'static placeholders')
app_label = 'cms'
unique_together = (('code', 'site'),)
def __str__(self):
return self.get_name()
def get_name(self):
return self.name or self.code or text_type(self.pk)
get_name.short_description = _(u'static placeholder name')
def clean(self):
# TODO: check for clashes if the random code is already taken
if not self.code:
self.code = u'static-%s' % uuid.uuid4()
if not self.site:
placeholders = StaticPlaceholder.objects.filter(code=self.code, site__isnull=True)
if self.pk:
placeholders = placeholders.exclude(pk=self.pk)
if placeholders.exists():
raise ValidationError(_("A static placeholder with the same site and code already exists"))
def publish(self, request, language, force=False):
if force or self.has_publish_permission(request):
self.public.clear(language=language)
self.public.clear_cache(language=language)
plugins = self.draft.get_plugins_list(language=language)
copy_plugins_to(plugins, self.public, no_signals=True)
self.dirty = False
self.save()
return True
return False
def has_change_permission(self, request):
if request.user.is_superuser:
return True
opts = self._meta
return request.user.has_perm(opts.app_label + '.' + get_permission_codename('change', opts))
def has_publish_permission(self, request):
if request.user.is_superuser:
return True
opts = self._meta
return request.user.has_perm(opts.app_label + '.' + get_permission_codename('change', opts)) and \
request.user.has_perm(opts.app_label + '.' + 'publish_page')
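# Usage sketch (added for illustration; not part of the original module):
# fetch or create a placeholder by code and publish its draft plugins for
# one language. 'request' is assumed to carry a user with change and
# publish permissions.
def _example_publish_footer(request):
    placeholder, _ = StaticPlaceholder.objects.get_or_create(
        code='footer', site=None)
    return placeholder.publish(request, language='en')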
|
bsd-3-clause
|
Triv90/Nova
|
nova/tests/test_wsgi.py
|
10
|
7206
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for `nova.wsgi`."""
import os.path
import tempfile
import urllib2
import eventlet
import httplib2
import paste
import webob
import nova.exception
from nova import test
import nova.wsgi
SSL_CERT_DIR = os.path.normpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'ssl_cert'))
class TestLoaderNothingExists(test.TestCase):
"""Loader tests where os.path.exists always returns False."""
def setUp(self):
super(TestLoaderNothingExists, self).setUp()
self.stubs.Set(os.path, 'exists', lambda _: False)
def test_config_not_found(self):
self.assertRaises(
nova.exception.ConfigNotFound,
nova.wsgi.Loader,
)
class TestLoaderNormalFilesystem(test.TestCase):
"""Loader tests with normal filesystem (unmodified os.path module)."""
_paste_config = """
[app:test_app]
use = egg:Paste#static
document_root = /tmp
"""
def setUp(self):
super(TestLoaderNormalFilesystem, self).setUp()
self.config = tempfile.NamedTemporaryFile(mode="w+t")
self.config.write(self._paste_config.lstrip())
self.config.seek(0)
self.config.flush()
self.loader = nova.wsgi.Loader(self.config.name)
def test_config_found(self):
self.assertEquals(self.config.name, self.loader.config_path)
def test_app_not_found(self):
self.assertRaises(
nova.exception.PasteAppNotFound,
self.loader.load_app,
"nonexistent app",
)
def test_app_found(self):
url_parser = self.loader.load_app("test_app")
self.assertEquals("/tmp", url_parser.directory)
def tearDown(self):
self.config.close()
super(TestLoaderNormalFilesystem, self).tearDown()
class TestWSGIServer(test.TestCase):
"""WSGI server tests."""
def test_no_app(self):
server = nova.wsgi.Server("test_app", None)
self.assertEquals("test_app", server.name)
def test_start_random_port(self):
server = nova.wsgi.Server("test_random_port", None,
host="127.0.0.1", port=0)
server.start()
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
def test_start_random_port_with_ipv6(self):
server = nova.wsgi.Server("test_random_port", None,
host="::1", port=0)
server.start()
self.assertEqual("::1", server.host)
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
def test_uri_length_limit(self):
server = nova.wsgi.Server("test_uri_length_limit", None,
host="127.0.0.1", max_url_len=16384)
server.start()
uri = "http://127.0.0.1:%d/%s" % (server.port, 10000 * 'x')
resp, _ = httplib2.Http().request(uri)
eventlet.sleep(0)
self.assertNotEqual(resp.status,
paste.httpexceptions.HTTPRequestURITooLong.code)
uri = "http://127.0.0.1:%d/%s" % (server.port, 20000 * 'x')
resp, _ = httplib2.Http().request(uri)
eventlet.sleep(0)
self.assertEqual(resp.status,
paste.httpexceptions.HTTPRequestURITooLong.code)
server.stop()
server.wait()
class TestWSGIServerWithSSL(test.TestCase):
"""WSGI server with SSL tests."""
def setUp(self):
super(TestWSGIServerWithSSL, self).setUp()
self.flags(enabled_ssl_apis=['fake_ssl'],
ssl_cert_file=os.path.join(SSL_CERT_DIR, 'certificate.crt'),
ssl_key_file=os.path.join(SSL_CERT_DIR, 'privatekey.key'))
def test_ssl_server(self):
def test_app(env, start_response):
start_response('200 OK', {})
return ['PONG']
fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app,
host="127.0.0.1", port=0,
use_ssl=True)
fake_ssl_server.start()
self.assertNotEqual(0, fake_ssl_server.port)
cli = eventlet.connect(("localhost", fake_ssl_server.port))
cli = eventlet.wrap_ssl(cli,
ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt'))
cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nContent-length:4\r\n\r\nPING')
response = cli.read(8192)
self.assertEquals(response[-4:], "PONG")
fake_ssl_server.stop()
fake_ssl_server.wait()
def test_two_servers(self):
def test_app(env, start_response):
start_response('200 OK', {})
return ['PONG']
fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app,
host="127.0.0.1", port=0, use_ssl=True)
fake_ssl_server.start()
self.assertNotEqual(0, fake_ssl_server.port)
fake_server = nova.wsgi.Server("fake", test_app,
host="127.0.0.1", port=0)
fake_server.start()
self.assertNotEquals(0, fake_server.port)
cli = eventlet.connect(("localhost", fake_ssl_server.port))
cli = eventlet.wrap_ssl(cli,
ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt'))
cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nContent-length:4\r\n\r\nPING')
response = cli.read(8192)
self.assertEquals(response[-4:], "PONG")
cli = eventlet.connect(("localhost", fake_server.port))
cli.sendall('POST / HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nContent-length:4\r\n\r\nPING')
response = cli.recv(8192)
self.assertEquals(response[-4:], "PONG")
fake_ssl_server.stop()
fake_ssl_server.wait()
def test_app_using_ipv6_and_ssl(self):
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = nova.wsgi.Server("fake_ssl",
hello_world,
host="::1",
port=0,
use_ssl=True)
server.start()
response = urllib2.urlopen('https://[::1]:%d/' % server.port)
self.assertEquals(greetings, response.read())
server.stop()
server.wait()
|
apache-2.0
|
2014c2g4/2015cda_g7
|
static/Brython3.1.0-20150301-090019/Lib/unittest/main.py
|
739
|
10385
|
"""Unittest main program"""
import sys
import optparse
import os
from . import loader, runner
from .signals import installHandler
__unittest = True
FAILFAST = " -f, --failfast Stop on first failure\n"
CATCHBREAK = " -c, --catch Catch control-C and display results\n"
BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
USAGE_AS_MAIN = """\
Usage: %(progName)s [options] [tests]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s test_module - run tests from test_module
%(progName)s module.TestClass - run tests from module.TestClass
%(progName)s module.Class.test_method - run specified test method
[tests] can be a list of any number of test modules, classes and test
methods.
Alternative Usage: %(progName)s discover [options]
Options:
-v, --verbose Verbose output
%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
-p pattern Pattern to match test files ('test*.py' default)
-t directory Top level directory of project (default to
start directory)
For test discovery all test modules must be importable from the top
level directory of the project.
"""
USAGE_FROM_MODULE = """\
Usage: %(progName)s [options] [test] [...]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s - run default set of tests
%(progName)s MyTestSuite - run suite 'MyTestSuite'
%(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
def _convert_name(name):
# on Linux / Mac OS X 'foo.PY' is not importable, but on
    # Windows it is. Simpler to do a case insensitive match;
    # a better check would be to verify that the name is a
# valid Python module name.
if os.path.isfile(name) and name.lower().endswith('.py'):
if os.path.isabs(name):
rel_path = os.path.relpath(name, os.getcwd())
if os.path.isabs(rel_path) or rel_path.startswith(os.pardir):
return name
name = rel_path
# on Windows both '\' and '/' are used as path
# separators. Better to replace both than rely on os.path.sep
return name[:-3].replace('\\', '.').replace('/', '.')
return name
def _convert_names(names):
return [_convert_name(name) for name in names]
class TestProgram(object):
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
USAGE = USAGE_FROM_MODULE
# defaults for testing
failfast = catchbreak = buffer = progName = warnings = None
def __init__(self, module='__main__', defaultTest=None, argv=None,
testRunner=None, testLoader=loader.defaultTestLoader,
exit=True, verbosity=1, failfast=None, catchbreak=None,
buffer=None, warnings=None):
if isinstance(module, str):
self.module = __import__(module)
for part in module.split('.')[1:]:
self.module = getattr(self.module, part)
else:
self.module = module
if argv is None:
argv = sys.argv
self.exit = exit
self.failfast = failfast
self.catchbreak = catchbreak
self.verbosity = verbosity
self.buffer = buffer
if warnings is None and not sys.warnoptions:
        # even if DeprecationWarnings are ignored by default
# print them anyway unless other warnings settings are
# specified by the warnings arg or the -W python flag
self.warnings = 'default'
else:
# here self.warnings is set either to the value passed
# to the warnings args or to None.
# If the user didn't pass a value self.warnings will
# be None. This means that the behavior is unchanged
# and depends on the values passed to -W.
self.warnings = warnings
self.defaultTest = defaultTest
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.parseArgs(argv)
self.runTests()
def usageExit(self, msg=None):
if msg:
print(msg)
usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
'buffer': ''}
if self.failfast != False:
usage['failfast'] = FAILFAST
if self.catchbreak != False:
usage['catchbreak'] = CATCHBREAK
if self.buffer != False:
usage['buffer'] = BUFFEROUTPUT
print(self.USAGE % usage)
sys.exit(2)
def parseArgs(self, argv):
if ((len(argv) > 1 and argv[1].lower() == 'discover') or
(len(argv) == 1 and self.module is None)):
self._do_discovery(argv[2:])
return
parser = self._getOptParser()
options, args = parser.parse_args(argv[1:])
self._setAttributesFromOptions(options)
if len(args) == 0 and self.module is None:
# this allows "python -m unittest -v" to still work for
# test discovery. This means -c / -b / -v / -f options will
# be handled twice, which is harmless but not ideal.
self._do_discovery(argv[1:])
return
if len(args) == 0 and self.defaultTest is None:
# createTests will load tests from self.module
self.testNames = None
elif len(args) > 0:
self.testNames = _convert_names(args)
if __name__ == '__main__':
# to support python -m unittest ...
self.module = None
else:
self.testNames = (self.defaultTest,)
self.createTests()
def createTests(self):
if self.testNames is None:
self.test = self.testLoader.loadTestsFromModule(self.module)
else:
self.test = self.testLoader.loadTestsFromNames(self.testNames,
self.module)
def _getOptParser(self):
parser = optparse.OptionParser()
parser.prog = self.progName
parser.add_option('-v', '--verbose', dest='verbose', default=False,
help='Verbose output', action='store_true')
parser.add_option('-q', '--quiet', dest='quiet', default=False,
help='Quiet output', action='store_true')
if self.failfast != False:
parser.add_option('-f', '--failfast', dest='failfast', default=False,
help='Stop on first fail or error',
action='store_true')
if self.catchbreak != False:
parser.add_option('-c', '--catch', dest='catchbreak', default=False,
help='Catch ctrl-C and display results so far',
action='store_true')
if self.buffer != False:
parser.add_option('-b', '--buffer', dest='buffer', default=False,
help='Buffer stdout and stderr during tests',
action='store_true')
return parser
def _setAttributesFromOptions(self, options):
# only set options from the parsing here
# if they weren't set explicitly in the constructor
if self.failfast is None:
self.failfast = options.failfast
if self.catchbreak is None:
self.catchbreak = options.catchbreak
if self.buffer is None:
self.buffer = options.buffer
if options.verbose:
self.verbosity = 2
elif options.quiet:
self.verbosity = 0
def _addDiscoveryOptions(self, parser):
parser.add_option('-s', '--start-directory', dest='start', default='.',
help="Directory to start discovery ('.' default)")
parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
help="Pattern to match tests ('test*.py' default)")
parser.add_option('-t', '--top-level-directory', dest='top', default=None,
help='Top level directory of project (defaults to start directory)')
def _do_discovery(self, argv, Loader=None):
if Loader is None:
Loader = lambda: self.testLoader
# handle command line args for test discovery
self.progName = '%s discover' % self.progName
parser = self._getOptParser()
self._addDiscoveryOptions(parser)
options, args = parser.parse_args(argv)
if len(args) > 3:
self.usageExit()
for name, value in zip(('start', 'pattern', 'top'), args):
setattr(options, name, value)
self._setAttributesFromOptions(options)
start_dir = options.start
pattern = options.pattern
top_level_dir = options.top
loader = Loader()
self.test = loader.discover(start_dir, pattern, top_level_dir)
def runTests(self):
if self.catchbreak:
installHandler()
if self.testRunner is None:
self.testRunner = runner.TextTestRunner
if isinstance(self.testRunner, type):
try:
testRunner = self.testRunner(verbosity=self.verbosity,
failfast=self.failfast,
buffer=self.buffer,
warnings=self.warnings)
except TypeError:
# didn't accept the verbosity, buffer or failfast arguments
testRunner = self.testRunner()
else:
# it is assumed to be a TestRunner instance
testRunner = self.testRunner
self.result = testRunner.run(self.test)
if self.exit:
sys.exit(not self.result.wasSuccessful())
main = TestProgram
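# Usage sketch (added for illustration; not part of the original module):
# a test module using this machinery becomes executable by ending with
#
#     if __name__ == '__main__':
#         main()  # i.e. unittest.main(); parses sys.argv and runs the tests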
|
gpl-3.0
|
mathpresso/adminpp
|
example_project/post/migrations/0001_initial.py
|
2
|
1451
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-22 12:35
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('content', models.CharField(max_length=100)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PostReply',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.CharField(max_length=100)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='replies', to=settings.AUTH_USER_MODEL)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='replies', to='post.Post')),
],
),
]
|
mit
|
nickgentoo/LSTM-timepredictionPMdata
|
code/nick_evaluate_suffix_and_remaining_time_only_time_OHenc.py
|
1
|
15048
|
'''
This script takes as input the LSTM or RNN weights found by train.py.
Change the path in line 178 of this script to point to the h5 file
with LSTM or RNN weights generated by train.py
Author: Niek Tax
'''
from __future__ import division
from keras.models import load_model
import csv
import copy
import numpy as np
import distance
from itertools import izip
from jellyfish._jellyfish import damerau_levenshtein_distance
import unicodecsv
from sklearn import metrics
from math import sqrt
import time
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from collections import Counter
from keras.models import model_from_json
import sys
fileprefix=sys.argv[1]
eventlog = sys.argv[2]
csvfile = open('../data/%s' % eventlog, 'r')
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
next(spamreader, None) # skip the headers
ascii_offset = 161
lastcase = ''
line = ''
firstLine = True
lines = []
timeseqs = []
timeseqs2 = []
timeseqs3 = []
timeseqs4 = []
y_times = []
times = []
times2 = []
times3 = []
times4 = []
# nick
attributes = []
attributes_dict = []
attributes_sizes = []
numlines = 0
casestarttime = None
lasteventtime = None
csvfile = open('../data/%s' % eventlog, 'r')
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
next(spamreader, None) # skip the headers
ascii_offset = 161
y = []
for row in spamreader:
#print(row)
t = time.strptime(row[2], "%Y-%m-%d %H:%M:%S")
#test different format
#t = 0#time.strptime(row[2], "%Y/%m/%d %H:%M:%S")
if row[0]!=lastcase:
casestarttime = t
lasteventtime = t
lastcase = row[0]
if not firstLine:
#print (line)
lines.append(line)
timeseqs.append(times)
timeseqs2.append(times2)
#target
y_times.extend([times2[-1]-k for k in times2])
timeseqs3.append(times3)
timeseqs4.append(times4)
for i in xrange(len(attributes)):
#print(attributesvalues[i])
attributes[i].append(attributesvalues[i])
else:
        # if this is the first line, I have to add the elements to attributes
for a in row[3:]:
attributes.append([])
attributes_dict.append({})
attributes_sizes.append(0)
#print(attributes)
n_events_in_trace=0
line = ''
times = []
times2 = []
times3 = []
times4 = []
attributesvalues = [ ]
numlines+=1
n_events_in_trace+=1
line+=unichr(int(row[1])+ascii_offset)
timesincelastevent = datetime.fromtimestamp(time.mktime(t))-datetime.fromtimestamp(time.mktime(lasteventtime))
timesincecasestart = datetime.fromtimestamp(time.mktime(t))-datetime.fromtimestamp(time.mktime(casestarttime))
midnight = datetime.fromtimestamp(time.mktime(t)).replace(hour=0, minute=0, second=0, microsecond=0)
timesincemidnight = datetime.fromtimestamp(time.mktime(t))-midnight
timediff = 86400 * timesincelastevent.days + timesincelastevent.seconds
timediff2 = 86400 * timesincecasestart.days + timesincecasestart.seconds
timediff3 = timesincemidnight.seconds
timediff4 = datetime.fromtimestamp(time.mktime(t)).weekday()
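# Four time features per event: timediff = seconds since the previous event,
# timediff2 = seconds since the case started, timediff3 = seconds since
# midnight, timediff4 = weekday (0 = Monday).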
times.append(timediff)
times2.append(timediff2)
times3.append(timediff3)
times4.append(timediff4)
lasteventtime = t
firstLine = False
indexnick=0
for a in row[3:]:
if len(attributesvalues)<=indexnick:
attributesvalues.append([])
a=a.strip('"')
# TODO: cast to int if the value is an integer
if a!="":
try:
attr=float(a)
attributesvalues[indexnick].append(attr)
#print("float attr")
#print(a)
except ValueError:
if a not in attributes_dict[indexnick]:
attributes_dict[indexnick][a]=attributes_sizes[indexnick]+1
attributes_sizes[indexnick]=attributes_sizes[indexnick]+1
attributesvalues[indexnick].append(attributes_dict[indexnick][a])
else:
attributesvalues[indexnick].append(-1)
# if a in attributes_dict[indexnick]:
# attributesvalues.append(attributes_dict[indexnick][a])
# else:
# attributes_dict[indexnick][a]=attributes_sizes[indexnick]
# attributes_sizes[indexnick]+=1
# attributesvalues.append(attributes_dict[indexnick][a])
indexnick+=1
# add last case
lines.append(line)
timeseqs.append(times)
timeseqs2.append(times2)
timeseqs3.append(times3)
timeseqs4.append(times4)
y_times.extend([times2[-1] - k for k in times2])
for i in xrange(len(attributes)):
attributes[i].append(attributesvalues[i])
numlines+=1
divisor = np.mean([item for sublist in timeseqs for item in sublist])
print('divisor: {}'.format(divisor))
divisor2 = np.mean([item for sublist in timeseqs2 for item in sublist])
print('divisor2: {}'.format(divisor2))
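# divisor and divisor2 are the mean inter-event time and the mean
# time-since-case-start over all events; encode() below divides the raw time
# features by them so the network sees values of roughly unit scale.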
step = 1
sentences = []
softness = 0
next_chars = []
lines = map(lambda x: x + '!', lines)
maxlen = max(map(lambda x: len(x), lines))
chars = map(lambda x: set(x), lines)
chars = list(set().union(*chars))
chars.sort()
target_chars = copy.copy(chars)
chars.remove('!')
lines = map(lambda x: x[:-2], lines)
print('total chars: {}, target chars: {}'.format(len(chars), len(target_chars)))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
target_char_indices = dict((c, i) for i, c in enumerate(target_chars))
target_indices_char = dict((i, c) for i, c in enumerate(target_chars))
#print(indices_char)
elems_per_fold = int(round(numlines / 3))
fold1 = lines[:elems_per_fold]
fold1_t = timeseqs[:elems_per_fold]
fold1_t2 = timeseqs2[:elems_per_fold]
fold1_t3 = timeseqs3[:elems_per_fold]
fold1_t4 = timeseqs4[:elems_per_fold]
with open('output_files/folds/' + eventlog + 'fold1.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row, timeseq in izip(fold1, fold1_t):
spamwriter.writerow([unicode(s).encode("utf-8") + '#{}'.format(t) for s, t in izip(row, timeseq)])
fold2 = lines[elems_per_fold:2 * elems_per_fold]
fold2_t = timeseqs[elems_per_fold:2 * elems_per_fold]
fold2_t2 = timeseqs2[elems_per_fold:2 * elems_per_fold]
fold2_t3 = timeseqs3[elems_per_fold:2 * elems_per_fold]
fold2_t4 = timeseqs4[elems_per_fold:2 * elems_per_fold]
with open('output_files/folds/' + eventlog + 'fold2.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row, timeseq in izip(fold2, fold2_t):
spamwriter.writerow([unicode(s).encode("utf-8") + '#{}'.format(t) for s, t in izip(row, timeseq)])
fold3 = lines[2 * elems_per_fold:]
fold3_t = timeseqs[2 * elems_per_fold:]
fold3_t2 = timeseqs2[2 * elems_per_fold:]
fold3_t3 = timeseqs3[2 * elems_per_fold:]
fold3_t4 = timeseqs4[2 * elems_per_fold:]
fold3_a=[a[2*elems_per_fold:] for a in attributes]
with open('output_files/folds/' + eventlog + 'fold3.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row, timeseq in izip(fold3, fold3_t):
spamwriter.writerow([unicode(s).encode("utf-8") + '#{}'.format(t) for s, t in izip(row, timeseq)])
y_t_seq=[]
for line in fold1+fold2:
for i in range(0, len(line), 1):
if i == 0:
continue
y_t_seq.append(y_times[0:i])
divisory = np.mean([item for sublist in y_t_seq for item in sublist])
print('divisory: {}'.format(divisory))
lines = fold3
lines_t = fold3_t
lines_t2 = fold3_t2
lines_t3 = fold3_t3
lines_t4 = fold3_t4
attributes=fold3_a
# set parameters
predict_size = maxlen
# load json and create model
json_file = open('output_files/models/'+fileprefix+'_model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights("output_files/models/"+fileprefix+"_weights_best.h5")
print("Loaded model from disk")
y_t_seq=[]
# load model, set this to the model generated by train.py
#model = load_model('output_files/models/200_model_59-1.50.h5')
# define helper functions
def encode(ex, sentence, times,times2, times3,times4, sentences_attributes,maxlen=maxlen):
#num_features = len(chars)+5+len(sentences_attributes)
num_features = len(chars) + 5
for idx in xrange(len(attributes)):
num_features += attributes_sizes[idx] + 1
#print(num_features)
X = np.zeros((1, maxlen, num_features), dtype=np.float32)
leftpad = maxlen-len(sentence)
times2 = np.cumsum(times)
#print "sentence",len(sentence)
for t, char in enumerate(sentence):
#print(t)
#midnight = times3[t].replace(hour=0, minute=0, second=0, microsecond=0)
#timesincemidnight = times3[t]-midnight
multiset_abstraction = Counter(sentence[:t+1])
for c in chars:
if c==char:
X[0, t+leftpad, char_indices[c]] = 1
X[0, t+leftpad, len(chars)] = t+1
X[0, t+leftpad, len(chars)+1] = times[t]/divisor
X[0, t+leftpad, len(chars)+2] = times2[t]/divisor2
X[0, t+leftpad, len(chars)+3] = times3[t]/86400
X[0, t+leftpad, len(chars)+4] = times4[t]/7
# for i in xrange(len(sentences_attributes)):
# #print(str(i)+" "+str(t))
# #print(sentences_attributes[i][t])
# #nick check the zero, it is there because it was a list
# X[0, t + leftpad, len(chars) + 5 + i] = sentences_attributes[i][t]
startoh = 0
for j in xrange(len(attributes)):
# X[i, t + leftpad, len(chars) + 5+j]=sentences_attributes[j][i][t]
if attributes_sizes[j] > 0:
X[0, t + leftpad, len(chars) + 5 + startoh + sentences_attributes[j][t]] = 1
else:
X[0, t + leftpad, len(chars) + 5 + startoh] = sentences_attributes[j][t]
startoh += (attributes_sizes[j] + 1)
return X
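# Feature layout per timestep, as built above (a sketch of the index math, not
# part of the original comments): len(chars) one-hot activity slots, then the
# position index t+1, four normalised time features, and one block per extra
# attribute (a one-hot block of size attributes_sizes[j]+1 for categorical
# attributes, a single scalar slot for numeric ones).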
# # define helper functions
# def encode(sentence, times, times3, sentences_attributes,maxlen=maxlen):
# num_features = len(chars)+5+len(sentences_attributes)
# X = np.zeros((1, maxlen, num_features), dtype=np.float32)
# leftpad = maxlen-len(sentence)
# times2 = np.cumsum(times)
# print "sentence",len(sentence)
# for t, char in enumerate(sentence):
# midnight = times3[t].replace(hour=0, minute=0, second=0, microsecond=0)
# timesincemidnight = times3[t]-midnight
# multiset_abstraction = Counter(sentence[:t+1])
# for c in chars:
# if c==char:
# X[0, t+leftpad, char_indices[c]] = 1
# X[0, t+leftpad, len(chars)] = t+1
# X[0, t+leftpad, len(chars)+1] = times[t]/divisor
# X[0, t+leftpad, len(chars)+2] = times2[t]/divisor2
# X[0, t+leftpad, len(chars)+3] = timesincemidnight.seconds/86400
# X[0, t+leftpad, len(chars)+4] = times3[t].weekday()/7
# for i in xrange(len(sentences_attributes)):
# print(str(i)+" "+str(t))
# print(sentences_attributes[i][t])
# #nick check the zero, it is there because it was a list
# X[0, t + leftpad, len(chars) + 5+i]=sentences_attributes[i][t]
# return X,y
def getSymbol(predictions):
maxPrediction = 0
symbol = ''
i = 0
for prediction in predictions:
if prediction >= maxPrediction:
maxPrediction = prediction
symbol = target_indices_char[i]
i += 1
return symbol
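# getSymbol is effectively an argmax over the prediction vector, mapped back
# through target_indices_char (with ties resolved in favour of the last
# maximal index, because of the >= test); e.g. getSymbol([0.1, 0.7, 0.2])
# (illustrative numbers) returns the character stored at index 1.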
one_ahead_gt = []
one_ahead_pred = []
two_ahead_gt = []
two_ahead_pred = []
three_ahead_gt = []
three_ahead_pred = []
y_t_seq=[]
# make predictions
with open('output_files/results/'+fileprefix+'_suffix_and_remaining_time_%s' % eventlog, 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(["Prefix length", "Groud truth", "Ground truth times", "Predicted times", "RMSE", "MAE", "Median AE"])
#considering also size 1 prefixes
#for prefix_size in range(1,maxlen):
#print(prefix_size)
#print(len(lines),len(attributes[0]))
for ex, (line, times, times2, times3, times4) in enumerate(izip(lines, lines_t, lines_t2, lines_t3, lines_t4)):
for prefix_size in range(1, len(line)):  # -1 added because I don't want 0 in the ground truth
#print(line,ex,len(line), len(attributes[0][ex]))
times.append(0)
cropped_line = ''.join(line[:prefix_size])
cropped_times = times[:prefix_size]
#print "times_len",len(cropped_times)
cropped_times2 = times2[:prefix_size]
cropped_times4 = times4[:prefix_size]
cropped_times3 = times3[:prefix_size]
cropped_attributes = [[] for i in xrange(len(attributes))]
for j in xrange(len(attributes)):
#print(attributes[j][ex])
cropped_attributes[j].extend(attributes[j][ex][0:prefix_size])
#print cropped_attributes
#y_t_seq.append(y_times[0:prefix_size])
#cropped_attributes= [a[:prefix_size] for a in attributes]
#print cropped_attribute
ground_truth = ''.join(line[prefix_size:prefix_size+predict_size])
ground_truth_t = times2[prefix_size-1] # was -1
#print(prefix_size,len(times2)-1)
case_end_time = times2[len(times2)-1]
ground_truth_t = case_end_time-ground_truth_t
predicted = ''
total_predicted_time = 0
#perform single prediction
enc = encode(ex,cropped_line, cropped_times,cropped_times2, cropped_times3,cropped_times4, cropped_attributes)
y = model.predict(enc, verbose=0) # make predictions
# split predictions into separate activity and time predictions
#print y
y_t = y[0][0]
#prediction = getSymbol(y_char) # undo one-hot encoding
#cropped_line += prediction
if y_t<0:
y_t=0
cropped_times.append(y_t)
y_t = y_t * divisor
#cropped_times3.append(cropped_times3[-1] + timedelta(seconds=y_t))
total_predicted_time = total_predicted_time + y_t
output = []
if len(ground_truth)>0:
output.append(prefix_size)
output.append(unicode(ground_truth).encode("utf-8"))
output.append(ground_truth_t)
output.append(total_predicted_time)
output.append(metrics.mean_squared_error([ground_truth_t], [total_predicted_time]))
output.append(metrics.mean_absolute_error([ground_truth_t], [total_predicted_time]))
output.append(metrics.median_absolute_error([ground_truth_t], [total_predicted_time]))
spamwriter.writerow(output)
|
gpl-3.0
|
jarodwilson/linux-muck
|
Documentation/conf.py
|
57
|
16596
|
# -*- coding: utf-8 -*-
#
# The Linux Kernel documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 12 13:51:46 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx
# Get Sphinx version
major, minor, patch = map(int, sphinx.__version__.split("."))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinx'))
from load_config import loadConfig
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['kernel-doc', 'rstFlatTable', 'kernel_include', 'cdomain']
# The name of the math extension changed on Sphinx 1.4
if minor > 3:
extensions.append("sphinx.ext.imgmath")
else:
extensions.append("sphinx.ext.pngmath")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'The Linux Kernel'
copyright = '2016, The kernel development community'
author = 'The kernel development community'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# In a normal build, version and release are set to KERNELVERSION and
# KERNELRELEASE, respectively, from the Makefile via Sphinx command line
# arguments.
#
# The following code tries to extract the information by reading the Makefile,
# when Sphinx is run directly (e.g. by Read the Docs).
try:
makefile_version = None
makefile_patchlevel = None
for line in open('../Makefile'):
key, val = [x.strip() for x in line.split('=', 2)]
if key == 'VERSION':
makefile_version = val
elif key == 'PATCHLEVEL':
makefile_patchlevel = val
if makefile_version and makefile_patchlevel:
break
except:
pass
finally:
if makefile_version and makefile_patchlevel:
version = release = makefile_version + '.' + makefile_patchlevel
else:
sys.stderr.write('Warning: Could not extract kernel version\n')
version = release = "unknown version"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['output']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
primary_domain = 'C'
highlight_language = 'guess'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# The Read the Docs theme is available from
# - https://github.com/snide/sphinx_rtd_theme
# - https://pypi.python.org/pypi/sphinx_rtd_theme
# - python-sphinx-rtd-theme package (on Debian)
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
sys.stderr.write('Warning: The Sphinx \'sphinx_rtd_theme\' HTML theme was not found. Make sure you have the theme installed to produce pretty HTML output. Falling back to the default theme.\n')
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['sphinx-static']
html_context = {
'css_files': [
'_static/theme_overrides.css',
],
}
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TheLinuxKerneldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '8pt',
# Latex figure (float) alignment
#'figure_align': 'htbp',
# Don't mess with UTF-8 chars
'inputenc': '',
'utf8extra': '',
# Additional stuff for the LaTeX preamble.
'preamble': '''
% Adjust margins
\\usepackage[margin=0.5in, top=1in, bottom=1in]{geometry}
% Allow generate some pages in landscape
\\usepackage{lscape}
% Put notes in color and let them be inside a table
\\definecolor{NoteColor}{RGB}{204,255,255}
\\definecolor{WarningColor}{RGB}{255,204,204}
\\definecolor{AttentionColor}{RGB}{255,255,204}
\\definecolor{OtherColor}{RGB}{204,204,204}
\\newlength{\\mynoticelength}
\\makeatletter\\newenvironment{coloredbox}[1]{%
\\setlength{\\fboxrule}{1pt}
\\setlength{\\fboxsep}{7pt}
\\setlength{\\mynoticelength}{\\linewidth}
\\addtolength{\\mynoticelength}{-2\\fboxsep}
\\addtolength{\\mynoticelength}{-2\\fboxrule}
\\begin{lrbox}{\\@tempboxa}\\begin{minipage}{\\mynoticelength}}{\\end{minipage}\\end{lrbox}%
\\ifthenelse%
{\\equal{\\py@noticetype}{note}}%
{\\colorbox{NoteColor}{\\usebox{\\@tempboxa}}}%
{%
\\ifthenelse%
{\\equal{\\py@noticetype}{warning}}%
{\\colorbox{WarningColor}{\\usebox{\\@tempboxa}}}%
{%
\\ifthenelse%
{\\equal{\\py@noticetype}{attention}}%
{\\colorbox{AttentionColor}{\\usebox{\\@tempboxa}}}%
{\\colorbox{OtherColor}{\\usebox{\\@tempboxa}}}%
}%
}%
}\\makeatother
\\makeatletter
\\renewenvironment{notice}[2]{%
\\def\\py@noticetype{#1}
\\begin{coloredbox}{#1}
\\bf\\it
\\par\\strong{#2}
\\csname py@noticestart@#1\\endcsname
}
{
\\csname py@noticeend@\\py@noticetype\\endcsname
\\end{coloredbox}
}
\\makeatother
% Use some font with UTF-8 support with XeLaTeX
\\usepackage{fontspec}
\\setsansfont{DejaVu Sans}
\\setromanfont{DejaVu Serif}
\\setmonofont{DejaVu Sans Mono}
% To allow adjusting table sizes
\\usepackage{adjustbox}
'''
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('kernel-documentation', 'kernel-documentation.tex', 'The Linux Kernel Documentation',
'The kernel development community', 'manual'),
('development-process/index', 'development-process.tex', 'Linux Kernel Development Documentation',
'The kernel development community', 'manual'),
('gpu/index', 'gpu.tex', 'Linux GPU Driver Developer\'s Guide',
'The kernel development community', 'manual'),
('media/index', 'media.tex', 'Linux Media Subsystem Documentation',
'The kernel development community', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'thelinuxkernel', 'The Linux Kernel Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TheLinuxKernel', 'The Linux Kernel Documentation',
author, 'TheLinuxKernel', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
#=======
# rst2pdf
#
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author, options).
#
# See the Sphinx chapter of http://ralsina.me/static/manual.pdf
#
# FIXME: Do not add the index file here; the result will be too big. Adding
# multiple PDF files here actually tries to get the cross-referencing right
# *between* PDF files.
pdf_documents = [
('kernel-documentation', u'Kernel', u'Kernel', u'J. Random Bozo'),
]
# kernel-doc extension configuration for running Sphinx directly (e.g. by Read
# the Docs). In a normal build, these are supplied from the Makefile via command
# line arguments.
kerneldoc_bin = '../scripts/kernel-doc'
kerneldoc_srctree = '..'
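# In a tree build these values are typically injected with sphinx-build's -D
# option, e.g. (illustrative; the exact flags live in the kernel's
# Documentation/Makefile.sphinx):
#
#   sphinx-build -D kerneldoc_bin=../scripts/kernel-doc \
#                -D kerneldoc_srctree=.. ...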
# ------------------------------------------------------------------------------
# Since loadConfig overwrites settings from the global namespace, it has to be
# the last statement in the conf.py file
# ------------------------------------------------------------------------------
loadConfig(globals())
|
gpl-2.0
|
prodromou87/gem5
|
src/mem/slicc/ast/MemberExprAST.py
|
86
|
3128
|
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.ExprAST import ExprAST
class MemberExprAST(ExprAST):
def __init__(self, slicc, expr_ast, field):
super(MemberExprAST, self).__init__(slicc)
self.expr_ast = expr_ast
self.field = field
def __repr__(self):
return "[MemberExprAST: %r.%r]" % (self.expr_ast, self.field)
def generate(self, code):
return_type, gcode = self.expr_ast.inline(True)
fix = code.nofix()
if str(return_type) == "TBE" \
or ("interface" in return_type and
(return_type["interface"] == "AbstractCacheEntry" or
return_type["interface"] == "AbstractEntry")):
code("(*$gcode).m_${{self.field}}")
else:
code("($gcode).m_${{self.field}}")
code.fix(fix)
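# Illustrative expansion (hypothetical variable names): for an expression
# like cache_entry.DataBlk, $gcode inlines to the entry variable and the
# emitted C++ is "(*cache_entry).m_DataBlk" for pointer-like types
# (TBE/AbstractCacheEntry) or "(cache_entry).m_DataBlk" otherwise.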
# Verify that this is a valid field name for this type
if self.field in return_type.data_members:
# Return the type of the field
return return_type.data_members[self.field].type
else:
if "interface" in return_type:
interface_type = self.symtab.find(return_type["interface"])
if interface_type is not None:
if self.field in interface_type.data_members:
# Return the type of the field
return interface_type.data_members[self.field].type
self.error("Invalid object field: " +
"Type '%s' does not have data member %s" % \
(return_type, self.field))
|
bsd-3-clause
|
xin3liang/platform_external_chromium_org
|
tools/idl_parser/idl_parser_test.py
|
176
|
3689
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import unittest
from idl_lexer import IDLLexer
from idl_parser import IDLParser, ParseFile
from idl_ppapi_lexer import IDLPPAPILexer
from idl_ppapi_parser import IDLPPAPIParser
def ParseCommentTest(comment):
comment = comment.strip()
comments = comment.split(None, 1)
return comments[0], comments[1]
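# Test IDL files embed their expectations in comments; ParseCommentTest splits
# the leading keyword from the payload, so a comment whose text is
# "BUILD Interface(Foo)" (payload illustrative) yields
# ('BUILD', 'Interface(Foo)'), which _TestNode then checks against the node.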
class WebIDLParser(unittest.TestCase):
def setUp(self):
self.parser = IDLParser(IDLLexer(), mute_error=True)
self.filenames = glob.glob('test_parser/*_web.idl')
def _TestNode(self, node):
comments = node.GetListOf('Comment')
for comment in comments:
check, value = ParseCommentTest(comment.GetName())
if check == 'BUILD':
msg = 'Expecting %s, but found %s.\n' % (value, str(node))
self.assertEqual(value, str(node), msg)
if check == 'ERROR':
msg = node.GetLogLine('Expecting\n\t%s\nbut found \n\t%s\n' % (
value, str(node)))
self.assertEqual(value, node.GetName(), msg)
if check == 'PROP':
key, expect = value.split('=')
actual = str(node.GetProperty(key))
msg = 'Mismatched property %s: %s vs %s.\n' % (key, expect, actual)
self.assertEqual(expect, actual, msg)
if check == 'TREE':
quick = '\n'.join(node.Tree())
lineno = node.GetProperty('LINENO')
msg = 'Mismatched tree at line %d:\n%sVS\n%s' % (lineno, value, quick)
self.assertEqual(value, quick, msg)
def testExpectedNodes(self):
for filename in self.filenames:
filenode = ParseFile(self.parser, filename)
children = filenode.GetChildren()
self.assertTrue(len(children) > 2, 'Expecting children in %s.' %
filename)
for node in filenode.GetChildren()[2:]:
self._TestNode(node)
class PepperIDLParser(unittest.TestCase):
def setUp(self):
self.parser = IDLPPAPIParser(IDLPPAPILexer(), mute_error=True)
self.filenames = glob.glob('test_parser/*_ppapi.idl')
def _TestNode(self, filename, node):
comments = node.GetListOf('Comment')
for comment in comments:
check, value = ParseCommentTest(comment.GetName())
if check == 'BUILD':
msg = '%s - Expecting %s, but found %s.\n' % (
filename, value, str(node))
self.assertEqual(value, str(node), msg)
if check == 'ERROR':
msg = node.GetLogLine('%s - Expecting\n\t%s\nbut found \n\t%s\n' % (
filename, value, str(node)))
self.assertEqual(value, node.GetName(), msg)
if check == 'PROP':
key, expect = value.split('=')
actual = str(node.GetProperty(key))
msg = '%s - Mismatched property %s: %s vs %s.\n' % (
filename, key, expect, actual)
self.assertEqual(expect, actual, msg)
if check == 'TREE':
quick = '\n'.join(node.Tree())
lineno = node.GetProperty('LINENO')
msg = '%s - Mismatched tree at line %d:\n%sVS\n%s' % (
filename, lineno, value, quick)
self.assertEqual(value, quick, msg)
def testExpectedNodes(self):
for filename in self.filenames:
filenode = ParseFile(self.parser, filename)
children = filenode.GetChildren()
self.assertTrue(len(children) > 2, 'Expecting children in %s.' %
filename)
for node in filenode.GetChildren()[2:]:
self._TestNode(filename, node)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
bsd-3-clause
|
shoyer/xarray
|
xarray/tests/test_variable.py
|
1
|
87655
|
import warnings
from copy import copy, deepcopy
from datetime import datetime, timedelta
from textwrap import dedent
import numpy as np
import pandas as pd
import pytest
import pytz
from xarray import Coordinate, Dataset, IndexVariable, Variable, set_options
from xarray.core import dtypes, duck_array_ops, indexing
from xarray.core.common import full_like, ones_like, zeros_like
from xarray.core.indexing import (
BasicIndexer,
CopyOnWriteArray,
DaskIndexingAdapter,
LazilyOuterIndexedArray,
MemoryCachedArray,
NumpyIndexingAdapter,
OuterIndexer,
PandasIndexAdapter,
VectorizedIndexer,
)
from xarray.core.pycompat import dask_array_type
from xarray.core.utils import NDArrayMixin
from xarray.core.variable import as_compatible_data, as_variable
from xarray.tests import requires_bottleneck
from . import (
assert_allclose,
assert_array_equal,
assert_equal,
assert_identical,
raises_regex,
requires_dask,
requires_sparse,
source_ndarray,
)
_PAD_XR_NP_ARGS = [
[{"x": (2, 1)}, ((2, 1), (0, 0), (0, 0))],
[{"x": 1}, ((1, 1), (0, 0), (0, 0))],
[{"y": (0, 3)}, ((0, 0), (0, 3), (0, 0))],
[{"x": (3, 1), "z": (2, 0)}, ((3, 1), (0, 0), (2, 0))],
[{"x": (3, 1), "z": 2}, ((3, 1), (0, 0), (2, 2))],
]
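# Each entry pairs xarray-style pad widths (keyed by dimension name) with the
# equivalent positional numpy.pad widths for a 3-d ("x", "y", "z") array;
# e.g. {"x": (2, 1)} pads only the first axis, and a bare int such as
# {"x": 1} means (1, 1) on that axis.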
class VariableSubclassobjects:
def test_properties(self):
data = 0.5 * np.arange(10)
v = self.cls(["time"], data, {"foo": "bar"})
assert v.dims == ("time",)
assert_array_equal(v.values, data)
assert v.dtype == float
assert v.shape == (10,)
assert v.size == 10
assert v.sizes == {"time": 10}
assert v.nbytes == 80
assert v.ndim == 1
assert len(v) == 10
assert v.attrs == {"foo": "bar"}
def test_attrs(self):
v = self.cls(["time"], 0.5 * np.arange(10))
assert v.attrs == {}
attrs = {"foo": "bar"}
v.attrs = attrs
assert v.attrs == attrs
assert isinstance(v.attrs, dict)
v.attrs["foo"] = "baz"
assert v.attrs["foo"] == "baz"
def test_getitem_dict(self):
v = self.cls(["x"], np.random.randn(5))
actual = v[{"x": 0}]
expected = v[0]
assert_identical(expected, actual)
def test_getitem_1d(self):
data = np.array([0, 1, 2])
v = self.cls(["x"], data)
v_new = v[dict(x=[0, 1])]
assert v_new.dims == ("x",)
assert_array_equal(v_new, data[[0, 1]])
v_new = v[dict(x=slice(None))]
assert v_new.dims == ("x",)
assert_array_equal(v_new, data)
v_new = v[dict(x=Variable("a", [0, 1]))]
assert v_new.dims == ("a",)
assert_array_equal(v_new, data[[0, 1]])
v_new = v[dict(x=1)]
assert v_new.dims == ()
assert_array_equal(v_new, data[1])
# tuple argument
v_new = v[slice(None)]
assert v_new.dims == ("x",)
assert_array_equal(v_new, data)
def test_getitem_1d_fancy(self):
v = self.cls(["x"], [0, 1, 2])
# 1d-variable should be indexable by multi-dimensional Variable
ind = Variable(("a", "b"), [[0, 1], [0, 1]])
v_new = v[ind]
assert v_new.dims == ("a", "b")
expected = np.array(v._data)[([0, 1], [0, 1]), ...]
assert_array_equal(v_new, expected)
# boolean indexing
ind = Variable(("x",), [True, False, True])
v_new = v[ind]
assert_identical(v[[0, 2]], v_new)
v_new = v[[True, False, True]]
assert_identical(v[[0, 2]], v_new)
with raises_regex(IndexError, "Boolean indexer should"):
ind = Variable(("a",), [True, False, True])
v[ind]
def test_getitem_with_mask(self):
v = self.cls(["x"], [0, 1, 2])
assert_identical(v._getitem_with_mask(-1), Variable((), np.nan))
assert_identical(
v._getitem_with_mask([0, -1, 1]), self.cls(["x"], [0, np.nan, 1])
)
assert_identical(v._getitem_with_mask(slice(2)), self.cls(["x"], [0, 1]))
assert_identical(
v._getitem_with_mask([0, -1, 1], fill_value=-99),
self.cls(["x"], [0, -99, 1]),
)
def test_getitem_with_mask_size_zero(self):
v = self.cls(["x"], [])
assert_identical(v._getitem_with_mask(-1), Variable((), np.nan))
assert_identical(
v._getitem_with_mask([-1, -1, -1]),
self.cls(["x"], [np.nan, np.nan, np.nan]),
)
def test_getitem_with_mask_nd_indexer(self):
v = self.cls(["x"], [0, 1, 2])
indexer = Variable(("x", "y"), [[0, -1], [-1, 2]])
assert_identical(v._getitem_with_mask(indexer, fill_value=-1), indexer)
def _assertIndexedLikeNDArray(self, variable, expected_value0, expected_dtype=None):
"""Given a 1-dimensional variable, verify that the variable is indexed
like a numpy.ndarray.
"""
assert variable[0].shape == ()
assert variable[0].ndim == 0
assert variable[0].size == 1
# test identity
assert variable.equals(variable.copy())
assert variable.identical(variable.copy())
# check value is equal for both ndarray and Variable
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "In the future, 'NAT == x'")
np.testing.assert_equal(variable.values[0], expected_value0)
np.testing.assert_equal(variable[0].values, expected_value0)
# check type or dtype is consistent for both ndarray and Variable
if expected_dtype is None:
# check output type instead of array dtype
assert type(variable.values[0]) == type(expected_value0)
assert type(variable[0].values) == type(expected_value0)
elif expected_dtype is not False:
assert variable.values[0].dtype == expected_dtype
assert variable[0].values.dtype == expected_dtype
def test_index_0d_int(self):
for value, dtype in [(0, np.int_), (np.int32(0), np.int32)]:
x = self.cls(["x"], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_float(self):
for value, dtype in [(0.5, np.float_), (np.float32(0.5), np.float32)]:
x = self.cls(["x"], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_string(self):
value = "foo"
dtype = np.dtype("U3")
x = self.cls(["x"], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_datetime(self):
d = datetime(2000, 1, 1)
x = self.cls(["x"], [d])
self._assertIndexedLikeNDArray(x, np.datetime64(d))
x = self.cls(["x"], [np.datetime64(d)])
self._assertIndexedLikeNDArray(x, np.datetime64(d), "datetime64[ns]")
x = self.cls(["x"], pd.DatetimeIndex([d]))
self._assertIndexedLikeNDArray(x, np.datetime64(d), "datetime64[ns]")
def test_index_0d_timedelta64(self):
td = timedelta(hours=1)
x = self.cls(["x"], [np.timedelta64(td)])
self._assertIndexedLikeNDArray(x, np.timedelta64(td), "timedelta64[ns]")
x = self.cls(["x"], pd.to_timedelta([td]))
self._assertIndexedLikeNDArray(x, np.timedelta64(td), "timedelta64[ns]")
def test_index_0d_not_a_time(self):
d = np.datetime64("NaT", "ns")
x = self.cls(["x"], [d])
self._assertIndexedLikeNDArray(x, d)
def test_index_0d_object(self):
class HashableItemWrapper:
def __init__(self, item):
self.item = item
def __eq__(self, other):
return self.item == other.item
def __hash__(self):
return hash(self.item)
def __repr__(self):
return "{}(item={!r})".format(type(self).__name__, self.item)
item = HashableItemWrapper((1, 2, 3))
x = self.cls("x", [item])
self._assertIndexedLikeNDArray(x, item, expected_dtype=False)
def test_0d_object_array_with_list(self):
listarray = np.empty((1,), dtype=object)
listarray[0] = [1, 2, 3]
x = self.cls("x", listarray)
assert_array_equal(x.data, listarray)
assert_array_equal(x[0].data, listarray.squeeze())
assert_array_equal(x.squeeze().data, listarray.squeeze())
def test_index_and_concat_datetime(self):
# regression test for #125
date_range = pd.date_range("2011-09-01", periods=10)
for dates in [date_range, date_range.values, date_range.to_pydatetime()]:
expected = self.cls("t", dates)
for times in [
[expected[i] for i in range(10)],
[expected[i : (i + 1)] for i in range(10)],
[expected[[i]] for i in range(10)],
]:
actual = Variable.concat(times, "t")
assert expected.dtype == actual.dtype
assert_array_equal(expected, actual)
def test_0d_time_data(self):
# regression test for #105
x = self.cls("time", pd.date_range("2000-01-01", periods=5))
expected = np.datetime64("2000-01-01", "ns")
assert x[0].values == expected
def test_datetime64_conversion(self):
times = pd.date_range("2000-01-01", periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype("datetime64[s]"), False),
(times.to_pydatetime(), False),
]:
v = self.cls(["t"], values)
assert v.dtype == np.dtype("datetime64[ns]")
assert_array_equal(v.values, times.values)
assert v.values.dtype == np.dtype("datetime64[ns]")
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_timedelta64_conversion(self):
times = pd.timedelta_range(start=0, periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype("timedelta64[s]"), False),
(times.to_pytimedelta(), False),
]:
v = self.cls(["t"], values)
assert v.dtype == np.dtype("timedelta64[ns]")
assert_array_equal(v.values, times.values)
assert v.values.dtype == np.dtype("timedelta64[ns]")
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_object_conversion(self):
data = np.arange(5).astype(str).astype(object)
actual = self.cls("x", data)
assert actual.dtype == data.dtype
def test_pandas_data(self):
v = self.cls(["x"], pd.Series([0, 1, 2], index=[3, 2, 1]))
assert_identical(v, v[[0, 1, 2]])
v = self.cls(["x"], pd.Index([0, 1, 2]))
assert v[0].values == v.values[0]
def test_pandas_period_index(self):
v = self.cls(["x"], pd.period_range(start="2000", periods=20, freq="B"))
v = v.load() # for dask-based Variable
assert v[0] == pd.Period("2000", freq="B")
assert "Period('2000-01-03', 'B')" in repr(v)
def test_1d_math(self):
x = 1.0 * np.arange(5)
y = np.ones(5)
# do we need `.to_base_variable()`?
# it is probably a breaking change that `+v` changes type
v = self.cls(["x"], x)
base_v = v.to_base_variable()
# unary ops
assert_identical(base_v, +v)
assert_identical(base_v, abs(v))
assert_array_equal((-v).values, -x)
# binary ops with numbers
assert_identical(base_v, v + 0)
assert_identical(base_v, 0 + v)
assert_identical(base_v, v * 1)
# binary ops with numpy arrays
assert_array_equal((v * x).values, x ** 2)
assert_array_equal((x * v).values, x ** 2)
assert_array_equal(v - y, v - 1)
assert_array_equal(y - v, 1 - v)
# verify attributes are dropped
v2 = self.cls(["x"], x, {"units": "meters"})
assert_identical(base_v, +v2)
# binary ops with all variables
assert_array_equal(v + v, 2 * v)
w = self.cls(["x"], y, {"foo": "bar"})
assert_identical(v + w, self.cls(["x"], x + y).to_base_variable())
assert_array_equal((v * w).values, x * y)
# something complicated
assert_array_equal((v ** 2 * w - 1 + x).values, x ** 2 * y - 1 + x)
# make sure dtype is preserved (for Index objects)
assert float == (+v).dtype
assert float == (+v).values.dtype
assert float == (0 + v).dtype
assert float == (0 + v).values.dtype
# check types of returned data
assert isinstance(+v, Variable)
assert not isinstance(+v, IndexVariable)
assert isinstance(0 + v, Variable)
assert not isinstance(0 + v, IndexVariable)
def test_1d_reduce(self):
x = np.arange(5)
v = self.cls(["x"], x)
actual = v.sum()
expected = Variable((), 10)
assert_identical(expected, actual)
assert type(actual) is Variable
def test_array_interface(self):
x = np.arange(5)
v = self.cls(["x"], x)
assert_array_equal(np.asarray(v), x)
# test patched in methods
assert_array_equal(v.astype(float), x.astype(float))
# this may be a breaking change: argsort changes the type
assert_identical(v.argsort(), v.to_base_variable())
assert_identical(v.clip(2, 3), self.cls("x", x.clip(2, 3)).to_base_variable())
# test ufuncs
assert_identical(np.sin(v), self.cls(["x"], np.sin(x)).to_base_variable())
assert isinstance(np.sin(v), Variable)
assert not isinstance(np.sin(v), IndexVariable)
def example_1d_objects(self):
for data in [
range(3),
0.5 * np.arange(3),
0.5 * np.arange(3, dtype=np.float32),
pd.date_range("2000-01-01", periods=3),
np.array(["a", "b", "c"], dtype=object),
]:
yield (self.cls("x", data), data)
def test___array__(self):
for v, data in self.example_1d_objects():
assert_array_equal(v.values, np.asarray(data))
assert_array_equal(np.asarray(v), np.asarray(data))
assert v[0].values == np.asarray(data)[0]
assert np.asarray(v[0]) == np.asarray(data)[0]
def test_equals_all_dtypes(self):
for v, _ in self.example_1d_objects():
v2 = v.copy()
assert v.equals(v2)
assert v.identical(v2)
assert v.no_conflicts(v2)
assert v[0].equals(v2[0])
assert v[0].identical(v2[0])
assert v[0].no_conflicts(v2[0])
assert v[:2].equals(v2[:2])
assert v[:2].identical(v2[:2])
assert v[:2].no_conflicts(v2[:2])
def test_eq_all_dtypes(self):
# ensure that we don't choke on comparisons for which numpy returns
# scalars
expected = Variable("x", 3 * [False])
for v, _ in self.example_1d_objects():
actual = "z" == v
assert_identical(expected, actual)
actual = ~("z" != v)
assert_identical(expected, actual)
def test_encoding_preserved(self):
expected = self.cls("x", range(3), {"foo": 1}, {"bar": 2})
for actual in [
expected.T,
expected[...],
expected.squeeze(),
expected.isel(x=slice(None)),
expected.set_dims({"x": 3}),
expected.copy(deep=True),
expected.copy(deep=False),
]:
assert_identical(expected.to_base_variable(), actual.to_base_variable())
assert expected.encoding == actual.encoding
def test_concat(self):
x = np.arange(5)
y = np.arange(5, 10)
v = self.cls(["a"], x)
w = self.cls(["a"], y)
assert_identical(
Variable(["b", "a"], np.array([x, y])), Variable.concat([v, w], "b")
)
assert_identical(
Variable(["b", "a"], np.array([x, y])), Variable.concat((v, w), "b")
)
assert_identical(
Variable(["b", "a"], np.array([x, y])), Variable.concat((v, w), "b")
)
with raises_regex(ValueError, "Variable has dimensions"):
Variable.concat([v, Variable(["c"], y)], "b")
# test indexers
actual = Variable.concat(
[v, w], positions=[np.arange(0, 10, 2), np.arange(1, 10, 2)], dim="a"
)
expected = Variable("a", np.array([x, y]).ravel(order="F"))
assert_identical(expected, actual)
# test concatenating along a dimension
v = Variable(["time", "x"], np.random.random((10, 8)))
assert_identical(v, Variable.concat([v[:5], v[5:]], "time"))
assert_identical(v, Variable.concat([v[:5], v[5:6], v[6:]], "time"))
assert_identical(v, Variable.concat([v[:1], v[1:]], "time"))
# test dimension order
assert_identical(v, Variable.concat([v[:, :5], v[:, 5:]], "x"))
with raises_regex(ValueError, "all input arrays must have"):
Variable.concat([v[:, 0], v[:, 1:]], "x")
def test_concat_attrs(self):
# always keep attrs from first variable
v = self.cls("a", np.arange(5), {"foo": "bar"})
w = self.cls("a", np.ones(5))
expected = self.cls(
"a", np.concatenate([np.arange(5), np.ones(5)])
).to_base_variable()
expected.attrs["foo"] = "bar"
assert_identical(expected, Variable.concat([v, w], "a"))
def test_concat_fixed_len_str(self):
# regression test for #217
for kind in ["S", "U"]:
x = self.cls("animal", np.array(["horse"], dtype=kind))
y = self.cls("animal", np.array(["aardvark"], dtype=kind))
actual = Variable.concat([x, y], "animal")
expected = Variable("animal", np.array(["horse", "aardvark"], dtype=kind))
assert_equal(expected, actual)
def test_concat_number_strings(self):
# regression test for #305
a = self.cls("x", ["0", "1", "2"])
b = self.cls("x", ["3", "4"])
actual = Variable.concat([a, b], dim="x")
expected = Variable("x", np.arange(5).astype(str))
assert_identical(expected, actual)
assert actual.dtype.kind == expected.dtype.kind
def test_concat_mixed_dtypes(self):
a = self.cls("x", [0, 1])
b = self.cls("x", ["two"])
actual = Variable.concat([a, b], dim="x")
expected = Variable("x", np.array([0, 1, "two"], dtype=object))
assert_identical(expected, actual)
assert actual.dtype == object
@pytest.mark.parametrize("deep", [True, False])
@pytest.mark.parametrize("astype", [float, int, str])
def test_copy(self, deep, astype):
v = self.cls("x", (0.5 * np.arange(10)).astype(astype), {"foo": "bar"})
w = v.copy(deep=deep)
assert type(v) is type(w)
assert_identical(v, w)
assert v.dtype == w.dtype
if self.cls is Variable:
if deep:
assert source_ndarray(v.values) is not source_ndarray(w.values)
else:
assert source_ndarray(v.values) is source_ndarray(w.values)
assert_identical(v, copy(v))
def test_copy_index(self):
midx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three")
)
v = self.cls("x", midx)
for deep in [True, False]:
w = v.copy(deep=deep)
assert isinstance(w._data, PandasIndexAdapter)
assert isinstance(w.to_index(), pd.MultiIndex)
assert_array_equal(v._data.array, w._data.array)
def test_copy_with_data(self):
orig = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"})
new_data = np.array([[2.5, 5.0], [7.1, 43]])
actual = orig.copy(data=new_data)
expected = orig.copy()
expected.data = new_data
assert_identical(expected, actual)
def test_copy_with_data_errors(self):
orig = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"})
new_data = [2.5, 5.0]
with raises_regex(ValueError, "must match shape of object"):
orig.copy(data=new_data)
def test_copy_index_with_data(self):
orig = IndexVariable("x", np.arange(5))
new_data = np.arange(5, 10)
actual = orig.copy(data=new_data)
expected = IndexVariable("x", np.arange(5, 10))
assert_identical(expected, actual)
def test_copy_index_with_data_errors(self):
orig = IndexVariable("x", np.arange(5))
new_data = np.arange(5, 20)
with raises_regex(ValueError, "must match shape of object"):
orig.copy(data=new_data)
with raises_regex(ValueError, "Cannot assign to the .data"):
orig.data = new_data
with raises_regex(ValueError, "Cannot assign to the .values"):
orig.values = new_data
def test_replace(self):
var = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"})
result = var._replace()
assert_identical(result, var)
new_data = np.arange(4).reshape(2, 2)
result = var._replace(data=new_data)
assert_array_equal(result.data, new_data)
def test_real_and_imag(self):
v = self.cls("x", np.arange(3) - 1j * np.arange(3), {"foo": "bar"})
expected_re = self.cls("x", np.arange(3), {"foo": "bar"})
assert_identical(v.real, expected_re)
expected_im = self.cls("x", -np.arange(3), {"foo": "bar"})
assert_identical(v.imag, expected_im)
expected_abs = self.cls("x", np.sqrt(2 * np.arange(3) ** 2)).to_base_variable()
assert_allclose(abs(v), expected_abs)
def test_aggregate_complex(self):
# should skip NaNs
v = self.cls("x", [1, 2j, np.nan])
expected = Variable((), 0.5 + 1j)
assert_allclose(v.mean(), expected)
def test_pandas_categorical_dtype(self):
data = pd.Categorical(np.arange(10, dtype="int64"))
v = self.cls("x", data)
print(v) # should not error
assert v.dtype == "int64"
def test_pandas_datetime64_with_tz(self):
data = pd.date_range(
start="2000-01-01",
tz=pytz.timezone("America/New_York"),
periods=10,
freq="1h",
)
v = self.cls("x", data)
print(v) # should not error
if "America/New_York" in str(data.dtype):
# pandas is new enough that it has datetime64 with timezone dtype
assert v.dtype == "object"
def test_multiindex(self):
idx = pd.MultiIndex.from_product([list("abc"), [0, 1]])
v = self.cls("x", idx)
assert_identical(Variable((), ("a", 0)), v[0])
assert_identical(v, v[:])
def test_load(self):
array = self.cls("x", np.arange(5))
orig_data = array._data
copied = array.copy(deep=True)
if array.chunks is None:
array.load()
assert type(array._data) is type(orig_data)
assert type(copied._data) is type(orig_data)
assert_identical(array, copied)
def test_getitem_advanced(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
v_data = v.compute().data
# orthogonal indexing
v_new = v[([0, 1], [1, 0])]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v_data[[0, 1]][:, [1, 0]])
v_new = v[[0, 1]]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v_data[[0, 1]])
# with mixed arguments
ind = Variable(["a"], [0, 1])
v_new = v[dict(x=[0, 1], y=ind)]
assert v_new.dims == ("x", "a")
assert_array_equal(v_new, v_data[[0, 1]][:, [0, 1]])
# boolean indexing
v_new = v[dict(x=[True, False], y=[False, True, False])]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v_data[0][1])
# with scalar variable
ind = Variable((), 2)
v_new = v[dict(y=ind)]
expected = v[dict(y=2)]
assert_array_equal(v_new, expected)
# with boolean variable with wrong shape
ind = np.array([True, False])
with raises_regex(IndexError, "Boolean array size 2 is "):
v[Variable(("a", "b"), [[0, 1]]), ind]
# boolean indexing with different dimension
ind = Variable(["a"], [True, False, False])
with raises_regex(IndexError, "Boolean indexer should be"):
v[dict(y=ind)]
def test_getitem_uint_1d(self):
# regression test for #1405
v = self.cls(["x"], [0, 1, 2])
v_data = v.compute().data
v_new = v[np.array([0])]
assert_array_equal(v_new, v_data[0])
v_new = v[np.array([0], dtype="uint64")]
assert_array_equal(v_new, v_data[0])
def test_getitem_uint(self):
# regression test for #1405
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
v_data = v.compute().data
v_new = v[np.array([0])]
assert_array_equal(v_new, v_data[[0], :])
v_new = v[np.array([0], dtype="uint64")]
assert_array_equal(v_new, v_data[[0], :])
v_new = v[np.uint64(0)]
assert_array_equal(v_new, v_data[0, :])
def test_getitem_0d_array(self):
# make sure 0d-np.array can be used as an indexer
v = self.cls(["x"], [0, 1, 2])
v_data = v.compute().data
v_new = v[np.array([0])[0]]
assert_array_equal(v_new, v_data[0])
v_new = v[np.array(0)]
assert_array_equal(v_new, v_data[0])
v_new = v[Variable((), np.array(0))]
assert_array_equal(v_new, v_data[0])
def test_getitem_fancy(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
v_data = v.compute().data
ind = Variable(["a", "b"], [[0, 1, 1], [1, 1, 0]])
v_new = v[ind]
assert v_new.dims == ("a", "b", "y")
assert_array_equal(v_new, v_data[[[0, 1, 1], [1, 1, 0]], :])
# Indexing with a multi-dimensional array that includes the same
# dimension name is OK
ind = Variable(["x", "b"], [[0, 1, 1], [1, 1, 0]])
v_new = v[ind]
assert v_new.dims == ("x", "b", "y")
assert_array_equal(v_new, v_data[[[0, 1, 1], [1, 1, 0]], :])
ind = Variable(["a", "b"], [[0, 1, 2], [2, 1, 0]])
v_new = v[dict(y=ind)]
assert v_new.dims == ("x", "a", "b")
assert_array_equal(v_new, v_data[:, ([0, 1, 2], [2, 1, 0])])
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=[1, 0], y=ind)]
assert v_new.dims == ("x", "a", "b")
assert_array_equal(v_new, v_data[[1, 0]][:, ind])
# along diagonal
ind = Variable(["a"], [0, 1])
v_new = v[ind, ind]
assert v_new.dims == ("a",)
assert_array_equal(v_new, v_data[[0, 1], [0, 1]])
# with integer
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=0, y=ind)]
assert v_new.dims == ("a", "b")
assert_array_equal(v_new[0], v_data[0][[0, 0]])
assert_array_equal(v_new[1], v_data[0][[1, 1]])
# with slice
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=slice(None), y=ind)]
assert v_new.dims == ("x", "a", "b")
assert_array_equal(v_new, v_data[:, [[0, 0], [1, 1]]])
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=ind, y=slice(None))]
assert v_new.dims == ("a", "b", "y")
assert_array_equal(v_new, v_data[[[0, 0], [1, 1]], :])
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=ind, y=slice(None, 1))]
assert v_new.dims == ("a", "b", "y")
assert_array_equal(v_new, v_data[[[0, 0], [1, 1]], slice(None, 1)])
# slice matches explicit dimension
ind = Variable(["y"], [0, 1])
v_new = v[ind, :2]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v_data[[0, 1], [0, 1]])
# with multiple slices
v = self.cls(["x", "y", "z"], [[[1, 2, 3], [4, 5, 6]]])
ind = Variable(["a", "b"], [[0]])
v_new = v[ind, :, :]
expected = Variable(["a", "b", "y", "z"], v.data[np.newaxis, ...])
assert_identical(v_new, expected)
v = Variable(["w", "x", "y", "z"], [[[[1, 2, 3], [4, 5, 6]]]])
ind = Variable(["y"], [0])
v_new = v[ind, :, 1:2, 2]
expected = Variable(["y", "x"], [[6]])
assert_identical(v_new, expected)
# slice and vector mixed indexing resulting in the same dimension
v = Variable(["x", "y", "z"], np.arange(60).reshape(3, 4, 5))
ind = Variable(["x"], [0, 1, 2])
v_new = v[:, ind]
expected = Variable(("x", "z"), np.zeros((3, 5)))
expected[0] = v.data[0, 0]
expected[1] = v.data[1, 1]
expected[2] = v.data[2, 2]
assert_identical(v_new, expected)
v_new = v[:, ind.data]
assert v_new.shape == (3, 3, 5)
def test_getitem_error(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
with raises_regex(IndexError, "labeled multi-"):
v[[[0, 1], [1, 2]]]
ind_x = Variable(["a"], [0, 1, 1])
ind_y = Variable(["a"], [0, 1])
with raises_regex(IndexError, "Dimensions of indexers "):
v[ind_x, ind_y]
ind = Variable(["a", "b"], [[True, False], [False, True]])
with raises_regex(IndexError, "2-dimensional boolean"):
v[dict(x=ind)]
v = Variable(["x", "y", "z"], np.arange(60).reshape(3, 4, 5))
ind = Variable(["x"], [0, 1])
with raises_regex(IndexError, "Dimensions of indexers mis"):
v[:, ind]
@pytest.mark.parametrize(
"mode",
[
"mean",
pytest.param(
"median",
marks=pytest.mark.xfail(reason="median is not implemented by Dask"),
),
pytest.param(
"reflect", marks=pytest.mark.xfail(reason="dask.array.pad bug")
),
"edge",
pytest.param(
"linear_ramp",
marks=pytest.mark.xfail(
reason="pint bug: https://github.com/hgrecco/pint/issues/1026"
),
),
"maximum",
"minimum",
"symmetric",
"wrap",
],
)
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad(self, mode, xr_arg, np_arg):
data = np.arange(4 * 3 * 2).reshape(4, 3, 2)
v = self.cls(["x", "y", "z"], data)
actual = v.pad(mode=mode, **xr_arg)
expected = np.pad(data, np_arg, mode=mode)
assert_array_equal(actual, expected)
assert isinstance(actual._data, type(v._data))
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad_constant_values(self, xr_arg, np_arg):
data = np.arange(4 * 3 * 2).reshape(4, 3, 2)
v = self.cls(["x", "y", "z"], data)
actual = v.pad(**xr_arg)
expected = np.pad(
np.array(v.data.astype(float)),
np_arg,
mode="constant",
constant_values=np.nan,
)
assert_array_equal(actual, expected)
assert isinstance(actual._data, type(v._data))
# for boolean arrays, we pad with False
data = np.full_like(data, False, dtype=bool).reshape(4, 3, 2)
v = self.cls(["x", "y", "z"], data)
actual = v.pad(mode="constant", constant_values=False, **xr_arg)
expected = np.pad(
np.array(v.data), np_arg, mode="constant", constant_values=False
)
assert_array_equal(actual, expected)
def test_rolling_window(self):
# Just a working test. See test_nputils for the algorithm validation
v = self.cls(["x", "y", "z"], np.arange(40 * 30 * 2).reshape(40, 30, 2))
for (d, w) in [("x", 3), ("y", 5)]:
v_rolling = v.rolling_window(d, w, d + "_window")
assert v_rolling.dims == ("x", "y", "z", d + "_window")
assert v_rolling.shape == v.shape + (w,)
v_rolling = v.rolling_window(d, w, d + "_window", center=True)
assert v_rolling.dims == ("x", "y", "z", d + "_window")
assert v_rolling.shape == v.shape + (w,)
# dask and numpy result should be the same
v_loaded = v.load().rolling_window(d, w, d + "_window", center=True)
assert_array_equal(v_rolling, v_loaded)
# the numpy backend should not be overwritten
if isinstance(v._data, np.ndarray):
with pytest.raises(ValueError):
v_loaded[0] = 1.0
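# Illustrative sketch (editor's note, not part of the original suite):
# rolling_window appends a trailing window dimension, so for
#   v = Variable(("x",), [0.0, 1.0, 2.0, 3.0])
#   v.rolling_window("x", 3, "x_window", center=True)
# the result has dims ("x", "x_window") and shape (4, 3), with
# dtype-dependent fill values (NaN for floats) padding the boundaries.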
class TestVariable(VariableSubclassobjects):
cls = staticmethod(Variable)
@pytest.fixture(autouse=True)
def setup(self):
self.d = np.random.random((10, 3)).astype(np.float64)
def test_data_and_values(self):
v = Variable(["time", "x"], self.d)
assert_array_equal(v.data, self.d)
assert_array_equal(v.values, self.d)
assert source_ndarray(v.values) is self.d
with pytest.raises(ValueError):
# wrong size
v.values = np.random.random(5)
d2 = np.random.random((10, 3))
v.values = d2
assert source_ndarray(v.values) is d2
d3 = np.random.random((10, 3))
v.data = d3
assert source_ndarray(v.data) is d3
def test_numpy_same_methods(self):
v = Variable([], np.float32(0.0))
assert v.item() == 0
assert type(v.item()) is float
v = IndexVariable("x", np.arange(5))
assert 2 == v.searchsorted(2)
def test_datetime64_conversion_scalar(self):
expected = np.datetime64("2000-01-01", "ns")
for values in [
np.datetime64("2000-01-01"),
pd.Timestamp("2000-01-01T00"),
datetime(2000, 1, 1),
]:
v = Variable([], values)
assert v.dtype == np.dtype("datetime64[ns]")
assert v.values == expected
assert v.values.dtype == np.dtype("datetime64[ns]")
def test_timedelta64_conversion_scalar(self):
expected = np.timedelta64(24 * 60 * 60 * 10 ** 9, "ns")
for values in [
np.timedelta64(1, "D"),
pd.Timedelta("1 day"),
timedelta(days=1),
]:
v = Variable([], values)
assert v.dtype == np.dtype("timedelta64[ns]")
assert v.values == expected
assert v.values.dtype == np.dtype("timedelta64[ns]")
def test_0d_str(self):
v = Variable([], "foo")
assert v.dtype == np.dtype("U3")
assert v.values == "foo"
v = Variable([], np.string_("foo"))
assert v.dtype == np.dtype("S3")
assert v.values == bytes("foo", "ascii")
def test_0d_datetime(self):
v = Variable([], pd.Timestamp("2000-01-01"))
assert v.dtype == np.dtype("datetime64[ns]")
assert v.values == np.datetime64("2000-01-01", "ns")
def test_0d_timedelta(self):
for td in [pd.to_timedelta("1s"), np.timedelta64(1, "s")]:
v = Variable([], td)
assert v.dtype == np.dtype("timedelta64[ns]")
assert v.values == np.timedelta64(10 ** 9, "ns")
def test_equals_and_identical(self):
d = np.random.rand(10, 3)
d[0, 0] = np.nan
v1 = Variable(("dim1", "dim2"), data=d, attrs={"att1": 3, "att2": [1, 2, 3]})
v2 = Variable(("dim1", "dim2"), data=d, attrs={"att1": 3, "att2": [1, 2, 3]})
assert v1.equals(v2)
assert v1.identical(v2)
v3 = Variable(("dim1", "dim3"), data=d)
assert not v1.equals(v3)
v4 = Variable(("dim1", "dim2"), data=d)
assert v1.equals(v4)
assert not v1.identical(v4)
v5 = deepcopy(v1)
v5.values[:] = np.random.rand(10, 3)
assert not v1.equals(v5)
assert not v1.equals(None)
assert not v1.equals(d)
assert not v1.identical(None)
assert not v1.identical(d)
def test_broadcast_equals(self):
v1 = Variable((), np.nan)
v2 = Variable(("x"), [np.nan, np.nan])
assert v1.broadcast_equals(v2)
assert not v1.equals(v2)
assert not v1.identical(v2)
v3 = Variable(("x"), [np.nan])
assert v1.broadcast_equals(v3)
assert not v1.equals(v3)
assert not v1.identical(v3)
assert not v1.broadcast_equals(None)
v4 = Variable(("x"), [np.nan] * 3)
assert not v2.broadcast_equals(v4)
def test_no_conflicts(self):
v1 = Variable(("x"), [1, 2, np.nan, np.nan])
v2 = Variable(("x"), [np.nan, 2, 3, np.nan])
assert v1.no_conflicts(v2)
assert not v1.equals(v2)
assert not v1.broadcast_equals(v2)
assert not v1.identical(v2)
assert not v1.no_conflicts(None)
v3 = Variable(("y"), [np.nan, 2, 3, np.nan])
assert not v3.no_conflicts(v1)
d = np.array([1, 2, np.nan, np.nan])
assert not v1.no_conflicts(d)
assert not v2.no_conflicts(d)
v4 = Variable(("w", "x"), [d])
assert v1.no_conflicts(v4)
def test_as_variable(self):
data = np.arange(10)
expected = Variable("x", data)
expected_extra = Variable(
"x", data, attrs={"myattr": "val"}, encoding={"scale_factor": 1}
)
assert_identical(expected, as_variable(expected))
ds = Dataset({"x": expected})
var = as_variable(ds["x"]).to_base_variable()
assert_identical(expected, var)
assert not isinstance(ds["x"], Variable)
assert isinstance(as_variable(ds["x"]), Variable)
xarray_tuple = (
expected_extra.dims,
expected_extra.values,
expected_extra.attrs,
expected_extra.encoding,
)
assert_identical(expected_extra, as_variable(xarray_tuple))
with raises_regex(TypeError, "tuple of form"):
as_variable(tuple(data))
with raises_regex(ValueError, "tuple of form"): # GH1016
as_variable(("five", "six", "seven"))
with raises_regex(TypeError, "without an explicit list of dimensions"):
as_variable(data)
actual = as_variable(data, name="x")
assert_identical(expected.to_index_variable(), actual)
actual = as_variable(0)
expected = Variable([], 0)
assert_identical(expected, actual)
data = np.arange(9).reshape((3, 3))
expected = Variable(("x", "y"), data)
with raises_regex(ValueError, "without explicit dimension names"):
as_variable(data, name="x")
with raises_regex(ValueError, "has more than 1-dimension"):
as_variable(expected, name="x")
# test datetime, timedelta conversion
dt = np.array([datetime(1999, 1, 1) + timedelta(days=x) for x in range(10)])
assert as_variable(dt, "time").dtype.kind == "M"
td = np.array([timedelta(days=x) for x in range(10)])
assert as_variable(td, "time").dtype.kind == "m"
def test_repr(self):
v = Variable(["time", "x"], [[1, 2, 3], [4, 5, 6]], {"foo": "bar"})
expected = dedent(
"""
<xarray.Variable (time: 2, x: 3)>
array([[1, 2, 3],
[4, 5, 6]])
Attributes:
foo: bar
"""
).strip()
assert expected == repr(v)
def test_repr_lazy_data(self):
v = Variable("x", LazilyOuterIndexedArray(np.arange(2e5)))
assert "200000 values with dtype" in repr(v)
assert isinstance(v._data, LazilyOuterIndexedArray)
def test_detect_indexer_type(self):
""" Tests indexer type was correctly detected. """
data = np.random.random((10, 11))
v = Variable(["x", "y"], data)
_, ind, _ = v._broadcast_indexes((0, 1))
assert type(ind) == indexing.BasicIndexer
_, ind, _ = v._broadcast_indexes((0, slice(0, 8, 2)))
assert type(ind) == indexing.BasicIndexer
_, ind, _ = v._broadcast_indexes((0, [0, 1]))
assert type(ind) == indexing.OuterIndexer
_, ind, _ = v._broadcast_indexes(([0, 1], 1))
assert type(ind) == indexing.OuterIndexer
_, ind, _ = v._broadcast_indexes(([0, 1], [1, 2]))
assert type(ind) == indexing.OuterIndexer
_, ind, _ = v._broadcast_indexes(([0, 1], slice(0, 8, 2)))
assert type(ind) == indexing.OuterIndexer
vind = Variable(("a",), [0, 1])
_, ind, _ = v._broadcast_indexes((vind, slice(0, 8, 2)))
assert type(ind) == indexing.OuterIndexer
vind = Variable(("y",), [0, 1])
_, ind, _ = v._broadcast_indexes((vind, 3))
assert type(ind) == indexing.OuterIndexer
vind = Variable(("a",), [0, 1])
_, ind, _ = v._broadcast_indexes((vind, vind))
assert type(ind) == indexing.VectorizedIndexer
vind = Variable(("a", "b"), [[0, 2], [1, 3]])
_, ind, _ = v._broadcast_indexes((vind, 3))
assert type(ind) == indexing.VectorizedIndexer
def test_indexer_type(self):
# GH issue 1688: a wrong indexer type induces NotImplementedError
data = np.random.random((10, 11))
v = Variable(["x", "y"], data)
def assert_indexer_type(key, object_type):
dims, index_tuple, new_order = v._broadcast_indexes(key)
assert isinstance(index_tuple, object_type)
# should return BasicIndexer
assert_indexer_type((0, 1), BasicIndexer)
assert_indexer_type((0, slice(None, None)), BasicIndexer)
assert_indexer_type((Variable([], 3), slice(None, None)), BasicIndexer)
assert_indexer_type((Variable([], 3), (Variable([], 6))), BasicIndexer)
# should return OuterIndexer
assert_indexer_type(([0, 1], 1), OuterIndexer)
assert_indexer_type(([0, 1], [1, 2]), OuterIndexer)
assert_indexer_type((Variable(("x"), [0, 1]), 1), OuterIndexer)
assert_indexer_type((Variable(("x"), [0, 1]), slice(None, None)), OuterIndexer)
assert_indexer_type(
(Variable(("x"), [0, 1]), Variable(("y"), [0, 1])), OuterIndexer
)
# should return VectorizedIndexer
assert_indexer_type((Variable(("y"), [0, 1]), [0, 1]), VectorizedIndexer)
assert_indexer_type(
(Variable(("z"), [0, 1]), Variable(("z"), [0, 1])), VectorizedIndexer
)
assert_indexer_type(
(
Variable(("a", "b"), [[0, 1], [1, 2]]),
Variable(("a", "b"), [[0, 1], [1, 2]]),
),
VectorizedIndexer,
)
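# Editor's summary (a sketch, not part of the original suite) of the
# indexer taxonomy exercised above:
#   v[0, 1]                          -> BasicIndexer   (scalars and slices)
#   v[[0, 1], 1]                     -> OuterIndexer   (orthogonal arrays)
#   v[Variable(("z",), [0, 1]),
#     Variable(("z",), [0, 1])]      -> VectorizedIndexer (shared dims are
#                                       broadcast and applied pointwise)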
def test_items(self):
data = np.random.random((10, 11))
v = Variable(["x", "y"], data)
# test slicing
assert_identical(v, v[:])
assert_identical(v, v[...])
assert_identical(Variable(["y"], data[0]), v[0])
assert_identical(Variable(["x"], data[:, 0]), v[:, 0])
assert_identical(Variable(["x", "y"], data[:3, :2]), v[:3, :2])
# test array indexing
x = Variable(["x"], np.arange(10))
y = Variable(["y"], np.arange(11))
assert_identical(v, v[x.values])
assert_identical(v, v[x])
assert_identical(v[:3], v[x < 3])
assert_identical(v[:, 3:], v[:, y >= 3])
assert_identical(v[:3, 3:], v[x < 3, y >= 3])
assert_identical(v[:3, :2], v[x[:3], y[:2]])
assert_identical(v[:3, :2], v[range(3), range(2)])
# test iteration
for n, item in enumerate(v):
assert_identical(Variable(["y"], data[n]), item)
with raises_regex(TypeError, "iteration over a 0-d"):
iter(Variable([], 0))
# test setting
v.values[:] = 0
assert np.all(v.values == 0)
# test orthogonal setting
v[range(10), range(11)] = 1
assert_array_equal(v.values, np.ones((10, 11)))
def test_getitem_basic(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
# int argument
v_new = v[0]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v._data[0])
# slice argument
v_new = v[:2]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v._data[:2])
# list arguments
v_new = v[[0]]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v._data[[0]])
v_new = v[[]]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v._data[[]])
# dict arguments
v_new = v[dict(x=0)]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v._data[0])
v_new = v[dict(x=0, y=slice(None))]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v._data[0])
v_new = v[dict(x=0, y=1)]
assert v_new.dims == ()
assert_array_equal(v_new, v._data[0, 1])
v_new = v[dict(y=1)]
assert v_new.dims == ("x",)
assert_array_equal(v_new, v._data[:, 1])
# tuple argument
v_new = v[(slice(None), 1)]
assert v_new.dims == ("x",)
assert_array_equal(v_new, v._data[:, 1])
# test that we obtain a modifiable view when taking a 0d slice
v_new = v[0, 0]
v_new[...] += 99
assert_array_equal(v_new, v._data[0, 0])
def test_getitem_with_mask_2d_input(self):
v = Variable(("x", "y"), [[0, 1, 2], [3, 4, 5]])
assert_identical(
v._getitem_with_mask(([-1, 0], [1, -1])),
Variable(("x", "y"), [[np.nan, np.nan], [1, np.nan]]),
)
assert_identical(v._getitem_with_mask((slice(2), [0, 1, 2])), v)
def test_isel(self):
v = Variable(["time", "x"], self.d)
assert_identical(v.isel(time=slice(None)), v)
assert_identical(v.isel(time=0), v[0])
assert_identical(v.isel(time=slice(0, 3)), v[:3])
assert_identical(v.isel(x=0), v[:, 0])
assert_identical(v.isel(x=[0, 2]), v[:, [0, 2]])
assert_identical(v.isel(time=[]), v[[]])
with raises_regex(
ValueError,
r"dimensions {'not_a_dim'} do not exist. Expected one or more of "
r"\('time', 'x'\)",
):
v.isel(not_a_dim=0)
with pytest.warns(
UserWarning,
match=r"dimensions {'not_a_dim'} do not exist. Expected one or more of "
r"\('time', 'x'\)",
):
v.isel(not_a_dim=0, missing_dims="warn")
assert_identical(v, v.isel(not_a_dim=0, missing_dims="ignore"))
def test_index_0d_numpy_string(self):
# regression test to verify our workaround for indexing 0d strings
v = Variable([], np.string_("asdf"))
assert_identical(v[()], v)
v = Variable([], np.unicode_("asdf"))
assert_identical(v[()], v)
def test_indexing_0d_unicode(self):
# regression test for GH568
actual = Variable(("x"), ["tmax"])[0][()]
expected = Variable((), "tmax")
assert_identical(actual, expected)
@pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0])
def test_shift(self, fill_value):
v = Variable("x", [1, 2, 3, 4, 5])
assert_identical(v, v.shift(x=0))
assert v is not v.shift(x=0)
expected = Variable("x", [np.nan, np.nan, 1, 2, 3])
assert_identical(expected, v.shift(x=2))
if fill_value == dtypes.NA:
# if we supply the default, we expect the missing value for a
# float array
fill_value_exp = np.nan
else:
fill_value_exp = fill_value
expected = Variable("x", [fill_value_exp, 1, 2, 3, 4])
assert_identical(expected, v.shift(x=1, fill_value=fill_value))
expected = Variable("x", [2, 3, 4, 5, fill_value_exp])
assert_identical(expected, v.shift(x=-1, fill_value=fill_value))
expected = Variable("x", [fill_value_exp] * 5)
assert_identical(expected, v.shift(x=5, fill_value=fill_value))
assert_identical(expected, v.shift(x=6, fill_value=fill_value))
with raises_regex(ValueError, "dimension"):
v.shift(z=0)
v = Variable("x", [1, 2, 3, 4, 5], {"foo": "bar"})
assert_identical(v, v.shift(x=0))
expected = Variable("x", [fill_value_exp, 1, 2, 3, 4], {"foo": "bar"})
assert_identical(expected, v.shift(x=1, fill_value=fill_value))
def test_shift2d(self):
v = Variable(("x", "y"), [[1, 2], [3, 4]])
expected = Variable(("x", "y"), [[np.nan, np.nan], [np.nan, 1]])
assert_identical(expected, v.shift(x=1, y=1))
def test_roll(self):
v = Variable("x", [1, 2, 3, 4, 5])
assert_identical(v, v.roll(x=0))
assert v is not v.roll(x=0)
expected = Variable("x", [5, 1, 2, 3, 4])
assert_identical(expected, v.roll(x=1))
assert_identical(expected, v.roll(x=-4))
assert_identical(expected, v.roll(x=6))
expected = Variable("x", [4, 5, 1, 2, 3])
assert_identical(expected, v.roll(x=2))
assert_identical(expected, v.roll(x=-3))
with raises_regex(ValueError, "dimension"):
v.roll(z=0)
def test_roll_consistency(self):
v = Variable(("x", "y"), np.random.randn(5, 6))
for axis, dim in [(0, "x"), (1, "y")]:
for shift in [-3, 0, 1, 7, 11]:
expected = np.roll(v.values, shift, axis=axis)
actual = v.roll(**{dim: shift}).values
assert_array_equal(expected, actual)
def test_transpose(self):
v = Variable(["time", "x"], self.d)
v2 = Variable(["x", "time"], self.d.T)
assert_identical(v, v2.transpose())
assert_identical(v.transpose(), v.T)
x = np.random.randn(2, 3, 4, 5)
w = Variable(["a", "b", "c", "d"], x)
w2 = Variable(["d", "b", "c", "a"], np.einsum("abcd->dbca", x))
assert w2.shape == (5, 3, 4, 2)
assert_identical(w2, w.transpose("d", "b", "c", "a"))
assert_identical(w2, w.transpose("d", ..., "a"))
assert_identical(w2, w.transpose("d", "b", "c", ...))
assert_identical(w2, w.transpose(..., "b", "c", "a"))
assert_identical(w, w2.transpose("a", "b", "c", "d"))
w3 = Variable(["b", "c", "d", "a"], np.einsum("abcd->bcda", x))
assert_identical(w, w3.transpose("a", "b", "c", "d"))
def test_transpose_0d(self):
for value in [
3.5,
("a", 1),
np.datetime64("2000-01-01"),
np.timedelta64(1, "h"),
None,
object(),
]:
variable = Variable([], value)
actual = variable.transpose()
assert actual.identical(variable)
def test_squeeze(self):
v = Variable(["x", "y"], [[1]])
assert_identical(Variable([], 1), v.squeeze())
assert_identical(Variable(["y"], [1]), v.squeeze("x"))
assert_identical(Variable(["y"], [1]), v.squeeze(["x"]))
assert_identical(Variable(["x"], [1]), v.squeeze("y"))
assert_identical(Variable([], 1), v.squeeze(["x", "y"]))
v = Variable(["x", "y"], [[1, 2]])
assert_identical(Variable(["y"], [1, 2]), v.squeeze())
assert_identical(Variable(["y"], [1, 2]), v.squeeze("x"))
with raises_regex(ValueError, "cannot select a dimension"):
v.squeeze("y")
def test_get_axis_num(self):
v = Variable(["x", "y", "z"], np.random.randn(2, 3, 4))
assert v.get_axis_num("x") == 0
assert v.get_axis_num(["x"]) == (0,)
assert v.get_axis_num(["x", "y"]) == (0, 1)
assert v.get_axis_num(["z", "y", "x"]) == (2, 1, 0)
with raises_regex(ValueError, "not found in array dim"):
v.get_axis_num("foobar")
def test_set_dims(self):
v = Variable(["x"], [0, 1])
actual = v.set_dims(["x", "y"])
expected = Variable(["x", "y"], [[0], [1]])
assert_identical(actual, expected)
actual = v.set_dims(["y", "x"])
assert_identical(actual, expected.T)
actual = v.set_dims({"x": 2, "y": 2})
expected = Variable(["x", "y"], [[0, 0], [1, 1]])
assert_identical(actual, expected)
v = Variable(["foo"], [0, 1])
actual = v.set_dims("foo")
expected = v
assert_identical(actual, expected)
with raises_regex(ValueError, "must be a superset"):
v.set_dims(["z"])
def test_set_dims_object_dtype(self):
v = Variable([], ("a", 1))
actual = v.set_dims(("x",), (3,))
exp_values = np.empty((3,), dtype=object)
for i in range(3):
exp_values[i] = ("a", 1)
expected = Variable(["x"], exp_values)
assert actual.identical(expected)
def test_stack(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]], {"foo": "bar"})
actual = v.stack(z=("x", "y"))
expected = Variable("z", [0, 1, 2, 3], v.attrs)
assert_identical(actual, expected)
actual = v.stack(z=("x",))
expected = Variable(("y", "z"), v.data.T, v.attrs)
assert_identical(actual, expected)
actual = v.stack(z=())
assert_identical(actual, v)
actual = v.stack(X=("x",), Y=("y",)).transpose("X", "Y")
expected = Variable(("X", "Y"), v.data, v.attrs)
assert_identical(actual, expected)
def test_stack_errors(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]], {"foo": "bar"})
with raises_regex(ValueError, "invalid existing dim"):
v.stack(z=("x1",))
with raises_regex(ValueError, "cannot create a new dim"):
v.stack(x=("x",))
def test_unstack(self):
v = Variable("z", [0, 1, 2, 3], {"foo": "bar"})
actual = v.unstack(z={"x": 2, "y": 2})
expected = Variable(("x", "y"), [[0, 1], [2, 3]], v.attrs)
assert_identical(actual, expected)
actual = v.unstack(z={"x": 4, "y": 1})
expected = Variable(("x", "y"), [[0], [1], [2], [3]], v.attrs)
assert_identical(actual, expected)
actual = v.unstack(z={"x": 4})
expected = Variable("x", [0, 1, 2, 3], v.attrs)
assert_identical(actual, expected)
def test_unstack_errors(self):
v = Variable("z", [0, 1, 2, 3])
with raises_regex(ValueError, "invalid existing dim"):
v.unstack(foo={"x": 4})
with raises_regex(ValueError, "cannot create a new dim"):
v.stack(z=("z",))
with raises_regex(ValueError, "the product of the new dim"):
v.unstack(z={"x": 5})
def test_unstack_2d(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]])
actual = v.unstack(y={"z": 2})
expected = Variable(["x", "z"], v.data)
assert_identical(actual, expected)
actual = v.unstack(x={"z": 2})
expected = Variable(["y", "z"], v.data.T)
assert_identical(actual, expected)
def test_stack_unstack_consistency(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]])
actual = v.stack(z=("x", "y")).unstack(z={"x": 2, "y": 2})
assert_identical(actual, v)
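# Editor's illustration: stack flattens in row-major (C) order, so for the
# 2x2 case above stack(z=("x", "y")) yields [0, 1, 2, 3]; unstacking with
# matching sizes {"x": 2, "y": 2} is therefore an exact inverse.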
def test_broadcasting_math(self):
x = np.random.randn(2, 3)
v = Variable(["a", "b"], x)
# 1d to 2d broadcasting
assert_identical(v * v, Variable(["a", "b"], np.einsum("ab,ab->ab", x, x)))
assert_identical(v * v[0], Variable(["a", "b"], np.einsum("ab,b->ab", x, x[0])))
assert_identical(v[0] * v, Variable(["b", "a"], np.einsum("b,ab->ba", x[0], x)))
assert_identical(
v[0] * v[:, 0], Variable(["b", "a"], np.einsum("b,a->ba", x[0], x[:, 0]))
)
# higher dim broadcasting
y = np.random.randn(3, 4, 5)
w = Variable(["b", "c", "d"], y)
assert_identical(
v * w, Variable(["a", "b", "c", "d"], np.einsum("ab,bcd->abcd", x, y))
)
assert_identical(
w * v, Variable(["b", "c", "d", "a"], np.einsum("bcd,ab->bcda", y, x))
)
assert_identical(
v * w[0], Variable(["a", "b", "c", "d"], np.einsum("ab,cd->abcd", x, y[0]))
)
def test_broadcasting_failures(self):
a = Variable(["x"], np.arange(10))
b = Variable(["x"], np.arange(5))
c = Variable(["x", "x"], np.arange(100).reshape(10, 10))
with raises_regex(ValueError, "mismatched lengths"):
a + b
with raises_regex(ValueError, "duplicate dimensions"):
a + c
def test_inplace_math(self):
x = np.arange(5)
v = Variable(["x"], x)
v2 = v
v2 += 1
assert v is v2
# since we provided an ndarray for data, it is also modified in-place
assert source_ndarray(v.values) is x
assert_array_equal(v.values, np.arange(5) + 1)
with raises_regex(ValueError, "dimensions cannot change"):
v += Variable("y", np.arange(5))
def test_reduce(self):
v = Variable(["x", "y"], self.d, {"ignored": "attributes"})
assert_identical(v.reduce(np.std, "x"), Variable(["y"], self.d.std(axis=0)))
assert_identical(v.reduce(np.std, axis=0), v.reduce(np.std, dim="x"))
assert_identical(
v.reduce(np.std, ["y", "x"]), Variable([], self.d.std(axis=(0, 1)))
)
assert_identical(v.reduce(np.std), Variable([], self.d.std()))
assert_identical(
v.reduce(np.mean, "x").reduce(np.std, "y"),
Variable([], self.d.mean(axis=0).std()),
)
assert_allclose(v.mean("x"), v.reduce(np.mean, "x"))
with raises_regex(ValueError, "cannot supply both"):
v.mean(dim="x", axis=0)
with pytest.warns(DeprecationWarning, match="allow_lazy is deprecated"):
v.mean(dim="x", allow_lazy=True)
with pytest.warns(DeprecationWarning, match="allow_lazy is deprecated"):
v.mean(dim="x", allow_lazy=False)
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]])
@pytest.mark.parametrize(
"axis, dim", zip([None, 0, [0], [0, 1]], [None, "x", ["x"], ["x", "y"]])
)
def test_quantile(self, q, axis, dim, skipna):
v = Variable(["x", "y"], self.d)
actual = v.quantile(q, dim=dim, skipna=skipna)
_percentile_func = np.nanpercentile if skipna else np.percentile
expected = _percentile_func(self.d, np.array(q) * 100, axis=axis)
np.testing.assert_allclose(actual.values, expected)
@requires_dask
@pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]])
@pytest.mark.parametrize("axis, dim", [[1, "y"], [[1], ["y"]]])
def test_quantile_dask(self, q, axis, dim):
v = Variable(["x", "y"], self.d).chunk({"x": 2})
actual = v.quantile(q, dim=dim)
assert isinstance(actual.data, dask_array_type)
expected = np.nanpercentile(self.d, np.array(q) * 100, axis=axis)
np.testing.assert_allclose(actual.values, expected)
@requires_dask
def test_quantile_chunked_dim_error(self):
v = Variable(["x", "y"], self.d).chunk({"x": 2})
with raises_regex(ValueError, "dimension 'x'"):
v.quantile(0.5, dim="x")
@pytest.mark.parametrize("q", [-0.1, 1.1, [2], [0.25, 2]])
def test_quantile_out_of_bounds(self, q):
v = Variable(["x", "y"], self.d)
# escape special characters
with raises_regex(ValueError, r"Quantiles must be in the range \[0, 1\]"):
v.quantile(q, dim="x")
@requires_dask
@requires_bottleneck
def test_rank_dask_raises(self):
v = Variable(["x"], [3.0, 1.0, np.nan, 2.0, 4.0]).chunk(2)
with raises_regex(TypeError, "arrays stored as dask"):
v.rank("x")
@requires_bottleneck
def test_rank(self):
import bottleneck as bn
# floats
v = Variable(["x", "y"], [[3, 4, np.nan, 1]])
expect_0 = bn.nanrankdata(v.data, axis=0)
expect_1 = bn.nanrankdata(v.data, axis=1)
np.testing.assert_allclose(v.rank("x").values, expect_0)
np.testing.assert_allclose(v.rank("y").values, expect_1)
# int
v = Variable(["x"], [3, 2, 1])
expect = bn.rankdata(v.data, axis=0)
np.testing.assert_allclose(v.rank("x").values, expect)
# str
v = Variable(["x"], ["c", "b", "a"])
expect = bn.rankdata(v.data, axis=0)
np.testing.assert_allclose(v.rank("x").values, expect)
# pct
v = Variable(["x"], [3.0, 1.0, np.nan, 2.0, 4.0])
v_expect = Variable(["x"], [0.75, 0.25, np.nan, 0.5, 1.0])
assert_equal(v.rank("x", pct=True), v_expect)
# invalid dim
with raises_regex(ValueError, "not found"):
v.rank("y")
def test_big_endian_reduce(self):
# regression test for GH489
data = np.ones(5, dtype=">f4")
v = Variable(["x"], data)
expected = Variable([], 5)
assert_identical(expected, v.sum())
def test_reduce_funcs(self):
v = Variable("x", np.array([1, np.nan, 2, 3]))
assert_identical(v.mean(), Variable([], 2))
assert_identical(v.mean(skipna=True), Variable([], 2))
assert_identical(v.mean(skipna=False), Variable([], np.nan))
assert_identical(np.mean(v), Variable([], 2))
assert_identical(v.prod(), Variable([], 6))
assert_identical(v.cumsum(axis=0), Variable("x", np.array([1, 1, 3, 6])))
assert_identical(v.cumprod(axis=0), Variable("x", np.array([1, 1, 2, 6])))
assert_identical(v.var(), Variable([], 2.0 / 3))
assert_identical(v.median(), Variable([], 2))
v = Variable("x", [True, False, False])
assert_identical(v.any(), Variable([], True))
assert_identical(v.all(dim="x"), Variable([], False))
v = Variable("t", pd.date_range("2000-01-01", periods=3))
assert v.argmax(skipna=True) == 2
assert_identical(v.max(), Variable([], pd.Timestamp("2000-01-03")))
def test_reduce_keepdims(self):
v = Variable(["x", "y"], self.d)
assert_identical(
v.mean(keepdims=True), Variable(v.dims, np.mean(self.d, keepdims=True))
)
assert_identical(
v.mean(dim="x", keepdims=True),
Variable(v.dims, np.mean(self.d, axis=0, keepdims=True)),
)
assert_identical(
v.mean(dim="y", keepdims=True),
Variable(v.dims, np.mean(self.d, axis=1, keepdims=True)),
)
assert_identical(
v.mean(dim=["y", "x"], keepdims=True),
Variable(v.dims, np.mean(self.d, axis=(1, 0), keepdims=True)),
)
v = Variable([], 1.0)
assert_identical(
v.mean(keepdims=True), Variable([], np.mean(v.data, keepdims=True))
)
@requires_dask
def test_reduce_keepdims_dask(self):
import dask.array
v = Variable(["x", "y"], self.d).chunk()
actual = v.mean(keepdims=True)
assert isinstance(actual.data, dask.array.Array)
expected = Variable(v.dims, np.mean(self.d, keepdims=True))
assert_identical(actual, expected)
actual = v.mean(dim="y", keepdims=True)
assert isinstance(actual.data, dask.array.Array)
expected = Variable(v.dims, np.mean(self.d, axis=1, keepdims=True))
assert_identical(actual, expected)
def test_reduce_keep_attrs(self):
_attrs = {"units": "test", "long_name": "testing"}
v = Variable(["x", "y"], self.d, _attrs)
# Test dropped attrs
vm = v.mean()
assert len(vm.attrs) == 0
assert vm.attrs == {}
# Test kept attrs
vm = v.mean(keep_attrs=True)
assert len(vm.attrs) == len(_attrs)
assert vm.attrs == _attrs
def test_binary_ops_keep_attrs(self):
_attrs = {"units": "test", "long_name": "testing"}
a = Variable(["x", "y"], np.random.randn(3, 3), _attrs)
b = Variable(["x", "y"], np.random.randn(3, 3), _attrs)
# Test dropped attrs
d = a - b # just one operation
assert d.attrs == {}
# Test kept attrs
with set_options(keep_attrs=True):
d = a - b
assert d.attrs == _attrs
def test_count(self):
expected = Variable([], 3)
actual = Variable(["x"], [1, 2, 3, np.nan]).count()
assert_identical(expected, actual)
v = Variable(["x"], np.array(["1", "2", "3", np.nan], dtype=object))
actual = v.count()
assert_identical(expected, actual)
actual = Variable(["x"], [True, False, True]).count()
assert_identical(expected, actual)
assert actual.dtype == int
expected = Variable(["x"], [2, 3])
actual = Variable(["x", "y"], [[1, 0, np.nan], [1, 1, 1]]).count("y")
assert_identical(expected, actual)
def test_setitem(self):
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[0, 1] = 1
assert v[0, 1] == 1
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[dict(x=[0, 1])] = 1
assert_array_equal(v[[0, 1]], np.ones_like(v[[0, 1]]))
# boolean indexing
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[dict(x=[True, False])] = 1
assert_array_equal(v[0], np.ones_like(v[0]))
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[dict(x=[True, False], y=[False, True, False])] = 1
assert v[0, 1] == 1
def test_setitem_fancy(self):
# assignments that should work the same way they do on np.ndarray
def assert_assigned_2d(array, key_x, key_y, values):
expected = array.copy()
expected[key_x, key_y] = values
v = Variable(["x", "y"], array)
v[dict(x=key_x, y=key_y)] = values
assert_array_equal(expected, v)
# 1d vectorized indexing
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a"], [0, 1]),
key_y=Variable(["a"], [0, 1]),
values=0,
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a"], [0, 1]),
key_y=Variable(["a"], [0, 1]),
values=Variable((), 0),
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a"], [0, 1]),
key_y=Variable(["a"], [0, 1]),
values=Variable(("a"), [3, 2]),
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=slice(None),
key_y=Variable(["a"], [0, 1]),
values=Variable(("a"), [3, 2]),
)
# 2d-vectorized indexing
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a", "b"], [[0, 1]]),
key_y=Variable(["a", "b"], [[1, 0]]),
values=0,
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a", "b"], [[0, 1]]),
key_y=Variable(["a", "b"], [[1, 0]]),
values=[0],
)
assert_assigned_2d(
np.random.randn(5, 4),
key_x=Variable(["a", "b"], [[0, 1], [2, 3]]),
key_y=Variable(["a", "b"], [[1, 0], [3, 3]]),
values=[2, 3],
)
# vindex with slice
v = Variable(["x", "y", "z"], np.ones((4, 3, 2)))
ind = Variable(["a"], [0, 1])
v[dict(x=ind, z=ind)] = 0
expected = Variable(["x", "y", "z"], np.ones((4, 3, 2)))
expected[0, :, 0] = 0
expected[1, :, 1] = 0
assert_identical(expected, v)
# dimension broadcast
v = Variable(["x", "y"], np.ones((3, 2)))
ind = Variable(["a", "b"], [[0, 1]])
v[ind, :] = 0
expected = Variable(["x", "y"], [[0, 0], [0, 0], [1, 1]])
assert_identical(expected, v)
with raises_regex(ValueError, "shape mismatch"):
v[ind, ind] = np.zeros((1, 2, 1))
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
ind = Variable(["a"], [0, 1])
v[dict(x=ind)] = Variable(["a", "y"], np.ones((2, 3), dtype=int) * 10)
assert_array_equal(v[0], np.ones_like(v[0]) * 10)
assert_array_equal(v[1], np.ones_like(v[1]) * 10)
assert v.dims == ("x", "y") # dimension should not change
# increment
v = Variable(["x", "y"], np.arange(6).reshape(3, 2))
ind = Variable(["a"], [0, 1])
v[dict(x=ind)] += 1
expected = Variable(["x", "y"], [[1, 2], [3, 4], [4, 5]])
assert_identical(v, expected)
ind = Variable(["a"], [0, 0])
v[dict(x=ind)] += 1
expected = Variable(["x", "y"], [[2, 3], [3, 4], [4, 5]])
assert_identical(v, expected)
def test_coarsen(self):
v = self.cls(["x"], [0, 1, 2, 3, 4])
actual = v.coarsen({"x": 2}, boundary="pad", func="mean")
expected = self.cls(["x"], [0.5, 2.5, 4])
assert_identical(actual, expected)
actual = v.coarsen({"x": 2}, func="mean", boundary="pad", side="right")
expected = self.cls(["x"], [0, 1.5, 3.5])
assert_identical(actual, expected)
actual = v.coarsen({"x": 2}, func=np.mean, side="right", boundary="trim")
expected = self.cls(["x"], [1.5, 3.5])
assert_identical(actual, expected)
# smoke test: these combinations should run without error
v = self.cls(["x", "y", "z"], np.arange(40 * 30 * 2).reshape(40, 30, 2))
for windows, func, side, boundary in [
({"x": 2}, np.mean, "left", "trim"),
({"x": 2}, np.median, {"x": "left"}, "pad"),
({"x": 2, "y": 3}, np.max, "left", {"x": "pad", "y": "trim"}),
]:
v.coarsen(windows, func, boundary, side)
def test_coarsen_2d(self):
# a 2d mean should be the same as successive 1d means
v = self.cls(["x", "y"], np.arange(6 * 12).reshape(6, 12))
actual = v.coarsen({"x": 3, "y": 4}, func="mean")
expected = v.coarsen({"x": 3}, func="mean").coarsen({"y": 4}, func="mean")
assert_equal(actual, expected)
v = self.cls(["x", "y"], np.arange(7 * 12).reshape(7, 12))
actual = v.coarsen({"x": 3, "y": 4}, func="mean", boundary="trim")
expected = v.coarsen({"x": 3}, func="mean", boundary="trim").coarsen(
{"y": 4}, func="mean", boundary="trim"
)
assert_equal(actual, expected)
# with NaNs present, the two results should differ
v = self.cls(["x", "y"], 1.0 * np.arange(6 * 12).reshape(6, 12))
v[2, 4] = np.nan
v[3, 5] = np.nan
actual = v.coarsen({"x": 3, "y": 4}, func="mean", boundary="trim")
expected = (
v.coarsen({"x": 3}, func="sum", boundary="trim").coarsen(
{"y": 4}, func="sum", boundary="trim"
)
/ 12
)
assert not actual.equals(expected)
# adjusting the nan count
expected[0, 1] *= 12 / 11
expected[1, 1] *= 12 / 11
assert_allclose(actual, expected)
v = self.cls(("x", "y"), np.arange(4 * 4, dtype=np.float32).reshape(4, 4))
actual = v.coarsen(dict(x=2, y=2), func="count", boundary="exact")
expected = self.cls(("x", "y"), 4 * np.ones((2, 2)))
assert_equal(actual, expected)
v[0, 0] = np.nan
v[-1, -1] = np.nan
expected[0, 0] = 3
expected[-1, -1] = 3
actual = v.coarsen(dict(x=2, y=2), func="count", boundary="exact")
assert_equal(actual, expected)
actual = v.coarsen(dict(x=2, y=2), func="sum", boundary="exact", skipna=False)
expected = self.cls(("x", "y"), [[np.nan, 18], [42, np.nan]])
assert_equal(actual, expected)
actual = v.coarsen(dict(x=2, y=2), func="sum", boundary="exact", skipna=True)
expected = self.cls(("x", "y"), [[10, 18], [42, 35]])
assert_equal(actual, expected)
# perhaps @pytest.mark.parametrize("operation", [f for f in duck_array_ops])
def test_coarsen_keep_attrs(self, operation="mean"):
_attrs = {"units": "test", "long_name": "testing"}
test_func = getattr(duck_array_ops, operation, None)
# Test dropped attrs
with set_options(keep_attrs=False):
new = Variable(["coord"], np.linspace(1, 10, 100), attrs=_attrs).coarsen(
windows={"coord": 1}, func=test_func, boundary="exact", side="left"
)
assert new.attrs == {}
# Test kept attrs
with set_options(keep_attrs=True):
new = Variable(["coord"], np.linspace(1, 10, 100), attrs=_attrs).coarsen(
windows={"coord": 1}, func=test_func, boundary="exact", side="left"
)
assert new.attrs == _attrs
@requires_dask
class TestVariableWithDask(VariableSubclassobjects):
cls = staticmethod(lambda *args: Variable(*args).chunk())
@pytest.mark.xfail
def test_0d_object_array_with_list(self):
super().test_0d_object_array_with_list()
@pytest.mark.xfail
def test_array_interface(self):
# dask array does not have `argsort`
super().test_array_interface()
@pytest.mark.xfail
def test_copy_index(self):
super().test_copy_index()
@pytest.mark.xfail
def test_eq_all_dtypes(self):
super().test_eq_all_dtypes()
def test_getitem_fancy(self):
super().test_getitem_fancy()
def test_getitem_1d_fancy(self):
super().test_getitem_1d_fancy()
def test_getitem_with_mask_nd_indexer(self):
import dask.array as da
v = Variable(["x"], da.arange(3, chunks=3))
indexer = Variable(("x", "y"), [[0, -1], [-1, 2]])
assert_identical(
v._getitem_with_mask(indexer, fill_value=-1),
self.cls(("x", "y"), [[0, -1], [-1, 2]]),
)
@requires_sparse
class TestVariableWithSparse:
# TODO inherit VariableSubclassobjects to cover more tests
def test_as_sparse(self):
data = np.arange(12).reshape(3, 4)
var = Variable(("x", "y"), data)._as_sparse(fill_value=-1)
actual = var._to_dense()
assert_identical(var, actual)
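# Editor's note: _as_sparse wraps the data in a sparse array (sparse.COO
# under the hood) honouring fill_value, and _to_dense densifies it again,
# so the identity check above is a lossless round-trip.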
class TestIndexVariable(VariableSubclassobjects):
cls = staticmethod(IndexVariable)
def test_init(self):
with raises_regex(ValueError, "must be 1-dimensional"):
IndexVariable((), 0)
def test_to_index(self):
data = 0.5 * np.arange(10)
v = IndexVariable(["time"], data, {"foo": "bar"})
assert pd.Index(data, name="time").identical(v.to_index())
def test_multiindex_default_level_names(self):
midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]])
v = IndexVariable(["x"], midx, {"foo": "bar"})
assert v.to_index().names == ("x_level_0", "x_level_1")
def test_data(self):
x = IndexVariable("x", np.arange(3.0))
assert isinstance(x._data, PandasIndexAdapter)
assert isinstance(x.data, np.ndarray)
assert float == x.dtype
assert_array_equal(np.arange(3), x)
assert float == x.values.dtype
with raises_regex(TypeError, "cannot be modified"):
x[:] = 0
def test_name(self):
coord = IndexVariable("x", [10.0])
assert coord.name == "x"
with pytest.raises(AttributeError):
coord.name = "y"
def test_level_names(self):
midx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=["level_1", "level_2"]
)
x = IndexVariable("x", midx)
assert x.level_names == midx.names
assert IndexVariable("y", [10.0]).level_names is None
def test_get_level_variable(self):
midx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=["level_1", "level_2"]
)
x = IndexVariable("x", midx)
level_1 = IndexVariable("x", midx.get_level_values("level_1"))
assert_identical(x.get_level_variable("level_1"), level_1)
with raises_regex(ValueError, "has no MultiIndex"):
IndexVariable("y", [10.0]).get_level_variable("level")
def test_concat_periods(self):
periods = pd.period_range("2000-01-01", periods=10)
coords = [IndexVariable("t", periods[:5]), IndexVariable("t", periods[5:])]
expected = IndexVariable("t", periods)
actual = IndexVariable.concat(coords, dim="t")
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.PeriodIndex)
positions = [list(range(5)), list(range(5, 10))]
actual = IndexVariable.concat(coords, dim="t", positions=positions)
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.PeriodIndex)
def test_concat_multiindex(self):
idx = pd.MultiIndex.from_product([[0, 1, 2], ["a", "b"]])
coords = [IndexVariable("x", idx[:2]), IndexVariable("x", idx[2:])]
expected = IndexVariable("x", idx)
actual = IndexVariable.concat(coords, dim="x")
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.MultiIndex)
def test_coordinate_alias(self):
with pytest.warns(Warning, match="deprecated"):
x = Coordinate("x", [1, 2, 3])
assert isinstance(x, IndexVariable)
def test_datetime64(self):
# GH:1932 Make sure indexing keeps precision
t = np.array([1518418799999986560, 1518418799999996560], dtype="datetime64[ns]")
v = IndexVariable("t", t)
assert v[0].data == t[0]
# These tests make use of multi-dimensional variables, which are not valid
# IndexVariable objects:
@pytest.mark.xfail
def test_getitem_error(self):
super().test_getitem_error()
@pytest.mark.xfail
def test_getitem_advanced(self):
super().test_getitem_advanced()
@pytest.mark.xfail
def test_getitem_fancy(self):
super().test_getitem_fancy()
@pytest.mark.xfail
def test_getitem_uint(self):
super().test_getitem_uint()
@pytest.mark.xfail
@pytest.mark.parametrize(
"mode",
[
"mean",
"median",
"reflect",
"edge",
"linear_ramp",
"maximum",
"minimum",
"symmetric",
"wrap",
],
)
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad(self, mode, xr_arg, np_arg):
super().test_pad(mode, xr_arg, np_arg)
@pytest.mark.xfail
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad_constant_values(self, xr_arg, np_arg):
super().test_pad_constant_values(xr_arg, np_arg)
@pytest.mark.xfail
def test_rolling_window(self):
super().test_rolling_window()
@pytest.mark.xfail
def test_coarsen_2d(self):
super().test_coarsen_2d()
class TestAsCompatibleData:
def test_unchanged_types(self):
types = (np.asarray, PandasIndexAdapter, LazilyOuterIndexedArray)
for t in types:
for data in [
np.arange(3),
pd.date_range("2000-01-01", periods=3),
pd.date_range("2000-01-01", periods=3).values,
]:
x = t(data)
assert source_ndarray(x) is source_ndarray(as_compatible_data(x))
def test_converted_types(self):
for input_array in [[[0, 1, 2]], pd.DataFrame([[0, 1, 2]])]:
actual = as_compatible_data(input_array)
assert_array_equal(np.asarray(input_array), actual)
assert np.ndarray == type(actual)
assert np.asarray(input_array).dtype == actual.dtype
def test_masked_array(self):
original = np.ma.MaskedArray(np.arange(5))
expected = np.arange(5)
actual = as_compatible_data(original)
assert_array_equal(expected, actual)
assert np.dtype(int) == actual.dtype
original = np.ma.MaskedArray(np.arange(5), mask=4 * [False] + [True])
expected = np.arange(5.0)
expected[-1] = np.nan
actual = as_compatible_data(original)
assert_array_equal(expected, actual)
assert np.dtype(float) == actual.dtype
def test_datetime(self):
expected = np.datetime64("2000-01-01")
actual = as_compatible_data(expected)
assert expected == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
expected = np.array([np.datetime64("2000-01-01")])
actual = as_compatible_data(expected)
assert np.asarray(expected) == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
expected = np.array([np.datetime64("2000-01-01", "ns")])
actual = as_compatible_data(expected)
assert np.asarray(expected) == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
assert expected is source_ndarray(np.asarray(actual))
expected = np.datetime64("2000-01-01", "ns")
actual = as_compatible_data(datetime(2000, 1, 1))
assert np.asarray(expected) == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
def test_full_like(self):
# For more thorough tests, see test_variable.py
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
)
expect = orig.copy(deep=True)
expect.values = [[2.0, 2.0], [2.0, 2.0]]
assert_identical(expect, full_like(orig, 2))
# override dtype
expect.values = [[True, True], [True, True]]
assert expect.dtype == bool
assert_identical(expect, full_like(orig, True, dtype=bool))
# raise error on non-scalar fill_value
with raises_regex(ValueError, "must be scalar"):
full_like(orig, [1.0, 2.0])
@requires_dask
def test_full_like_dask(self):
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
).chunk(((1, 1), (2,)))
def check(actual, expect_dtype, expect_values):
assert actual.dtype == expect_dtype
assert actual.shape == orig.shape
assert actual.dims == orig.dims
assert actual.attrs == orig.attrs
assert actual.chunks == orig.chunks
assert_array_equal(actual.values, expect_values)
check(full_like(orig, 2), orig.dtype, np.full_like(orig.values, 2))
# override dtype
check(
full_like(orig, True, dtype=bool),
bool,
np.full_like(orig.values, True, dtype=bool),
)
# Check that there's no array stored inside dask
# (e.g. we didn't create a numpy array and then we chunked it!)
dsk = full_like(orig, 1).data.dask
for v in dsk.values():
if isinstance(v, tuple):
for vi in v:
assert not isinstance(vi, np.ndarray)
else:
assert not isinstance(v, np.ndarray)
def test_zeros_like(self):
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
)
assert_identical(zeros_like(orig), full_like(orig, 0))
assert_identical(zeros_like(orig, dtype=int), full_like(orig, 0, dtype=int))
def test_ones_like(self):
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
)
assert_identical(ones_like(orig), full_like(orig, 1))
assert_identical(ones_like(orig, dtype=int), full_like(orig, 1, dtype=int))
def test_unsupported_type(self):
# Non indexable type
class CustomArray(NDArrayMixin):
def __init__(self, array):
self.array = array
class CustomIndexable(CustomArray, indexing.ExplicitlyIndexed):
pass
array = CustomArray(np.arange(3))
orig = Variable(dims=("x"), data=array, attrs={"foo": "bar"})
assert isinstance(orig._data, np.ndarray) # should not be CustomArray
array = CustomIndexable(np.arange(3))
orig = Variable(dims=("x"), data=array, attrs={"foo": "bar"})
assert isinstance(orig._data, CustomIndexable)
def test_raise_no_warning_for_nan_in_binary_ops():
with pytest.warns(None) as record:
Variable("x", [1, 2, np.NaN]) > 0
assert len(record) == 0
class TestBackendIndexing:
""" Make sure all the array wrappers can be indexed. """
@pytest.fixture(autouse=True)
def setUp(self):
self.d = np.random.random((10, 3)).astype(np.float64)
def check_orthogonal_indexing(self, v):
assert np.allclose(v.isel(x=[8, 3], y=[2, 1]), self.d[[8, 3]][:, [2, 1]])
def check_vectorized_indexing(self, v):
ind_x = Variable("z", [0, 2])
ind_y = Variable("z", [2, 1])
assert np.allclose(v.isel(x=ind_x, y=ind_y), self.d[ind_x, ind_y])
def test_NumpyIndexingAdapter(self):
v = Variable(dims=("x", "y"), data=NumpyIndexingAdapter(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping is not allowed
with raises_regex(TypeError, "NumpyIndexingAdapter only wraps "):
v = Variable(
dims=("x", "y"), data=NumpyIndexingAdapter(NumpyIndexingAdapter(self.d))
)
def test_LazilyOuterIndexedArray(self):
v = Variable(dims=("x", "y"), data=LazilyOuterIndexedArray(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(
dims=("x", "y"),
data=LazilyOuterIndexedArray(LazilyOuterIndexedArray(self.d)),
)
self.check_orthogonal_indexing(v)
# hierarchical wrapping
v = Variable(
dims=("x", "y"), data=LazilyOuterIndexedArray(NumpyIndexingAdapter(self.d))
)
self.check_orthogonal_indexing(v)
def test_CopyOnWriteArray(self):
v = Variable(dims=("x", "y"), data=CopyOnWriteArray(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(
dims=("x", "y"), data=CopyOnWriteArray(LazilyOuterIndexedArray(self.d))
)
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
def test_MemoryCachedArray(self):
v = Variable(dims=("x", "y"), data=MemoryCachedArray(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(dims=("x", "y"), data=CopyOnWriteArray(MemoryCachedArray(self.d)))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
@requires_dask
def test_DaskIndexingAdapter(self):
import dask.array as da
da = da.asarray(self.d)
v = Variable(dims=("x", "y"), data=DaskIndexingAdapter(da))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(dims=("x", "y"), data=CopyOnWriteArray(DaskIndexingAdapter(da)))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
|
apache-2.0
|
etherkit/OpenBeacon2
|
client/linux-arm/venv/lib/python3.5/site-packages/PyInstaller/loader/pyiboot01_bootstrap.py
|
2
|
7533
|
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2019, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
### Start bootstrap process
# Only python built-in modules can be used.
import sys
import pyimod03_importers
# Extend Python import machinery by adding PEP302 importers to sys.meta_path.
pyimod03_importers.install()
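# For orientation, a minimal PEP302 meta-path importer looks roughly like
# this (an editor's sketch; pyimod03_importers implements a much more
# complete version that loads modules from the bundled archive, and
# '_bundled_code' below is a hypothetical name for that archive mapping):
#
#   class _Finder:
#       def find_module(self, fullname, path=None):
#           return self if fullname in _bundled_code else None
#       def load_module(self, fullname):
#           mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
#           exec(_bundled_code[fullname], mod.__dict__)
#           return mod
#   sys.meta_path.insert(0, _Finder())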
### Bootstrap process is complete.
# We can use other python modules (e.g. os)
import os
# Let other python modules know that the code is running in frozen mode.
if not hasattr(sys, 'frozen'):
sys.frozen = True
# sys._MEIPASS is now set in the bootloader. Hooray.
# Python 3 C-API function Py_SetPath() resets sys.prefix to empty string.
# Python 2 was using PYTHONHOME for sys.prefix. Let's do the same for Python 3.
sys.prefix = sys._MEIPASS
sys.exec_prefix = sys.prefix
# Python 3.3+ also defines sys.base_prefix. Let's set them too.
# TODO: Verify that these variables do not hurt on Python 3.2 and 2.7.
sys.base_prefix = sys.prefix
sys.base_exec_prefix = sys.exec_prefix
# Some packages behave differently when running inside a virtual environment.
# E.g. IPython tries to append the VIRTUAL_ENV path to sys.path.
# For a frozen app we want to prevent this behavior.
VIRTENV = 'VIRTUAL_ENV'
if VIRTENV in os.environ:
# On some platforms (e.g. AIX) 'os.unsetenv()' is not available, and then
# deleting the var from os.environ does not remove it from the environment.
os.environ[VIRTENV] = ''
del os.environ[VIRTENV]
# Ensure sys.path contains absolute paths. Otherwise imports of other python
# modules will fail when the current working directory is changed by the
# frozen application.
python_path = []
for pth in sys.path:
if not os.path.isabs(pth):
# careful about using abspath with a non-unicode path: it breaks
# multibyte characters that contain a slash under win32/Python 2
# TODO: Revert when dropping support for Python 2 (is_py2).
pth = os.path.abspath(pth)
python_path.append(pth)
sys.path = python_path
# Implement workaround for prints in non-console mode. In non-console mode
# (with "pythonw"), print randomly fails with "[errno 9] Bad file descriptor"
# when the printed text is flushed (eg: buffer full); this is because the
# sys.stdout object is bound to an invalid file descriptor.
# Python 3000 has a fix for it (http://bugs.python.org/issue1415), but we
# feel that a workaround in PyInstaller is a good thing since most people
# found this problem for the first time with PyInstaller as they don't
# usually run their code with "pythonw" (and it's hard to debug anyway).
class NullWriter:
softspace = 0
encoding = 'UTF-8'
def write(*args):
pass
def flush(*args):
pass
# Some packages check whether stdout/stderr is available,
# e.g. youtube-dl; for details see #1883
def isatty(self):
return False
# In Python 3 sys.stdout/err is None in GUI mode on Windows.
# In Python 2 we need to check .fileno().
if sys.stdout is None or sys.stdout.fileno() < 0:
sys.stdout = NullWriter()
if sys.stderr is None or sys.stderr.fileno() < 0:
sys.stderr = NullWriter()
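# With the writers installed, code like the following no longer raises
# "[Errno 9] Bad file descriptor" in windowed (pythonw) mode; the output
# is silently discarded instead (editor's illustration):
#   print("progress: 42%")
#   sys.stdout.flush()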
# At least on Windows, Python seems to hook up the codecs on this
# import, so it's not enough to just package up all the encodings.
#
# It was also reported that without 'encodings' module the frozen executable
# will fail to load in some configurations:
#
# http://www.pyinstaller.org/ticket/651
#
# Importing the 'encodings' module in a run-time hook is not enough, since
# some run-time hooks require this module and the order in which code from
# run-time hooks is executed is not defined.
try:
import encodings
except ImportError:
pass
# In the Python interpreter the 'warnings' module is imported when
# 'sys.warnoptions' is not empty. Mimic this behavior in PyInstaller.
if sys.warnoptions:
import warnings
try:
import ctypes
import os
from ctypes import LibraryLoader, DEFAULT_MODE
def _frozen_name(name):
if name:
frozen_name = os.path.join(sys._MEIPASS, os.path.basename(name))
if os.path.exists(frozen_name):
name = frozen_name
return name
class PyInstallerImportError(OSError):
def __init__(self, name):
self.msg = ("Failed to load dynlib/dll %r. "
"Most probably this dynlib/dll was not found "
"when the application was frozen.") % name
self.args = (self.msg,)
class PyInstallerCDLL(ctypes.CDLL):
def __init__(self, name, *args, **kwargs):
name = _frozen_name(name)
try:
super(PyInstallerCDLL, self).__init__(name, *args, **kwargs)
except Exception as base_error:
raise PyInstallerImportError(name)
ctypes.CDLL = PyInstallerCDLL
ctypes.cdll = LibraryLoader(PyInstallerCDLL)
class PyInstallerPyDLL(ctypes.PyDLL):
def __init__(self, name, *args, **kwargs):
name = _frozen_name(name)
try:
super(PyInstallerPyDLL, self).__init__(name, *args, **kwargs)
except Exception as base_error:
raise PyInstallerImportError(name)
ctypes.PyDLL = PyInstallerPyDLL
ctypes.pydll = LibraryLoader(PyInstallerPyDLL)
if sys.platform.startswith('win'):
class PyInstallerWinDLL(ctypes.WinDLL):
def __init__(self, name,*args, **kwargs):
name = _frozen_name(name)
try:
super(PyInstallerWinDLL, self).__init__(name, *args, **kwargs)
except Exception as base_error:
raise PyInstallerImportError(name)
ctypes.WinDLL = PyInstallerWinDLL
ctypes.windll = LibraryLoader(PyInstallerWinDLL)
class PyInstallerOleDLL(ctypes.OleDLL):
def __init__(self, name,*args, **kwargs):
name = _frozen_name(name)
try:
super(PyInstallerOleDLL, self).__init__(name, *args, **kwargs)
except Exception as base_error:
raise PyInstallerImportError(name)
ctypes.OleDLL = PyInstallerOleDLL
ctypes.oledll = LibraryLoader(PyInstallerOleDLL)
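# Net effect of the wrappers above (editor's illustration): a call such as
#   ctypes.CDLL('libfoo.so')   # 'libfoo.so' is a hypothetical library name
# first looks for sys._MEIPASS/libfoo.so, so bundled libraries win over
# system-wide ones, and a missing library raises PyInstallerImportError
# with a frozen-app-specific message instead of a bare OSError.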
except ImportError:
pass
# On Mac OS X insert sys._MEIPASS in the first position of the list of paths
# that ctypes uses to search for libraries.
#
# Note: 'ctypes' module will NOT be bundled with every app because code in this
# module is not scanned for module dependencies. It is safe to wrap
# 'ctypes' module into 'try/except ImportError' block.
if sys.platform.startswith('darwin'):
try:
from ctypes.macholib import dyld
dyld.DEFAULT_LIBRARY_FALLBACK.insert(0, sys._MEIPASS)
except ImportError:
# Do nothing when module 'ctypes' is not available.
pass
# Make .eggs and zipfiles available at runtime
d = "eggs"
d = os.path.join(sys._MEIPASS, d)
# Test if the 'eggs' directory exists. This allows this script to be
# opportunistically included in the packaged exe, even if no eggs were
# found when packaging the program (which may be a use-case; see
# issue #653).
if os.path.isdir(d):
for fn in os.listdir(d):
sys.path.append(os.path.join(d, fn))
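# Editor's note: each appended entry is a zip path such as
# eggs/somepkg-1.0.egg (hypothetical name), which Python's zipimport
# machinery treats like a package directory, so 'import somepkg' can
# resolve from inside the egg at runtime.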
|
gpl-3.0
|
AndreyPopovNew/asuswrt-merlin-rt-n
|
release/src/router/samba-3.0.25b/source/stf/pythoncheck.py
|
55
|
1787
|
#! /usr/bin/python
# Comfychair test cases for Samba python extensions
# Copyright (C) 2003 by Tim Potter <tpot@samba.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
"""These tests are run by Samba's "make check"."""
import sys, comfychair
class ImportTest(comfychair.TestCase):
"""Check that all modules can be imported without error."""
def runtest(self):
python_modules = ['spoolss', 'lsa', 'samr', 'winbind', 'winreg',
'srvsvc', 'tdb', 'smb', 'tdbpack']
for m in python_modules:
try:
__import__('samba.%s' % m)
except ImportError, msg:
self.log(str(msg))
self.fail('error importing %s module' % m)
tests = [ImportTest]
if __name__ == '__main__':
# Some magic to prepend the build directory to the python path so we see the
# objects we have built and not previously installed stuff.
from distutils.util import get_platform
from os import getcwd
sys.path.insert(0, '%s/build/lib.%s-%s' %
(getcwd(), get_platform(), sys.version[0:3]))
comfychair.main(tests)
|
gpl-2.0
|
bountyful/bountyfulcoins
|
bountyfulcoinsapp/migrations/0001_initial.py
|
1
|
9626
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Link'
db.create_table(u'bountyfulcoinsapp_link', (
(u'id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')
(unique=True, max_length=200)),
))
db.send_create_signal(u'bountyfulcoinsapp', ['Link'])
# Adding model 'Bounty'
db.create_table(u'bountyfulcoinsapp_bounty', (
(u'id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')
(max_length=200)),
('user', self.gf('django.db.models.fields.related.ForeignKey')
(to=orm['auth.User'])),
('link', self.gf('django.db.models.fields.related.ForeignKey')
(to=orm['bountyfulcoinsapp.Link'])),
('amount', self.gf('django.db.models.fields.DecimalField')
(default=0.0, max_digits=20, decimal_places=2)),
('currency', self.gf('django.db.models.fields.CharField')
(default='BTC', max_length=15)),
))
db.send_create_signal(u'bountyfulcoinsapp', ['Bounty'])
# Adding model 'Tag'
db.create_table(u'bountyfulcoinsapp_tag', (
(u'id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')
(unique=True, max_length=64)),
))
db.send_create_signal(u'bountyfulcoinsapp', ['Tag'])
# Adding M2M table for field bounties on 'Tag'
m2m_table_name = db.shorten_name(u'bountyfulcoinsapp_tag_bounties')
db.create_table(m2m_table_name, (
('id', models.AutoField(
verbose_name='ID', primary_key=True, auto_created=True)),
('tag', models.ForeignKey(
orm[u'bountyfulcoinsapp.tag'], null=False)),
('bounty', models.ForeignKey(
orm[u'bountyfulcoinsapp.bounty'], null=False))
))
db.create_unique(m2m_table_name, ['tag_id', 'bounty_id'])
# Adding model 'SharedBounty'
db.create_table(u'bountyfulcoinsapp_sharedbounty', (
(u'id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('bounty', self.gf('django.db.models.fields.related.ForeignKey')
(to=orm['bountyfulcoinsapp.Bounty'], unique=True)),
('date', self.gf('django.db.models.fields.DateTimeField')
(auto_now_add=True, blank=True)),
('votes', self.gf(
'django.db.models.fields.IntegerField')(default=1)),
))
db.send_create_signal(u'bountyfulcoinsapp', ['SharedBounty'])
# Adding M2M table for field users_voted on 'SharedBounty'
m2m_table_name = db.shorten_name(
u'bountyfulcoinsapp_sharedbounty_users_voted')
db.create_table(m2m_table_name, (
('id', models.AutoField(
verbose_name='ID', primary_key=True, auto_created=True)),
('sharedbounty', models.ForeignKey(
orm[u'bountyfulcoinsapp.sharedbounty'], null=False)),
('user', models.ForeignKey(orm[u'auth.user'], null=False))
))
db.create_unique(m2m_table_name, ['sharedbounty_id', 'user_id'])
def backwards(self, orm):
# Deleting model 'Link'
db.delete_table(u'bountyfulcoinsapp_link')
# Deleting model 'Bounty'
db.delete_table(u'bountyfulcoinsapp_bounty')
# Deleting model 'Tag'
db.delete_table(u'bountyfulcoinsapp_tag')
# Removing M2M table for field bounties on 'Tag'
db.delete_table(db.shorten_name(u'bountyfulcoinsapp_tag_bounties'))
# Deleting model 'SharedBounty'
db.delete_table(u'bountyfulcoinsapp_sharedbounty')
# Removing M2M table for field users_voted on 'SharedBounty'
db.delete_table(
db.shorten_name(u'bountyfulcoinsapp_sharedbounty_users_voted'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'bountyfulcoinsapp.bounty': {
'Meta': {'object_name': 'Bounty'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '20', 'decimal_places': '2'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'BTC'", 'max_length': '15'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bountyfulcoinsapp.Link']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'bountyfulcoinsapp.link': {
'Meta': {'object_name': 'Link'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'})
},
u'bountyfulcoinsapp.sharedbounty': {
'Meta': {'object_name': 'SharedBounty'},
'bounty': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bountyfulcoinsapp.Bounty']", 'unique': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'users_voted': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'bountyfulcoinsapp.tag': {
'Meta': {'object_name': 'Tag'},
'bounties': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['bountyfulcoinsapp.Bounty']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['bountyfulcoinsapp']
|
mit
|
Pluto-tv/chromium-crosswalk
|
chrome/common/extensions/docs/server2/fake_url_fetcher.py
|
85
|
4886
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import posixpath
from future import Future
from path_util import AssertIsDirectory, IsDirectory
class _Response(object):
def __init__(self, content=''):
self.content = content
self.headers = {'Content-Type': 'none'}
self.status_code = 200
class FakeUrlFetcher(object):
def __init__(self, base_path):
self._base_path = base_path
# Mock capabilities. Perhaps this class should be MockUrlFetcher.
self._sync_count = 0
self._async_count = 0
self._async_resolve_count = 0
def _ReadFile(self, filename):
# Fake DownloadError, the error that appengine usually raises.
class DownloadError(Exception): pass
try:
with open(os.path.join(self._base_path, filename), 'r') as f:
return f.read()
except IOError as e:
raise DownloadError(e)
def _ListDir(self, directory):
    # In some tests, we need to test listing a directory from the HTML returned
    # from SVN. This reads an HTML file that contains the directory listing's
    # HTML.
if not os.path.isdir(os.path.join(self._base_path, directory)):
return self._ReadFile(directory[:-1])
files = os.listdir(os.path.join(self._base_path, directory))
html = '<html><title>Revision: 00000</title>\n'
for filename in files:
if filename.startswith('.'):
continue
if os.path.isdir(os.path.join(self._base_path, directory, filename)):
html += '<a>' + filename + '/</a>\n'
else:
html += '<a>' + filename + '</a>\n'
html += '</html>'
return html
def FetchAsync(self, url):
self._async_count += 1
url = url.rsplit('?', 1)[0]
def resolve():
self._async_resolve_count += 1
return self._DoFetch(url)
return Future(callback=resolve)
def Fetch(self, url):
self._sync_count += 1
return self._DoFetch(url)
def _DoFetch(self, url):
url = url.rsplit('?', 1)[0]
result = _Response()
if IsDirectory(url):
result.content = self._ListDir(url)
else:
result.content = self._ReadFile(url)
return result
def CheckAndReset(self, sync_count=0, async_count=0, async_resolve_count=0):
'''Returns a tuple (success, error). Use in tests like:
self.assertTrue(*fetcher.CheckAndReset(...))
'''
errors = []
for desc, expected, actual in (
('sync_count', sync_count, self._sync_count),
('async_count', async_count, self._async_count),
('async_resolve_count', async_resolve_count,
self._async_resolve_count)):
if actual != expected:
errors.append('%s: expected %s got %s' % (desc, expected, actual))
try:
return (len(errors) == 0, ', '.join(errors))
finally:
self.Reset()
def Reset(self):
self._sync_count = 0
self._async_count = 0
self._async_resolve_count = 0
class FakeURLFSFetcher(object):
'''Use a file_system to resolve fake fetches. Mimics the interface of Google
Appengine's urlfetch.
'''
def __init__(self, file_system, base_path):
AssertIsDirectory(base_path)
self._base_path = base_path
self._file_system = file_system
def FetchAsync(self, url, **kwargs):
return Future(value=self.Fetch(url))
def Fetch(self, url, **kwargs):
return _Response(self._file_system.ReadSingle(
posixpath.join(self._base_path, url)).Get())
def UpdateFS(self, file_system, base_path=None):
    '''Replace the underlying FileSystem used to resolve URLs.
    '''
self._file_system = file_system
self._base_path = base_path or self._base_path
class MockURLFetcher(object):
def __init__(self, fetcher):
self._fetcher = fetcher
self.Reset()
def Fetch(self, url, **kwargs):
self._fetch_count += 1
return self._fetcher.Fetch(url, **kwargs)
def FetchAsync(self, url, **kwargs):
self._fetch_async_count += 1
def next(result):
self._fetch_resolve_count += 1
return result
return self._fetcher.FetchAsync(url, **kwargs).Then(next)
def CheckAndReset(self,
fetch_count=0,
fetch_async_count=0,
fetch_resolve_count=0):
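    '''Returns a tuple (success, error), mirroring FakeUrlFetcher.CheckAndReset.
    Use in tests like:
        self.assertTrue(*fetcher.CheckAndReset(...))
    '''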
errors = []
for desc, expected, actual in (
('fetch_count', fetch_count, self._fetch_count),
('fetch_async_count', fetch_async_count, self._fetch_async_count),
('fetch_resolve_count', fetch_resolve_count,
self._fetch_resolve_count)):
if actual != expected:
errors.append('%s: expected %s got %s' % (desc, expected, actual))
try:
return (len(errors) == 0, ', '.join(errors))
finally:
self.Reset()
def Reset(self):
self._fetch_count = 0
self._fetch_async_count = 0
self._fetch_resolve_count = 0
|
bsd-3-clause
|
HengeSense/website
|
apps/news/widgets.py
|
1
|
2559
|
############################################################################
# This file is part of the Maui Web site.
#
# Copyright (c) 2012 Pier Luigi Fiorini
# Copyright (c) 2009-2010 Krzysztof Grodzicki
#
# Author(s):
# Pier Luigi Fiorini <pierluigi.fiorini@gmail.com>
#
# $BEGIN_LICENSE:AGPL3+$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# $END_LICENSE$
############################################################################
from django import forms
from django.conf import settings
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
class TinyMCEEditor(forms.Textarea):
class Media:
js = (
"js/jquery-1.8.1.min.js",
"js/jquery.tinymce.js",
)
def __init__(self, language=None):
self.language = language or settings.LANGUAGE_CODE[:2]
super(TinyMCEEditor, self).__init__()
def render(self, name, value, attrs=None):
rendered = super(TinyMCEEditor, self).render(name, value, attrs)
context = {
"name": name,
"lang": self.language[:2],
"language": self.language,
"STATIC_URL": settings.STATIC_URL,
}
return rendered + mark_safe(render_to_string(
"admin/news/widgets/tinymce.html", context))
class WYMEditor(forms.Textarea):
class Media:
js = (
"js/jquery-1.8.1.min.js",
"cms/wymeditor/jquery.wymeditor.pack.js",
)
def __init__(self, language=None, attrs=None):
self.language = language or settings.LANGUAGE_CODE[:2]
self.attrs = {"class": "wymeditor"}
if attrs:
self.attrs.update(attrs)
super(WYMEditor, self).__init__(attrs)
def render(self, name, value, attrs=None):
rendered = super(WYMEditor, self).render(name, value, attrs)
context = {
"name": name,
"lang": self.language[:2],
"language": self.language,
"STATIC_URL": settings.STATIC_URL,
"page_link_wymeditor": 0,
"filebrowser": 0,
}
return rendered + mark_safe(render_to_string(
"admin/news/widgets/wymeditor.html", context))
|
agpl-3.0
|
afaheem88/tempest
|
tempest/api/image/v2/test_images_tags.py
|
9
|
1504
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest.api.image import base
from tempest import test
class ImagesTagsTest(base.BaseV2ImageTest):
@test.idempotent_id('10407036-6059-4f95-a2cd-cbbbee7ed329')
def test_update_delete_tags_for_image(self):
body = self.create_image(container_format='bare',
disk_format='raw',
visibility='private')
image_id = body['id']
tag = data_utils.rand_name('tag')
self.addCleanup(self.client.delete_image, image_id)
        # Create an image tag and verify it.
self.client.add_image_tag(image_id, tag)
body = self.client.show_image(image_id)
self.assertIn(tag, body['tags'])
        # Delete the image tag and verify it.
self.client.delete_image_tag(image_id, tag)
body = self.client.show_image(image_id)
self.assertNotIn(tag, body['tags'])
|
apache-2.0
|
vine-comment/live_portal
|
live_portal/urls.py
|
1
|
1677
|
from live_portal import views
from views import *
from django.conf.urls import patterns, include, url
from django.contrib.auth.decorators import login_required
from registration.backends.simple.views import RegistrationView
from forms import *
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# admin
url(r'^admin/', include(admin.site.urls)),
# favicon
url(r'^favicon\.ico$', RedirectView.as_view(url=settings.STATIC_URL + 'favicon/1.ico')),
# auth & accounts
url(r'^auth', TemplateView.as_view(template_name='registration/auth.html'), name='auth'),
url(r'^accounts/register/$',
RegistrationView.as_view(form_class=LivePortalRegistrationForm),
name='registration_register'),
url(r'^accounts/', include('registration.urls')),
url(r'^resetpassword/passwordsent/$', 'django.contrib.auth.views.password_reset', name='auth_password_reset'),
url(r'^changepassword/passwordsent/$', 'django.contrib.auth.views.password_change', name='auth_password_change'),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', name='auth_logout'),
url(r'^users/(?P<name>.*?)/?$', HomeView.as_view(), name='user'),
# follows
url(r'^user/follows$', login_required(UserFollowsView.as_view()), name='user_follows'),
# show anchors
url(r'^show/(?P<tag>.*?)/?$', ShowView.as_view(), name='show'),
url(r'^/?$', HomeView.as_view(), name='home'),
# ajax
url(r'^ajax/enter_room/(?P<room>.*?)/?$', views.enter_room),
url(r'^ajax/follow_room/(?P<room>.*?)/?$', views.follow_room),
url(r'^ajax/unfollow_room/(?P<room>.*?)/?$', views.unfollow_room),
)
|
gpl-3.0
|
karimbahgat/Pure-Python-Greiner-Hormann-Polygon-Clipping
|
GreinerHorman_Algo/KimKim/puremidpoints_v16(k&k,tryfixcrosschange).py
|
1
|
36981
|
# -*- coding: UTF-8 -*-
# Efficient Clipping of Arbitrary Polygons
#
# Copyright (c) 2011, 2012 Helder Correia <helder.mc@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# FINAL BEST IDEA, IMPLEMENTED NOW BUT DOESN'T WORK BECAUSE THE INTERSECTION STAGE ISN'T FINDING ALL INTERSECTIONS
# USE PREV AND NEXT MIDPOINT LOCS FOR DETERMINING THE ENTRY FLAG
# NORMAL RULES, EXCEPT FOR INTERSECTIONMODE TURN OFF INTERSECTION FLAGS FOR OUT-ON-ON AND ON-ON-OUT BECAUSE THEY ARE JUST TANGENT AND NOT RELATED TO INSIDES
# FINALLY, WHEN TRAVERSING, AFTER COMPLETING ONE POLY, SEARCH FOR THE NEXT ISECT THAT IS UNCHECKED IN BOTH CURRENT AND NEIGHBOUR
"""
# Greiner-Hormann Polygon Clipping with support for degenerates
This is a fork aimed at improving Helder Correia's pure-Python Greiner-Hormann implementation for polygon clipping, partly for educational purposes and partly for portable pure-Python clipping.
Status: Incomplete/unstable.
Fork author: Karim Bahgat <karim.bahgat.norway@gmail.com>
-----------------------------------------------------------
# Efficient Clipping of Arbitrary Polygons
Based on the paper "Efficient Clipping of Arbitrary Polygons" by Günther
Greiner (greiner[at]informatik.uni-erlangen.de) and Kai Hormann
(hormann[at]informatik.tu-clausthal.de), ACM Transactions on Graphics
1998;17(2):71-83.
Available at: http://www.inf.usi.ch/hormann/papers/Greiner.1998.ECO.pdf
You should have received the README file along with this program.
If not, see <https://github.com/helderco/polyclip>
"""
DEBUG = False
class Vertex(object):
"""Node in a circular doubly linked list.
    This class is almost exactly as described in the paper by Greiner and Hormann.
"""
def __init__(self, vertex, alpha=0.0, intersect=False, entry=None, checked=False, degen=False):
if isinstance(vertex, Vertex):
vertex = (vertex.x, vertex.y)
# checked = True
self.x, self.y = vertex # point coordinates of the vertex
self.next = None # reference to the next vertex of the polygon
self.prev = None # reference to the previous vertex of the polygon
self.neighbour = None # reference to the corresponding intersection vertex in the other polygon
self.entry = entry # True if intersection is an entry point, False if exit
self.alpha = alpha # intersection point's relative distance from previous vertex
self.intersect = intersect # True if vertex is an intersection
self.checked = checked # True if the vertex has been checked (last phase)
self.couple = None
self.cross_change = None
@property
def xy(self):
return self.x, self.y
def isInside(self, poly):
if testLocation(self, poly) in ("in","on"):
return True
else: return False
def setChecked(self):
self.checked = True
if self.neighbour and not self.neighbour.checked:
self.neighbour.setChecked()
def copy(self):
copy = Vertex(self) # point coordinates of the vertex
copy.next = self.next # reference to the next vertex of the polygon
copy.prev = self.prev # reference to the previous vertex of the polygon
copy.neighbour = self.neighbour # reference to the corresponding intersection vertex in the other polygon
copy.entry = self.entry # True if intersection is an entry point, False if exit
copy.alpha = self.alpha # intersection point's relative distance from previous vertex
copy.intersect = self.intersect # True if vertex is an intersection
copy.couple = self.couple
copy.cross_change = self.cross_change
copy.checked = self.checked
return copy
def __repr__(self):
"""String representation of the vertex for debugging purposes."""
return "(%.2f, %.2f) <-> %s(%.2f, %.2f)%s <-> (%.2f, %.2f) %s" % (
self.prev.x, self.prev.y,
'i' if self.intersect else ' ',
self.x, self.y,
('e' if self.entry else 'x') if self.intersect else ' ',
self.next.x, self.next.y,
' !' if self.intersect and not self.checked else ''
)
class Polygon(object):
"""Manages a circular doubly linked list of Vertex objects that represents a polygon."""
first = None
def add(self, vertex):
"""Add a vertex object to the polygon (vertex is added at the 'end' of the list")."""
if not self.first:
self.first = vertex
self.first.next = vertex
self.first.prev = vertex
else:
next = self.first
prev = next.prev
next.prev = vertex
vertex.next = next
vertex.prev = prev
prev.next = vertex
def replace(self, old, new):
        # when replacing an old normal vertex with a new intersection vertex at the same xy
# only changes the attributes in place
old.intersect = new.intersect
old.x,old.y = new.x,new.y
old.neighbour = new.neighbour
old.neighbour.neighbour = old
old.entry = new.entry
old.alpha = new.alpha
## new.next = old.next
## new.prev = old.prev
## if old == self.first:
## #print "replaced first", self.first, new
## self.first = new
## old.prev.next = new
## old.next.prev = new
def insert(self, vertex, start, end):
"""Insert and sort a vertex between a specified pair of vertices.
This function inserts a vertex (most likely an intersection point)
between two other vertices (start and end). These other vertices
cannot be intersections (that is, they must be actual vertices of
the original polygon). If there are multiple intersection points
between the two vertices, then the new vertex is inserted based on
its alpha value.
"""
if vertex.xy == start.xy:
copy = vertex.copy()
self.replace(start, copy)
            return # don't process further
elif vertex.xy == end.xy:
copy = vertex.copy()
self.replace(end, copy)
            return # don't process further
# position based on alpha
curr = start
while curr != end and curr.alpha < vertex.alpha:
curr = curr.next
if vertex.xy == curr.prev.xy:
## if vertex.xy == curr.xy: self.replace(curr, vertex)
## elif vertex.xy == curr.prev.xy: self.replace(curr, vertex.prev)
vertex.neighbour.neighbour = curr.prev
            return # don't do it if same as a previously inserted intersection
if vertex.xy == curr.xy:
## if vertex.xy == curr.xy: self.replace(curr, vertex)
## elif vertex.xy == curr.prev.xy: self.replace(curr, vertex.prev)
vertex.neighbour.neighbour = curr
            return # don't do it if same as a previously inserted intersection
vertex.next = curr
vertex.prev = curr.prev
vertex.next.prev = vertex
vertex.prev.next = vertex
#print "inserted",vertex
def next(self, v):
"""Return the next non intersecting vertex after the one specified."""
c = v
while c.intersect:
c = c.next
return c
@property
def first_intersect(self):
"""Return the first unchecked intersection point in the polygon."""
for v in self.iter():
if v.intersect and not v.checked:
break
return v
@property
def points(self):
"""Return the polygon's points as a list of tuples (ordered coordinates pair)."""
p = []
for v in self.iter():
p.append((v.x, v.y))
return p
def unprocessed(self):
"""Check if any unchecked intersections remain in the polygon."""
for v in self.iter():
if v.intersect and not v.checked:
yield True
def union(self, clip):
return self.clip(clip, False, False)
def intersect(self, clip):
return self.clip(clip, True, True)
def difference(self, clip):
return self.clip(clip, False, True)
def clip(self, clip, s_entry, c_entry):
"""Clip this polygon using another one as a clipper.
This is where the algorithm is executed. It allows you to make
a UNION, INTERSECT or DIFFERENCE operation between two polygons.
Given two polygons A, B the following operations may be performed:
A|B ... A OR B (Union of A and B)
A&B ... A AND B (Intersection of A and B)
A\B ... A - B
B\A ... B - A
The entry records store the direction the algorithm should take when
it arrives at that entry point in an intersection. Depending on the
operation requested, the direction is set as follows for entry points
(f=forward, b=backward; exit points are always set to the opposite):
Entry
A B
-----
A|B b b
A&B f f
A\B b f
B\A f b
f = True, b = False when stored in the entry record
"""
# detect clip mode
unionmode = not s_entry and not c_entry
intersectionmode = s_entry and c_entry
differencemode = not s_entry and c_entry
# prep by removing repeat of startpoint at end
first = self.first
last = first.prev
if last.x == first.x and last.y == first.y:
first.prev = last.prev
last.prev.next = first
first = clip.first
last = first.prev
if last.x == first.x and last.y == first.y:
first.prev = last.prev
last.prev.next = first
# TODO: maybe also remove repeat points anywhere?
# ...
# phase one - find intersections
# ------------------------------
anyintersection = False
s_intsecs = []
c_intsecs = []
for s in self.iter(): # for each vertex Si of subject polygon do
for c in clip.iter(): # for each vertex Cj of clip polygon do
try:
#print "find isect %s - %s and %s - %s" %(s.xy, self.next(s.next).xy, c.xy, clip.next(c.next).xy )
i, alphaS, alphaC = intersect_or_on(s, self.next(s.next),
c, clip.next(c.next))
iS = Vertex(i, alphaS, intersect=True, entry=False)
iC = Vertex(i, alphaC, intersect=True, entry=False)
iS.neighbour = iC
iC.neighbour = iS
s_intsecs.append( (iS, alphaS, s, self.next(s.next)) )
c_intsecs.append( (iC, alphaC, c, clip.next(c.next)) )
anyintersection = True
except TypeError:
                    pass # intersect_or_on() returned None (no intersection), so unpacking raised TypeError
# insert intersections into originals
for iS,a,s,s_next in reversed(s_intsecs):
if a == 0:
self.replace(s, iS)
elif a == 1:
self.replace(s_next, iS)
else:
self.insert(iS, s, s_next)
for iC,a,c,c_next in reversed(c_intsecs):
if a == 0:
self.replace(c, iC)
elif a == 1:
self.replace(c_next, iC)
else:
clip.insert(iC, c, c_next)
#print "testing if insert was done correctly"
for s in self.iter():
#print s
pass
#print "and"
for c in clip.iter():
#print c
pass
# phase one and a half - no intersections between subject and clip, so correctly return results
# --------------------
def specialcase_insidetest():
resultpolys = []
if unionmode: # union
if clip.first.isInside(self):
# clip polygon is entirely inside subject, so just return subject shell
clipped = Polygon()
for s in self.iter():
clipped.add(Vertex(s))
polytuple = (clipped, [])
resultpolys.append(polytuple)
elif self.first.isInside(clip):
# subject polygon is entirely inside clip, so just return clip shell
clipped = Polygon()
for c in clip.iter():
clipped.add(Vertex(c))
polytuple = (clipped, [])
resultpolys.append(polytuple)
else:
#clip polygon is entirely outside subject, so return both
clipped = Polygon()
for s in self.iter():
clipped.add(Vertex(s))
polytuple = (clipped, [])
resultpolys.append(polytuple)
clipped = Polygon()
for c in clip.iter():
clipped.add(Vertex(c))
polytuple = (clipped, [])
resultpolys.append(polytuple)
elif intersectionmode: # intersection
if clip.first.isInside(self):
# clip polygon is entirely inside subject, so the intersection is only the clip polygon
clipped = Polygon()
for c in clip.iter():
clipped.add(Vertex(c))
polytuple = (clipped, [])
resultpolys.append(polytuple)
elif self.first.isInside(clip):
# subject polygon is entirely inside clip, so the intersection is only the subject polygon
clipped = Polygon()
for s in self.iter():
clipped.add(Vertex(s))
polytuple = (clipped, [])
resultpolys.append(polytuple)
else:
#clip polygon is entirely outside subject, so no intersection to return
pass
elif differencemode: # difference
if clip.first.isInside(self):
# clip polygon is entirely inside subject, so the difference is subject with clip as a hole
clipped = Polygon()
for s in self.iter():
clipped.add(Vertex(s))
hole = Polygon()
for c in clip.iter():
hole.add(Vertex(c))
polytuple = (clipped, [hole])
resultpolys.append(polytuple)
elif self.first.isInside(clip):
# subject polygon is entirely inside clip, so there is no difference
pass
else:
#clip polygon is entirely outside subject, so difference is simply the subject
clipped = Polygon()
for s in self.iter():
clipped.add(Vertex(s))
polytuple = (clipped, [])
resultpolys.append(polytuple)
# no need to continue so just return result
return resultpolys
if not anyintersection:
return specialcase_insidetest()
# phase two - identify entry/exit points
# --------------------------------------
# From K&K
def mark_flags(poly, c, c_entry):
"c and c_entry are not actually the clip, can be for both s and c, just too lazy to change."
#print "intersection"
#print "\t",c
# intersection is degenerate, is the start/endpoint of a line
# so maybe delete intersection flag based on prev/next locations
prevloc = testLocation(c.prev, poly)
nextloc = testLocation(c.next, poly)
if prevloc == "on" or nextloc == "on":
prevmid = Vertex(((c.x+c.prev.x)/2.0,(c.y+c.prev.y)/2.0))
prevloc = testLocation(prevmid, poly)
nextmid = Vertex(((c.x+c.next.x)/2.0,(c.y+c.next.y)/2.0))
nextloc = testLocation(nextmid, poly)
if prevloc == "in" or nextloc == "in":
poly.anyinside = True
#print "\t %s -> degenintsec -> %s" %(prevloc,nextloc)
if prevloc == "out":
if nextloc == "out":
#just touching
c.entry = "en/ex" if c_entry else "ex/en"
elif nextloc == "in":
c.entry = "en" if c_entry else "ex"
elif nextloc == "on":
c.entry = "en" if c_entry else "ex"
elif prevloc == "in":
#union and difference should never go inside the other polygon
#so this should only happen for intersectmode...
if nextloc == "in":
#just touching
c.entry = "ex/en" if c_entry else "en/ex"
elif nextloc == "out":
c.entry = "ex" if c_entry else "en"
elif nextloc == "on":
c.entry = "ex" if c_entry else "en"
elif prevloc == "on":
if nextloc == "on":
c.entry = None
elif nextloc == "out":
c.entry = "ex" if c_entry else "en"
elif nextloc == "in":
c.entry = "en" if c_entry else "ex"
self.anyinside = False
# set clip
prevsingle = None
for c in clip.iter():
if c.intersect:
mark_flags(self, c, c_entry)
# set couple
if c.entry in ("ex","en"):
if prevsingle and c.entry == prevsingle.entry:
c.couple = prevsingle
prevsingle.couple = c
prevsingle = c
# set crosschange
# some modifications based on implementation in qt clipper source code
#if c.entry == "en/ex" == c.neighbour.entry or c.entry == "ex/en" == c.neighbour.entry:
if False: #c.entry == "en/ex" or c.entry == "ex/en":
print "Maybe crosschange..."
                    # tri1 (using t1,t2,t3 so the loop variable c is not clobbered)
                    #t1,t2,t3 = c.neighbour.prev, c.prev, c.neighbour.next
                    t1,t2,t3 = c.neighbour.next, c.prev, c.neighbour.prev
                    dir1 = 0.5 * (t1.x * (t2.y-t3.y) +
                                  t2.x * (t3.y-t1.y) +
                                  t3.x * (t1.y-t2.y))
                    # tri2
                    #t1,t2,t3 = c.neighbour.prev, c.prev, c.next
                    t1,t2,t3 = c.next, c.prev, c.neighbour.prev
                    dir2 = 0.5 * (t1.x * (t2.y-t3.y) +
                                  t2.x * (t3.y-t1.y) +
                                  t3.x * (t1.y-t2.y))
print dir1,dir2
#if dir1 < 0 != dir2 < 0: # different orientation
if (dir1 * dir2) < 0: # different orientation means at least one negative, making the results less than 0
print "CROSSCHANGE!!!"
c.cross_change = True
c.neighbour.cross_change = True # not sure if should set neighbour too
# maybe early abort
if not self.anyinside and intersectionmode:
return []
# what about perfect overlap???
# ...
if False: #DEBUG:
print "view clip entries"
for c in clip.iter():
print c, c.entry
# find first isect where both neighbours have valid flag
for c in clip.iter():
if c.entry:
s = c.neighbour
mark_flags(clip, s, s_entry)
if s.entry:
first_c = c
first_s = s
# print 777,s.entry
break
else:
return specialcase_insidetest()
#raise Exception("weird special case, no neighbours that both have flag left")
# autoset subj, if neighbour of first is different, then set all as opposite
# TODO: how deal with s_entry in case of different modes...?
print "view first"
print first_c, first_c.entry
print first_s, first_s.entry
if first_c.entry != first_s.entry: # and s_entry: # this is the behaviour for standard intersect mode, otherwise flip, hence the s_entry
for c in clip.iter():
if c.entry:
if c.entry == "en": c.neighbour.entry = "ex"
elif c.entry == "ex": c.neighbour.entry = "en"
elif c.entry == "en/ex": c.neighbour.entry = "ex/en"
elif c.entry == "ex/en": c.neighbour.entry = "en/ex"
# else set all same
else:
for c in clip.iter():
if c.entry:
c.neighbour.entry = c.entry
# set couple for subj (not sure if needed)
prevsingle = None
for s in self.iter():
if s.entry:
if s.entry in ("ex","en"):
if prevsingle and s.entry == prevsingle.entry:
s.couple = prevsingle
prevsingle.couple = s
prevsingle = s
if False: #DEBUG:
print "view subj entries"
for s in self.iter():
print s, s.entry
# phase three - construct a list of clipped polygons
# --------------------------------------------------
######
# Defs
def next_unprocessed(vert):
origvert = vert
while vert:
if vert.entry and not (vert.checked or vert.neighbour.checked):
#print "vert, found next unproc", vert, vert.checked, vert.neighbour.checked
if vert.couple:
# rule 1
if vert.couple.entry and vert.entry:
# rule 2
if vert.couple.entry == "en" and vert.entry == "en":
return vert.couple
elif vert.couple.entry == "ex" and vert.entry == "ex":
return vert
# rule 3
else:
return vert
vert = vert.next
if vert == origvert:
# if returned to first, return None
return None
def DeleteFlag1(cur, stat):
if cur.entry == "en/ex":
cur.entry = None
if cur.cross_change:
if stat == "D3":
return "D3"
else:
return "D4"
if stat == "D3":
return "D4"
else:
return "D3"
if cur.entry == "ex/en":
if stat == "D3":
cur.entry = "en"
return "D2"
else:
cur.entry = "ex"
return "D1"
if cur.entry == "en":
cur.entry = None
return "D1"
if cur.entry == "ex":
cur.entry = None
return "D2"
def DeleteFlag2(cur, prev, stat):
if cur.entry == "en/ex":
if stat == "D1":
cur.entry = "ex"
else:
cur.entry = "en"
if cur.cross_change:
if stat == "D1":
return "D4"
else:
return "D3"
if stat == "D1":
return "D3"
else:
return "D4"
if cur.entry == "ex/en":
if stat == "D1":
cur.entry = "en"
else:
cur.entry = "ex"
if cur.cross_change:
if stat == "D1":
return "D4"
else:
return "D3"
if stat == "D1":
return "D3"
else:
return "D4"
if cur.entry == "en":
cur.entry = None
if stat == "D1" and cur.couple and prev.couple == cur:
return "D1"
if stat == "D1":
return "D3"
else:
return "D4"
if cur.entry == "ex":
cur.entry = None
if stat != "D1" and cur.couple and prev.couple == cur:
return "D2"
else:
if stat == "D1":
return "D3"
else:
return "D4"
def proceed(cur, stat):
cur.checked = True
if stat == "D1":
clipped.add(Vertex(cur))
return cur.next
elif stat == "D2":
clipped.add(Vertex(cur))
return cur.prev
else:
return cur.neighbour
####
resultpolys = []
self.first.checked = True
cur = prev = start = next_unprocessed(self.first)
while cur:
# each new polygon
print "new poly"
stat = DeleteFlag1(cur, "D3")
if DEBUG: print "v", cur, cur.entry, stat
clipped = Polygon()
cur = proceed(cur, stat)
# collect vertexes
while cur != start:
if DEBUG: print "v", cur, cur.entry, stat
if cur.entry:
if stat == "D1" or stat == "D2":
stat = DeleteFlag2(cur, prev, stat)
else:
stat = DeleteFlag1(cur, stat)
prev = cur
cur = proceed(cur, stat)
# return to first vertex
clipped.add(Vertex(clipped.first))
print clipped
resultpolys.append((clipped,[]))
cur = prev = start = next_unprocessed(self.first)
        # finally, sort into exteriors and holes
        # (iterate over a snapshot: deleting from resultpolys while
        # enumerating it would shift indexes and skip entries)
        for polyext, polyholes in list(resultpolys):
            for otherext, otherholes in resultpolys:
                if polyext == otherext:
                    continue # don't compare to self
                if polyext.first.isInside(otherext):
                    otherholes.append(polyext) #poly is within other so make into a hole
                    resultpolys.remove((polyext, polyholes)) #and delete poly from being an independent poly
                    break
        return resultpolys
def __repr__(self):
"""String representation of the polygon for debugging purposes."""
count, out = 1, "\n"
for s in self.iter():
out += "%02d: %s\n" % (count, str(s))
count += 1
return out
def iter(self):
"""Iterator generator for this doubly linked list."""
s = self.first
while True:
yield s
s = s.next
if s == self.first:
return
def intersect_or_on(s1, s2, c1, c2):
"""Same as intersect(), except returns
intersection even if degenerate.
"""
den = float( (c2.y - c1.y) * (s2.x - s1.x) - (c2.x - c1.x) * (s2.y - s1.y) )
if not den:
return None
us = ((c2.x - c1.x) * (s1.y - c1.y) - (c2.y - c1.y) * (s1.x - c1.x)) / den
uc = ((s2.x - s1.x) * (s1.y - c1.y) - (s2.y - s1.y) * (s1.x - c1.x)) / den
if (0 <= us <= 1) and (0 <= uc <= 1):
        #subj and clip line intersect each other somewhere in the middle
        #this includes the possibility of degenerates (edge intersections)
x = s1.x + us * (s2.x - s1.x)
y = s1.y + us * (s2.y - s1.y)
return (x, y), us, uc
else:
return None
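# A worked example of intersect_or_on (a sketch with hypothetical coordinates;
# Vertex objects are built directly here):
#   s1, s2 = Vertex((0, 0)), Vertex((2, 2))
#   c1, c2 = Vertex((0, 2)), Vertex((2, 0))
#   intersect_or_on(s1, s2, c1, c2)  # -> ((1.0, 1.0), 0.5, 0.5)
# Both us and uc are 0.5 because the crossing is the midpoint of each segment;
# parallel segments make den zero, so the function returns None.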
def testLocation(point, polygon):
"""
Effective scanline test for the location of a point vis a vis a polygon.
Returns either "in","on",or "out".
Based on algorithm 7 from:
Kai Horman and Alexander Agathos,
"The point in polygon problem for arbitrary polygons".
Computational Geometry: Theory and Applications,
Volume 20 Issue 3, November 2001
"""
# begin
if polygon.first.y == point.y and polygon.first.x == point.x:
return "on" # vertex
w =0
for v in polygon.iter():
if v.next.y == point.y:
if v.next.x == point.x:
return "on" # vertex
else:
if v.y == point.y and (v.next.x > point.x) == (v.x < point.x):
return "on" # edge
# if crossing horizontal line
if (v.y < point.y and v.next.y >= point.y)\
or (v.y >= point.y and v.next.y < point.y):
if v.x >= point.x:
if v.next.x > point.x:
# modify w
if v.next.y > v.y: w += 1
else: w -= 1
else:
det = (v.x - point.x) * (v.next.y - point.y) \
- (v.next.x - point.x) * (v.y - point.y)
if det == 0: return "on" # edge
# if right crossing
if (det > 0 and v.next.y > v.y)\
or (det < 0 and v.next.y < v.y):
# modify w
if v.next.y > v.y: w += 1
else: w -= 1
else:
if v.next.x > point.x:
det = (v.x - point.x) * (v.next.y - point.y) \
- (v.next.x - point.x) * (v.y - point.y)
if det == 0: return "on" # edge
# if right crossing
if (det > 0 and v.next.y > v.y)\
or (det < 0 and v.next.y < v.y):
# modify w
if v.next.y > v.y: w += 1
else: w -= 1
if (w % 2) != 0:
return "in"
else:
return "out"
def clip_polygon(subject, clipper, operation = 'difference'):
"""
Higher level function for clipping two polygons (from a list of points).
Since input polygons are lists of points, output is also in list format.
Each polygon in the resultlist is a tuple of: (polygon exterior, list of polygon holes)
"""
Subject = Polygon()
Clipper = Polygon()
for s in subject:
Subject.add(Vertex(s))
for c in clipper:
Clipper.add(Vertex(c))
    clipped = Clipper.difference(Subject)\
              if operation == 'reversed-diff'\
              else getattr(Subject, operation)(Clipper)
clipped = [(ext.points,[hole.points for hole in holes]) for ext,holes in clipped]
return clipped
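# A minimal usage sketch (the status above is "Incomplete/unstable", so treat
# the output as indicative rather than guaranteed):
#   subject = [(0, 0), (6, 0), (6, 6), (0, 6), (0, 0)]
#   clipper = [(4, 4), (10, 4), (10, 10), (4, 10), (4, 4)]
#   clip_polygon(subject, clipper, 'intersect')
#   # expected: a single (exterior, holes) tuple whose exterior traces the
#   # 2x2 overlap square between (4, 4) and (6, 6), with an empty holes list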
if __name__ == "__main__":
"""
Test and visualize various polygon overlap scenarios.
Visualization requires the pure-Python PyDraw library from
https://github.com/karimbahgat/PyDraw
"""
subjpoly = [(0,0),(6,0),(6,6),(0,6),(0,0)]
# normal intersections
testpolys_normal = {"simple overlap":
[(4,4),(10,4),(10,10),(4,10),(4,4)],
"jigzaw overlap":
[(1,4),(3,8),(5,4),(5,10),(1,10),(1,4)],
## "smaller, outside":
## [(7,7),(7,9),(9,9),(9,7),(7,7)],
## "smaller, inside":
## [(2,2),(2,4),(4,4),(4,2),(2,2)],
## "larger, covering all":
## [(-1,-1),(-1,7),(7,7),(7,-1),(-1,-1)],
## "larger, outside":
## [(-10,-10),(-10,-70),(-70,-70),(-70,-10),(-10,-10)]
}
# degenerate intersections
testpolys_degens = {"degenerate, starts on edge intersection and goes inside":
[(0,5),(6,4),(10,4),(10,10),(4,10),(0,5)],
## "degenerate, starts on edge intersection and goes outside":
## [(5,6),(5.2,5.5),(5,5.4),(4.8,5.5)],
"degenerate, hesitating to enter and exit":
[(1,5),(6,4),(6,5),(10,4),(10,10),(4,10),(2,6),(1,6),(1,5)],
"degenerate, also multiple degens along shared line":
[(1,5),(6,4),(6,5),(10,4),(10,10),(4,10),(2,6),(1.3,6),(1.6,6),(1,6),(1,5)],
"degenerate, back and forth on-out along shared line":
[(1,5),(6,4),(6,5),(10,4),(10,10),(4,10),(2,6),(1.5,5.7),(1,6),(0,6),(1,5)]
}
# nextto/almost copy special cases
testpolys_nextto_almostsame = {"degenerate, perfect overlap":
[(0,0),(6,0),(6,6),(0,6),(0,0)],
"degenerate, partial inside overlap":
[(1,0),(6,0),(6,6),(1,6),(1,0)],
"degenerate, right next to eachother":
[(0,6),(6,6),(6,10),(0,10),(0,6)],
"degenerate, partial right next to eachother":
[(2,6),(6,6),(6,10),(2,10),(2,6)]
}
#run operation
import os
import time
import pydraw
DEBUG = False
# test geo
## def test_draw(testname, subjpoly, clippoly, mode):
## t = time.time()
## #print testname, mode
## resultpolys = clip_polygon(subjpoly,clippoly,mode)
## print "finished:",len(resultpolys),time.time()-t
## print "start",str(resultpolys)[:100]
## print "end",str(resultpolys)[-100:]
## crs = pydraw.CoordinateSystem([0,80,45,50])
## img = pydraw.Image(300,300, crs=crs)
## img.drawpolygon(subjpoly, fillcolor=(222,0,0,111))
## img.drawpolygon(clippoly, fillcolor=(0,222,0,111))
## for ext,holes in resultpolys:
## img.drawpolygon(ext,holes)
## img.drawgridticks(10,10)
## img.save("test_output/"+testname+"-"+mode+".png")
##
## import pygeoj
## world = pygeoj.load("cshapes.geo.json")
## norw = next(cntr.geometry.coordinates[0][0] for cntr in world if cntr.properties["CNTRY_NAME"] == "Norway")
## swed = next(cntr.geometry.coordinates[0][0] for cntr in world if cntr.properties["CNTRY_NAME"] == "Sweden")
## test_draw("norway-sweden", norw, swed, "difference")
##
## breakonpurpose
# test basics
def test_draw(testname, subjpoly, clippoly, mode):
t = time.time()
#print testname, mode
resultpolys = clip_polygon(subjpoly,clippoly,mode)
#print "finished:",resultpolys,time.time()-t
crs = pydraw.CoordinateSystem([-1,-1,11,11])
img = pydraw.Image(300,300, crs=crs)
img.drawpolygon(subjpoly, fillcolor=(222,0,0))
img.drawpolygon(clippoly, fillcolor=(0,222,0))
for ext,holes in resultpolys:
img.drawpolygon(ext,holes)
img.drawgridticks(1,1)
img.save("test_output/"+testname+"-"+mode+".png")
if not os.path.lexists("test_output"): os.mkdir("test_output")
for testname,testclip in testpolys_normal.items():
print testname
for mode in ("intersect","union","difference"):
print mode
test_draw(testname, subjpoly, testclip, mode)
for testname,testclip in testpolys_degens.items():
print testname
for mode in ("intersect","union","difference"):
print mode
test_draw(testname, subjpoly, testclip, mode)
for testname,testclip in testpolys_nextto_almostsame.items():
print testname
for mode in ("intersect","union","difference"):
print mode
test_draw(testname, subjpoly, testclip, mode)
|
gpl-3.0
|