repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (15 classes)
---|---|---|---|---|---
lauromoraes/redes | MyTCPRequestHandler.py | 1 | 1443 |
import logging
import socket
import threading
import SocketServer
import time
from recvall import *
from calc import *
logging.basicConfig(level=logging.DEBUG, format="%(name)s: %(message)s")
class MyTCPRequestHandler(SocketServer.BaseRequestHandler):
def __init__(self, request, client_address, server):
self.logger = logging.getLogger('MyTCPRequestHandler')
self.logger.debug('__init__')
SocketServer.BaseRequestHandler.__init__(self, request, client_address, server)
return
def setup(self):
self.logger.debug('setup')
return SocketServer.BaseRequestHandler.setup(self)
def handle(self):
self.logger.debug('handle')
data = recvall(self.request, 2)
#print(self.request.accept()[1])
#current_thread = threading.currentThread()
#resp = "%s, %s" % (current_thread.getName(), data)
#self.logger.debug('Thread: %s | recv()->"%s"', current_thread.getName(), data)
#self.logger.debug('Threads: %s' % str( [ t.getName() for t in threading.enumerate()] ) )
resp = calc(data)
sent = 0
size = 1024*5
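# stream the response in chunks of at most 5 KB; the final (possibly partial)
# chunk is followed by a short pause before the write side is shut down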
while(sent < len(resp)):
if(sent+size <= len(resp)):
sent += self.request.send(resp[sent:sent+size])
else:
sent += self.request.send(resp[sent:])
time.sleep(0.1)
#self.request.sendall("data")
self.request.shutdown(socket.SHUT_WR)
self.request.close()
#time.sleep(3)
return
def finish(self):
self.logger.debug('finish')
return SocketServer.BaseRequestHandler.finish(self)
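# A minimal client-side sketch for exercising this handler (assumptions: the
# handler is registered with a SocketServer.ThreadingTCPServer listening on
# HOST/PORT, and the protocol is "send a 2-byte request, read the reply until
# EOF", matching recvall(self.request, 2) and the SHUT_WR call above):
#
#   import socket
#   HOST, PORT = "localhost", 9999      # hypothetical server address
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   sock.connect((HOST, PORT))
#   sock.sendall("40")                  # 2-byte payload consumed by recvall(..., 2)
#   chunks = []
#   while True:                         # the server half-closes when it is done sending
#       data = sock.recv(4096)
#       if not data:
#           break
#       chunks.append(data)
#   sock.close()
#   print "".join(chunks)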
| gpl-2.0

Axam/nsx-web | nailgun/nailgun/test/unit/test_node_nic_handler.py | 2 | 15780 |
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.openstack.common import jsonutils
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import reverse
class TestHandlers(BaseIntegrationTest):
def test_get_handler_with_wrong_nodeid(self):
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': 1}),
expect_errors=True,
headers=self.default_headers)
self.assertEqual(resp.status_code, 404)
def test_get_handler_with_invalid_data(self):
meta = self.env.default_metadata()
meta["interfaces"] = []
node = self.env.create_node(api=True, meta=meta)
meta_list = [
{'interfaces': None},
{'interfaces': {}}
]
for nic_meta in meta_list:
meta = self.env.default_metadata()
meta.update(nic_meta)
node_data = {'mac': node['mac'], 'meta': meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
expect_errors=True,
headers=self.default_headers
)
self.assertEqual(resp.status_code, 400)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers
)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
self.assertEqual(response, [])
def test_get_handler_with_incompleted_iface_data(self):
meta = self.env.default_metadata()
meta["interfaces"] = []
node = self.env.create_node(api=True, meta=meta)
meta_clean_list = [
{'interfaces': [{'name': '', 'mac': '00:00:00'}]},
{'interfaces': [{'name': 'eth0', 'mac': ''}]},
{'interfaces': [{'mac': '00:00:00'}]},
{'interfaces': [{'name': 'eth0'}]}
]
for nic_meta in meta_clean_list:
meta = self.env.default_metadata()
meta.update(nic_meta)
node_data = {'mac': node['mac'], 'meta': meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
expect_errors=True,
headers=self.default_headers
)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers
)
ifaces = jsonutils.loads(resp.body)
self.assertEqual(ifaces, [])
def test_get_handler_with_invalid_speed_data(self):
meta = self.env.default_metadata()
meta["interfaces"] = []
node = self.env.create_node(api=True, meta=meta)
meta_clean_list = [
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
'max_speed': -100}]},
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
'current_speed': -100}]},
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
'current_speed': '100'}]},
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
'max_speed': 10.0}]},
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
'max_speed': '100'}]},
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
'current_speed': 10.0}]}
]
for nic_meta in meta_clean_list:
meta = self.env.default_metadata()
meta.update(nic_meta)
node_data = {'mac': node['mac'], 'meta': meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
expect_errors=True,
headers=self.default_headers
)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeHandler', kwargs={'obj_id': node['id']}),
headers=self.default_headers
)
ifaces = jsonutils.loads(resp.body)['meta']['interfaces']
self.assertEqual(
ifaces,
[
{'name': 'eth0', 'mac': '00:00:00',
'max_speed': None, 'current_speed': None}
]
)
def test_get_handler_without_NICs(self):
meta = self.env.default_metadata()
meta["interfaces"] = []
node = self.env.create_node(api=True, meta=meta)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
self.assertEqual(response, [])
def test_get_handler_with_NICs(self):
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [
{'name': 'eth0', 'mac': self.env.generate_random_mac(),
'current_speed': 1, 'max_speed': 1},
{'name': 'eth1', 'mac': self.env.generate_random_mac(),
'current_speed': 1, 'max_speed': 1}])
self.env.create_node(api=True, meta=meta)
node_db = self.env.nodes[0]
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node_db.id}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
self.assertItemsEqual(
map(lambda i: i['id'], response),
map(lambda i: i.id, node_db.interfaces)
)
for nic in meta['interfaces']:
filtered_nics = filter(
lambda i: i['mac'] == nic['mac'],
response
)
resp_nic = filtered_nics[0]
self.assertEqual(resp_nic['mac'], nic['mac'])
self.assertEqual(resp_nic['current_speed'], nic['current_speed'])
self.assertEqual(resp_nic['max_speed'], nic['max_speed'])
for conn in ('assigned_networks', ):
self.assertEqual(resp_nic[conn], [])
def test_NIC_updates_by_agent(self):
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [
{'name': 'eth0', 'mac': '12345', 'current_speed': 1,
'state': 'up'}])
node = self.env.create_node(api=True, meta=meta)
new_meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(new_meta, [
{'name': 'new_nic', 'mac': '12345', 'current_speed': 10,
'max_speed': 10, 'state': 'down'}])
node_data = {'mac': node['mac'], 'meta': new_meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
self.assertEqual(len(response), 1)
resp_nic = response[0]
nic = new_meta['interfaces'][0]
self.assertEqual(resp_nic['mac'], nic['mac'])
self.assertEqual(resp_nic['current_speed'], nic['current_speed'])
self.assertEqual(resp_nic['max_speed'], nic['max_speed'])
self.assertEqual(resp_nic['state'], nic['state'])
for conn in ('assigned_networks', ):
self.assertEqual(resp_nic[conn], [])
def test_NIC_adds_by_agent(self):
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [
{'name': 'eth0', 'mac': '12345', 'current_speed': 1,
'state': 'up'}])
node = self.env.create_node(api=True, meta=meta)
meta['interfaces'].append({'name': 'new_nic', 'mac': '643'})
node_data = {'mac': node['mac'], 'meta': meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
self.assertEqual(len(response), len(meta['interfaces']))
for nic in meta['interfaces']:
filtered_nics = filter(
lambda i: i['mac'] == nic['mac'],
response
)
resp_nic = filtered_nics[0]
self.assertEqual(resp_nic['mac'], nic['mac'])
self.assertEqual(resp_nic['current_speed'],
nic.get('current_speed'))
self.assertEqual(resp_nic['max_speed'], nic.get('max_speed'))
self.assertEqual(resp_nic['state'], nic.get('state'))
for conn in ('assigned_networks', ):
self.assertEqual(resp_nic[conn], [])
def test_ignore_NIC_id_in_meta(self):
fake_id = 'some_data'
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [
{'id': fake_id, 'name': 'eth0', 'mac': '12345'}])
node = self.env.create_node(api=True, meta=meta)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
self.assertNotEquals(response[0]['id'], fake_id)
def test_mac_address_should_be_in_lower_case(self):
meta = self.env.default_metadata()
new_mac = 'AA:BB:CC:DD:11:22'
self.env.set_interfaces_in_meta(meta, [
{'name': 'eth0', 'mac': new_mac}])
node = self.env.create_node(api=True, meta=meta)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
response = jsonutils.loads(resp.body)
self.assertEqual(response[0]['mac'], new_mac.lower())
def test_remove_assigned_interface(self):
def get_nodes():
resp = self.app.get(
reverse('NodeCollectionHandler',
kwargs={'cluster_id': self.env.clusters[0].id}),
headers=self.default_headers,
)
return jsonutils.loads(resp.body)
self.env.create(nodes_kwargs=[{'api': True}])
# check all possible handlers
for handler in ('NodeAgentHandler',
'NodeHandler',
'NodeCollectionHandler'):
# create node and check its availability
nodes_data = get_nodes()
self.assertEqual(len(nodes_data), 1)
# remove all interfaces except admin one
adm_eth = self.env.network_manager._get_interface_by_network_name(
nodes_data[0]['id'], 'fuelweb_admin')
ifaces = list(nodes_data[0]['meta']['interfaces'])
nodes_data[0]['meta']['interfaces'] = \
[i for i in ifaces if i['name'] == adm_eth.name]
# prepare put request
data = {
'id': nodes_data[0]['id'],
'meta': nodes_data[0]['meta'],
}
if handler in ('NodeCollectionHandler', ):
data = [data]
if handler in ('NodeHandler', ):
endpoint = reverse(handler, kwargs={'obj_id': data['id']})
else:
endpoint = reverse(handler)
self.app.put(
endpoint,
jsonutils.dumps(data),
headers=self.default_headers,
)
# check the node is visible to the API
nodes_data = get_nodes()
self.assertEqual(len(nodes_data), 1)
self.assertEqual(len(nodes_data[0]['meta']['interfaces']), 1)
# restore removed interfaces
nodes_data[0]['meta']['interfaces'] = ifaces
self.app.put(
reverse(
'NodeAgentHandler',
),
jsonutils.dumps({
'id': nodes_data[0]['id'],
'meta': nodes_data[0]['meta'],
}),
headers=self.default_headers,
)
# check node availability
nodes_data = get_nodes()
self.assertEqual(len(nodes_data), 1)
self.assertItemsEqual(nodes_data[0]['meta']['interfaces'], ifaces)
def test_change_mac_of_assigned_nics(self):
def get_nodes():
resp = self.app.get(
reverse('NodeCollectionHandler',
kwargs={'cluster_id': self.env.clusters[0].id}),
headers=self.default_headers,
)
return jsonutils.loads(resp.body)
meta = self.env.default_metadata()
meta["interfaces"] = [
{'name': 'eth0', 'mac': self.env.generate_random_mac()},
{'name': 'eth1', 'mac': self.env.generate_random_mac()},
{'name': 'eth2', 'mac': self.env.generate_random_mac()},
{'name': 'eth3', 'mac': self.env.generate_random_mac()},
{'name': 'eth4', 'mac': self.env.generate_random_mac()},
]
self.env.create(nodes_kwargs=[{'api': True, 'meta': meta}])
# check all possible handlers
for handler in ('NodeAgentHandler',
'NodeHandler',
'NodeCollectionHandler'):
# create node and check its availability
nodes_data = get_nodes()
self.assertEqual(len(nodes_data), 1)
# change mac address of interfaces except admin one
adm_eth = self.env.network_manager._get_interface_by_network_name(
nodes_data[0]['id'], 'fuelweb_admin')
for iface in nodes_data[0]['meta']['interfaces']:
if iface['name'] != adm_eth.name:
iface['mac'] = self.env.generate_random_mac()
# prepare put request
data = {
'id': nodes_data[0]['id'],
'meta': nodes_data[0]['meta'],
}
if handler in ('NodeCollectionHandler', ):
data = [data]
if handler in ('NodeHandler', ):
endpoint = reverse(handler, kwargs={'obj_id': data['id']})
else:
endpoint = reverse(handler)
self.app.put(
endpoint,
jsonutils.dumps(data),
headers=self.default_headers,
)
# check the node is visible to the API
nodes_data = get_nodes()
self.assertEqual(len(nodes_data), 1)
| apache-2.0

amenonsen/ansible | test/units/modules/network/junos/test_junos_ping.py | 48 | 4884 |
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch, MagicMock
from ansible.modules.network.junos import junos_ping
from units.modules.utils import set_module_args
from .junos_module import TestJunosModule, load_fixture
class TestJunosPingModule(TestJunosModule):
module = junos_ping
def setUp(self):
super(TestJunosPingModule, self).setUp()
self.mock_get_connection = patch('ansible.modules.network.junos.junos_ping.get_connection')
self.get_connection = self.mock_get_connection.start()
self.conn = self.get_connection()
self.conn.get = MagicMock()
def tearDown(self):
super(TestJunosPingModule, self).tearDown()
self.mock_get_connection.stop()
def test_junos_ping_expected_success(self):
set_module_args(dict(count=2, dest="10.10.10.10"))
self.conn.get = MagicMock(return_value=load_fixture('junos_ping_ping_10.10.10.10_count_2', content='str'))
result = self.execute_module()
self.assertEqual(result['commands'], 'ping 10.10.10.10 count 2')
def test_junos_ping_expected_failure(self):
set_module_args(dict(count=4, dest="10.10.10.20", state="absent"))
self.conn.get = MagicMock(return_value=load_fixture('junos_ping_ping_10.10.10.20_count_4', content='str'))
result = self.execute_module()
self.assertEqual(result['commands'], 'ping 10.10.10.20 count 4')
def test_junos_ping_unexpected_success(self):
''' Test for successful pings when destination should not be reachable - FAIL. '''
set_module_args(dict(count=2, dest="10.10.10.10", state="absent"))
self.conn.get = MagicMock(return_value=load_fixture('junos_ping_ping_10.10.10.10_count_2', content='str'))
self.execute_module(failed=True)
def test_junos_ping_unexpected_failure(self):
''' Test for unsuccessful pings when destination should be reachable - FAIL. '''
set_module_args(dict(count=4, dest="10.10.10.20"))
self.conn.get = MagicMock(return_value=load_fixture('junos_ping_ping_10.10.10.20_count_4', content='str'))
self.execute_module(failed=True)
def test_junos_ping_failure_stats(self):
'''Test for asserting stats when ping fails'''
set_module_args(dict(count=4, dest="10.10.10.20"))
self.conn.get = MagicMock(return_value=load_fixture('junos_ping_ping_10.10.10.20_count_4', content='str'))
result = self.execute_module(failed=True)
self.assertEqual(result['packet_loss'], '100%')
self.assertEqual(result['packets_rx'], 0)
self.assertEqual(result['packets_tx'], 4)
def test_junos_ping_success_stats(self):
set_module_args(dict(count=2, dest="10.10.10.10"))
self.conn.get = MagicMock(return_value=load_fixture('junos_ping_ping_10.10.10.10_count_2', content='str'))
result = self.execute_module()
self.assertEqual(result['commands'], 'ping 10.10.10.10 count 2')
self.assertEqual(result['packet_loss'], '0%')
self.assertEqual(result['packets_rx'], 2)
self.assertEqual(result['packets_tx'], 2)
self.assertEqual(result['rtt']['min'], 15.71)
self.assertEqual(result['rtt']['avg'], 16.87)
self.assertEqual(result['rtt']['max'], 18.04)
self.assertEqual(result['rtt']['stddev'], 1.165)
def test_junos_ping_success_stats_with_options(self):
set_module_args(dict(count=5, size=512, interval=2, dest="10.10.10.11"))
self.conn.get = MagicMock(return_value=load_fixture('junos_ping_ping_10.10.10.11_count_5_size_512_interval_2', content='str'))
result = self.execute_module()
self.assertEqual(result['commands'], 'ping 10.10.10.11 count 5 size 512 interval 2')
self.assertEqual(result['packet_loss'], '0%')
self.assertEqual(result['packets_rx'], 5)
self.assertEqual(result['packets_tx'], 5)
self.assertEqual(result['rtt']['min'], 18.71)
self.assertEqual(result['rtt']['avg'], 17.87)
self.assertEqual(result['rtt']['max'], 20.04)
self.assertEqual(result['rtt']['stddev'], 2.165)
| gpl-3.0

drawks/ansible | lib/ansible/modules/remote_management/redfish/idrac_redfish_facts.py | 10 | 4810 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Dell EMC Inc.
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: idrac_redfish_facts
version_added: "2.8"
short_description: Manages servers through iDRAC using Dell Redfish APIs
description:
- Builds Redfish URIs locally and sends them to remote iDRAC controllers to
get information back.
- For use with Dell iDRAC operations that require Redfish OEM extensions
options:
category:
required: true
description:
- Category to execute on iDRAC controller
command:
required: true
description:
- List of commands to execute on iDRAC controller
baseuri:
required: true
description:
- Base URI of iDRAC controller
username:
required: true
description:
- User for authentication with iDRAC controller
password:
required: true
description:
- Password for authentication with iDRAC controller
timeout:
description:
- Timeout in seconds for URL requests to OOB controller
default: 10
type: int
author: "Jose Delarosa (@jose-delarosa)"
'''
EXAMPLES = '''
- name: Get Manager attributes with a timeout of 20 seconds
idrac_redfish_facts:
category: Manager
command: GetManagerAttributes
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
timeout: 20
'''
RETURN = '''
msg:
description: different results depending on task
returned: always
type: dict
sample: List of Manager attributes
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.redfish_utils import RedfishUtils
from ansible.module_utils._text import to_native
class IdracRedfishUtils(RedfishUtils):
def get_manager_attributes(self):
result = {}
manager_attributes = {}
key = "Attributes"
response = self.get_request(self.root_uri + self.manager_uri + "/" + key)
if response['ret'] is False:
return response
result['ret'] = True
data = response['data']
if key not in data:
return {'ret': False, 'msg': "Key %s not found" % key}
for attribute in data[key].items():
manager_attributes[attribute[0]] = attribute[1]
result["entries"] = manager_attributes
return result
CATEGORY_COMMANDS_ALL = {
"Manager": ["GetManagerAttributes"]
}
def main():
result = {}
module = AnsibleModule(
argument_spec=dict(
category=dict(required=True),
command=dict(required=True, type='list'),
baseuri=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
timeout=dict(type='int', default=10)
),
supports_check_mode=False
)
category = module.params['category']
command_list = module.params['command']
# admin credentials used for authentication
creds = {'user': module.params['username'],
'pswd': module.params['password']}
# timeout
timeout = module.params['timeout']
# Build root URI
root_uri = "https://" + module.params['baseuri']
rf_uri = "/redfish/v1/"
rf_utils = IdracRedfishUtils(creds, root_uri, timeout)
# Check that Category is valid
if category not in CATEGORY_COMMANDS_ALL:
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
# Check that all commands are valid
for cmd in command_list:
# Fail if even one command given is invalid
if cmd not in CATEGORY_COMMANDS_ALL[category]:
module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
# Organize by Categories / Commands
if category == "Manager":
# execute only if we find a Manager resource
result = rf_utils._find_managers_resource(rf_uri)
if result['ret'] is False:
module.fail_json(msg=to_native(result['msg']))
for command in command_list:
if command == "GetManagerAttributes":
result = rf_utils.get_manager_attributes()
# Return data back or fail with proper message
if result['ret'] is True:
del result['ret']
module.exit_json(ansible_facts=dict(redfish_facts=result))
else:
module.fail_json(msg=to_native(result['msg']))
if __name__ == '__main__':
main()
| gpl-3.0

stuntman723/rap-analyzer | rap_analyzer/lib/python2.7/site-packages/django/contrib/gis/shortcuts.py | 197 | 1129 |
import zipfile
from io import BytesIO
from django.conf import settings
from django.http import HttpResponse
from django.template import loader
def compress_kml(kml):
"Returns compressed KMZ from the given KML string."
kmz = BytesIO()
zf = zipfile.ZipFile(kmz, 'a', zipfile.ZIP_DEFLATED)
zf.writestr('doc.kml', kml.encode(settings.DEFAULT_CHARSET))
zf.close()
kmz.seek(0)
return kmz.read()
def render_to_kml(*args, **kwargs):
"Renders the response as KML (using the correct MIME type)."
return HttpResponse(loader.render_to_string(*args, **kwargs),
content_type='application/vnd.google-earth.kml+xml')
def render_to_kmz(*args, **kwargs):
"""
Compresses the KML content and returns as KMZ (using the correct
MIME type).
"""
return HttpResponse(compress_kml(loader.render_to_string(*args, **kwargs)),
content_type='application/vnd.google-earth.kmz')
def render_to_text(*args, **kwargs):
"Renders the response using the MIME type for plain text."
return HttpResponse(loader.render_to_string(*args, **kwargs),
content_type='text/plain')
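# A minimal usage sketch (assumptions: a "placemarks.kml" template and a Place
# model exist; both names are hypothetical):
#
#   from django.contrib.gis.shortcuts import render_to_kmz
#
#   def placemarks_kmz(request):
#       # render the template to KML, compress it, and serve it with the
#       # application/vnd.google-earth.kmz content type
#       return render_to_kmz('placemarks.kml', {'places': Place.objects.all()})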
| mit

gcblue/gcblue | bin/Lib/test/test_xml_etree.py | 33 | 55012 |
# xml.etree test. This file contains enough tests to make sure that
# all included components work as they should.
# Large parts are extracted from the upstream test suite.
# IMPORTANT: the same doctests are run from "test_xml_etree_c" in
# order to ensure consistency between the C implementation and the
# Python implementation.
#
# For this purpose, the module-level "ET" symbol is temporarily
# monkey-patched when running the "test_xml_etree_c" test suite.
# Don't re-import "xml.etree.ElementTree" module in the docstring,
# except if the test is specific to the Python implementation.
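# A minimal sketch of that monkey-patching (assumptions: cElementTree is
# available and this module is importable as test.test_xml_etree):
#
#   from xml.etree import cElementTree as cET
#   from test import test_support, test_xml_etree
#   pyET = test_xml_etree.ET              # keep the pure-Python module around
#   test_xml_etree.ET = cET               # run the shared doctests against the C module
#   try:
#       test_support.run_doctest(test_xml_etree, verbosity=True)
#   finally:
#       test_xml_etree.ET = pyET          # restore the Python implementation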
import sys
import cgi
from test import test_support
from test.test_support import findfile
from xml.etree import ElementTree as ET
SIMPLE_XMLFILE = findfile("simple.xml", subdir="xmltestdata")
SIMPLE_NS_XMLFILE = findfile("simple-ns.xml", subdir="xmltestdata")
SAMPLE_XML = """\
<body>
<tag class='a'>text</tag>
<tag class='b' />
<section>
<tag class='b' id='inner'>subtext</tag>
</section>
</body>
"""
SAMPLE_SECTION = """\
<section>
<tag class='b' id='inner'>subtext</tag>
<nexttag />
<nextsection>
<tag />
</nextsection>
</section>
"""
SAMPLE_XML_NS = """
<body xmlns="http://effbot.org/ns">
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
"""
def sanity():
"""
Import sanity.
>>> from xml.etree import ElementTree
>>> from xml.etree import ElementInclude
>>> from xml.etree import ElementPath
"""
def check_method(method):
if not hasattr(method, '__call__'):
print method, "not callable"
def serialize(elem, to_string=True, **options):
import StringIO
file = StringIO.StringIO()
tree = ET.ElementTree(elem)
tree.write(file, **options)
if to_string:
return file.getvalue()
else:
file.seek(0)
return file
def summarize(elem):
if elem.tag == ET.Comment:
return "<Comment>"
return elem.tag
def summarize_list(seq):
return [summarize(elem) for elem in seq]
def normalize_crlf(tree):
for elem in tree.iter():
if elem.text:
elem.text = elem.text.replace("\r\n", "\n")
if elem.tail:
elem.tail = elem.tail.replace("\r\n", "\n")
def check_string(string):
len(string)
for char in string:
if len(char) != 1:
print "expected one-character string, got %r" % char
new_string = string + ""
new_string = string + " "
string[:0]
def check_mapping(mapping):
len(mapping)
keys = mapping.keys()
items = mapping.items()
for key in keys:
item = mapping[key]
mapping["key"] = "value"
if mapping["key"] != "value":
print "expected value string, got %r" % mapping["key"]
def check_element(element):
if not ET.iselement(element):
print "not an element"
if not hasattr(element, "tag"):
print "no tag member"
if not hasattr(element, "attrib"):
print "no attrib member"
if not hasattr(element, "text"):
print "no text member"
if not hasattr(element, "tail"):
print "no tail member"
check_string(element.tag)
check_mapping(element.attrib)
if element.text is not None:
check_string(element.text)
if element.tail is not None:
check_string(element.tail)
for elem in element:
check_element(elem)
# --------------------------------------------------------------------
# element tree tests
def interface():
r"""
Test element tree interface.
>>> element = ET.Element("tag")
>>> check_element(element)
>>> tree = ET.ElementTree(element)
>>> check_element(tree.getroot())
>>> element = ET.Element("t\xe4g", key="value")
>>> tree = ET.ElementTree(element)
>>> repr(element) # doctest: +ELLIPSIS
"<Element 't\\xe4g' at 0x...>"
>>> element = ET.Element("tag", key="value")
Make sure all standard element methods exist.
>>> check_method(element.append)
>>> check_method(element.extend)
>>> check_method(element.insert)
>>> check_method(element.remove)
>>> check_method(element.getchildren)
>>> check_method(element.find)
>>> check_method(element.iterfind)
>>> check_method(element.findall)
>>> check_method(element.findtext)
>>> check_method(element.clear)
>>> check_method(element.get)
>>> check_method(element.set)
>>> check_method(element.keys)
>>> check_method(element.items)
>>> check_method(element.iter)
>>> check_method(element.itertext)
>>> check_method(element.getiterator)
These methods return an iterable. See bug 6472.
>>> check_method(element.iter("tag").next)
>>> check_method(element.iterfind("tag").next)
>>> check_method(element.iterfind("*").next)
>>> check_method(tree.iter("tag").next)
>>> check_method(tree.iterfind("tag").next)
>>> check_method(tree.iterfind("*").next)
These aliases are provided:
>>> assert ET.XML == ET.fromstring
>>> assert ET.PI == ET.ProcessingInstruction
>>> assert ET.XMLParser == ET.XMLTreeBuilder
"""
def simpleops():
"""
Basic method sanity checks.
>>> elem = ET.XML("<body><tag/></body>")
>>> serialize(elem)
'<body><tag /></body>'
>>> e = ET.Element("tag2")
>>> elem.append(e)
>>> serialize(elem)
'<body><tag /><tag2 /></body>'
>>> elem.remove(e)
>>> serialize(elem)
'<body><tag /></body>'
>>> elem.insert(0, e)
>>> serialize(elem)
'<body><tag2 /><tag /></body>'
>>> elem.remove(e)
>>> elem.extend([e])
>>> serialize(elem)
'<body><tag /><tag2 /></body>'
>>> elem.remove(e)
>>> element = ET.Element("tag", key="value")
>>> serialize(element) # 1
'<tag key="value" />'
>>> subelement = ET.Element("subtag")
>>> element.append(subelement)
>>> serialize(element) # 2
'<tag key="value"><subtag /></tag>'
>>> element.insert(0, subelement)
>>> serialize(element) # 3
'<tag key="value"><subtag /><subtag /></tag>'
>>> element.remove(subelement)
>>> serialize(element) # 4
'<tag key="value"><subtag /></tag>'
>>> element.remove(subelement)
>>> serialize(element) # 5
'<tag key="value" />'
>>> element.remove(subelement)
Traceback (most recent call last):
ValueError: list.remove(x): x not in list
>>> serialize(element) # 6
'<tag key="value" />'
>>> element[0:0] = [subelement, subelement, subelement]
>>> serialize(element[1])
'<subtag />'
>>> element[1:9] == [element[1], element[2]]
True
>>> element[:9:2] == [element[0], element[2]]
True
>>> del element[1:2]
>>> serialize(element)
'<tag key="value"><subtag /><subtag /></tag>'
"""
def cdata():
"""
Test CDATA handling (etc).
>>> serialize(ET.XML("<tag>hello</tag>"))
'<tag>hello</tag>'
>>> serialize(ET.XML("<tag>hello</tag>"))
'<tag>hello</tag>'
>>> serialize(ET.XML("<tag><![CDATA[hello]]></tag>"))
'<tag>hello</tag>'
"""
# Only with Python implementation
def simplefind():
"""
Test find methods using the elementpath fallback.
>>> from xml.etree import ElementTree
>>> CurrentElementPath = ElementTree.ElementPath
>>> ElementTree.ElementPath = ElementTree._SimpleElementPath()
>>> elem = ElementTree.XML(SAMPLE_XML)
>>> elem.find("tag").tag
'tag'
>>> ElementTree.ElementTree(elem).find("tag").tag
'tag'
>>> elem.findtext("tag")
'text'
>>> elem.findtext("tog")
>>> elem.findtext("tog", "default")
'default'
>>> ElementTree.ElementTree(elem).findtext("tag")
'text'
>>> summarize_list(elem.findall("tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag']
Path syntax doesn't work in this case.
>>> elem.find("section/tag")
>>> elem.findtext("section/tag")
>>> summarize_list(elem.findall("section/tag"))
[]
>>> ElementTree.ElementPath = CurrentElementPath
"""
def find():
"""
Test find methods (including xpath syntax).
>>> elem = ET.XML(SAMPLE_XML)
>>> elem.find("tag").tag
'tag'
>>> ET.ElementTree(elem).find("tag").tag
'tag'
>>> elem.find("section/tag").tag
'tag'
>>> elem.find("./tag").tag
'tag'
>>> ET.ElementTree(elem).find("./tag").tag
'tag'
>>> ET.ElementTree(elem).find("/tag").tag
'tag'
>>> elem[2] = ET.XML(SAMPLE_SECTION)
>>> elem.find("section/nexttag").tag
'nexttag'
>>> ET.ElementTree(elem).find("section/tag").tag
'tag'
>>> ET.ElementTree(elem).find("tog")
>>> ET.ElementTree(elem).find("tog/foo")
>>> elem.findtext("tag")
'text'
>>> elem.findtext("section/nexttag")
''
>>> elem.findtext("section/nexttag", "default")
''
>>> elem.findtext("tog")
>>> elem.findtext("tog", "default")
'default'
>>> ET.ElementTree(elem).findtext("tag")
'text'
>>> ET.ElementTree(elem).findtext("tog/foo")
>>> ET.ElementTree(elem).findtext("tog/foo", "default")
'default'
>>> ET.ElementTree(elem).findtext("./tag")
'text'
>>> ET.ElementTree(elem).findtext("/tag")
'text'
>>> elem.findtext("section/tag")
'subtext'
>>> ET.ElementTree(elem).findtext("section/tag")
'subtext'
>>> summarize_list(elem.findall("."))
['body']
>>> summarize_list(elem.findall("tag"))
['tag', 'tag']
>>> summarize_list(elem.findall("tog"))
[]
>>> summarize_list(elem.findall("tog/foo"))
[]
>>> summarize_list(elem.findall("*"))
['tag', 'tag', 'section']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag', 'tag']
>>> summarize_list(elem.findall("section/tag"))
['tag']
>>> summarize_list(elem.findall("section//tag"))
['tag', 'tag']
>>> summarize_list(elem.findall("section/*"))
['tag', 'nexttag', 'nextsection']
>>> summarize_list(elem.findall("section//*"))
['tag', 'nexttag', 'nextsection', 'tag']
>>> summarize_list(elem.findall("section/.//*"))
['tag', 'nexttag', 'nextsection', 'tag']
>>> summarize_list(elem.findall("*/*"))
['tag', 'nexttag', 'nextsection']
>>> summarize_list(elem.findall("*//*"))
['tag', 'nexttag', 'nextsection', 'tag']
>>> summarize_list(elem.findall("*/tag"))
['tag']
>>> summarize_list(elem.findall("*/./tag"))
['tag']
>>> summarize_list(elem.findall("./tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag', 'tag']
>>> summarize_list(elem.findall("././tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag[@class]"))
['tag', 'tag', 'tag']
>>> summarize_list(elem.findall(".//tag[@class='a']"))
['tag']
>>> summarize_list(elem.findall(".//tag[@class='b']"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag[@id]"))
['tag']
>>> summarize_list(elem.findall(".//section[tag]"))
['section']
>>> summarize_list(elem.findall(".//section[element]"))
[]
>>> summarize_list(elem.findall("../tag"))
[]
>>> summarize_list(elem.findall("section/../tag"))
['tag', 'tag']
>>> summarize_list(ET.ElementTree(elem).findall("./tag"))
['tag', 'tag']
Following example is invalid in 1.2.
A leading '*' is assumed in 1.3.
>>> elem.findall("section//") == elem.findall("section//*")
True
ET's Path module handles this case incorrectly; this gives
a warning in 1.3, and the behaviour will be modified in 1.4.
>>> summarize_list(ET.ElementTree(elem).findall("/tag"))
['tag', 'tag']
>>> elem = ET.XML(SAMPLE_XML_NS)
>>> summarize_list(elem.findall("tag"))
[]
>>> summarize_list(elem.findall("{http://effbot.org/ns}tag"))
['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
>>> summarize_list(elem.findall(".//{http://effbot.org/ns}tag"))
['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
"""
def file_init():
"""
>>> import StringIO
>>> stringfile = StringIO.StringIO(SAMPLE_XML)
>>> tree = ET.ElementTree(file=stringfile)
>>> tree.find("tag").tag
'tag'
>>> tree.find("section/tag").tag
'tag'
>>> tree = ET.ElementTree(file=SIMPLE_XMLFILE)
>>> tree.find("element").tag
'element'
>>> tree.find("element/../empty-element").tag
'empty-element'
"""
def bad_find():
"""
Check bad or unsupported path expressions.
>>> elem = ET.XML(SAMPLE_XML)
>>> elem.findall("/tag")
Traceback (most recent call last):
SyntaxError: cannot use absolute path on element
"""
def path_cache():
"""
Check that the path cache behaves sanely.
>>> elem = ET.XML(SAMPLE_XML)
>>> for i in range(10): ET.ElementTree(elem).find('./'+str(i))
>>> cache_len_10 = len(ET.ElementPath._cache)
>>> for i in range(10): ET.ElementTree(elem).find('./'+str(i))
>>> len(ET.ElementPath._cache) == cache_len_10
True
>>> for i in range(20): ET.ElementTree(elem).find('./'+str(i))
>>> len(ET.ElementPath._cache) > cache_len_10
True
>>> for i in range(600): ET.ElementTree(elem).find('./'+str(i))
>>> len(ET.ElementPath._cache) < 500
True
"""
def copy():
"""
Test copy handling (etc).
>>> import copy
>>> e1 = ET.XML("<tag>hello<foo/></tag>")
>>> e2 = copy.copy(e1)
>>> e3 = copy.deepcopy(e1)
>>> e1.find("foo").tag = "bar"
>>> serialize(e1)
'<tag>hello<bar /></tag>'
>>> serialize(e2)
'<tag>hello<bar /></tag>'
>>> serialize(e3)
'<tag>hello<foo /></tag>'
"""
def attrib():
"""
Test attribute handling.
>>> elem = ET.Element("tag")
>>> elem.get("key") # 1.1
>>> elem.get("key", "default") # 1.2
'default'
>>> elem.set("key", "value")
>>> elem.get("key") # 1.3
'value'
>>> elem = ET.Element("tag", key="value")
>>> elem.get("key") # 2.1
'value'
>>> elem.attrib # 2.2
{'key': 'value'}
>>> attrib = {"key": "value"}
>>> elem = ET.Element("tag", attrib)
>>> attrib.clear() # check for aliasing issues
>>> elem.get("key") # 3.1
'value'
>>> elem.attrib # 3.2
{'key': 'value'}
>>> attrib = {"key": "value"}
>>> elem = ET.Element("tag", **attrib)
>>> attrib.clear() # check for aliasing issues
>>> elem.get("key") # 4.1
'value'
>>> elem.attrib # 4.2
{'key': 'value'}
>>> elem = ET.Element("tag", {"key": "other"}, key="value")
>>> elem.get("key") # 5.1
'value'
>>> elem.attrib # 5.2
{'key': 'value'}
>>> elem = ET.Element('test')
>>> elem.text = "aa"
>>> elem.set('testa', 'testval')
>>> elem.set('testb', 'test2')
>>> ET.tostring(elem)
'<test testa="testval" testb="test2">aa</test>'
>>> sorted(elem.keys())
['testa', 'testb']
>>> sorted(elem.items())
[('testa', 'testval'), ('testb', 'test2')]
>>> elem.attrib['testb']
'test2'
>>> elem.attrib['testb'] = 'test1'
>>> elem.attrib['testc'] = 'test2'
>>> ET.tostring(elem)
'<test testa="testval" testb="test1" testc="test2">aa</test>'
"""
def makeelement():
"""
Test makeelement handling.
>>> elem = ET.Element("tag")
>>> attrib = {"key": "value"}
>>> subelem = elem.makeelement("subtag", attrib)
>>> if subelem.attrib is attrib:
... print "attrib aliasing"
>>> elem.append(subelem)
>>> serialize(elem)
'<tag><subtag key="value" /></tag>'
>>> elem.clear()
>>> serialize(elem)
'<tag />'
>>> elem.append(subelem)
>>> serialize(elem)
'<tag><subtag key="value" /></tag>'
>>> elem.extend([subelem, subelem])
>>> serialize(elem)
'<tag><subtag key="value" /><subtag key="value" /><subtag key="value" /></tag>'
>>> elem[:] = [subelem]
>>> serialize(elem)
'<tag><subtag key="value" /></tag>'
>>> elem[:] = tuple([subelem])
>>> serialize(elem)
'<tag><subtag key="value" /></tag>'
"""
def parsefile():
"""
Test parsing from file.
>>> tree = ET.parse(SIMPLE_XMLFILE)
>>> normalize_crlf(tree)
>>> tree.write(sys.stdout)
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
>>> tree = ET.parse(SIMPLE_NS_XMLFILE)
>>> normalize_crlf(tree)
>>> tree.write(sys.stdout)
<ns0:root xmlns:ns0="namespace">
<ns0:element key="value">text</ns0:element>
<ns0:element>text</ns0:element>tail
<ns0:empty-element />
</ns0:root>
>>> with open(SIMPLE_XMLFILE) as f:
... data = f.read()
>>> parser = ET.XMLParser()
>>> parser.version # doctest: +ELLIPSIS
'Expat ...'
>>> parser.feed(data)
>>> print serialize(parser.close())
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
>>> parser = ET.XMLTreeBuilder() # 1.2 compatibility
>>> parser.feed(data)
>>> print serialize(parser.close())
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
>>> target = ET.TreeBuilder()
>>> parser = ET.XMLParser(target=target)
>>> parser.feed(data)
>>> print serialize(parser.close())
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
"""
def parseliteral():
"""
>>> element = ET.XML("<html><body>text</body></html>")
>>> ET.ElementTree(element).write(sys.stdout)
<html><body>text</body></html>
>>> element = ET.fromstring("<html><body>text</body></html>")
>>> ET.ElementTree(element).write(sys.stdout)
<html><body>text</body></html>
>>> sequence = ["<html><body>", "text</bo", "dy></html>"]
>>> element = ET.fromstringlist(sequence)
>>> print ET.tostring(element)
<html><body>text</body></html>
>>> print "".join(ET.tostringlist(element))
<html><body>text</body></html>
>>> ET.tostring(element, "ascii")
"<?xml version='1.0' encoding='ascii'?>\\n<html><body>text</body></html>"
>>> _, ids = ET.XMLID("<html><body>text</body></html>")
>>> len(ids)
0
>>> _, ids = ET.XMLID("<html><body id='body'>text</body></html>")
>>> len(ids)
1
>>> ids["body"].tag
'body'
"""
def iterparse():
"""
Test iterparse interface.
>>> iterparse = ET.iterparse
>>> context = iterparse(SIMPLE_XMLFILE)
>>> action, elem = next(context)
>>> print action, elem.tag
end element
>>> for action, elem in context:
... print action, elem.tag
end element
end empty-element
end root
>>> context.root.tag
'root'
>>> context = iterparse(SIMPLE_NS_XMLFILE)
>>> for action, elem in context:
... print action, elem.tag
end {namespace}element
end {namespace}element
end {namespace}empty-element
end {namespace}root
>>> events = ()
>>> context = iterparse(SIMPLE_XMLFILE, events)
>>> for action, elem in context:
... print action, elem.tag
>>> events = ()
>>> context = iterparse(SIMPLE_XMLFILE, events=events)
>>> for action, elem in context:
... print action, elem.tag
>>> events = ("start", "end")
>>> context = iterparse(SIMPLE_XMLFILE, events)
>>> for action, elem in context:
... print action, elem.tag
start root
start element
end element
start element
end element
start empty-element
end empty-element
end root
>>> events = ("start", "end", "start-ns", "end-ns")
>>> context = iterparse(SIMPLE_NS_XMLFILE, events)
>>> for action, elem in context:
... if action in ("start", "end"):
... print action, elem.tag
... else:
... print action, elem
start-ns ('', 'namespace')
start {namespace}root
start {namespace}element
end {namespace}element
start {namespace}element
end {namespace}element
start {namespace}empty-element
end {namespace}empty-element
end {namespace}root
end-ns None
>>> import StringIO
>>> events = ('start-ns', 'end-ns')
>>> context = ET.iterparse(StringIO.StringIO(r"<root xmlns=''/>"), events)
>>> for action, elem in context:
... print action, elem
start-ns ('', '')
end-ns None
>>> events = ("start", "end", "bogus")
>>> with open(SIMPLE_XMLFILE, "rb") as f:
... iterparse(f, events)
Traceback (most recent call last):
ValueError: unknown event 'bogus'
>>> source = StringIO.StringIO(
... "<?xml version='1.0' encoding='iso-8859-1'?>\\n"
... "<body xmlns='http://éffbot.org/ns'\\n"
... " xmlns:cl\\xe9='http://effbot.org/ns'>text</body>\\n")
>>> events = ("start-ns",)
>>> context = iterparse(source, events)
>>> for action, elem in context:
... print action, elem
start-ns ('', u'http://\\xe9ffbot.org/ns')
start-ns (u'cl\\xe9', 'http://effbot.org/ns')
>>> source = StringIO.StringIO("<document />junk")
>>> try:
... for action, elem in iterparse(source):
... print action, elem.tag
... except ET.ParseError, v:
... print v
end document
junk after document element: line 1, column 12
"""
def writefile():
"""
>>> elem = ET.Element("tag")
>>> elem.text = "text"
>>> serialize(elem)
'<tag>text</tag>'
>>> ET.SubElement(elem, "subtag").text = "subtext"
>>> serialize(elem)
'<tag>text<subtag>subtext</subtag></tag>'
Test tag suppression
>>> elem.tag = None
>>> serialize(elem)
'text<subtag>subtext</subtag>'
>>> elem.insert(0, ET.Comment("comment"))
>>> serialize(elem) # assumes 1.3
'text<!--comment--><subtag>subtext</subtag>'
>>> elem[0] = ET.PI("key", "value")
>>> serialize(elem)
'text<?key value?><subtag>subtext</subtag>'
"""
def custom_builder():
"""
Test parser w. custom builder.
>>> with open(SIMPLE_XMLFILE) as f:
... data = f.read()
>>> class Builder:
... def start(self, tag, attrib):
... print "start", tag
... def end(self, tag):
... print "end", tag
... def data(self, text):
... pass
>>> builder = Builder()
>>> parser = ET.XMLParser(target=builder)
>>> parser.feed(data)
start root
start element
end element
start element
end element
start empty-element
end empty-element
end root
>>> with open(SIMPLE_NS_XMLFILE) as f:
... data = f.read()
>>> class Builder:
... def start(self, tag, attrib):
... print "start", tag
... def end(self, tag):
... print "end", tag
... def data(self, text):
... pass
... def pi(self, target, data):
... print "pi", target, repr(data)
... def comment(self, data):
... print "comment", repr(data)
>>> builder = Builder()
>>> parser = ET.XMLParser(target=builder)
>>> parser.feed(data)
pi pi 'data'
comment ' comment '
start {namespace}root
start {namespace}element
end {namespace}element
start {namespace}element
end {namespace}element
start {namespace}empty-element
end {namespace}empty-element
end {namespace}root
"""
def getchildren():
"""
Test Element.getchildren()
>>> with open(SIMPLE_XMLFILE, "r") as f:
... tree = ET.parse(f)
>>> for elem in tree.getroot().iter():
... summarize_list(elem.getchildren())
['element', 'element', 'empty-element']
[]
[]
[]
>>> for elem in tree.getiterator():
... summarize_list(elem.getchildren())
['element', 'element', 'empty-element']
[]
[]
[]
>>> elem = ET.XML(SAMPLE_XML)
>>> len(elem.getchildren())
3
>>> len(elem[2].getchildren())
1
>>> elem[:] == elem.getchildren()
True
>>> child1 = elem[0]
>>> child2 = elem[2]
>>> del elem[1:2]
>>> len(elem.getchildren())
2
>>> child1 == elem[0]
True
>>> child2 == elem[1]
True
>>> elem[0:2] = [child2, child1]
>>> child2 == elem[0]
True
>>> child1 == elem[1]
True
>>> child1 == elem[0]
False
>>> elem.clear()
>>> elem.getchildren()
[]
"""
def writestring():
"""
>>> elem = ET.XML("<html><body>text</body></html>")
>>> ET.tostring(elem)
'<html><body>text</body></html>'
>>> elem = ET.fromstring("<html><body>text</body></html>")
>>> ET.tostring(elem)
'<html><body>text</body></html>'
"""
def check_encoding(encoding):
"""
>>> check_encoding("ascii")
>>> check_encoding("us-ascii")
>>> check_encoding("iso-8859-1")
>>> check_encoding("iso-8859-15")
>>> check_encoding("cp437")
>>> check_encoding("mac-roman")
>>> check_encoding("gbk")
Traceback (most recent call last):
ValueError: multi-byte encodings are not supported
>>> check_encoding("cp037")
Traceback (most recent call last):
ParseError: unknown encoding: line 1, column 30
"""
ET.XML("<?xml version='1.0' encoding='%s'?><xml />" % encoding)
def encoding():
r"""
Test encoding issues.
>>> elem = ET.Element("tag")
>>> elem.text = u"abc"
>>> serialize(elem)
'<tag>abc</tag>'
>>> serialize(elem, encoding="utf-8")
'<tag>abc</tag>'
>>> serialize(elem, encoding="us-ascii")
'<tag>abc</tag>'
>>> serialize(elem, encoding="iso-8859-1")
"<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>abc</tag>"
>>> elem.text = "<&\"\'>"
>>> serialize(elem)
'<tag><&"\'></tag>'
>>> serialize(elem, encoding="utf-8")
'<tag><&"\'></tag>'
>>> serialize(elem, encoding="us-ascii") # cdata characters
'<tag><&"\'></tag>'
>>> serialize(elem, encoding="iso-8859-1")
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag><&"\'></tag>'
>>> elem.attrib["key"] = "<&\"\'>"
>>> elem.text = None
>>> serialize(elem)
'<tag key="<&"\'>" />'
>>> serialize(elem, encoding="utf-8")
'<tag key="<&"\'>" />'
>>> serialize(elem, encoding="us-ascii")
'<tag key="<&"\'>" />'
>>> serialize(elem, encoding="iso-8859-1")
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="<&"\'>" />'
>>> elem.text = u'\xe5\xf6\xf6<>'
>>> elem.attrib.clear()
>>> serialize(elem)
'<tag>åöö<></tag>'
>>> serialize(elem, encoding="utf-8")
'<tag>\xc3\xa5\xc3\xb6\xc3\xb6<></tag>'
>>> serialize(elem, encoding="us-ascii")
'<tag>åöö<></tag>'
>>> serialize(elem, encoding="iso-8859-1")
"<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>\xe5\xf6\xf6<></tag>"
>>> elem.attrib["key"] = u'\xe5\xf6\xf6<>'
>>> elem.text = None
>>> serialize(elem)
'<tag key="åöö<>" />'
>>> serialize(elem, encoding="utf-8")
'<tag key="\xc3\xa5\xc3\xb6\xc3\xb6<>" />'
>>> serialize(elem, encoding="us-ascii")
'<tag key="åöö<>" />'
>>> serialize(elem, encoding="iso-8859-1")
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="\xe5\xf6\xf6<>" />'
"""
def methods():
r"""
Test serialization methods.
>>> e = ET.XML("<html><link/><script>1 < 2</script></html>")
>>> e.tail = "\n"
>>> serialize(e)
'<html><link /><script>1 < 2</script></html>\n'
>>> serialize(e, method=None)
'<html><link /><script>1 < 2</script></html>\n'
>>> serialize(e, method="xml")
'<html><link /><script>1 < 2</script></html>\n'
>>> serialize(e, method="html")
'<html><link><script>1 < 2</script></html>\n'
>>> serialize(e, method="text")
'1 < 2\n'
"""
def iterators():
"""
Test iterators.
>>> e = ET.XML("<html><body>this is a <i>paragraph</i>.</body>..</html>")
>>> summarize_list(e.iter())
['html', 'body', 'i']
>>> summarize_list(e.find("body").iter())
['body', 'i']
>>> summarize(next(e.iter()))
'html'
>>> "".join(e.itertext())
'this is a paragraph...'
>>> "".join(e.find("body").itertext())
'this is a paragraph.'
>>> next(e.itertext())
'this is a '
Method iterparse should return an iterator. See bug 6472.
>>> sourcefile = serialize(e, to_string=False)
>>> next(ET.iterparse(sourcefile)) # doctest: +ELLIPSIS
('end', <Element 'i' at 0x...>)
>>> tree = ET.ElementTree(None)
>>> tree.iter()
Traceback (most recent call last):
AttributeError: 'NoneType' object has no attribute 'iter'
"""
ENTITY_XML = """\
<!DOCTYPE points [
<!ENTITY % user-entities SYSTEM 'user-entities.xml'>
%user-entities;
]>
<document>&entity;</document>
"""
def entity():
"""
Test entity handling.
1) good entities
>>> e = ET.XML("<document title='舰'>test</document>")
>>> serialize(e)
'<document title="舰">test</document>'
2) bad entities
>>> ET.XML("<document>&entity;</document>")
Traceback (most recent call last):
ParseError: undefined entity: line 1, column 10
>>> ET.XML(ENTITY_XML)
Traceback (most recent call last):
ParseError: undefined entity &entity;: line 5, column 10
3) custom entity
>>> parser = ET.XMLParser()
>>> parser.entity["entity"] = "text"
>>> parser.feed(ENTITY_XML)
>>> root = parser.close()
>>> serialize(root)
'<document>text</document>'
"""
def error(xml):
"""
Test error handling.
>>> issubclass(ET.ParseError, SyntaxError)
True
>>> error("foo").position
(1, 0)
>>> error("<tag>&foo;</tag>").position
(1, 5)
>>> error("foobar<").position
(1, 6)
"""
try:
ET.XML(xml)
except ET.ParseError:
return sys.exc_value
def namespace():
"""
Test namespace issues.
1) xml namespace
>>> elem = ET.XML("<tag xml:lang='en' />")
>>> serialize(elem) # 1.1
'<tag xml:lang="en" />'
2) other "well-known" namespaces
>>> elem = ET.XML("<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' />")
>>> serialize(elem) # 2.1
'<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" />'
>>> elem = ET.XML("<html:html xmlns:html='http://www.w3.org/1999/xhtml' />")
>>> serialize(elem) # 2.2
'<html:html xmlns:html="http://www.w3.org/1999/xhtml" />'
>>> elem = ET.XML("<soap:Envelope xmlns:soap='http://schemas.xmlsoap.org/soap/envelope' />")
>>> serialize(elem) # 2.3
'<ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope" />'
3) unknown namespaces
>>> elem = ET.XML(SAMPLE_XML_NS)
>>> print serialize(elem)
<ns0:body xmlns:ns0="http://effbot.org/ns">
<ns0:tag>text</ns0:tag>
<ns0:tag />
<ns0:section>
<ns0:tag>subtext</ns0:tag>
</ns0:section>
</ns0:body>
"""
def qname():
"""
Test QName handling.
1) decorated tags
>>> elem = ET.Element("{uri}tag")
>>> serialize(elem) # 1.1
'<ns0:tag xmlns:ns0="uri" />'
>>> elem = ET.Element(ET.QName("{uri}tag"))
>>> serialize(elem) # 1.2
'<ns0:tag xmlns:ns0="uri" />'
>>> elem = ET.Element(ET.QName("uri", "tag"))
>>> serialize(elem) # 1.3
'<ns0:tag xmlns:ns0="uri" />'
>>> elem = ET.Element(ET.QName("uri", "tag"))
>>> subelem = ET.SubElement(elem, ET.QName("uri", "tag1"))
>>> subelem = ET.SubElement(elem, ET.QName("uri", "tag2"))
>>> serialize(elem) # 1.4
'<ns0:tag xmlns:ns0="uri"><ns0:tag1 /><ns0:tag2 /></ns0:tag>'
2) decorated attributes
>>> elem.clear()
>>> elem.attrib["{uri}key"] = "value"
>>> serialize(elem) # 2.1
'<ns0:tag xmlns:ns0="uri" ns0:key="value" />'
>>> elem.clear()
>>> elem.attrib[ET.QName("{uri}key")] = "value"
>>> serialize(elem) # 2.2
'<ns0:tag xmlns:ns0="uri" ns0:key="value" />'
3) decorated values are not converted by default, but the
QName wrapper can be used for values
>>> elem.clear()
>>> elem.attrib["{uri}key"] = "{uri}value"
>>> serialize(elem) # 3.1
'<ns0:tag xmlns:ns0="uri" ns0:key="{uri}value" />'
>>> elem.clear()
>>> elem.attrib["{uri}key"] = ET.QName("{uri}value")
>>> serialize(elem) # 3.2
'<ns0:tag xmlns:ns0="uri" ns0:key="ns0:value" />'
>>> elem.clear()
>>> subelem = ET.Element("tag")
>>> subelem.attrib["{uri1}key"] = ET.QName("{uri2}value")
>>> elem.append(subelem)
>>> elem.append(subelem)
>>> serialize(elem) # 3.3
'<ns0:tag xmlns:ns0="uri" xmlns:ns1="uri1" xmlns:ns2="uri2"><tag ns1:key="ns2:value" /><tag ns1:key="ns2:value" /></ns0:tag>'
4) Direct QName tests
>>> str(ET.QName('ns', 'tag'))
'{ns}tag'
>>> str(ET.QName('{ns}tag'))
'{ns}tag'
>>> q1 = ET.QName('ns', 'tag')
>>> q2 = ET.QName('ns', 'tag')
>>> q1 == q2
True
>>> q2 = ET.QName('ns', 'other-tag')
>>> q1 == q2
False
>>> q1 == 'ns:tag'
False
>>> q1 == '{ns}tag'
True
"""
def doctype_public():
"""
Test PUBLIC doctype.
>>> elem = ET.XML('<!DOCTYPE html PUBLIC'
... ' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
... ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
... '<html>text</html>')
"""
def xpath_tokenizer(p):
"""
Test the XPath tokenizer.
>>> # tests from the xml specification
>>> xpath_tokenizer("*")
['*']
>>> xpath_tokenizer("text()")
['text', '()']
>>> xpath_tokenizer("@name")
['@', 'name']
>>> xpath_tokenizer("@*")
['@', '*']
>>> xpath_tokenizer("para[1]")
['para', '[', '1', ']']
>>> xpath_tokenizer("para[last()]")
['para', '[', 'last', '()', ']']
>>> xpath_tokenizer("*/para")
['*', '/', 'para']
>>> xpath_tokenizer("/doc/chapter[5]/section[2]")
['/', 'doc', '/', 'chapter', '[', '5', ']', '/', 'section', '[', '2', ']']
>>> xpath_tokenizer("chapter//para")
['chapter', '//', 'para']
>>> xpath_tokenizer("//para")
['//', 'para']
>>> xpath_tokenizer("//olist/item")
['//', 'olist', '/', 'item']
>>> xpath_tokenizer(".")
['.']
>>> xpath_tokenizer(".//para")
['.', '//', 'para']
>>> xpath_tokenizer("..")
['..']
>>> xpath_tokenizer("../@lang")
['..', '/', '@', 'lang']
>>> xpath_tokenizer("chapter[title]")
['chapter', '[', 'title', ']']
>>> xpath_tokenizer("employee[@secretary and @assistant]")
['employee', '[', '@', 'secretary', '', 'and', '', '@', 'assistant', ']']
>>> # additional tests
>>> xpath_tokenizer("{http://spam}egg")
['{http://spam}egg']
>>> xpath_tokenizer("./spam.egg")
['.', '/', 'spam.egg']
>>> xpath_tokenizer(".//{http://spam}egg")
['.', '//', '{http://spam}egg']
"""
from xml.etree import ElementPath
out = []
for op, tag in ElementPath.xpath_tokenizer(p):
out.append(op or tag)
return out
def processinginstruction():
"""
Test ProcessingInstruction directly
>>> ET.tostring(ET.ProcessingInstruction('test', 'instruction'))
'<?test instruction?>'
>>> ET.tostring(ET.PI('test', 'instruction'))
'<?test instruction?>'
Issue #2746
>>> ET.tostring(ET.PI('test', '<testing&>'))
'<?test <testing&>?>'
>>> ET.tostring(ET.PI('test', u'<testing&>\xe3'), 'latin1')
"<?xml version='1.0' encoding='latin1'?>\\n<?test <testing&>\\xe3?>"
"""
#
# xinclude tests (samples from appendix C of the xinclude specification)
XINCLUDE = {}
XINCLUDE["C1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml"/>
</document>
"""
XINCLUDE["disclaimer.xml"] = """\
<?xml version='1.0'?>
<disclaimer>
<p>The opinions represented herein represent those of the individual
and should not be interpreted as official policy endorsed by this
organization.</p>
</disclaimer>
"""
XINCLUDE["C2.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been accessed
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""
XINCLUDE["count.txt"] = "324387"
XINCLUDE["C2b.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been <em>accessed</em>
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""
XINCLUDE["C3.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source of the "data.xml" resource:</p>
<example><xi:include href="data.xml" parse="text"/></example>
</document>
"""
XINCLUDE["data.xml"] = """\
<?xml version='1.0'?>
<data>
<item><![CDATA[Brooks & Shields]]></item>
</data>
"""
XINCLUDE["C5.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="example.txt" parse="text">
<xi:fallback>
<xi:include href="fallback-example.txt" parse="text">
<xi:fallback><a href="mailto:bob@example.org">Report error</a></xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</div>
"""
XINCLUDE["default.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>Example.</p>
<xi:include href="{}"/>
</document>
""".format(cgi.escape(SIMPLE_XMLFILE, True))
def xinclude_loader(href, parse="xml", encoding=None):
try:
data = XINCLUDE[href]
except KeyError:
raise IOError("resource not found")
if parse == "xml":
from xml.etree.ElementTree import XML
return XML(data)
return data
def xinclude():
r"""
Basic inclusion example (XInclude C.1)
>>> from xml.etree import ElementTree as ET
>>> from xml.etree import ElementInclude
>>> document = xinclude_loader("C1.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print serialize(document) # C1
<document>
<p>120 Mz is adequate for an average home user.</p>
<disclaimer>
<p>The opinions represented herein represent those of the individual
and should not be interpreted as official policy endorsed by this
organization.</p>
</disclaimer>
</document>
Textual inclusion example (XInclude C.2)
>>> document = xinclude_loader("C2.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print serialize(document) # C2
<document>
<p>This document has been accessed
324387 times.</p>
</document>
Textual inclusion after sibling element (based on modified XInclude C.2)
>>> document = xinclude_loader("C2b.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print(serialize(document)) # C2b
<document>
<p>This document has been <em>accessed</em>
324387 times.</p>
</document>
Textual inclusion of XML example (XInclude C.3)
>>> document = xinclude_loader("C3.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print serialize(document) # C3
<document>
<p>The following is the source of the "data.xml" resource:</p>
<example>&lt;?xml version='1.0'?&gt;
&lt;data&gt;
&lt;item&gt;&lt;![CDATA[Brooks &amp; Shields]]&gt;&lt;/item&gt;
&lt;/data&gt;
</example>
</document>
Fallback example (XInclude C.5)
Note! Fallback support is not yet implemented
>>> document = xinclude_loader("C5.xml")
>>> ElementInclude.include(document, xinclude_loader)
Traceback (most recent call last):
IOError: resource not found
>>> # print serialize(document) # C5
"""
def xinclude_default():
"""
>>> from xml.etree import ElementInclude
>>> document = xinclude_loader("default.xml")
>>> ElementInclude.include(document)
>>> print serialize(document) # default
<document>
<p>Example.</p>
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
</document>
"""
#
# badly formatted xi:include tags
XINCLUDE_BAD = {}
XINCLUDE_BAD["B1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml" parse="BAD_TYPE"/>
</document>
"""
XINCLUDE_BAD["B2.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:fallback></xi:fallback>
</div>
"""
def xinclude_failures():
r"""
Test failure to locate included XML file.
>>> from xml.etree import ElementInclude
>>> def none_loader(href, parser, encoding=None):
... return None
>>> document = ET.XML(XINCLUDE["C1.xml"])
>>> ElementInclude.include(document, loader=none_loader)
Traceback (most recent call last):
FatalIncludeError: cannot load 'disclaimer.xml' as 'xml'
Test failure to locate included text file.
>>> document = ET.XML(XINCLUDE["C2.xml"])
>>> ElementInclude.include(document, loader=none_loader)
Traceback (most recent call last):
FatalIncludeError: cannot load 'count.txt' as 'text'
Test bad parse type.
>>> document = ET.XML(XINCLUDE_BAD["B1.xml"])
>>> ElementInclude.include(document, loader=none_loader)
Traceback (most recent call last):
FatalIncludeError: unknown parse type in xi:include tag ('BAD_TYPE')
Test xi:fallback outside xi:include.
>>> document = ET.XML(XINCLUDE_BAD["B2.xml"])
>>> ElementInclude.include(document, loader=none_loader)
Traceback (most recent call last):
FatalIncludeError: xi:fallback tag must be child of xi:include ('{http://www.w3.org/2001/XInclude}fallback')
"""
# --------------------------------------------------------------------
# reported bugs
def bug_xmltoolkit21():
"""
marshaller gives obscure errors for non-string values
>>> elem = ET.Element(123)
>>> serialize(elem) # tag
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ET.Element("elem")
>>> elem.text = 123
>>> serialize(elem) # text
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ET.Element("elem")
>>> elem.tail = 123
>>> serialize(elem) # tail
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ET.Element("elem")
>>> elem.set(123, "123")
>>> serialize(elem) # attribute key
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ET.Element("elem")
>>> elem.set("123", 123)
>>> serialize(elem) # attribute value
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
"""
def bug_xmltoolkit25():
"""
typo in ElementTree.findtext
>>> elem = ET.XML(SAMPLE_XML)
>>> tree = ET.ElementTree(elem)
>>> tree.findtext("tag")
'text'
>>> tree.findtext("section/tag")
'subtext'
"""
def bug_xmltoolkit28():
"""
.//tag causes exceptions
>>> tree = ET.XML("<doc><table><tbody/></table></doc>")
>>> summarize_list(tree.findall(".//thead"))
[]
>>> summarize_list(tree.findall(".//tbody"))
['tbody']
"""
def bug_xmltoolkitX1():
"""
dump() doesn't flush the output buffer
>>> tree = ET.XML("<doc><table><tbody/></table></doc>")
>>> ET.dump(tree); sys.stdout.write("tail")
<doc><table><tbody /></table></doc>
tail
"""
def bug_xmltoolkit39():
"""
non-ascii element and attribute names don't work
>>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g />")
>>> ET.tostring(tree, "utf-8")
'<t\\xc3\\xa4g />'
>>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><tag \xe4ttr='välue' />")
>>> tree.attrib
{u'\\xe4ttr': u'v\\xe4lue'}
>>> ET.tostring(tree, "utf-8")
'<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />'
>>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g>text</t\xe4g>")
>>> ET.tostring(tree, "utf-8")
'<t\\xc3\\xa4g>text</t\\xc3\\xa4g>'
>>> tree = ET.Element(u"t\u00e4g")
>>> ET.tostring(tree, "utf-8")
'<t\\xc3\\xa4g />'
>>> tree = ET.Element("tag")
>>> tree.set(u"\u00e4ttr", u"v\u00e4lue")
>>> ET.tostring(tree, "utf-8")
'<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />'
"""
def bug_xmltoolkit54():
"""
problems handling internally defined entities
>>> e = ET.XML("<!DOCTYPE doc [<!ENTITY ldots '&#x8230;'>]><doc>&ldots;</doc>")
>>> serialize(e)
'<doc>&#33328;</doc>'
"""
def bug_xmltoolkit55():
"""
make sure we're reporting the first error, not the last
>>> e = ET.XML("<!DOCTYPE doc SYSTEM 'doc.dtd'><doc>&ldots;&ndots;&rdots;</doc>")
Traceback (most recent call last):
ParseError: undefined entity &ldots;: line 1, column 36
"""
class ExceptionFile:
def read(self, x):
raise IOError
def xmltoolkit60():
"""
Handle crash in stream source.
>>> tree = ET.parse(ExceptionFile())
Traceback (most recent call last):
IOError
"""
XMLTOOLKIT62_DOC = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE patent-application-publication SYSTEM "pap-v15-2001-01-31.dtd" []>
<patent-application-publication>
<subdoc-abstract>
<paragraph id="A-0001" lvl="0">A new cultivar of Begonia plant named &lsquo;BCT9801BEG&rsquo;.</paragraph>
</subdoc-abstract>
</patent-application-publication>"""
def xmltoolkit62():
"""
Don't crash when using custom entities.
>>> xmltoolkit62()
u'A new cultivar of Begonia plant named \u2018BCT9801BEG\u2019.'
"""
ENTITIES = {u'rsquo': u'\u2019', u'lsquo': u'\u2018'}
parser = ET.XMLTreeBuilder()
parser.entity.update(ENTITIES)
parser.feed(XMLTOOLKIT62_DOC)
t = parser.close()
return t.find('.//paragraph').text
def xmltoolkit63():
"""
Check reference leak.
>>> xmltoolkit63()
>>> count = sys.getrefcount(None)
>>> for i in range(1000):
... xmltoolkit63()
>>> sys.getrefcount(None) - count
0
"""
tree = ET.TreeBuilder()
tree.start("tag", {})
tree.data("text")
tree.end("tag")
# --------------------------------------------------------------------
def bug_200708_newline():
r"""
Preserve newlines in attributes.
>>> e = ET.Element('SomeTag', text="def _f():\n return 3\n")
>>> ET.tostring(e)
'<SomeTag text="def _f():&#10; return 3&#10;" />'
>>> ET.XML(ET.tostring(e)).get("text")
'def _f():\n return 3\n'
>>> ET.tostring(ET.XML(ET.tostring(e)))
'<SomeTag text="def _f():&#10; return 3&#10;" />'
"""
def bug_200708_close():
"""
Test default builder.
>>> parser = ET.XMLParser() # default
>>> parser.feed("<element>some text</element>")
>>> summarize(parser.close())
'element'
Test custom builder.
>>> class EchoTarget:
... def close(self):
... return ET.Element("element") # simulate root
>>> parser = ET.XMLParser(EchoTarget())
>>> parser.feed("<element>some text</element>")
>>> summarize(parser.close())
'element'
"""
def bug_200709_default_namespace():
"""
>>> e = ET.Element("{default}elem")
>>> s = ET.SubElement(e, "{default}elem")
>>> serialize(e, default_namespace="default") # 1
'<elem xmlns="default"><elem /></elem>'
>>> e = ET.Element("{default}elem")
>>> s = ET.SubElement(e, "{default}elem")
>>> s = ET.SubElement(e, "{not-default}elem")
>>> serialize(e, default_namespace="default") # 2
'<elem xmlns="default" xmlns:ns1="not-default"><elem /><ns1:elem /></elem>'
>>> e = ET.Element("{default}elem")
>>> s = ET.SubElement(e, "{default}elem")
>>> s = ET.SubElement(e, "elem") # unprefixed name
>>> serialize(e, default_namespace="default") # 3
Traceback (most recent call last):
ValueError: cannot use non-qualified names with default_namespace option
"""
def bug_200709_register_namespace():
"""
>>> ET.tostring(ET.Element("{http://namespace.invalid/does/not/exist/}title"))
'<ns0:title xmlns:ns0="http://namespace.invalid/does/not/exist/" />'
>>> ET.register_namespace("foo", "http://namespace.invalid/does/not/exist/")
>>> ET.tostring(ET.Element("{http://namespace.invalid/does/not/exist/}title"))
'<foo:title xmlns:foo="http://namespace.invalid/does/not/exist/" />'
And the Dublin Core namespace is in the default list:
>>> ET.tostring(ET.Element("{http://purl.org/dc/elements/1.1/}title"))
'<dc:title xmlns:dc="http://purl.org/dc/elements/1.1/" />'
"""
def bug_200709_element_comment():
"""
Not sure if this can be fixed, really (since the serializer needs
ET.Comment, not cET.comment).
>>> a = ET.Element('a')
>>> a.append(ET.Comment('foo'))
>>> a[0].tag == ET.Comment
True
>>> a = ET.Element('a')
>>> a.append(ET.PI('foo'))
>>> a[0].tag == ET.PI
True
"""
def bug_200709_element_insert():
"""
>>> a = ET.Element('a')
>>> b = ET.SubElement(a, 'b')
>>> c = ET.SubElement(a, 'c')
>>> d = ET.Element('d')
>>> a.insert(0, d)
>>> summarize_list(a)
['d', 'b', 'c']
>>> a.insert(-1, d)
>>> summarize_list(a)
['d', 'b', 'd', 'c']
"""
def bug_200709_iter_comment():
"""
>>> a = ET.Element('a')
>>> b = ET.SubElement(a, 'b')
>>> comment_b = ET.Comment("TEST-b")
>>> b.append(comment_b)
>>> summarize_list(a.iter(ET.Comment))
['<Comment>']
"""
def bug_18347():
"""
>>> e = ET.XML('<html><CamelCase>text</CamelCase></html>')
>>> serialize(e)
'<html><CamelCase>text</CamelCase></html>'
>>> serialize(e, method="html")
'<html><CamelCase>text</CamelCase></html>'
"""
# --------------------------------------------------------------------
# reported on bugs.python.org
def bug_1534630():
"""
>>> bob = ET.TreeBuilder()
>>> e = bob.data("data")
>>> e = bob.start("tag", {})
>>> e = bob.end("tag")
>>> e = bob.close()
>>> serialize(e)
'<tag />'
"""
def check_issue6233():
"""
>>> e = ET.XML("<?xml version='1.0' encoding='utf-8'?><body>t\\xc3\\xa3g</body>")
>>> ET.tostring(e, 'ascii')
"<?xml version='1.0' encoding='ascii'?>\\n<body>t&#227;g</body>"
>>> e = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><body>t\\xe3g</body>")
>>> ET.tostring(e, 'ascii')
"<?xml version='1.0' encoding='ascii'?>\\n<body>t&#227;g</body>"
"""
def check_issue3151():
"""
>>> e = ET.XML('<prefix:localname xmlns:prefix="${stuff}"/>')
>>> e.tag
'{${stuff}}localname'
>>> t = ET.ElementTree(e)
>>> ET.tostring(e)
'<ns0:localname xmlns:ns0="${stuff}" />'
"""
def check_issue6565():
"""
>>> elem = ET.XML("<body><tag/></body>")
>>> summarize_list(elem)
['tag']
>>> newelem = ET.XML(SAMPLE_XML)
>>> elem[:] = newelem[:]
>>> summarize_list(elem)
['tag', 'tag', 'section']
"""
def check_html_empty_elems_serialization(self):
# issue 15970
# from http://www.w3.org/TR/html401/index/elements.html
"""
>>> empty_elems = ['AREA', 'BASE', 'BASEFONT', 'BR', 'COL', 'FRAME', 'HR',
... 'IMG', 'INPUT', 'ISINDEX', 'LINK', 'META', 'PARAM']
>>> elems = ''.join('<%s />' % elem for elem in empty_elems)
>>> serialize(ET.XML('<html>%s</html>' % elems), method='html')
'<html><AREA><BASE><BASEFONT><BR><COL><FRAME><HR><IMG><INPUT><ISINDEX><LINK><META><PARAM></html>'
>>> serialize(ET.XML('<html>%s</html>' % elems.lower()), method='html')
'<html><area><base><basefont><br><col><frame><hr><img><input><isindex><link><meta><param></html>'
>>> elems = ''.join('<%s></%s>' % (elem, elem) for elem in empty_elems)
>>> serialize(ET.XML('<html>%s</html>' % elems), method='html')
'<html><AREA><BASE><BASEFONT><BR><COL><FRAME><HR><IMG><INPUT><ISINDEX><LINK><META><PARAM></html>'
>>> serialize(ET.XML('<html>%s</html>' % elems.lower()), method='html')
'<html><area><base><basefont><br><col><frame><hr><img><input><isindex><link><meta><param></html>'
"""
# --------------------------------------------------------------------
class CleanContext(object):
"""Provide default namespace mapping and path cache."""
checkwarnings = None
def __init__(self, quiet=False):
if sys.flags.optimize >= 2:
# under -OO, doctests cannot be run and therefore not all warnings
# will be emitted
quiet = True
deprecations = (
# Search behaviour is broken if search path starts with "/".
("This search is broken in 1.3 and earlier, and will be fixed "
"in a future version. If you rely on the current behaviour, "
"change it to '.+'", FutureWarning),
# Element.getchildren() and Element.getiterator() are deprecated.
("This method will be removed in future versions. "
"Use .+ instead.", DeprecationWarning),
("This method will be removed in future versions. "
"Use .+ instead.", PendingDeprecationWarning),
# XMLParser.doctype() is deprecated.
("This method of XMLParser is deprecated. Define doctype.. "
"method on the TreeBuilder target.", DeprecationWarning))
self.checkwarnings = test_support.check_warnings(*deprecations,
quiet=quiet)
def __enter__(self):
from xml.etree import ElementTree
self._nsmap = ElementTree._namespace_map
self._path_cache = ElementTree.ElementPath._cache
# Copy the default namespace mapping
ElementTree._namespace_map = self._nsmap.copy()
# Copy the path cache (should be empty)
ElementTree.ElementPath._cache = self._path_cache.copy()
self.checkwarnings.__enter__()
def __exit__(self, *args):
from xml.etree import ElementTree
# Restore mapping and path cache
ElementTree._namespace_map = self._nsmap
ElementTree.ElementPath._cache = self._path_cache
self.checkwarnings.__exit__(*args)
def test_main(module_name='xml.etree.ElementTree'):
from test import test_xml_etree
use_py_module = (module_name == 'xml.etree.ElementTree')
# The same doctests are used for both the Python and the C implementations
assert test_xml_etree.ET.__name__ == module_name
# XXX the C module should give the same warnings as the Python module
with CleanContext(quiet=not use_py_module):
test_support.run_doctest(test_xml_etree, verbosity=True)
# The module should not be changed by the tests
assert test_xml_etree.ET.__name__ == module_name
if __name__ == '__main__':
test_main()
|
bsd-3-clause
|
pubnative/redash
|
redash/handlers/widgets.py
|
3
|
2188
|
import json
from flask import request
from redash import models
from redash.permissions import require_permission, require_admin_or_owner, require_access, view_only
from redash.handlers.base import BaseResource
class WidgetListResource(BaseResource):
@require_permission('edit_dashboard')
def post(self):
widget_properties = request.get_json(force=True)
dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)
require_admin_or_owner(dashboard.user_id)
widget_properties['options'] = json.dumps(widget_properties['options'])
widget_properties.pop('id', None)
widget_properties['dashboard'] = dashboard
visualization_id = widget_properties.pop('visualization_id')
if visualization_id:
visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)
require_access(visualization.query.groups, self.current_user, view_only)
else:
visualization = None
widget_properties['visualization'] = visualization
widget = models.Widget.create(**widget_properties)
layout = json.loads(widget.dashboard.layout)
new_row = True
if len(layout) == 0 or widget.width == 2:
layout.append([widget.id])
elif len(layout[-1]) == 1:
neighbour_widget = models.Widget.get(models.Widget.id == layout[-1][0])
if neighbour_widget.width == 1:
layout[-1].append(widget.id)
new_row = False
else:
layout.append([widget.id])
else:
layout.append([widget.id])
widget.dashboard.layout = json.dumps(layout)
widget.dashboard.save()
return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row}
class WidgetResource(BaseResource):
@require_permission('edit_dashboard')
def delete(self, widget_id):
widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)
require_admin_or_owner(widget.dashboard.user_id)
widget.delete_instance()
return {'layout': widget.dashboard.layout}
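# Illustrative sketch (not part of the handler above): WidgetListResource.post
# packs widgets into rows of at most two columns. A full-width widget
# (width == 2) always starts a new row; a half-width widget joins the last row
# only when that row currently holds a single half-width widget. The same rule,
# written against plain values instead of ORM objects:
def _pack_widget(layout, widget_id, widget_width, last_widget_width):
    """Return (layout, new_row) after placing widget_id; widths are 1 or 2."""
    if len(layout) == 0 or widget_width == 2:
        layout.append([widget_id])
        return layout, True
    if len(layout[-1]) == 1 and last_widget_width == 1:
        layout[-1].append(widget_id)
        return layout, False
    layout.append([widget_id])
    return layout, True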
|
bsd-2-clause
|
barbarubra/Don-t-know-What-i-m-doing.
|
python/src/Lib/test/test_typechecks.py
|
58
|
3078
|
"""Unit tests for __instancecheck__ and __subclasscheck__."""
import unittest
from test import test_support
class ABC(type):
def __instancecheck__(cls, inst):
"""Implement isinstance(inst, cls)."""
return any(cls.__subclasscheck__(c)
for c in set([type(inst), inst.__class__]))
def __subclasscheck__(cls, sub):
"""Implement issubclass(sub, cls)."""
candidates = cls.__dict__.get("__subclass__", set()) | set([cls])
return any(c in candidates for c in sub.mro())
class Integer:
__metaclass__ = ABC
__subclass__ = set([int])
class SubInt(Integer):
pass
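# Illustrative sketch (not part of the original test module): the ABC metaclass
# above routes the isinstance()/issubclass() builtins through __instancecheck__
# and __subclasscheck__, so plain ints count as "virtual" instances of Integer
# even though int does not inherit from it. Not collected by unittest.
def _demo_virtual_subclassing():
    assert isinstance(42, Integer)        # int is listed in Integer.__subclass__
    assert issubclass(int, Integer)       # same lookup, via __subclasscheck__
    assert not isinstance(3.14, Integer)  # float was never registered
    assert issubclass(SubInt, Integer)    # ordinary inheritance still works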
class Evil:
def __instancecheck__(self, inst): return False
class TypeChecksTest(unittest.TestCase):
def testIsSubclassInternal(self):
self.assertEqual(Integer.__subclasscheck__(int), True)
self.assertEqual(Integer.__subclasscheck__(float), False)
def testIsSubclassBuiltin(self):
self.assertEqual(issubclass(int, Integer), True)
self.assertEqual(issubclass(int, (Integer,)), True)
self.assertEqual(issubclass(float, Integer), False)
self.assertEqual(issubclass(float, (Integer,)), False)
def testIsInstanceBuiltin(self):
self.assertEqual(isinstance(42, Integer), True)
self.assertEqual(isinstance(42, (Integer,)), True)
self.assertEqual(isinstance(3.14, Integer), False)
self.assertEqual(isinstance(3.14, (Integer,)), False)
def testIsInstanceActual(self):
self.assertEqual(isinstance(Integer(), Integer), True)
self.assertEqual(isinstance(Integer(), (Integer,)), True)
def testIsSubclassActual(self):
self.assertEqual(issubclass(Integer, Integer), True)
self.assertEqual(issubclass(Integer, (Integer,)), True)
def testSubclassBehavior(self):
self.assertEqual(issubclass(SubInt, Integer), True)
self.assertEqual(issubclass(SubInt, (Integer,)), True)
self.assertEqual(issubclass(SubInt, SubInt), True)
self.assertEqual(issubclass(SubInt, (SubInt,)), True)
self.assertEqual(issubclass(Integer, SubInt), False)
self.assertEqual(issubclass(Integer, (SubInt,)), False)
self.assertEqual(issubclass(int, SubInt), False)
self.assertEqual(issubclass(int, (SubInt,)), False)
self.assertEqual(isinstance(SubInt(), Integer), True)
self.assertEqual(isinstance(SubInt(), (Integer,)), True)
self.assertEqual(isinstance(SubInt(), SubInt), True)
self.assertEqual(isinstance(SubInt(), (SubInt,)), True)
self.assertEqual(isinstance(42, SubInt), False)
self.assertEqual(isinstance(42, (SubInt,)), False)
def testInfiniteRecursionCaughtProperly(self):
e = Evil()
# This invokes isinstance() recursively, until the stack is exhausted.
self.assertRaises(RuntimeError, isinstance, e, Evil)
# XXX How to check the same situation for issubclass()?
def test_main():
test_support.run_unittest(TypeChecksTest)
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
EloYGomeZ/android-kenel-3.4.0
|
tools/perf/scripts/python/futex-contention.py
|
11261
|
1486
|
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
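# Illustrative sketch (not part of the perf script): the two handlers above use
# an "open interval" pattern -- record a start timestamp per tid on FUTEX_WAIT
# entry, then on syscall exit fold the elapsed time into per-(tid, lock) stats.
# The same bookkeeping with plain dicts, independent of the perf runtime:
def _demo_contention_bookkeeping():
    start = {}   # tid -> (lock address, enter timestamp)
    waits = {}   # (tid, lock) -> (total elapsed, sample count)
    def on_enter(tid, lock, t):
        start[tid] = (lock, t)
    def on_exit(tid, t):
        if tid in start:
            lock, t0 = start.pop(tid)
            total, count = waits.get((tid, lock), (0, 0))
            waits[(tid, lock)] = (total + (t - t0), count + 1)
    on_enter(1234, 0x7f00dead, 100)
    on_exit(1234, 850)
    return waits  # {(1234, 0x7f00dead): (750, 1)}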
|
gpl-2.0
|
gauribhoite/personfinder
|
env/google_appengine/lib/django-1.4/setup.py
|
8
|
4020
|
from distutils.core import setup
from distutils.command.install_data import install_data
from distutils.command.install import INSTALL_SCHEMES
import os
import sys
class osx_install_data(install_data):
# On MacOS, the platform-specific lib dir is /System/Library/Framework/Python/.../
# which is wrong. Python 2.5 supplied with MacOS 10.5 has an Apple-specific fix
# for this in distutils.command.install_data#306. It fixes install_lib but not
# install_data, which is why we roll our own install_data class.
def finalize_options(self):
# By the time finalize_options is called, install.install_lib is set to the
# fixed directory, so we set the installdir to install_lib. The
# install_data class uses ('install_data', 'install_dir') instead.
self.set_undefined_options('install', ('install_lib', 'install_dir'))
install_data.finalize_options(self)
if sys.platform == "darwin":
cmdclasses = {'install_data': osx_install_data}
else:
cmdclasses = {'install_data': install_data}
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join) in a
platform-neutral way.
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
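# Illustrative example (not part of the original script): fullsplit() is the
# platform-neutral inverse of os.path.join(). The helper below only documents
# the expected behaviour and is never invoked during installation.
def _fullsplit_example():
    parts = fullsplit(os.path.join('django', 'core', 'management'))
    assert parts == ['django', 'core', 'management']
    return parts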
# Tell distutils not to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
django_dir = 'django'
for dirpath, dirnames, filenames in os.walk(django_dir):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
if '__init__.py' in filenames:
packages.append('.'.join(fullsplit(dirpath)))
elif filenames:
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
# Small hack for working with bdist_wininst.
# See http://mail.python.org/pipermail/distutils-sig/2004-August/004134.html
if len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst':
for file_info in data_files:
file_info[0] = '\\PURELIB\\%s' % file_info[0]
# Dynamically calculate the version based on django.VERSION.
version = __import__('django').get_version()
setup(
name = "Django",
version = version,
url = 'http://www.djangoproject.com/',
author = 'Django Software Foundation',
author_email = 'foundation@djangoproject.com',
description = 'A high-level Python Web framework that encourages rapid development and clean, pragmatic design.',
download_url = 'https://www.djangoproject.com/m/releases/1.4/Django-1.4.13.tar.gz',
packages = packages,
cmdclass = cmdclasses,
data_files = data_files,
scripts = ['django/bin/django-admin.py'],
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
apache-2.0
|
openbadges/badgecheck
|
openbadges/verifier/actions/input.py
|
3
|
1306
|
from .action_types import SET_INPUT_TYPE, SET_PROFILE_ID, STORE_INPUT, STORE_ORIGINAL_RESOURCE
def store_input(badge_input):
"""
Emits an action that encapsulates user input provided and instructs that it
be stored in the state.
:param badge_input: string
:return: dict
"""
return {
'type': STORE_INPUT,
'input': badge_input
}
def set_input_type(type_string):
"""
Emits an action that indicates the type of input provided.
Options: "file", "json", "jws", "url"
:param type_string: string
:return: dict
"""
if type_string not in ['file', 'json', 'jws', 'url']:
raise TypeError("Only 'file', 'json', 'jws' or 'url' input types supported.")
return {
'type': SET_INPUT_TYPE,
'input_type': type_string
}
def store_expected_profile_id(profile_id):
return {
'type': SET_PROFILE_ID,
'node_id': profile_id
}
def store_original_resource(node_id, data=None, file=None):
"""
Store a fetched blob of JSON, a JWS string, or an image file
:param data: string
:param file: file-like object
:param node_id: string
:return: dict
"""
return {
'type': STORE_ORIGINAL_RESOURCE,
'data': data,
'file': file,
'node_id': node_id
}
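# Illustrative sketch (not part of the package): the functions above are plain
# Redux-style action creators returning dicts, so preparing input for a
# hypothetical hosted assertion URL is just a matter of composing them:
def _demo_input_actions():
    badge_url = 'https://example.org/assertions/1'  # hypothetical URL
    return [
        store_input(badge_url),
        set_input_type('url'),
        store_original_resource(node_id=badge_url, data='{}'),
    ]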
|
apache-2.0
|
rgerkin/neuroConstruct
|
lib/jython/Lib/wsgiref/headers.py
|
229
|
5879
|
"""Manage HTTP Response Headers
Much of this module is red-handedly pilfered from email.message in the stdlib,
so portions are Copyright (C) 2001,2002 Python Software Foundation, and were
written by Barry Warsaw.
"""
from types import ListType, TupleType
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
import re
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
class Headers:
"""Manage a collection of HTTP response headers"""
def __init__(self,headers):
if type(headers) is not ListType:
raise TypeError("Headers must be a list of name/value tuples")
self._headers = headers
def __len__(self):
"""Return the total number of headers, including duplicates."""
return len(self._headers)
def __setitem__(self, name, val):
"""Set the value of a header."""
del self[name]
self._headers.append((name, val))
def __delitem__(self,name):
"""Delete all occurrences of a header, if present.
Does *not* raise an exception if the header is missing.
"""
name = name.lower()
self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]
def __getitem__(self,name):
"""Get the first header value for 'name'
Return None if the header is missing instead of raising an exception.
Note that if the header appeared multiple times, exactly which
occurrence gets returned is undefined. Use get_all() to get all
the values matching a header field name.
"""
return self.get(name)
def has_key(self, name):
"""Return true if the message contains the header."""
return self.get(name) is not None
__contains__ = has_key
def get_all(self, name):
"""Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original header
list or were added to this instance, and may contain duplicates. Any
fields deleted and re-inserted are always appended to the header list.
If no fields exist with the given name, returns an empty list.
"""
name = name.lower()
return [kv[1] for kv in self._headers if kv[0].lower()==name]
def get(self,name,default=None):
"""Get the first header value for 'name', or return 'default'"""
name = name.lower()
for k,v in self._headers:
if k.lower()==name:
return v
return default
def keys(self):
"""Return a list of all the header field names.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [k for k, v in self._headers]
def values(self):
"""Return a list of all header values.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [v for k, v in self._headers]
def items(self):
"""Get all the header fields and values.
These will be sorted in the order they were in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return self._headers[:]
def __repr__(self):
return "Headers(%r)" % self._headers
def __str__(self):
"""str() returns the formatted headers, complete with end line,
suitable for direct HTTP transmission."""
return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['',''])
def setdefault(self,name,value):
"""Return first matching header value for 'name', or 'value'
If there is no header named 'name', add a new header with name 'name'
and value 'value'."""
result = self.get(name)
if result is None:
self._headers.append((name,value))
return value
else:
return result
def add_header(self, _name, _value, **_params):
"""Extended header setting.
_name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added.
Example:
h.add_header('content-disposition', 'attachment', filename='bud.gif')
Note that unlike the corresponding 'email.message' method, this does
*not* handle '(charset, language, value)' tuples: all values must be
strings or None.
"""
parts = []
if _value is not None:
parts.append(_value)
for k, v in _params.items():
if v is None:
parts.append(k.replace('_', '-'))
else:
parts.append(_formatparam(k.replace('_', '-'), v))
self._headers.append((_name, "; ".join(parts)))
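# Illustrative usage sketch (not part of the module): Headers wraps the very
# list of (name, value) tuples handed to a WSGI start_response() call, so
# mutations made through the mapping API show up in the underlying list.
def _demo_headers_usage():
    response_headers = [('Content-Type', 'text/plain')]
    h = Headers(response_headers)
    h['Content-Length'] = '42'
    h.add_header('Content-Disposition', 'attachment', filename='bud.gif')
    assert h['content-type'] == 'text/plain'   # lookups are case-insensitive
    assert len(response_headers) == 3          # the wrapped list was updated in place
    return str(h)                              # formatted header block, ends with a blank line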
|
gpl-2.0
|
youfou/wxpy
|
tests/api/messages/test_message.py
|
1
|
2800
|
from datetime import datetime
from tests.conftest import wait_for_message
from wxpy import *
def sent_message(sent_msg, msg_type, receiver):
assert isinstance(sent_msg, SentMessage)
assert sent_msg.type == msg_type
assert sent_msg.receiver == receiver
assert sent_msg.bot == receiver.bot
assert sent_msg.sender == receiver.bot.self
assert isinstance(sent_msg.receive_time, datetime)
assert isinstance(sent_msg.create_time, datetime)
assert sent_msg.create_time < sent_msg.receive_time
class TestMessage:
def test_text_message(self, group, friend):
sent_message(group.send('text'), TEXT, group)
msg = wait_for_message(group, TEXT)
assert isinstance(msg, Message)
assert msg.type == TEXT
assert msg.text == 'Hello!'
assert not msg.is_at
assert msg.chat == group
assert msg.sender == group
assert msg.receiver == group.self
assert msg.member == friend
assert 0 < msg.latency < 30
group.send('at')
msg = wait_for_message(group, TEXT)
assert msg.is_at
def test_picture_message(self, group, image_path):
sent = group.send_image(image_path)
sent_message(sent, PICTURE, group)
assert sent.path == image_path
def test_video_message(self, group, video_path):
sent = group.send_video(video_path)
sent_message(sent, VIDEO, group)
assert sent.path == video_path
def test_raw_message(self, group):
# send a contact card as a raw message
raw_type = 42
raw_content = '<msg username="{}" nickname="{}"/>'.format('wxpy_bot', 'wxpy 机器人')
sent_message(group.send_raw_msg(raw_type, raw_content), None, group)
def test_send(self, friend, file_path, image_path, video_path):
text_to_send = 'test sending text'
sent = friend.send(text_to_send)
sent_message(sent, TEXT, friend)
assert sent.text == text_to_send
sent = friend.send('@fil@{}'.format(file_path))
sent_message(sent, ATTACHMENT, friend)
assert sent.path == file_path
sent = friend.send('@img@{}'.format(image_path))
sent_message(sent, PICTURE, friend)
assert sent.path == image_path
sent = friend.send('@vid@{}'.format(video_path))
sent_message(sent, VIDEO, friend)
assert sent.path == video_path
# send a contact card as a raw message
raw_type = 42
raw_content = '<msg username="{}" nickname="{}"/>'.format('wxpy_bot', 'wxpy 机器人')
uri = '/webwxsendmsg'
sent = friend.send_raw_msg(raw_type, raw_content)
sent_message(sent, None, friend)
assert sent.type is None
assert sent.raw_type == raw_type
assert sent.raw_content == raw_content
assert sent.uri == uri
|
mit
|
bcorwin/twilio-python
|
twilio/rest/exceptions.py
|
40
|
2547
|
# -*- coding: utf-8 -*-
import sys
from six import u
# Backwards compatibility.
from ..version import __version__, __version_info__
from ..exceptions import TwilioException
class TwilioRestException(TwilioException):
""" A generic 400 or 500 level exception from the Twilio API
:param int status: the HTTP status that was returned for the exception
:param str uri: The URI that caused the exception
:param str msg: A human-readable message for the error
:param str method: The HTTP method used to make the request
:param int|None code: A Twilio-specific error code for the error. This is
not available for all errors.
"""
def __init__(self, status, uri, msg="", code=None, method='GET'):
self.uri = uri
self.status = status
self.msg = msg
self.code = code
self.method = method
def __str__(self):
""" Try to pretty-print the exception, if this is going on screen. """
def red(words):
return u("\033[31m\033[49m%s\033[0m") % words
def white(words):
return u("\033[37m\033[49m%s\033[0m") % words
def blue(words):
return u("\033[34m\033[49m%s\033[0m") % words
def teal(words):
return u("\033[36m\033[49m%s\033[0m") % words
def get_uri(code):
return "https://www.twilio.com/docs/errors/{0}".format(code)
# If it makes sense to print a human readable error message, try to
# do it. The one problem is that someone might catch this error and
# try to display the message from it to an end user.
if hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
msg = (
"\n{red_error} {request_was}\n\n{http_line}"
"\n\n{twilio_returned}\n\n{message}\n".format(
red_error=red("HTTP Error"),
request_was=white("Your request was:"),
http_line=teal("%s %s" % (self.method, self.uri)),
twilio_returned=white(
"Twilio returned the following information:"),
message=blue(str(self.msg))
))
if self.code:
msg = "".join([msg, "\n{more_info}\n\n{uri}\n\n".format(
more_info=white("More information may be available here:"),
uri=blue(get_uri(self.code))),
])
return msg
else:
return "HTTP {0} error: {1}".format(self.status, self.msg)
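# Illustrative sketch (not part of the library): building the exception directly
# shows the plain fallback message used when sys.stderr is not a terminal; the
# ANSI-colored block above only appears in interactive sessions. The URI and
# error code here are made-up sample values.
def _demo_exception_message():
    exc = TwilioRestException(status=404, uri='/2010-04-01/Accounts/AC123/Calls',
                              msg='The requested resource was not found',
                              code=20404, method='GET')
    return str(exc)  # "HTTP 404 error: The requested resource was not found"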
|
mit
|
Immortalin/python-for-android
|
python3-alpha/python3-src/Lib/test/test_bytes.py
|
48
|
47606
|
"""Unit tests for the bytes and bytearray types.
XXX This is a mess. Common tests should be moved to buffer_tests.py,
which itself ought to be unified with string_tests.py (and the latter
should be modernized).
"""
import os
import re
import sys
import copy
import functools
import pickle
import tempfile
import unittest
import test.support
import test.string_tests
import test.buffer_tests
if sys.flags.bytes_warning:
def check_bytes_warnings(func):
@functools.wraps(func)
def wrapper(*args, **kw):
with test.support.check_warnings(('', BytesWarning)):
return func(*args, **kw)
return wrapper
else:
# no-op
def check_bytes_warnings(func):
return func
class Indexable:
def __init__(self, value=0):
self.value = value
def __index__(self):
return self.value
class BaseBytesTest(unittest.TestCase):
def test_basics(self):
b = self.type2test()
self.assertEqual(type(b), self.type2test)
self.assertEqual(b.__class__, self.type2test)
def test_copy(self):
a = self.type2test(b"abcd")
for copy_method in (copy.copy, copy.deepcopy):
b = copy_method(a)
self.assertEqual(a, b)
self.assertEqual(type(a), type(b))
def test_empty_sequence(self):
b = self.type2test()
self.assertEqual(len(b), 0)
self.assertRaises(IndexError, lambda: b[0])
self.assertRaises(IndexError, lambda: b[1])
self.assertRaises(IndexError, lambda: b[sys.maxsize])
self.assertRaises(IndexError, lambda: b[sys.maxsize+1])
self.assertRaises(IndexError, lambda: b[10**100])
self.assertRaises(IndexError, lambda: b[-1])
self.assertRaises(IndexError, lambda: b[-2])
self.assertRaises(IndexError, lambda: b[-sys.maxsize])
self.assertRaises(IndexError, lambda: b[-sys.maxsize-1])
self.assertRaises(IndexError, lambda: b[-sys.maxsize-2])
self.assertRaises(IndexError, lambda: b[-10**100])
def test_from_list(self):
ints = list(range(256))
b = self.type2test(i for i in ints)
self.assertEqual(len(b), 256)
self.assertEqual(list(b), ints)
def test_from_index(self):
b = self.type2test([Indexable(), Indexable(1), Indexable(254),
Indexable(255)])
self.assertEqual(list(b), [0, 1, 254, 255])
self.assertRaises(ValueError, self.type2test, [Indexable(-1)])
self.assertRaises(ValueError, self.type2test, [Indexable(256)])
def test_from_ssize(self):
self.assertEqual(self.type2test(0), b'')
self.assertEqual(self.type2test(1), b'\x00')
self.assertEqual(self.type2test(5), b'\x00\x00\x00\x00\x00')
self.assertRaises(ValueError, self.type2test, -1)
self.assertEqual(self.type2test('0', 'ascii'), b'0')
self.assertEqual(self.type2test(b'0'), b'0')
self.assertRaises(OverflowError, self.type2test, sys.maxsize + 1)
def test_constructor_type_errors(self):
self.assertRaises(TypeError, self.type2test, 0.0)
class C:
pass
self.assertRaises(TypeError, self.type2test, ["0"])
self.assertRaises(TypeError, self.type2test, [0.0])
self.assertRaises(TypeError, self.type2test, [None])
self.assertRaises(TypeError, self.type2test, [C()])
def test_constructor_value_errors(self):
self.assertRaises(ValueError, self.type2test, [-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize-2])
self.assertRaises(ValueError, self.type2test, [-10**100])
self.assertRaises(ValueError, self.type2test, [256])
self.assertRaises(ValueError, self.type2test, [257])
self.assertRaises(ValueError, self.type2test, [sys.maxsize])
self.assertRaises(ValueError, self.type2test, [sys.maxsize+1])
self.assertRaises(ValueError, self.type2test, [10**100])
def test_compare(self):
b1 = self.type2test([1, 2, 3])
b2 = self.type2test([1, 2, 3])
b3 = self.type2test([1, 3])
self.assertEqual(b1, b2)
self.assertTrue(b2 != b3)
self.assertTrue(b1 <= b2)
self.assertTrue(b1 <= b3)
self.assertTrue(b1 < b3)
self.assertTrue(b1 >= b2)
self.assertTrue(b3 >= b2)
self.assertTrue(b3 > b2)
self.assertFalse(b1 != b2)
self.assertFalse(b2 == b3)
self.assertFalse(b1 > b2)
self.assertFalse(b1 > b3)
self.assertFalse(b1 >= b3)
self.assertFalse(b1 < b2)
self.assertFalse(b3 < b2)
self.assertFalse(b3 <= b2)
@check_bytes_warnings
def test_compare_to_str(self):
# Byte comparisons with unicode should always fail!
# Test this for all expected byte orders and Unicode character
# sizes.
self.assertEqual(self.type2test(b"\0a\0b\0c") == "abc", False)
self.assertEqual(self.type2test(b"\0\0\0a\0\0\0b\0\0\0c") == "abc",
False)
self.assertEqual(self.type2test(b"a\0b\0c\0") == "abc", False)
self.assertEqual(self.type2test(b"a\0\0\0b\0\0\0c\0\0\0") == "abc",
False)
self.assertEqual(self.type2test() == str(), False)
self.assertEqual(self.type2test() != str(), True)
def test_reversed(self):
input = list(map(ord, "Hello"))
b = self.type2test(input)
output = list(reversed(b))
input.reverse()
self.assertEqual(output, input)
def test_getslice(self):
def by(s):
return self.type2test(map(ord, s))
b = by("Hello, world")
self.assertEqual(b[:5], by("Hello"))
self.assertEqual(b[1:5], by("ello"))
self.assertEqual(b[5:7], by(", "))
self.assertEqual(b[7:], by("world"))
self.assertEqual(b[7:12], by("world"))
self.assertEqual(b[7:100], by("world"))
self.assertEqual(b[:-7], by("Hello"))
self.assertEqual(b[-11:-7], by("ello"))
self.assertEqual(b[-7:-5], by(", "))
self.assertEqual(b[-5:], by("world"))
self.assertEqual(b[-5:12], by("world"))
self.assertEqual(b[-5:100], by("world"))
self.assertEqual(b[-100:5], by("Hello"))
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
L = list(range(255))
b = self.type2test(L)
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(b[start:stop:step], self.type2test(L[start:stop:step]))
def test_encoding(self):
sample = "Hello world\n\u1234\u5678\u9abc"
for enc in ("utf8", "utf16"):
b = self.type2test(sample, enc)
self.assertEqual(b, self.type2test(sample.encode(enc)))
self.assertRaises(UnicodeEncodeError, self.type2test, sample, "latin1")
b = self.type2test(sample, "latin1", "ignore")
self.assertEqual(b, self.type2test(sample[:-3], "utf-8"))
def test_decode(self):
sample = "Hello world\n\u1234\u5678\u9abc\def0\def0"
for enc in ("utf8", "utf16"):
b = self.type2test(sample, enc)
self.assertEqual(b.decode(enc), sample)
sample = "Hello world\n\x80\x81\xfe\xff"
b = self.type2test(sample, "latin1")
self.assertRaises(UnicodeDecodeError, b.decode, "utf8")
self.assertEqual(b.decode("utf8", "ignore"), "Hello world\n")
self.assertEqual(b.decode(errors="ignore", encoding="utf8"),
"Hello world\n")
def test_from_int(self):
b = self.type2test(0)
self.assertEqual(b, self.type2test())
b = self.type2test(10)
self.assertEqual(b, self.type2test([0]*10))
b = self.type2test(10000)
self.assertEqual(b, self.type2test([0]*10000))
def test_concat(self):
b1 = self.type2test(b"abc")
b2 = self.type2test(b"def")
self.assertEqual(b1 + b2, b"abcdef")
self.assertEqual(b1 + bytes(b"def"), b"abcdef")
self.assertEqual(bytes(b"def") + b1, b"defabc")
self.assertRaises(TypeError, lambda: b1 + "def")
self.assertRaises(TypeError, lambda: "abc" + b2)
def test_repeat(self):
for b in b"abc", self.type2test(b"abc"):
self.assertEqual(b * 3, b"abcabcabc")
self.assertEqual(b * 0, b"")
self.assertEqual(b * -1, b"")
self.assertRaises(TypeError, lambda: b * 3.14)
self.assertRaises(TypeError, lambda: 3.14 * b)
# XXX Shouldn't bytes and bytearray agree on what to raise?
with self.assertRaises((OverflowError, MemoryError)):
c = b * sys.maxsize
with self.assertRaises((OverflowError, MemoryError)):
b *= sys.maxsize
def test_repeat_1char(self):
self.assertEqual(self.type2test(b'x')*100, self.type2test([ord('x')]*100))
def test_contains(self):
b = self.type2test(b"abc")
self.assertIn(ord('a'), b)
self.assertIn(int(ord('a')), b)
self.assertNotIn(200, b)
self.assertRaises(ValueError, lambda: 300 in b)
self.assertRaises(ValueError, lambda: -1 in b)
self.assertRaises(TypeError, lambda: None in b)
self.assertRaises(TypeError, lambda: float(ord('a')) in b)
self.assertRaises(TypeError, lambda: "a" in b)
for f in bytes, bytearray:
self.assertIn(f(b""), b)
self.assertIn(f(b"a"), b)
self.assertIn(f(b"b"), b)
self.assertIn(f(b"c"), b)
self.assertIn(f(b"ab"), b)
self.assertIn(f(b"bc"), b)
self.assertIn(f(b"abc"), b)
self.assertNotIn(f(b"ac"), b)
self.assertNotIn(f(b"d"), b)
self.assertNotIn(f(b"dab"), b)
self.assertNotIn(f(b"abd"), b)
def test_fromhex(self):
self.assertRaises(TypeError, self.type2test.fromhex)
self.assertRaises(TypeError, self.type2test.fromhex, 1)
self.assertEqual(self.type2test.fromhex(''), self.type2test())
b = bytearray([0x1a, 0x2b, 0x30])
self.assertEqual(self.type2test.fromhex('1a2B30'), b)
self.assertEqual(self.type2test.fromhex(' 1A 2B 30 '), b)
self.assertEqual(self.type2test.fromhex('0000'), b'\0\0')
self.assertRaises(TypeError, self.type2test.fromhex, b'1B')
self.assertRaises(ValueError, self.type2test.fromhex, 'a')
self.assertRaises(ValueError, self.type2test.fromhex, 'rt')
self.assertRaises(ValueError, self.type2test.fromhex, '1a b cd')
self.assertRaises(ValueError, self.type2test.fromhex, '\x00')
self.assertRaises(ValueError, self.type2test.fromhex, '12 \x00 34')
def test_join(self):
self.assertEqual(self.type2test(b"").join([]), b"")
self.assertEqual(self.type2test(b"").join([b""]), b"")
for lst in [[b"abc"], [b"a", b"bc"], [b"ab", b"c"], [b"a", b"b", b"c"]]:
lst = list(map(self.type2test, lst))
self.assertEqual(self.type2test(b"").join(lst), b"abc")
self.assertEqual(self.type2test(b"").join(tuple(lst)), b"abc")
self.assertEqual(self.type2test(b"").join(iter(lst)), b"abc")
self.assertEqual(self.type2test(b".").join([b"ab", b"cd"]), b"ab.cd")
# XXX more...
def test_count(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.count(b'i'), 4)
self.assertEqual(b.count(b'ss'), 2)
self.assertEqual(b.count(b'w'), 0)
def test_startswith(self):
b = self.type2test(b'hello')
self.assertFalse(self.type2test().startswith(b"anything"))
self.assertTrue(b.startswith(b"hello"))
self.assertTrue(b.startswith(b"hel"))
self.assertTrue(b.startswith(b"h"))
self.assertFalse(b.startswith(b"hellow"))
self.assertFalse(b.startswith(b"ha"))
with self.assertRaises(TypeError) as cm:
b.startswith([b'h'])
exc = str(cm.exception)
self.assertIn('bytes', exc)
self.assertIn('tuple', exc)
def test_endswith(self):
b = self.type2test(b'hello')
self.assertFalse(bytearray().endswith(b"anything"))
self.assertTrue(b.endswith(b"hello"))
self.assertTrue(b.endswith(b"llo"))
self.assertTrue(b.endswith(b"o"))
self.assertFalse(b.endswith(b"whello"))
self.assertFalse(b.endswith(b"no"))
with self.assertRaises(TypeError) as cm:
b.endswith([b'o'])
exc = str(cm.exception)
self.assertIn('bytes', exc)
self.assertIn('tuple', exc)
def test_find(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.find(b'ss'), 2)
self.assertEqual(b.find(b'ss', 3), 5)
self.assertEqual(b.find(b'ss', 1, 7), 2)
self.assertEqual(b.find(b'ss', 1, 3), -1)
self.assertEqual(b.find(b'w'), -1)
self.assertEqual(b.find(b'mississippian'), -1)
def test_rfind(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rfind(b'ss'), 5)
self.assertEqual(b.rfind(b'ss', 3), 5)
self.assertEqual(b.rfind(b'ss', 0, 6), 2)
self.assertEqual(b.rfind(b'w'), -1)
self.assertEqual(b.rfind(b'mississippian'), -1)
def test_index(self):
b = self.type2test(b'world')
self.assertEqual(b.index(b'w'), 0)
self.assertEqual(b.index(b'orl'), 1)
self.assertRaises(ValueError, b.index, b'worm')
self.assertRaises(ValueError, b.index, b'ldo')
def test_rindex(self):
# XXX could be more rigorous
b = self.type2test(b'world')
self.assertEqual(b.rindex(b'w'), 0)
self.assertEqual(b.rindex(b'orl'), 1)
self.assertRaises(ValueError, b.rindex, b'worm')
self.assertRaises(ValueError, b.rindex, b'ldo')
def test_replace(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.replace(b'i', b'a'), b'massassappa')
self.assertEqual(b.replace(b'ss', b'x'), b'mixixippi')
def test_split(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.split(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.split(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.split(b'w'), [b])
def test_split_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.split(), [b'arf', b'barf'])
self.assertEqual(b.split(None), [b'arf', b'barf'])
self.assertEqual(b.split(None, 2), [b'arf', b'barf'])
for b in (b'a\x1Cb', b'a\x1Db', b'a\x1Eb', b'a\x1Fb'):
b = self.type2test(b)
self.assertEqual(b.split(), [b])
self.assertEqual(self.type2test(b' a bb c ').split(None, 0), [b'a bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 1), [b'a', b'bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 2), [b'a', b'bb', b'c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 3), [b'a', b'bb', b'c'])
def test_split_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').split, ' ')
def test_split_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.split(), [b'\x1c\x1d\x1e\x1f'])
def test_rsplit(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rsplit(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.rsplit(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.rsplit(b'w'), [b])
def test_rsplit_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.rsplit(), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None, 2), [b'arf', b'barf'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 0), [b' a bb c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 1), [b' a bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 2), [b' a', b'bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 3), [b'a', b'bb', b'c'])
def test_rsplit_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').rsplit, ' ')
def test_rsplit_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.rsplit(), [b'\x1c\x1d\x1e\x1f'])
def test_partition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.partition(b'ss'), (b'mi', b'ss', b'issippi'))
self.assertEqual(b.partition(b'w'), (b'mississippi', b'', b''))
def test_rpartition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rpartition(b'ss'), (b'missi', b'ss', b'ippi'))
self.assertEqual(b.rpartition(b'i'), (b'mississipp', b'i', b''))
self.assertEqual(b.rpartition(b'w'), (b'', b'', b'mississippi'))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
b = self.type2test(b)
ps = pickle.dumps(b, proto)
q = pickle.loads(ps)
self.assertEqual(b, q)
def test_strip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.strip(b'i'), b'mississipp')
self.assertEqual(b.strip(b'm'), b'ississippi')
self.assertEqual(b.strip(b'pi'), b'mississ')
self.assertEqual(b.strip(b'im'), b'ssissipp')
self.assertEqual(b.strip(b'pim'), b'ssiss')
self.assertEqual(b.strip(b), b'')
def test_lstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.lstrip(b'i'), b'mississippi')
self.assertEqual(b.lstrip(b'm'), b'ississippi')
self.assertEqual(b.lstrip(b'pi'), b'mississippi')
self.assertEqual(b.lstrip(b'im'), b'ssissippi')
self.assertEqual(b.lstrip(b'pim'), b'ssissippi')
def test_rstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rstrip(b'i'), b'mississipp')
self.assertEqual(b.rstrip(b'm'), b'mississippi')
self.assertEqual(b.rstrip(b'pi'), b'mississ')
self.assertEqual(b.rstrip(b'im'), b'mississipp')
self.assertEqual(b.rstrip(b'pim'), b'mississ')
def test_strip_whitespace(self):
b = self.type2test(b' \t\n\r\f\vabc \t\n\r\f\v')
self.assertEqual(b.strip(), b'abc')
self.assertEqual(b.lstrip(), b'abc \t\n\r\f\v')
self.assertEqual(b.rstrip(), b' \t\n\r\f\vabc')
def test_strip_bytearray(self):
self.assertEqual(self.type2test(b'abc').strip(memoryview(b'ac')), b'b')
self.assertEqual(self.type2test(b'abc').lstrip(memoryview(b'ac')), b'bc')
self.assertEqual(self.type2test(b'abc').rstrip(memoryview(b'ac')), b'ab')
def test_strip_string_error(self):
self.assertRaises(TypeError, self.type2test(b'abc').strip, 'b')
self.assertRaises(TypeError, self.type2test(b'abc').lstrip, 'b')
self.assertRaises(TypeError, self.type2test(b'abc').rstrip, 'b')
def test_ord(self):
b = self.type2test(b'\0A\x7f\x80\xff')
self.assertEqual([ord(b[i:i+1]) for i in range(len(b))],
[0, 65, 127, 128, 255])
def test_maketrans(self):
transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
self.assertEqual(self.type2test.maketrans(b'abc', b'xyz'), transtable)
transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374xyz'
self.assertEqual(self.type2test.maketrans(b'\375\376\377', b'xyz'), transtable)
self.assertRaises(ValueError, self.type2test.maketrans, b'abc', b'xyzq')
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 'def')
def test_none_arguments(self):
# issue 11828
b = self.type2test(b'hello')
l = self.type2test(b'l')
h = self.type2test(b'h')
x = self.type2test(b'x')
o = self.type2test(b'o')
self.assertEqual(2, b.find(l, None))
self.assertEqual(3, b.find(l, -2, None))
self.assertEqual(2, b.find(l, None, -2))
self.assertEqual(0, b.find(h, None, None))
self.assertEqual(3, b.rfind(l, None))
self.assertEqual(3, b.rfind(l, -2, None))
self.assertEqual(2, b.rfind(l, None, -2))
self.assertEqual(0, b.rfind(h, None, None))
self.assertEqual(2, b.index(l, None))
self.assertEqual(3, b.index(l, -2, None))
self.assertEqual(2, b.index(l, None, -2))
self.assertEqual(0, b.index(h, None, None))
self.assertEqual(3, b.rindex(l, None))
self.assertEqual(3, b.rindex(l, -2, None))
self.assertEqual(2, b.rindex(l, None, -2))
self.assertEqual(0, b.rindex(h, None, None))
self.assertEqual(2, b.count(l, None))
self.assertEqual(1, b.count(l, -2, None))
self.assertEqual(1, b.count(l, None, -2))
self.assertEqual(0, b.count(x, None, None))
self.assertEqual(True, b.endswith(o, None))
self.assertEqual(True, b.endswith(o, -2, None))
self.assertEqual(True, b.endswith(l, None, -2))
self.assertEqual(False, b.endswith(x, None, None))
self.assertEqual(True, b.startswith(h, None))
self.assertEqual(True, b.startswith(l, -2, None))
self.assertEqual(True, b.startswith(h, None, -2))
self.assertEqual(False, b.startswith(x, None, None))
def test_find_etc_raise_correct_error_messages(self):
# issue 11828
b = self.type2test(b'hello')
x = self.type2test(b'x')
self.assertRaisesRegex(TypeError, r'\bfind\b', b.find,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\brfind\b', b.rfind,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\bindex\b', b.index,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\brindex\b', b.rindex,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\bcount\b', b.count,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\bstartswith\b', b.startswith,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\bendswith\b', b.endswith,
x, None, None, None)
class BytesTest(BaseBytesTest):
type2test = bytes
def test_buffer_is_readonly(self):
fd = os.dup(sys.stdin.fileno())
with open(fd, "rb", buffering=0) as f:
self.assertRaises(TypeError, f.readinto, b"")
def test_custom(self):
class A:
def __bytes__(self):
return b'abc'
self.assertEqual(bytes(A()), b'abc')
class A: pass
self.assertRaises(TypeError, bytes, A())
class A:
def __bytes__(self):
return None
self.assertRaises(TypeError, bytes, A())
# Test PyBytes_FromFormat()
def test_from_format(self):
test.support.import_module('ctypes')
from ctypes import pythonapi, py_object, c_int, c_char_p
PyBytes_FromFormat = pythonapi.PyBytes_FromFormat
PyBytes_FromFormat.restype = py_object
self.assertEqual(PyBytes_FromFormat(b'format'),
b'format')
self.assertEqual(PyBytes_FromFormat(b'%'), b'%')
self.assertEqual(PyBytes_FromFormat(b'%%'), b'%')
self.assertEqual(PyBytes_FromFormat(b'%%s'), b'%s')
self.assertEqual(PyBytes_FromFormat(b'[%%]'), b'[%]')
self.assertEqual(PyBytes_FromFormat(b'%%%c', c_int(ord('_'))), b'%_')
self.assertEqual(PyBytes_FromFormat(b'c:%c', c_int(255)),
b'c:\xff')
self.assertEqual(PyBytes_FromFormat(b's:%s', c_char_p(b'cstr')),
b's:cstr')
class ByteArrayTest(BaseBytesTest):
type2test = bytearray
def test_nohash(self):
self.assertRaises(TypeError, hash, bytearray())
def test_bytearray_api(self):
short_sample = b"Hello world\n"
sample = short_sample + b"\0"*(20 - len(short_sample))
tfn = tempfile.mktemp()
try:
# Prepare
with open(tfn, "wb") as f:
f.write(short_sample)
# Test readinto
with open(tfn, "rb") as f:
b = bytearray(20)
n = f.readinto(b)
self.assertEqual(n, len(short_sample))
self.assertEqual(list(b), list(sample))
# Test writing in binary mode
with open(tfn, "wb") as f:
f.write(b)
with open(tfn, "rb") as f:
self.assertEqual(f.read(), sample)
# Text mode is ambiguous; don't test
finally:
try:
os.remove(tfn)
except os.error:
pass
def test_reverse(self):
b = bytearray(b'hello')
self.assertEqual(b.reverse(), None)
self.assertEqual(b, b'olleh')
b = bytearray(b'hello1') # test even number of items
b.reverse()
self.assertEqual(b, b'1olleh')
b = bytearray()
b.reverse()
self.assertFalse(b)
def test_regexps(self):
def by(s):
return bytearray(map(ord, s))
b = by("Hello, world")
self.assertEqual(re.findall(br"\w+", b), [by("Hello"), by("world")])
def test_setitem(self):
b = bytearray([1, 2, 3])
b[1] = 100
self.assertEqual(b, bytearray([1, 100, 3]))
b[-1] = 200
self.assertEqual(b, bytearray([1, 100, 200]))
b[0] = Indexable(10)
self.assertEqual(b, bytearray([10, 100, 200]))
try:
b[3] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[-10] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[0] = 256
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = Indexable(-1)
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = None
self.fail("Didn't raise TypeError")
except TypeError:
pass
def test_delitem(self):
b = bytearray(range(10))
del b[0]
self.assertEqual(b, bytearray(range(1, 10)))
del b[-1]
self.assertEqual(b, bytearray(range(1, 9)))
del b[4]
self.assertEqual(b, bytearray([1, 2, 3, 4, 6, 7, 8]))
def test_setslice(self):
b = bytearray(range(10))
self.assertEqual(list(b), list(range(10)))
b[0:5] = bytearray([1, 1, 1, 1, 1])
self.assertEqual(b, bytearray([1, 1, 1, 1, 1, 5, 6, 7, 8, 9]))
del b[0:-5]
self.assertEqual(b, bytearray([5, 6, 7, 8, 9]))
b[0:0] = bytearray([0, 1, 2, 3, 4])
self.assertEqual(b, bytearray(range(10)))
b[-7:-3] = bytearray([100, 101])
self.assertEqual(b, bytearray([0, 1, 2, 100, 101, 7, 8, 9]))
b[3:5] = [3, 4, 5, 6]
self.assertEqual(b, bytearray(range(10)))
b[3:0] = [42, 42, 42]
self.assertEqual(b, bytearray([0, 1, 2, 42, 42, 42, 3, 4, 5, 6, 7, 8, 9]))
def test_extended_set_del_slice(self):
indices = (0, None, 1, 3, 19, 300, 1<<333, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip invalid step 0
for step in indices[1:]:
L = list(range(255))
b = bytearray(L)
# Make sure we have a slice of exactly the right length,
# but with different data.
data = L[start:stop:step]
data.reverse()
L[start:stop:step] = data
b[start:stop:step] = data
self.assertEqual(b, bytearray(L))
del L[start:stop:step]
del b[start:stop:step]
self.assertEqual(b, bytearray(L))
def test_setslice_trap(self):
# This test verifies that we correctly handle assigning self
# to a slice of self (the old Lambert Meertens trap).
b = bytearray(range(256))
b[8:] = b
self.assertEqual(b, bytearray(list(range(8)) + list(range(256))))
def test_iconcat(self):
b = bytearray(b"abc")
b1 = b
b += b"def"
self.assertEqual(b, b"abcdef")
self.assertEqual(b, b1)
self.assertTrue(b is b1)
b += b"xyz"
self.assertEqual(b, b"abcdefxyz")
try:
b += ""
except TypeError:
pass
else:
self.fail("bytes += unicode didn't raise TypeError")
def test_irepeat(self):
b = bytearray(b"abc")
b1 = b
b *= 3
self.assertEqual(b, b"abcabcabc")
self.assertEqual(b, b1)
self.assertTrue(b is b1)
def test_irepeat_1char(self):
b = bytearray(b"x")
b1 = b
b *= 100
self.assertEqual(b, b"x"*100)
self.assertEqual(b, b1)
self.assertTrue(b is b1)
def test_alloc(self):
b = bytearray()
alloc = b.__alloc__()
self.assertTrue(alloc >= 0)
seq = [alloc]
for i in range(100):
b += b"x"
alloc = b.__alloc__()
self.assertTrue(alloc >= len(b))
if alloc not in seq:
seq.append(alloc)
def test_extend(self):
orig = b'hello'
a = bytearray(orig)
a.extend(a)
self.assertEqual(a, orig + orig)
self.assertEqual(a[5:], orig)
a = bytearray(b'')
# Test iterators that don't have a __length_hint__
a.extend(map(int, orig * 25))
a.extend(int(x) for x in orig * 25)
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
a.extend(iter(map(int, orig * 50)))
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
a.extend(list(map(int, orig * 50)))
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
self.assertRaises(ValueError, a.extend, [0, 1, 2, 256])
self.assertRaises(ValueError, a.extend, [0, 1, 2, -1])
self.assertEqual(len(a), 0)
a = bytearray(b'')
a.extend([Indexable(ord('a'))])
self.assertEqual(a, b'a')
def test_remove(self):
b = bytearray(b'hello')
b.remove(ord('l'))
self.assertEqual(b, b'helo')
b.remove(ord('l'))
self.assertEqual(b, b'heo')
self.assertRaises(ValueError, lambda: b.remove(ord('l')))
self.assertRaises(ValueError, lambda: b.remove(400))
self.assertRaises(TypeError, lambda: b.remove('e'))
# remove first and last
b.remove(ord('o'))
b.remove(ord('h'))
self.assertEqual(b, b'e')
self.assertRaises(TypeError, lambda: b.remove(b'e'))
b.remove(Indexable(ord('e')))
self.assertEqual(b, b'')
def test_pop(self):
b = bytearray(b'world')
self.assertEqual(b.pop(), ord('d'))
self.assertEqual(b.pop(0), ord('w'))
self.assertEqual(b.pop(-2), ord('r'))
self.assertRaises(IndexError, lambda: b.pop(10))
self.assertRaises(IndexError, lambda: bytearray().pop())
# test for issue #6846
self.assertEqual(bytearray(b'\xff').pop(), 0xff)
def test_nosort(self):
self.assertRaises(AttributeError, lambda: bytearray().sort())
def test_append(self):
b = bytearray(b'hell')
b.append(ord('o'))
self.assertEqual(b, b'hello')
self.assertEqual(b.append(100), None)
b = bytearray()
b.append(ord('A'))
self.assertEqual(len(b), 1)
self.assertRaises(TypeError, lambda: b.append(b'o'))
b = bytearray()
b.append(Indexable(ord('A')))
self.assertEqual(b, b'A')
def test_insert(self):
b = bytearray(b'msssspp')
b.insert(1, ord('i'))
b.insert(4, ord('i'))
b.insert(-2, ord('i'))
b.insert(1000, ord('i'))
self.assertEqual(b, b'mississippi')
self.assertRaises(TypeError, lambda: b.insert(0, b'1'))
b = bytearray()
b.insert(0, Indexable(ord('A')))
self.assertEqual(b, b'A')
def test_copied(self):
# Issue 4348. Make sure that operations that don't mutate the array
# copy the bytes.
b = bytearray(b'abc')
self.assertFalse(b is b.replace(b'abc', b'cde', 0))
t = bytearray([i for i in range(256)])
x = bytearray(b'')
self.assertFalse(x is x.translate(t))
def test_partition_bytearray_doesnt_share_nullstring(self):
a, b, c = bytearray(b"x").partition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
self.assertTrue(b is not c)
b += b"!"
self.assertEqual(c, b"")
a, b, c = bytearray(b"x").partition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
# Same for rpartition
b, c, a = bytearray(b"x").rpartition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
self.assertTrue(b is not c)
b += b"!"
self.assertEqual(c, b"")
c, b, a = bytearray(b"x").rpartition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
def test_resize_forbidden(self):
# #4509: can't resize a bytearray when there are buffer exports, even
# if it wouldn't reallocate the underlying buffer.
# Furthermore, no destructive changes to the buffer may be applied
# before raising the error.
b = bytearray(range(10))
v = memoryview(b)
def resize(n):
b[1:-1] = range(n + 1, 2*n - 1)
resize(10)
orig = b[:]
self.assertRaises(BufferError, resize, 11)
self.assertEqual(b, orig)
self.assertRaises(BufferError, resize, 9)
self.assertEqual(b, orig)
self.assertRaises(BufferError, resize, 0)
self.assertEqual(b, orig)
# Other operations implying resize
self.assertRaises(BufferError, b.pop, 0)
self.assertEqual(b, orig)
self.assertRaises(BufferError, b.remove, b[1])
self.assertEqual(b, orig)
def delitem():
del b[1]
self.assertRaises(BufferError, delitem)
self.assertEqual(b, orig)
# deleting a non-contiguous slice
def delslice():
b[1:-1:2] = b""
self.assertRaises(BufferError, delslice)
self.assertEqual(b, orig)
class AssortedBytesTest(unittest.TestCase):
#
# Test various combinations of bytes and bytearray
#
@check_bytes_warnings
def test_repr_str(self):
for f in str, repr:
self.assertEqual(f(bytearray()), "bytearray(b'')")
self.assertEqual(f(bytearray([0])), "bytearray(b'\\x00')")
self.assertEqual(f(bytearray([0, 1, 254, 255])),
"bytearray(b'\\x00\\x01\\xfe\\xff')")
self.assertEqual(f(b"abc"), "b'abc'")
self.assertEqual(f(b"'"), '''b"'"''') # '''
self.assertEqual(f(b"'\""), r"""b'\'"'""") # '
def test_compare_bytes_to_bytearray(self):
self.assertEqual(b"abc" == bytes(b"abc"), True)
self.assertEqual(b"ab" != bytes(b"abc"), True)
self.assertEqual(b"ab" <= bytes(b"abc"), True)
self.assertEqual(b"ab" < bytes(b"abc"), True)
self.assertEqual(b"abc" >= bytes(b"ab"), True)
self.assertEqual(b"abc" > bytes(b"ab"), True)
self.assertEqual(b"abc" != bytes(b"abc"), False)
self.assertEqual(b"ab" == bytes(b"abc"), False)
self.assertEqual(b"ab" > bytes(b"abc"), False)
self.assertEqual(b"ab" >= bytes(b"abc"), False)
self.assertEqual(b"abc" < bytes(b"ab"), False)
self.assertEqual(b"abc" <= bytes(b"ab"), False)
self.assertEqual(bytes(b"abc") == b"abc", True)
self.assertEqual(bytes(b"ab") != b"abc", True)
self.assertEqual(bytes(b"ab") <= b"abc", True)
self.assertEqual(bytes(b"ab") < b"abc", True)
self.assertEqual(bytes(b"abc") >= b"ab", True)
self.assertEqual(bytes(b"abc") > b"ab", True)
self.assertEqual(bytes(b"abc") != b"abc", False)
self.assertEqual(bytes(b"ab") == b"abc", False)
self.assertEqual(bytes(b"ab") > b"abc", False)
self.assertEqual(bytes(b"ab") >= b"abc", False)
self.assertEqual(bytes(b"abc") < b"ab", False)
self.assertEqual(bytes(b"abc") <= b"ab", False)
def test_doc(self):
self.assertIsNotNone(bytearray.__doc__)
self.assertTrue(bytearray.__doc__.startswith("bytearray("), bytearray.__doc__)
self.assertIsNotNone(bytes.__doc__)
self.assertTrue(bytes.__doc__.startswith("bytes("), bytes.__doc__)
def test_from_bytearray(self):
sample = bytes(b"Hello world\n\x80\x81\xfe\xff")
buf = memoryview(sample)
b = bytearray(buf)
self.assertEqual(b, bytearray(sample))
@check_bytes_warnings
def test_to_str(self):
self.assertEqual(str(b''), "b''")
self.assertEqual(str(b'x'), "b'x'")
self.assertEqual(str(b'\x80'), "b'\\x80'")
self.assertEqual(str(bytearray(b'')), "bytearray(b'')")
self.assertEqual(str(bytearray(b'x')), "bytearray(b'x')")
self.assertEqual(str(bytearray(b'\x80')), "bytearray(b'\\x80')")
def test_literal(self):
tests = [
(b"Wonderful spam", "Wonderful spam"),
(br"Wonderful spam too", "Wonderful spam too"),
(b"\xaa\x00\000\200", "\xaa\x00\000\200"),
(br"\xaa\x00\000\200", r"\xaa\x00\000\200"),
]
for b, s in tests:
self.assertEqual(b, bytearray(s, 'latin-1'))
for c in range(128, 256):
self.assertRaises(SyntaxError, eval,
'b"%s"' % chr(c))
def test_translate(self):
b = b'hello'
ba = bytearray(b)
rosetta = bytearray(range(0, 256))
rosetta[ord('o')] = ord('e')
c = b.translate(rosetta, b'l')
self.assertEqual(b, b'hello')
self.assertEqual(c, b'hee')
c = ba.translate(rosetta, b'l')
self.assertEqual(ba, b'hello')
self.assertEqual(c, b'hee')
c = b.translate(None, b'e')
self.assertEqual(c, b'hllo')
c = ba.translate(None, b'e')
self.assertEqual(c, b'hllo')
self.assertRaises(TypeError, b.translate, None, None)
self.assertRaises(TypeError, ba.translate, None, None)
def test_split_bytearray(self):
self.assertEqual(b'a b'.split(memoryview(b' ')), [b'a', b'b'])
def test_rsplit_bytearray(self):
self.assertEqual(b'a b'.rsplit(memoryview(b' ')), [b'a', b'b'])
def test_return_self(self):
# bytearray.replace must always return a new bytearray
b = bytearray()
self.assertFalse(b.replace(b'', b'') is b)
def test_compare(self):
if sys.flags.bytes_warning:
def bytes_warning():
return test.support.check_warnings(('', BytesWarning))
with bytes_warning():
b'' == ''
with bytes_warning():
b'' != ''
with bytes_warning():
bytearray(b'') == ''
with bytes_warning():
bytearray(b'') != ''
else:
self.skipTest("BytesWarning is needed for this test: use -bb option")
# Optimizations:
# __iter__? (optimization)
# __reversed__? (optimization)
# XXX More string methods? (Those that don't use character properties)
# There are tests in string_tests.py that are more
# comprehensive for things like split, partition, etc.
# Unfortunately they are all bundled with tests that
# are not appropriate for bytes
# I've started porting some of those into bytearray_tests.py, we should port
# the rest that make sense (the code can be cleaned up to use modern
# unittest methods at the same time).
class BytearrayPEP3137Test(unittest.TestCase,
test.buffer_tests.MixinBytesBufferCommonTests):
def marshal(self, x):
return bytearray(x)
def test_returns_new_copy(self):
val = self.marshal(b'1234')
# On immutable types these MAY return a reference to themselves
# but on mutable types like bytearray they MUST return a new copy.
for methname in ('zfill', 'rjust', 'ljust', 'center'):
method = getattr(val, methname)
newval = method(3)
self.assertEqual(val, newval)
self.assertTrue(val is not newval,
methname+' returned self on a mutable object')
for expr in ('val.split()[0]', 'val.rsplit()[0]',
'val.partition(b".")[0]', 'val.rpartition(b".")[2]',
'val.splitlines()[0]', 'val.replace(b"", b"")'):
newval = eval(expr)
self.assertEqual(val, newval)
self.assertTrue(val is not newval,
expr+' returned val on a mutable object')
class FixedStringTest(test.string_tests.BaseTest):
def fixtype(self, obj):
if isinstance(obj, str):
return obj.encode("utf-8")
return super().fixtype(obj)
    # Currently the bytes containment testing uses a single integer
    # value. This may not be the final design, but until then testing
    # containment of one bytes section within another is not valid here.
def test_contains(self):
pass
def test_expandtabs(self):
pass
def test_upper(self):
pass
def test_lower(self):
pass
class ByteArrayAsStringTest(FixedStringTest):
type2test = bytearray
class BytesAsStringTest(FixedStringTest):
type2test = bytes
class SubclassTest(unittest.TestCase):
def test_basic(self):
self.assertTrue(issubclass(self.subclass2test, self.type2test))
self.assertIsInstance(self.subclass2test(), self.type2test)
a, b = b"abcd", b"efgh"
_a, _b = self.subclass2test(a), self.subclass2test(b)
# test comparison operators with subclass instances
self.assertTrue(_a == _a)
self.assertTrue(_a != _b)
self.assertTrue(_a < _b)
self.assertTrue(_a <= _b)
self.assertTrue(_b >= _a)
self.assertTrue(_b > _a)
self.assertTrue(_a is not a)
# test concat of subclass instances
self.assertEqual(a + b, _a + _b)
self.assertEqual(a + b, a + _b)
self.assertEqual(a + b, _a + b)
# test repeat
self.assertTrue(a*5 == _a*5)
def test_join(self):
# Make sure join returns a NEW object for single item sequences
# involving a subclass.
# Make sure that it is of the appropriate type.
s1 = self.subclass2test(b"abcd")
s2 = self.type2test().join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is self.type2test, type(s2))
# Test reverse, calling join on subclass
s3 = s1.join([b"abcd"])
self.assertTrue(type(s3) is self.type2test)
def test_pickle(self):
a = self.subclass2test(b"abcd")
a.x = 10
a.y = self.subclass2test(b"efgh")
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
b = pickle.loads(pickle.dumps(a, proto))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(a.y, b.y)
self.assertEqual(type(a), type(b))
self.assertEqual(type(a.y), type(b.y))
def test_copy(self):
a = self.subclass2test(b"abcd")
a.x = 10
a.y = self.subclass2test(b"efgh")
for copy_method in (copy.copy, copy.deepcopy):
b = copy_method(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(a.y, b.y)
self.assertEqual(type(a), type(b))
self.assertEqual(type(a.y), type(b.y))
class ByteArraySubclass(bytearray):
pass
class BytesSubclass(bytes):
pass
class ByteArraySubclassTest(SubclassTest):
type2test = bytearray
subclass2test = ByteArraySubclass
def test_init_override(self):
class subclass(bytearray):
def __init__(me, newarg=1, *args, **kwargs):
bytearray.__init__(me, *args, **kwargs)
x = subclass(4, b"abcd")
x = subclass(4, source=b"abcd")
self.assertEqual(x, b"abcd")
x = subclass(newarg=4, source=b"abcd")
self.assertEqual(x, b"abcd")
class BytesSubclassTest(SubclassTest):
type2test = bytes
subclass2test = BytesSubclass
def test_main():
test.support.run_unittest(
BytesTest, AssortedBytesTest, BytesAsStringTest,
ByteArrayTest, ByteArrayAsStringTest, BytesSubclassTest,
ByteArraySubclassTest, BytearrayPEP3137Test)
if __name__ == "__main__":
test_main()
|
apache-2.0
|
bgris/ODL_bgris
|
lib/python3.5/site-packages/chardet/langhebrewmodel.py
|
2763
|
11318
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbols (punctuation) that do not belong to a word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40
78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60
66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23,
12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
Win1255HebrewModel = {
'charToOrderMap': win1255_CharToOrderMap,
'precedenceMatrix': HebrewLangModel,
'mTypicalPositiveRatio': 0.984004,
'keepEnglishLetter': False,
'charsetName': "windows-1255"
}
# flake8: noqa
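
# Illustrative sketch (not part of the original chardet module): it only reads the
# two tables defined above.  A byte of Windows-1255 text is first mapped to a
# frequency "order" via win1255_CharToOrderMap; pairs of consecutive orders are then
# looked up in HebrewLangModel, which is assumed to be a flattened 64x64 matrix
# indexed as [previous_order * 64 + current_order] (chardet's usual sample layout).
if __name__ == "__main__":
    SAMPLE_SIZE = 64  # orders at or above this fall outside the modelled sample

    def sequence_category(prev_byte, cur_byte):
        prev_order = win1255_CharToOrderMap[prev_byte]
        cur_order = win1255_CharToOrderMap[cur_byte]
        if prev_order < SAMPLE_SIZE and cur_order < SAMPLE_SIZE:
            # 0 = essentially never seen, 3 = very common sequence in Hebrew text
            return HebrewLangModel[prev_order * SAMPLE_SIZE + cur_order]
        return None

    # 0xE9 and 0xE5 are Hebrew letters in Windows-1255
    print(sequence_category(0xE9, 0xE5))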
|
gpl-3.0
|
phektus/Django-Google-AppEngine-OpenId-Auth
|
django/contrib/gis/gdal/envelope.py
|
321
|
7044
|
"""
The GDAL/OGR library uses an Envelope structure to hold the bounding
box information for a geometry. The envelope (bounding box) contains
two pairs of coordinates, one for the lower left coordinate and one
for the upper right coordinate:
                           +----------o Upper right; (max_x, max_y)
                           |          |
                           |          |
                           |          |
 Lower left (min_x, min_y) o----------+
"""
from ctypes import Structure, c_double
from django.contrib.gis.gdal.error import OGRException
# The OGR definition of an Envelope is a C structure containing four doubles.
# See the 'ogr_core.h' source file for more information:
# http://www.gdal.org/ogr/ogr__core_8h-source.html
class OGREnvelope(Structure):
"Represents the OGREnvelope C Structure."
_fields_ = [("MinX", c_double),
("MaxX", c_double),
("MinY", c_double),
("MaxY", c_double),
]
class Envelope(object):
"""
    The Envelope object wraps an OGREnvelope C structure, which contains the
    minimum and maximum X, Y coordinates for a rectangular bounding box. The naming
of the variables is compatible with the OGR Envelope structure.
"""
def __init__(self, *args):
"""
The initialization function may take an OGREnvelope structure, 4-element
tuple or list, or 4 individual arguments.
"""
if len(args) == 1:
if isinstance(args[0], OGREnvelope):
# OGREnvelope (a ctypes Structure) was passed in.
self._envelope = args[0]
elif isinstance(args[0], (tuple, list)):
# A tuple was passed in.
if len(args[0]) != 4:
raise OGRException('Incorrect number of tuple elements (%d).' % len(args[0]))
else:
self._from_sequence(args[0])
else:
raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
elif len(args) == 4:
            # Individual parameters passed in.
# Thanks to ww for the help
self._from_sequence(map(float, args))
else:
raise OGRException('Incorrect number (%d) of arguments.' % len(args))
# Checking the x,y coordinates
if self.min_x > self.max_x:
raise OGRException('Envelope minimum X > maximum X.')
if self.min_y > self.max_y:
raise OGRException('Envelope minimum Y > maximum Y.')
def __eq__(self, other):
"""
Returns True if the envelopes are equivalent; can compare against
other Envelopes and 4-tuples.
"""
if isinstance(other, Envelope):
return (self.min_x == other.min_x) and (self.min_y == other.min_y) and \
(self.max_x == other.max_x) and (self.max_y == other.max_y)
elif isinstance(other, tuple) and len(other) == 4:
return (self.min_x == other[0]) and (self.min_y == other[1]) and \
(self.max_x == other[2]) and (self.max_y == other[3])
else:
raise OGRException('Equivalence testing only works with other Envelopes.')
def __str__(self):
"Returns a string representation of the tuple."
return str(self.tuple)
def _from_sequence(self, seq):
"Initializes the C OGR Envelope structure from the given sequence."
self._envelope = OGREnvelope()
self._envelope.MinX = seq[0]
self._envelope.MinY = seq[1]
self._envelope.MaxX = seq[2]
self._envelope.MaxY = seq[3]
def expand_to_include(self, *args):
"""
Modifies the envelope to expand to include the boundaries of
the passed-in 2-tuple (a point), 4-tuple (an extent) or
envelope.
"""
# We provide a number of different signatures for this method,
# and the logic here is all about converting them into a
# 4-tuple single parameter which does the actual work of
# expanding the envelope.
if len(args) == 1:
if isinstance(args[0], Envelope):
return self.expand_to_include(args[0].tuple)
elif hasattr(args[0], 'x') and hasattr(args[0], 'y'):
return self.expand_to_include(args[0].x, args[0].y, args[0].x, args[0].y)
elif isinstance(args[0], (tuple, list)):
# A tuple was passed in.
if len(args[0]) == 2:
return self.expand_to_include((args[0][0], args[0][1], args[0][0], args[0][1]))
elif len(args[0]) == 4:
(minx, miny, maxx, maxy) = args[0]
if minx < self._envelope.MinX:
self._envelope.MinX = minx
if miny < self._envelope.MinY:
self._envelope.MinY = miny
if maxx > self._envelope.MaxX:
self._envelope.MaxX = maxx
if maxy > self._envelope.MaxY:
self._envelope.MaxY = maxy
else:
raise OGRException('Incorrect number of tuple elements (%d).' % len(args[0]))
else:
raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
elif len(args) == 2:
# An x and an y parameter were passed in
return self.expand_to_include((args[0], args[1], args[0], args[1]))
elif len(args) == 4:
            # Individual parameters passed in.
return self.expand_to_include(args)
else:
            raise OGRException('Incorrect number (%d) of arguments.' % len(args))
@property
def min_x(self):
"Returns the value of the minimum X coordinate."
return self._envelope.MinX
@property
def min_y(self):
"Returns the value of the minimum Y coordinate."
return self._envelope.MinY
@property
def max_x(self):
"Returns the value of the maximum X coordinate."
return self._envelope.MaxX
@property
def max_y(self):
"Returns the value of the maximum Y coordinate."
return self._envelope.MaxY
@property
def ur(self):
"Returns the upper-right coordinate."
return (self.max_x, self.max_y)
@property
def ll(self):
"Returns the lower-left coordinate."
return (self.min_x, self.min_y)
@property
def tuple(self):
"Returns a tuple representing the envelope."
return (self.min_x, self.min_y, self.max_x, self.max_y)
@property
def wkt(self):
"Returns WKT representing a Polygon for this envelope."
# TODO: Fix significant figures.
return 'POLYGON((%s %s,%s %s,%s %s,%s %s,%s %s))' % \
(self.min_x, self.min_y, self.min_x, self.max_y,
self.max_x, self.max_y, self.max_x, self.min_y,
self.min_x, self.min_y)
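
# Illustrative usage sketch (not part of the original module); it exercises only
# the Envelope API defined above and assumes Django's GDAL bindings are importable.
if __name__ == '__main__':
    # Build an envelope from four individual coordinates (min_x, min_y, max_x, max_y).
    env = Envelope(0.0, 0.0, 5.0, 5.0)
    # Expanding by a single point only grows the box where needed.
    env.expand_to_include(10.0, 3.0)
    print(env.tuple)   # (0.0, 0.0, 10.0, 5.0)
    print(env.wkt)     # POLYGON((0.0 0.0,0.0 5.0,10.0 5.0,10.0 0.0,0.0 0.0))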
|
bsd-3-clause
|
xin-cai/openwhisk
|
tools/json/validate.py
|
5
|
1341
|
#!/usr/bin/python
"""Validate that a string conforms to a json schema.
usage: validate.py obj schema
where obj and schema are both strings.
prints 'true' if validate succeeds, 'false' otherwise
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
import sys
import json
from jsonschema import validate
if len(sys.argv) != 3:
print('usage: validate.py obj schema')
sys.exit(-1)
a1 = sys.argv[1].replace('\n', '')
a2 = sys.argv[2].replace('\n', '')
obj = json.loads(a1)
schema = json.loads(a2)
try:
validate(obj, schema)
print('true')
except Exception:
print('false')
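
# Example invocation (illustrative; the JSON values are made up, and the
# jsonschema package must be installed):
#   ./validate.py '{"name": "demo"}' '{"type": "object", "required": ["name"]}'
# prints 'true', while an object that violates the schema prints 'false'.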
|
apache-2.0
|
ensemblr/llvm-project-boilerplate
|
include/llvm/projects/compiler-rt/lib/asan/scripts/asan_symbolize.py
|
1
|
18097
|
#!/usr/bin/env python
#===- lib/asan/scripts/asan_symbolize.py -----------------------------------===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
import argparse
import bisect
import getopt
import os
import re
import subprocess
import sys
symbolizers = {}
DEBUG = False
demangle = False
binutils_prefix = None
sysroot_path = None
binary_name_filter = None
fix_filename_patterns = None
logfile = sys.stdin
allow_system_symbolizer = True
force_system_symbolizer = False
# FIXME: merge the code that calls fix_filename().
def fix_filename(file_name):
if fix_filename_patterns:
for path_to_cut in fix_filename_patterns:
file_name = re.sub('.*' + path_to_cut, '', file_name)
file_name = re.sub('.*asan_[a-z_]*.cc:[0-9]*', '_asan_rtl_', file_name)
file_name = re.sub('.*crtstuff.c:0', '???:0', file_name)
return file_name
def sysroot_path_filter(binary_name):
return sysroot_path + binary_name
def is_valid_arch(s):
return s in ["i386", "x86_64", "x86_64h", "arm", "armv6", "armv7", "armv7s",
"armv7k", "arm64", "powerpc64", "powerpc64le", "s390x", "s390"]
def guess_arch(addr):
# Guess which arch we're running. 10 = len('0x') + 8 hex digits.
if len(addr) > 10:
return 'x86_64'
else:
return 'i386'
class Symbolizer(object):
def __init__(self):
pass
def symbolize(self, addr, binary, offset):
"""Symbolize the given address (pair of binary and offset).
    Overridden in subclasses.
Args:
addr: virtual address of an instruction.
binary: path to executable/shared object containing this instruction.
offset: instruction offset in the @binary.
Returns:
list of strings (one string for each inlined frame) describing
the code locations for this instruction (that is, function name, file
name, line and column numbers).
"""
return None
class LLVMSymbolizer(Symbolizer):
def __init__(self, symbolizer_path, default_arch, system, dsym_hints=[]):
super(LLVMSymbolizer, self).__init__()
self.symbolizer_path = symbolizer_path
self.default_arch = default_arch
self.system = system
self.dsym_hints = dsym_hints
self.pipe = self.open_llvm_symbolizer()
def open_llvm_symbolizer(self):
cmd = [self.symbolizer_path,
'--use-symbol-table=true',
'--demangle=%s' % demangle,
'--functions=linkage',
'--inlining=true',
'--default-arch=%s' % self.default_arch]
if self.system == 'Darwin':
for hint in self.dsym_hints:
cmd.append('--dsym-hint=%s' % hint)
if DEBUG:
print ' '.join(cmd)
try:
result = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
except OSError:
result = None
return result
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if not self.pipe:
return None
result = []
try:
symbolizer_input = '"%s" %s' % (binary, offset)
if DEBUG:
print symbolizer_input
print >> self.pipe.stdin, symbolizer_input
while True:
function_name = self.pipe.stdout.readline().rstrip()
if not function_name:
break
file_name = self.pipe.stdout.readline().rstrip()
file_name = fix_filename(file_name)
if (not function_name.startswith('??') or
not file_name.startswith('??')):
# Append only non-trivial frames.
result.append('%s in %s %s' % (addr, function_name,
file_name))
except Exception:
result = []
if not result:
result = None
return result
def LLVMSymbolizerFactory(system, default_arch, dsym_hints=[]):
symbolizer_path = os.getenv('LLVM_SYMBOLIZER_PATH')
if not symbolizer_path:
symbolizer_path = os.getenv('ASAN_SYMBOLIZER_PATH')
if not symbolizer_path:
# Assume llvm-symbolizer is in PATH.
symbolizer_path = 'llvm-symbolizer'
return LLVMSymbolizer(symbolizer_path, default_arch, system, dsym_hints)
class Addr2LineSymbolizer(Symbolizer):
def __init__(self, binary):
super(Addr2LineSymbolizer, self).__init__()
self.binary = binary
self.pipe = self.open_addr2line()
self.output_terminator = -1
def open_addr2line(self):
addr2line_tool = 'addr2line'
if binutils_prefix:
addr2line_tool = binutils_prefix + addr2line_tool
cmd = [addr2line_tool, '-fi']
if demangle:
cmd += ['--demangle']
cmd += ['-e', self.binary]
if DEBUG:
print ' '.join(cmd)
return subprocess.Popen(cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if self.binary != binary:
return None
lines = []
try:
print >> self.pipe.stdin, offset
print >> self.pipe.stdin, self.output_terminator
is_first_frame = True
while True:
function_name = self.pipe.stdout.readline().rstrip()
file_name = self.pipe.stdout.readline().rstrip()
if is_first_frame:
is_first_frame = False
elif function_name in ['', '??']:
assert file_name == function_name
break
        lines.append((function_name, file_name))
except Exception:
lines.append(('??', '??:0'))
return ['%s in %s %s' % (addr, function, fix_filename(file)) for (function, file) in lines]
class UnbufferedLineConverter(object):
"""
Wrap a child process that responds to each line of input with one line of
output. Uses pty to trick the child into providing unbuffered output.
"""
def __init__(self, args, close_stderr=False):
# Local imports so that the script can start on Windows.
import pty
import termios
pid, fd = pty.fork()
if pid == 0:
# We're the child. Transfer control to command.
if close_stderr:
dev_null = os.open('/dev/null', 0)
os.dup2(dev_null, 2)
os.execvp(args[0], args)
else:
# Disable echoing.
attr = termios.tcgetattr(fd)
attr[3] = attr[3] & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, attr)
# Set up a file()-like interface to the child process
self.r = os.fdopen(fd, "r", 1)
self.w = os.fdopen(os.dup(fd), "w", 1)
def convert(self, line):
self.w.write(line + "\n")
return self.readline()
def readline(self):
return self.r.readline().rstrip()
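
# Illustrative sketch (not part of the original script): any line-oriented filter
# can be driven through UnbufferedLineConverter; 'cat' simply echoes its input, so
#   conv = UnbufferedLineConverter(['cat'])
#   conv.convert('hello')   # -> 'hello'
# DarwinSymbolizer below uses the same mechanism to talk to 'atos' interactively.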
class DarwinSymbolizer(Symbolizer):
def __init__(self, addr, binary, arch):
super(DarwinSymbolizer, self).__init__()
self.binary = binary
self.arch = arch
self.open_atos()
def open_atos(self):
if DEBUG:
print 'atos -o %s -arch %s' % (self.binary, self.arch)
cmdline = ['atos', '-o', self.binary, '-arch', self.arch]
self.atos = UnbufferedLineConverter(cmdline, close_stderr=True)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if self.binary != binary:
return None
atos_line = self.atos.convert('0x%x' % int(offset, 16))
while "got symbolicator for" in atos_line:
atos_line = self.atos.readline()
# A well-formed atos response looks like this:
# foo(type1, type2) (in object.name) (filename.cc:80)
match = re.match('^(.*) \(in (.*)\) \((.*:\d*)\)$', atos_line)
if DEBUG:
print 'atos_line: ', atos_line
if match:
function_name = match.group(1)
function_name = re.sub('\(.*?\)', '', function_name)
file_name = fix_filename(match.group(3))
return ['%s in %s %s' % (addr, function_name, file_name)]
else:
return ['%s in %s' % (addr, atos_line)]
# Chain several symbolizers so that if one symbolizer fails, we fall back
# to the next symbolizer in chain.
class ChainSymbolizer(Symbolizer):
def __init__(self, symbolizer_list):
super(ChainSymbolizer, self).__init__()
self.symbolizer_list = symbolizer_list
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
for symbolizer in self.symbolizer_list:
if symbolizer:
result = symbolizer.symbolize(addr, binary, offset)
if result:
return result
return None
def append_symbolizer(self, symbolizer):
self.symbolizer_list.append(symbolizer)
def BreakpadSymbolizerFactory(binary):
suffix = os.getenv('BREAKPAD_SUFFIX')
if suffix:
filename = binary + suffix
if os.access(filename, os.F_OK):
return BreakpadSymbolizer(filename)
return None
def SystemSymbolizerFactory(system, addr, binary, arch):
if system == 'Darwin':
return DarwinSymbolizer(addr, binary, arch)
elif system == 'Linux' or system == 'FreeBSD':
return Addr2LineSymbolizer(binary)
class BreakpadSymbolizer(Symbolizer):
def __init__(self, filename):
super(BreakpadSymbolizer, self).__init__()
self.filename = filename
lines = file(filename).readlines()
self.files = []
self.symbols = {}
self.address_list = []
self.addresses = {}
# MODULE mac x86_64 A7001116478B33F18FF9BEDE9F615F190 t
fragments = lines[0].rstrip().split()
self.arch = fragments[2]
self.debug_id = fragments[3]
self.binary = ' '.join(fragments[4:])
self.parse_lines(lines[1:])
def parse_lines(self, lines):
cur_function_addr = ''
for line in lines:
fragments = line.split()
if fragments[0] == 'FILE':
assert int(fragments[1]) == len(self.files)
self.files.append(' '.join(fragments[2:]))
elif fragments[0] == 'PUBLIC':
self.symbols[int(fragments[1], 16)] = ' '.join(fragments[3:])
elif fragments[0] in ['CFI', 'STACK']:
pass
elif fragments[0] == 'FUNC':
cur_function_addr = int(fragments[1], 16)
        if cur_function_addr not in self.symbols:
self.symbols[cur_function_addr] = ' '.join(fragments[4:])
else:
# Line starting with an address.
addr = int(fragments[0], 16)
self.address_list.append(addr)
# Tuple of symbol address, size, line, file number.
self.addresses[addr] = (cur_function_addr,
int(fragments[1], 16),
int(fragments[2]),
int(fragments[3]))
self.address_list.sort()
def get_sym_file_line(self, addr):
key = None
    if addr in self.addresses:
key = addr
else:
index = bisect.bisect_left(self.address_list, addr)
if index == 0:
return None
else:
key = self.address_list[index - 1]
sym_id, size, line_no, file_no = self.addresses[key]
symbol = self.symbols[sym_id]
filename = self.files[file_no]
if addr < key + size:
return symbol, filename, line_no
else:
return None
def symbolize(self, addr, binary, offset):
if self.binary != binary:
return None
res = self.get_sym_file_line(int(offset, 16))
if res:
function_name, file_name, line_no = res
result = ['%s in %s %s:%d' % (
addr, function_name, file_name, line_no)]
print result
return result
else:
return None
class SymbolizationLoop(object):
def __init__(self, binary_name_filter=None, dsym_hint_producer=None):
if sys.platform == 'win32':
# ASan on Windows uses dbghelp.dll to symbolize in-process, which works
# even in sandboxed processes. Nothing needs to be done here.
self.process_line = self.process_line_echo
else:
# Used by clients who may want to supply a different binary name.
# E.g. in Chrome several binaries may share a single .dSYM.
self.binary_name_filter = binary_name_filter
self.dsym_hint_producer = dsym_hint_producer
self.system = os.uname()[0]
if self.system not in ['Linux', 'Darwin', 'FreeBSD']:
raise Exception('Unknown system')
self.llvm_symbolizers = {}
self.last_llvm_symbolizer = None
self.dsym_hints = set([])
self.frame_no = 0
self.process_line = self.process_line_posix
def symbolize_address(self, addr, binary, offset, arch):
# On non-Darwin (i.e. on platforms without .dSYM debug info) always use
# a single symbolizer binary.
# On Darwin, if the dsym hint producer is present:
# 1. check whether we've seen this binary already; if so,
# use |llvm_symbolizers[binary]|, which has already loaded the debug
# info for this binary (might not be the case for
# |last_llvm_symbolizer|);
# 2. otherwise check if we've seen all the hints for this binary already;
# if so, reuse |last_llvm_symbolizer| which has the full set of hints;
# 3. otherwise create a new symbolizer and pass all currently known
# .dSYM hints to it.
result = None
if not force_system_symbolizer:
if not binary in self.llvm_symbolizers:
use_new_symbolizer = True
if self.system == 'Darwin' and self.dsym_hint_producer:
dsym_hints_for_binary = set(self.dsym_hint_producer(binary))
use_new_symbolizer = bool(dsym_hints_for_binary - self.dsym_hints)
self.dsym_hints |= dsym_hints_for_binary
if self.last_llvm_symbolizer and not use_new_symbolizer:
self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
else:
self.last_llvm_symbolizer = LLVMSymbolizerFactory(
self.system, arch, self.dsym_hints)
self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
# Use the chain of symbolizers:
# Breakpad symbolizer -> LLVM symbolizer -> addr2line/atos
# (fall back to next symbolizer if the previous one fails).
if not binary in symbolizers:
symbolizers[binary] = ChainSymbolizer(
[BreakpadSymbolizerFactory(binary), self.llvm_symbolizers[binary]])
result = symbolizers[binary].symbolize(addr, binary, offset)
else:
symbolizers[binary] = ChainSymbolizer([])
if result is None:
if not allow_system_symbolizer:
raise Exception('Failed to launch or use llvm-symbolizer.')
# Initialize system symbolizer only if other symbolizers failed.
symbolizers[binary].append_symbolizer(
SystemSymbolizerFactory(self.system, addr, binary, arch))
result = symbolizers[binary].symbolize(addr, binary, offset)
# The system symbolizer must produce some result.
assert result
return result
def get_symbolized_lines(self, symbolized_lines):
if not symbolized_lines:
return [self.current_line]
else:
result = []
for symbolized_frame in symbolized_lines:
result.append(' #%s %s' % (str(self.frame_no), symbolized_frame.rstrip()))
self.frame_no += 1
return result
def process_logfile(self):
self.frame_no = 0
for line in logfile:
processed = self.process_line(line)
print '\n'.join(processed)
def process_line_echo(self, line):
return [line.rstrip()]
def process_line_posix(self, line):
self.current_line = line.rstrip()
#0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45)
stack_trace_line_format = (
'^( *#([0-9]+) *)(0x[0-9a-f]+) *\((.*)\+(0x[0-9a-f]+)\)')
match = re.match(stack_trace_line_format, line)
if not match:
return [self.current_line]
if DEBUG:
print line
_, frameno_str, addr, binary, offset = match.groups()
arch = ""
# Arch can be embedded in the filename, e.g.: "libabc.dylib:x86_64h"
colon_pos = binary.rfind(":")
if colon_pos != -1:
maybe_arch = binary[colon_pos+1:]
if is_valid_arch(maybe_arch):
arch = maybe_arch
binary = binary[0:colon_pos]
if arch == "":
arch = guess_arch(addr)
if frameno_str == '0':
# Assume that frame #0 is the first frame of new stack trace.
self.frame_no = 0
original_binary = binary
if self.binary_name_filter:
binary = self.binary_name_filter(binary)
symbolized_line = self.symbolize_address(addr, binary, offset, arch)
if not symbolized_line:
if original_binary != binary:
symbolized_line = self.symbolize_address(addr, binary, offset, arch)
return self.get_symbolized_lines(symbolized_line)
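
# Illustrative sketch (not part of the original script): the loop can be driven
# programmatically as well as from main() below; the input line is a raw ASan
# frame and the result is one symbolized line per (possibly inlined) frame.
#   loop = SymbolizationLoop()
#   loop.process_line('    #0 0x4004f1 (/tmp/a.out+0x4f1)\n')
#   # -> ['    #0 0x4004f1 in main /tmp/a.cc:3'] (the exact output depends on the
#   #    available symbolizers and debug info)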
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='ASan symbolization script',
epilog='Example of use:\n'
'asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" '
'-s "$HOME/SymbolFiles" < asan.log')
parser.add_argument('path_to_cut', nargs='*',
help='pattern to be cut from the result file path ')
parser.add_argument('-d','--demangle', action='store_true',
help='demangle function names')
parser.add_argument('-s', metavar='SYSROOT',
help='set path to sysroot for sanitized binaries')
parser.add_argument('-c', metavar='CROSS_COMPILE',
help='set prefix for binutils')
parser.add_argument('-l','--logfile', default=sys.stdin,
type=argparse.FileType('r'),
help='set log file name to parse, default is stdin')
parser.add_argument('--force-system-symbolizer', action='store_true',
help='don\'t use llvm-symbolizer')
args = parser.parse_args()
if args.path_to_cut:
fix_filename_patterns = args.path_to_cut
if args.demangle:
demangle = True
if args.s:
binary_name_filter = sysroot_path_filter
sysroot_path = args.s
if args.c:
binutils_prefix = args.c
if args.logfile:
logfile = args.logfile
else:
logfile = sys.stdin
if args.force_system_symbolizer:
force_system_symbolizer = True
if force_system_symbolizer:
assert(allow_system_symbolizer)
loop = SymbolizationLoop(binary_name_filter)
loop.process_logfile()
|
mit
|
wazo-pbx/xivo-auth
|
wazo_auth/services/email.py
|
1
|
4200
|
# Copyright 2018-2020 The Wazo Authors (see the AUTHORS file)
# SPDX-License-Identifier: GPL-3.0-or-later
import time
import smtplib
from collections import namedtuple
from email import utils as email_utils
from email.mime.text import MIMEText
from wazo_auth.services.helpers import BaseService
EmailDestination = namedtuple('EmailDestination', ['name', 'address'])
# NOTE(sileht): the default socket timeout is None on Linux.
# Our HTTP client's timeout is 10s; since sending mail is currently synchronous,
# we have to be sure we return before those 10s elapse, so we set an SMTP timeout.
SMTP_TIMEOUT = 4
class EmailService(BaseService):
def __init__(self, dao, tenant_uuid, config, template_formatter):
super().__init__(dao, tenant_uuid)
self._formatter = template_formatter
self._smtp_host = config['smtp']['hostname']
self._smtp_port = config['smtp']['port']
self._confirmation_token_expiration = config['email_confirmation_expiration']
self._reset_token_expiration = config['password_reset_expiration']
self._confirmation_from = EmailDestination(
config['email_confirmation_from_name'],
config['email_confirmation_from_address'],
)
self._password_reset_from = EmailDestination(
config['password_reset_from_name'], config['password_reset_from_address']
)
def confirm(self, email_uuid):
self._dao.email.confirm(email_uuid)
def send_confirmation_email(
self, username, email_uuid, email_address, connection_params
):
template_context = dict(connection_params)
template_context.update(
{
'token': self._new_email_confirmation_token(email_uuid),
'username': username,
'email_uuid': email_uuid,
'email_address': email_address,
}
)
body = self._formatter.format_confirmation_email(template_context)
subject = self._formatter.format_confirmation_subject(template_context)
to = EmailDestination(username, email_address)
self._send_msg(to, self._confirmation_from, subject, body)
def send_reset_email(self, user_uuid, username, email_address, connection_params):
template_context = dict(connection_params)
template_context.update(
{
'token': self._new_email_reset_token(user_uuid),
'username': username,
'user_uuid': user_uuid,
'email_address': email_address,
}
)
body = self._formatter.format_password_reset_email(template_context)
subject = self._formatter.format_password_reset_subject(template_context)
to = EmailDestination(username, email_address)
        self._send_msg(to, self._password_reset_from, subject, body)
def _send_msg(self, to, from_, subject, body):
msg = MIMEText(body)
msg['To'] = email_utils.formataddr(to)
msg['From'] = email_utils.formataddr(from_)
msg['Subject'] = subject
with smtplib.SMTP(
self._smtp_host, self._smtp_port, timeout=SMTP_TIMEOUT
) as server:
server.sendmail(from_.address, [to.address], msg.as_string())
def _new_email_confirmation_token(self, email_uuid):
acl = 'auth.emails.{}.confirm.edit'.format(email_uuid)
return self._new_generic_token(self._confirmation_token_expiration, acl)
def _new_email_reset_token(self, user_uuid):
acl = 'auth.users.password.reset.{}.create'.format(user_uuid)
return self._new_generic_token(self._reset_token_expiration, acl)
def _new_generic_token(self, expiration, *acl):
t = time.time()
token_payload = {
'auth_id': 'wazo-auth',
'pbx_user_uuid': None,
'xivo_uuid': None,
'expire_t': t + expiration,
'issued_t': t,
'acl': acl,
'user_agent': 'wazo-auth-email-reset',
'remote_addr': '',
}
session_payload = {}
token_uuid, session_uuid = self._dao.token.create(
token_payload, session_payload
)
return token_uuid
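
# Illustrative note (not part of the original module): the helpers above issue
# narrowly scoped tokens.  For a user_uuid of "1234" the reset token carries the
# single ACL 'auth.users.password.reset.1234.create' and expires after the
# configured password_reset_expiration, while confirmation tokens carry
# 'auth.emails.<email_uuid>.confirm.edit' for email_confirmation_expiration seconds.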
|
gpl-3.0
|
kurtrwall/cfgov-refresh
|
_lib/wordpress_initiative_processor.py
|
3
|
1748
|
import sys
import json
import os.path
import requests
import dateutil.parser
def posts_at_url(url):
current_page = 1
max_page = sys.maxint
while current_page <= max_page:
url = os.path.expandvars(url)
resp = requests.get(url, params={'page':current_page, 'count': '-1'})
results = json.loads(resp.content)
current_page += 1
max_page = results['pages']
for p in results['posts']:
yield p
def documents(name, url, **kwargs):
for post in posts_at_url(url):
yield process_initiative(post)
def process_initiative(item):
del item['comments']
item['_id'] = item['slug']
custom_fields = item['custom_fields']
if custom_fields.get('related_office'):
item['related_office'] = \
custom_fields['related_office'][0]
    # build the list of subinitiative dicts for this initiative
item['subinitiatives'] = []
for x in xrange(0,6):
subinitiative = {}
fields = ['header', 'desc']
subinitiative_links = []
for field in fields:
field_name = 'subinitiative_%s_%s' % (field, str(x))
if field_name in custom_fields and custom_fields[field_name][0] != '':
subinitiative[field] = custom_fields[field_name][0]
for y in xrange(0,5):
link_name = 'subinitiative_links_%s_%s' % (str(x), str(y))
if link_name in custom_fields:
subinitiative_links.append(custom_fields[link_name])
if subinitiative_links:
subinitiative['links'] = subinitiative_links
if subinitiative:
item['subinitiatives'].append(subinitiative)
return item
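
# Illustrative sketch (not part of the original module; field values are made up):
# a post whose custom_fields contain
#   {'subinitiative_header_0': ['Research'],
#    'subinitiative_desc_0': ['What we study'],
#    'subinitiative_links_0_0': ['http://example.com/research']}
# comes out of process_initiative() with
#   item['subinitiatives'] == [{'header': 'Research',
#                               'desc': 'What we study',
#                               'links': [['http://example.com/research']]}]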
|
cc0-1.0
|
tunneln/CarnotKE
|
jyhton/Lib/test/test_posix.py
|
8
|
18006
|
"Test posix functions"
from test import test_support
# Skip these tests if there is no posix module.
posix = test_support.import_module('posix')
import errno
import sys
import time
import os
import pwd
import shutil
import stat
import tempfile
import unittest
import warnings
_DUMMY_SYMLINK = os.path.join(tempfile.gettempdir(),
test_support.TESTFN + '-dummy-symlink')
warnings.filterwarnings('ignore', '.* potential security risk .*',
RuntimeWarning)
class PosixTester(unittest.TestCase):
def setUp(self):
# create empty file
fp = open(test_support.TESTFN, 'w+')
fp.close()
self.teardown_files = [ test_support.TESTFN ]
def tearDown(self):
for teardown_file in self.teardown_files:
os.unlink(teardown_file)
def testNoArgFunctions(self):
# test posix functions which take no arguments and have
# no side-effects which we need to cleanup (e.g., fork, wait, abort)
NO_ARG_FUNCTIONS = [ "ctermid", "getcwd", "getcwdu", "uname",
"times", "getloadavg", "tmpnam",
"getegid", "geteuid", "getgid", "getgroups",
"getpid", "getpgrp", "getppid", "getuid",
]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "", DeprecationWarning)
for name in NO_ARG_FUNCTIONS:
posix_func = getattr(posix, name, None)
if posix_func is not None:
posix_func()
self.assertRaises(TypeError, posix_func, 1)
if hasattr(posix, 'getresuid'):
def test_getresuid(self):
user_ids = posix.getresuid()
self.assertEqual(len(user_ids), 3)
for val in user_ids:
self.assertGreaterEqual(val, 0)
if hasattr(posix, 'getresgid'):
def test_getresgid(self):
group_ids = posix.getresgid()
self.assertEqual(len(group_ids), 3)
for val in group_ids:
self.assertGreaterEqual(val, 0)
if hasattr(posix, 'setresuid'):
def test_setresuid(self):
current_user_ids = posix.getresuid()
self.assertIsNone(posix.setresuid(*current_user_ids))
# -1 means don't change that value.
self.assertIsNone(posix.setresuid(-1, -1, -1))
def test_setresuid_exception(self):
# Don't do this test if someone is silly enough to run us as root.
current_user_ids = posix.getresuid()
if 0 not in current_user_ids:
new_user_ids = (current_user_ids[0]+1, -1, -1)
self.assertRaises(OSError, posix.setresuid, *new_user_ids)
if hasattr(posix, 'setresgid'):
def test_setresgid(self):
current_group_ids = posix.getresgid()
self.assertIsNone(posix.setresgid(*current_group_ids))
# -1 means don't change that value.
self.assertIsNone(posix.setresgid(-1, -1, -1))
def test_setresgid_exception(self):
# Don't do this test if someone is silly enough to run us as root.
current_group_ids = posix.getresgid()
if 0 not in current_group_ids:
new_group_ids = (current_group_ids[0]+1, -1, -1)
self.assertRaises(OSError, posix.setresgid, *new_group_ids)
@unittest.skipUnless(hasattr(posix, 'initgroups'),
"test needs os.initgroups()")
def test_initgroups(self):
# It takes a string and an integer; check that it raises a TypeError
# for other argument lists.
self.assertRaises(TypeError, posix.initgroups)
self.assertRaises(TypeError, posix.initgroups, None)
self.assertRaises(TypeError, posix.initgroups, 3, "foo")
self.assertRaises(TypeError, posix.initgroups, "foo", 3, object())
# If a non-privileged user invokes it, it should fail with OSError
# EPERM.
if os.getuid() != 0:
name = pwd.getpwuid(posix.getuid()).pw_name
try:
posix.initgroups(name, 13)
except OSError as e:
self.assertEqual(e.errno, errno.EPERM)
else:
self.fail("Expected OSError to be raised by initgroups")
def test_statvfs(self):
if hasattr(posix, 'statvfs'):
self.assertTrue(posix.statvfs(os.curdir))
def test_fstatvfs(self):
if hasattr(posix, 'fstatvfs'):
fp = open(test_support.TESTFN)
try:
self.assertTrue(posix.fstatvfs(fp.fileno()))
finally:
fp.close()
def test_ftruncate(self):
if hasattr(posix, 'ftruncate'):
fp = open(test_support.TESTFN, 'w+')
try:
# we need to have some data to truncate
fp.write('test')
fp.flush()
posix.ftruncate(fp.fileno(), 0)
finally:
fp.close()
def test_dup(self):
if hasattr(posix, 'dup'):
fp = open(test_support.TESTFN)
try:
fd = posix.dup(fp.fileno())
self.assertIsInstance(fd, int)
os.close(fd)
finally:
fp.close()
def test_confstr(self):
if hasattr(posix, 'confstr'):
self.assertRaises(ValueError, posix.confstr, "CS_garbage")
self.assertEqual(len(posix.confstr("CS_PATH")) > 0, True)
def test_dup2(self):
if hasattr(posix, 'dup2'):
fp1 = open(test_support.TESTFN)
fp2 = open(test_support.TESTFN)
try:
posix.dup2(fp1.fileno(), fp2.fileno())
finally:
fp1.close()
fp2.close()
def fdopen_helper(self, *args):
fd = os.open(test_support.TESTFN, os.O_RDONLY)
fp2 = posix.fdopen(fd, *args)
fp2.close()
def test_fdopen(self):
if hasattr(posix, 'fdopen'):
self.fdopen_helper()
self.fdopen_helper('r')
self.fdopen_helper('r', 100)
def test_osexlock(self):
if hasattr(posix, "O_EXLOCK"):
fd = os.open(test_support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, test_support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
if hasattr(posix, "O_SHLOCK"):
fd = os.open(test_support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, test_support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
def test_osshlock(self):
if hasattr(posix, "O_SHLOCK"):
fd1 = os.open(test_support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
fd2 = os.open(test_support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
os.close(fd2)
os.close(fd1)
if hasattr(posix, "O_EXLOCK"):
fd = os.open(test_support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, test_support.TESTFN,
os.O_RDONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
def test_fstat(self):
if hasattr(posix, 'fstat'):
fp = open(test_support.TESTFN)
try:
self.assertTrue(posix.fstat(fp.fileno()))
finally:
fp.close()
def test_stat(self):
if hasattr(posix, 'stat'):
self.assertTrue(posix.stat(test_support.TESTFN))
def test_stat_tuple(self):
self.assertEqual(tuple(posix.stat(".")), posix.stat("."))
def _test_all_chown_common(self, chown_func, first_param):
"""Common code for chown, fchown and lchown tests."""
if os.getuid() == 0:
try:
# Many linux distros have a nfsnobody user as MAX_UID-2
# that makes a good test case for signedness issues.
# http://bugs.python.org/issue1747858
# This part of the test only runs when run as root.
# Only scary people run their tests as root.
ent = pwd.getpwnam('nfsnobody')
chown_func(first_param, ent.pw_uid, ent.pw_gid)
except KeyError:
pass
else:
# non-root cannot chown to root, raises OSError
self.assertRaises(OSError, chown_func,
first_param, 0, 0)
# test a successful chown call
chown_func(first_param, os.getuid(), os.getgid())
@unittest.skipUnless(hasattr(posix, 'chown'), "test needs os.chown()")
def test_chown(self):
# raise an OSError if the file does not exist
os.unlink(test_support.TESTFN)
self.assertRaises(OSError, posix.chown, test_support.TESTFN, -1, -1)
# re-create the file
open(test_support.TESTFN, 'w').close()
self._test_all_chown_common(posix.chown, test_support.TESTFN)
@unittest.skipUnless(hasattr(posix, 'fchown'), "test needs os.fchown()")
def test_fchown(self):
os.unlink(test_support.TESTFN)
# re-create the file
test_file = open(test_support.TESTFN, 'w')
try:
fd = test_file.fileno()
self._test_all_chown_common(posix.fchown, fd)
finally:
test_file.close()
@unittest.skipUnless(hasattr(posix, 'lchown'), "test needs os.lchown()")
def test_lchown(self):
os.unlink(test_support.TESTFN)
# create a symlink
os.symlink(_DUMMY_SYMLINK, test_support.TESTFN)
self._test_all_chown_common(posix.lchown, test_support.TESTFN)
def test_chdir(self):
if hasattr(posix, 'chdir'):
posix.chdir(os.curdir)
self.assertRaises(OSError, posix.chdir, test_support.TESTFN)
def test_lsdir(self):
if hasattr(posix, 'lsdir'):
self.assertIn(test_support.TESTFN, posix.lsdir(os.curdir))
def test_access(self):
if hasattr(posix, 'access'):
self.assertTrue(posix.access(test_support.TESTFN, os.R_OK))
def test_umask(self):
if hasattr(posix, 'umask'):
old_mask = posix.umask(0)
self.assertIsInstance(old_mask, int)
posix.umask(old_mask)
def test_strerror(self):
if hasattr(posix, 'strerror'):
self.assertTrue(posix.strerror(0))
def test_pipe(self):
if hasattr(posix, 'pipe'):
reader, writer = posix.pipe()
os.close(reader)
os.close(writer)
def test_tempnam(self):
if hasattr(posix, 'tempnam'):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "tempnam", DeprecationWarning)
self.assertTrue(posix.tempnam())
self.assertTrue(posix.tempnam(os.curdir))
self.assertTrue(posix.tempnam(os.curdir, 'blah'))
def test_tmpfile(self):
if hasattr(posix, 'tmpfile'):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "tmpfile", DeprecationWarning)
fp = posix.tmpfile()
fp.close()
def test_utime(self):
if hasattr(posix, 'utime'):
now = time.time()
posix.utime(test_support.TESTFN, None)
self.assertRaises(TypeError, posix.utime, test_support.TESTFN, (None, None))
self.assertRaises(TypeError, posix.utime, test_support.TESTFN, (now, None))
self.assertRaises(TypeError, posix.utime, test_support.TESTFN, (None, now))
posix.utime(test_support.TESTFN, (int(now), int(now)))
posix.utime(test_support.TESTFN, (now, now))
def _test_chflags_regular_file(self, chflags_func, target_file):
st = os.stat(target_file)
self.assertTrue(hasattr(st, 'st_flags'))
chflags_func(target_file, st.st_flags | stat.UF_IMMUTABLE)
try:
new_st = os.stat(target_file)
self.assertEqual(st.st_flags | stat.UF_IMMUTABLE, new_st.st_flags)
try:
fd = open(target_file, 'w+')
except IOError as e:
self.assertEqual(e.errno, errno.EPERM)
finally:
posix.chflags(target_file, st.st_flags)
@unittest.skipUnless(hasattr(posix, 'chflags'), 'test needs os.chflags()')
def test_chflags(self):
self._test_chflags_regular_file(posix.chflags, test_support.TESTFN)
@unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
def test_lchflags_regular_file(self):
self._test_chflags_regular_file(posix.lchflags, test_support.TESTFN)
@unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
def test_lchflags_symlink(self):
testfn_st = os.stat(test_support.TESTFN)
self.assertTrue(hasattr(testfn_st, 'st_flags'))
os.symlink(test_support.TESTFN, _DUMMY_SYMLINK)
self.teardown_files.append(_DUMMY_SYMLINK)
dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
posix.lchflags(_DUMMY_SYMLINK,
dummy_symlink_st.st_flags | stat.UF_IMMUTABLE)
try:
new_testfn_st = os.stat(test_support.TESTFN)
new_dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
self.assertEqual(testfn_st.st_flags, new_testfn_st.st_flags)
self.assertEqual(dummy_symlink_st.st_flags | stat.UF_IMMUTABLE,
new_dummy_symlink_st.st_flags)
finally:
posix.lchflags(_DUMMY_SYMLINK, dummy_symlink_st.st_flags)
@unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
def test_getcwd_long_pathnames(self):
if hasattr(posix, 'getcwd'):
dirname = 'getcwd-test-directory-0123456789abcdef-01234567890abcdef'
curdir = os.getcwd()
base_path = os.path.abspath(test_support.TESTFN) + '.getcwd'
try:
os.mkdir(base_path)
os.chdir(base_path)
except:
# Just returning nothing instead of the SkipTest exception,
# because the test results in Error in that case.
# Is that ok?
# raise unittest.SkipTest, "cannot create directory for testing"
return
try:
def _create_and_do_getcwd(dirname, current_path_length = 0):
try:
os.mkdir(dirname)
except:
raise unittest.SkipTest, "mkdir cannot create directory sufficiently deep for getcwd test"
os.chdir(dirname)
try:
os.getcwd()
if current_path_length < 4099:
_create_and_do_getcwd(dirname, current_path_length + len(dirname) + 1)
except OSError as e:
expected_errno = errno.ENAMETOOLONG
if 'sunos' in sys.platform or 'openbsd' in sys.platform:
expected_errno = errno.ERANGE # Issue 9185
self.assertEqual(e.errno, expected_errno)
finally:
os.chdir('..')
os.rmdir(dirname)
_create_and_do_getcwd(dirname)
finally:
os.chdir(curdir)
shutil.rmtree(base_path)
@unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
@unittest.skipUnless(hasattr(os, 'getegid'), "test needs os.getegid()")
def test_getgroups(self):
with os.popen('id -G') as idg:
groups = idg.read().strip()
if not groups:
raise unittest.SkipTest("need working 'id -G'")
# 'id -G' and 'os.getgroups()' should return the same
# groups, ignoring order and duplicates.
# #10822 - it is implementation defined whether posix.getgroups()
# includes the effective gid so we include it anyway, since id -G does
self.assertEqual(
set([int(x) for x in groups.split()]),
set(posix.getgroups() + [posix.getegid()]))
class PosixGroupsTester(unittest.TestCase):
def setUp(self):
if posix.getuid() != 0:
raise unittest.SkipTest("not enough privileges")
if not hasattr(posix, 'getgroups'):
raise unittest.SkipTest("need posix.getgroups")
if sys.platform == 'darwin':
raise unittest.SkipTest("getgroups(2) is broken on OSX")
self.saved_groups = posix.getgroups()
def tearDown(self):
if hasattr(posix, 'setgroups'):
posix.setgroups(self.saved_groups)
elif hasattr(posix, 'initgroups'):
name = pwd.getpwuid(posix.getuid()).pw_name
posix.initgroups(name, self.saved_groups[0])
@unittest.skipUnless(hasattr(posix, 'initgroups'),
"test needs posix.initgroups()")
def test_initgroups(self):
# find missing group
g = max(self.saved_groups) + 1
name = pwd.getpwuid(posix.getuid()).pw_name
posix.initgroups(name, g)
self.assertIn(g, posix.getgroups())
@unittest.skipUnless(hasattr(posix, 'setgroups'),
"test needs posix.setgroups()")
def test_setgroups(self):
for groups in [[0], range(16)]:
posix.setgroups(groups)
self.assertListEqual(groups, posix.getgroups())
def test_main():
test_support.run_unittest(PosixTester, PosixGroupsTester)
if __name__ == '__main__':
test_main()
|
apache-2.0
|
allevato/swift
|
utils/pass-pipeline/src/pass_pipeline_library.py
|
77
|
2942
|
import pass_pipeline as ppipe
import passes as p
def simplifycfg_silcombine_passlist():
return ppipe.PassList([
p.SimplifyCFG,
p.SILCombine,
p.SimplifyCFG,
])
def highlevel_loopopt_passlist():
return ppipe.PassList([
p.LowerAggregateInstrs,
p.SILCombine,
p.SROA,
p.Mem2Reg,
p.DCE,
p.SILCombine,
simplifycfg_silcombine_passlist(),
p.LoopRotate,
p.DCE,
p.CSE,
p.SILCombine,
p.SimplifyCFG,
p.ABCOpt,
p.DCE,
p.COWArrayOpts,
p.DCE,
p.SwiftArrayOpts,
])
def lowlevel_loopopt_passlist():
return ppipe.PassList([
p.LICM,
p.DCE,
p.CSE,
p.SILCombine,
p.SimplifyCFG,
])
def inliner_for_optlevel(optlevel):
if optlevel == 'high':
return p.EarlyInliner
elif optlevel == 'mid':
return p.PerfInliner
elif optlevel == 'low':
return p.LateInliner
else:
raise RuntimeError('Unknown opt level')
def ssapass_passlist(optlevel):
return ppipe.PassList([
simplifycfg_silcombine_passlist(),
p.AllocBoxToStack,
p.CopyForwarding,
p.LowerAggregateInstrs,
p.SILCombine,
p.SROA,
p.Mem2Reg,
p.PerformanceConstantPropagation,
p.DCE,
p.CSE,
p.SILCombine,
simplifycfg_silcombine_passlist(),
p.GlobalLoadStoreOpts,
# Need to add proper argument here
p.CodeMotion,
p.GlobalARCOpts,
p.SpeculativeDevirtualizer,
p.SILLinker,
inliner_for_optlevel(optlevel),
p.SimplifyCFG,
p.CodeMotion,
p.GlobalARCOpts,
])
def lower_passlist():
return ppipe.PassList([
p.DeadFunctionElimination,
p.DeadObjectElimination,
p.GlobalOpt,
p.CapturePropagation,
p.ClosureSpecializer,
p.SpeculativeDevirtualizer,
p.FunctionSignatureOpts,
])
def normal_passpipelines():
result = []
x = ppipe.PassPipeline('HighLevel', {'name': 'run_n_times', 'count': 2})
x.add_pass(ssapass_passlist('high'))
result.append(x)
x = ppipe.PassPipeline('EarlyLoopOpt', {'name': 'run_n_times', 'count': 1})
x.add_pass(highlevel_loopopt_passlist())
result.append(x)
x = ppipe.PassPipeline('MidLevelOpt', {'name': 'run_n_times', 'count': 2})
x.add_pass(ssapass_passlist('mid'))
result.append(x)
x = ppipe.PassPipeline('Lower', {'name': 'run_to_fixed_point'})
x.add_pass(lower_passlist())
result.append(x)
x = ppipe.PassPipeline('LowLevel', {'name': 'run_n_times', 'count': 1})
x.add_pass(ssapass_passlist('low'))
result.append(x)
x = ppipe.PassPipeline('LateLoopOpt', {'name': 'run_n_times', 'count': 1})
x.add_pass([lowlevel_loopopt_passlist(), p.DeadFunctionElimination])
result.append(x)
return result
|
apache-2.0
|
gcode-mirror/audacity
|
lib-src/lv2/lilv/waflib/Options.py
|
330
|
5458
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,tempfile,optparse,sys,re
from waflib import Logs,Utils,Context
cmds='distclean configure build install clean uninstall check dist distcheck'.split()
options={}
commands=[]
lockfile=os.environ.get('WAFLOCK','.lock-waf_%s_build'%sys.platform)
try:cache_global=os.path.abspath(os.environ['WAFCACHE'])
except KeyError:cache_global=''
platform=Utils.unversioned_sys_platform()
class opt_parser(optparse.OptionParser):
def __init__(self,ctx):
optparse.OptionParser.__init__(self,conflict_handler="resolve",version='waf %s (%s)'%(Context.WAFVERSION,Context.WAFREVISION))
self.formatter.width=Logs.get_term_cols()
p=self.add_option
self.ctx=ctx
jobs=ctx.jobs()
p('-j','--jobs',dest='jobs',default=jobs,type='int',help='amount of parallel jobs (%r)'%jobs)
p('-k','--keep',dest='keep',default=0,action='count',help='keep running happily even if errors are found')
p('-v','--verbose',dest='verbose',default=0,action='count',help='verbosity level -v -vv or -vvv [default: 0]')
p('--nocache',dest='nocache',default=False,action='store_true',help='ignore the WAFCACHE (if set)')
p('--zones',dest='zones',default='',action='store',help='debugging zones (task_gen, deps, tasks, etc)')
gr=optparse.OptionGroup(self,'configure options')
self.add_option_group(gr)
gr.add_option('-o','--out',action='store',default='',help='build dir for the project',dest='out')
gr.add_option('-t','--top',action='store',default='',help='src dir for the project',dest='top')
default_prefix=os.environ.get('PREFIX')
if not default_prefix:
if platform=='win32':
d=tempfile.gettempdir()
default_prefix=d[0].upper()+d[1:]
else:
default_prefix='/usr/local/'
gr.add_option('--prefix',dest='prefix',default=default_prefix,help='installation prefix [default: %r]'%default_prefix)
gr.add_option('--download',dest='download',default=False,action='store_true',help='try to download the tools if missing')
gr=optparse.OptionGroup(self,'build and install options')
self.add_option_group(gr)
gr.add_option('-p','--progress',dest='progress_bar',default=0,action='count',help='-p: progress bar; -pp: ide output')
gr.add_option('--targets',dest='targets',default='',action='store',help='task generators, e.g. "target1,target2"')
gr=optparse.OptionGroup(self,'step options')
self.add_option_group(gr)
gr.add_option('--files',dest='files',default='',action='store',help='files to process, by regexp, e.g. "*/main.c,*/test/main.o"')
default_destdir=os.environ.get('DESTDIR','')
gr=optparse.OptionGroup(self,'install/uninstall options')
self.add_option_group(gr)
gr.add_option('--destdir',help='installation root [default: %r]'%default_destdir,default=default_destdir,dest='destdir')
gr.add_option('-f','--force',dest='force',default=False,action='store_true',help='force file installation')
gr.add_option('--distcheck-args',help='arguments to pass to distcheck',default=None,action='store')
def get_usage(self):
cmds_str={}
for cls in Context.classes:
if not cls.cmd or cls.cmd=='options':
continue
s=cls.__doc__ or''
cmds_str[cls.cmd]=s
if Context.g_module:
for(k,v)in Context.g_module.__dict__.items():
if k in['options','init','shutdown']:
continue
if type(v)is type(Context.create_context):
if v.__doc__ and not k.startswith('_'):
cmds_str[k]=v.__doc__
just=0
for k in cmds_str:
just=max(just,len(k))
lst=[' %s: %s'%(k.ljust(just),v)for(k,v)in cmds_str.items()]
lst.sort()
ret='\n'.join(lst)
return'''waf [commands] [options]
Main commands (example: ./waf build -j4)
%s
'''%ret
class OptionsContext(Context.Context):
cmd='options'
fun='options'
def __init__(self,**kw):
super(OptionsContext,self).__init__(**kw)
self.parser=opt_parser(self)
self.option_groups={}
def jobs(self):
count=int(os.environ.get('JOBS',0))
if count<1:
if'NUMBER_OF_PROCESSORS'in os.environ:
count=int(os.environ.get('NUMBER_OF_PROCESSORS',1))
else:
if hasattr(os,'sysconf_names'):
if'SC_NPROCESSORS_ONLN'in os.sysconf_names:
count=int(os.sysconf('SC_NPROCESSORS_ONLN'))
elif'SC_NPROCESSORS_CONF'in os.sysconf_names:
count=int(os.sysconf('SC_NPROCESSORS_CONF'))
if not count and os.name not in('nt','java'):
try:
tmp=self.cmd_and_log(['sysctl','-n','hw.ncpu'],quiet=0)
except Exception:
pass
else:
if re.match('^[0-9]+$',tmp):
count=int(tmp)
if count<1:
count=1
elif count>1024:
count=1024
return count
def add_option(self,*k,**kw):
return self.parser.add_option(*k,**kw)
def add_option_group(self,*k,**kw):
try:
gr=self.option_groups[k[0]]
except KeyError:
gr=self.parser.add_option_group(*k,**kw)
self.option_groups[k[0]]=gr
return gr
def get_option_group(self,opt_str):
try:
return self.option_groups[opt_str]
except KeyError:
for group in self.parser.option_groups:
if group.title==opt_str:
return group
return None
def parse_args(self,_args=None):
global options,commands
(options,leftover_args)=self.parser.parse_args(args=_args)
commands=leftover_args
if options.destdir:
options.destdir=os.path.abspath(os.path.expanduser(options.destdir))
if options.verbose>=1:
self.load('errcheck')
def execute(self):
super(OptionsContext,self).execute()
self.parse_args()
|
gpl-2.0
|
Rangozhang/Twitch-plays-LSD-neural-net
|
classify.py
|
6
|
2510
|
################################################################################
# INIT
################################################################################
import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
from time import time, strftime, localtime
from subprocess import Popen
import sys
import os
import importlib
import warnings
import string
from glob import glob
import cPickle
import platform
import scipy.misc
import utils
# warnings.filterwarnings('ignore', '.*topo.*')
if len(sys.argv) < 2:
print "Usage: %s <config_path>"%os.path.basename(__file__)
cfg_path = "models/classification.py"
else: cfg_path = sys.argv[1]
cfg_name = cfg_path.split("/")[-1][:-3]
print "Model:", cfg_name
cfg = importlib.import_module("models.%s" % cfg_name)
expid = "%s-%s-%s" % (cfg_name, platform.node(), strftime("%Y%m%d-%H%M%S", localtime()))
print "expid:", expid
################################################################################
# BUILD & COMPILE
################################################################################
print "Building"
model = cfg.build_model()
pretrained_params = cfg.pretrained_params
nn.layers.set_all_param_values(model.out, pretrained_params)
all_layers = nn.layers.get_all_layers(model.out)
num_params = nn.layers.count_params(model.out)
print " number of parameters: %d" % num_params
print " layer output shapes:"
for layer in all_layers:
name = string.ljust(layer.__class__.__name__, 32)
print " %s %s" % (name, layer.get_output_shape(),)
x = nn.utils.shared_empty(dim=len(model.input.get_output_shape()))
x.set_value(cfg.image.astype("float32").reshape((1,)+cfg.image.shape))
all_params = [x,]
givens = {
model.input.input_var: x-cfg.mean_img
}
print "Compiling"
compute_output = theano.function([], model.out.get_output(deterministic=True),
givens=givens, on_unused_input='ignore')
################################################################################
# TRAIN
################################################################################
output = np.asarray(compute_output())
print output.shape
print np.argmax(output)
print np.argmax(output,axis=1)
print output[0,np.argmax(output)]
print cfg.class_str[np.argmax(output)]
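# Invocation sketch, mirroring the usage message above (the config path shown is
# the built-in fallback; any models/<name>.py works):
#   python classify.py models/classification.py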
|
mit
|
yoseforb/lollypop
|
src/fullscreen.py
|
1
|
9247
|
#!/usr/bin/python
# Copyright (c) 2014-2015 Cedric Bellegarde <cedric.bellegarde@adishatz.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk, Gdk, GLib
from cgi import escape
from gettext import gettext as _
from lollypop.define import Lp, ArtSize, Type
from lollypop.utils import seconds_to_string
# Fullscreen window showing the current track context
class FullScreen(Gtk.Window):
"""
Init window and set transient for parent
@param: parent as Gtk.window
"""
def __init__(self, parent):
Gtk.Window.__init__(self)
self._timeout = None
self._seeking = False
self._signal1_id = None
self._signal2_id = None
self.set_transient_for(parent)
self.set_skip_taskbar_hint(True)
self.set_skip_pager_hint(True)
builder = Gtk.Builder()
builder.add_from_resource('/org/gnome/Lollypop/FullScreen.ui')
main_widget = builder.get_object('fs')
self.add(main_widget)
self._prev_btn = builder.get_object('prev_btn')
self._prev_btn.connect('clicked', self._on_prev_btn_clicked)
self._play_btn = builder.get_object('play_btn')
self._play_btn.connect('clicked', self._on_play_btn_clicked)
self._next_btn = builder.get_object('next_btn')
self._next_btn.connect('clicked', self._on_next_btn_clicked)
self._play_image = builder.get_object('play_image')
self._pause_image = builder.get_object('pause_image')
close_btn = builder.get_object('close_btn')
close_btn.connect('clicked', self._destroy)
self._cover = builder.get_object('cover')
self._title = builder.get_object('title')
self._artist = builder.get_object('artist')
self._album = builder.get_object('album')
self._next = builder.get_object('next')
self._next_cover = builder.get_object('next_cover')
self._progress = builder.get_object('progress_scale')
self._progress.connect('button-release-event',
self._on_progress_release_button)
self._progress.connect('button-press-event',
self._on_progress_press_button)
self._timelabel = builder.get_object('playback')
self._total_time_label = builder.get_object('duration')
self.connect('key-release-event', self._on_key_release_event)
"""
Init signals, set color and go party mode if nothing is playing
"""
def do_show(self):
is_playing = Lp.player.is_playing()
self._signal1_id = Lp.player.connect('current-changed',
self._on_current_changed)
self._signal2_id = Lp.player.connect('status-changed',
self._on_status_changed)
if is_playing:
self._change_play_btn_status(self._pause_image, _('Pause'))
self._on_current_changed(Lp.player)
else:
Lp.player.set_party(True)
if not self._timeout:
self._timeout = GLib.timeout_add(1000, self._update_position)
Gtk.Window.do_show(self)
self._update_position()
self.fullscreen()
"""
Remove signals and unset color
"""
def do_hide(self):
if self._signal1_id:
Lp.player.disconnect(self._signal1_id)
self._signal1_id = None
if self._signal2_id:
Lp.player.disconnect(self._signal2_id)
self._signal2_id = None
if self._timeout:
GLib.source_remove(self._timeout)
self._timeout = None
#######################
# PRIVATE #
#######################
"""
Update View for current track
- Cover
- artist/title
- reset progress bar
- update time/total labels
@param player as Player
"""
def _on_current_changed(self, player):
if player.current_track.id is None:
pass # Impossible as we force play on show
else:
if Lp.player.current_track.id == Type.RADIOS:
self._prev_btn.set_sensitive(False)
self._next_btn.set_sensitive(False)
self._timelabel.hide()
self._total_time_label.hide()
self._progress.hide()
cover = Lp.art.get_radio(player.current_track.artist,
ArtSize.MONSTER)
else:
self._prev_btn.set_sensitive(True)
self._next_btn.set_sensitive(True)
self._timelabel.show()
self._total_time_label.show()
self._progress.show()
cover = Lp.art.get_album(player.current_track.album_id,
ArtSize.MONSTER)
self._cover.set_from_pixbuf(cover)
del cover
album = player.current_track.album
if player.current_track.year != '':
album += " (%s)" % player.current_track.year
self._title.set_text(player.current_track.title)
self._artist.set_text(player.current_track.artist)
self._album.set_text(album)
next_cover = Lp.art.get_album(player.next_track.album_id,
ArtSize.MEDIUM)
self._next_cover.set_from_pixbuf(next_cover)
del next_cover
self._next.set_markup("<b>%s</b> - %s" %
(escape(player.next_track.artist),
escape(player.next_track.title)))
self._progress.set_value(1.0)
self._progress.set_range(0.0, player.current_track.duration * 60)
self._total_time_label.set_text(
seconds_to_string(player.current_track.duration))
self._timelabel.set_text("0:00")
"""
Destroy window if Esc
@param widget as Gtk.Widget
@param event as Gdk.event
"""
def _on_key_release_event(self, widget, event):
if event.keyval == Gdk.KEY_Escape:
self.destroy()
"""
Go to prev track
@param widget as Gtk.Button
"""
def _on_prev_btn_clicked(self, widget):
Lp.player.prev()
"""
Play/pause
@param widget as Gtk.Button
"""
def _on_play_btn_clicked(self, widget):
if Lp.player.is_playing():
Lp.player.pause()
widget.set_image(self._play_image)
else:
Lp.player.play()
widget.set_image(self._pause_image)
"""
Go to next track
@param widget as Gtk.Button
"""
def _on_next_btn_clicked(self, widget):
Lp.player.next()
"""
Update buttons and progress bar
@param obj as unused
"""
def _on_status_changed(self, obj):
is_playing = Lp.player.is_playing()
if is_playing and not self._timeout:
self._timeout = GLib.timeout_add(1000, self._update_position)
self._change_play_btn_status(self._pause_image, _("Pause"))
elif not is_playing and self._timeout:
GLib.source_remove(self._timeout)
self._timeout = None
self._change_play_btn_status(self._play_image, _("Play"))
"""
On press, mark player as seeking
@param unused
"""
def _on_progress_press_button(self, scale, data):
self._seeking = True
"""
Callback for scale release button
Seek player to scale value
@param scale as Gtk.Scale, data as unused
"""
def _on_progress_release_button(self, scale, data):
value = scale.get_value()
self._seeking = False
self._update_position(value)
Lp.player.seek(value/60)
"""
Update play button with image and status as tooltip
@param image as Gtk.Image
@param status as str
"""
def _change_play_btn_status(self, image, status):
self._play_btn.set_image(image)
self._play_btn.set_tooltip_text(status)
"""
Update progress bar position
@param value as int
"""
def _update_position(self, value=None):
if not self._seeking and self._progress.is_visible():
if value is None:
value = Lp.player.get_position_in_track()/1000000
self._progress.set_value(value)
self._timelabel.set_text(seconds_to_string(value/60))
return True
"""
Destroy self
@param widget as Gtk.Button
"""
def _destroy(self, widget):
self.destroy()
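# Usage sketch (illustrative; 'app_window' stands in for the main Lollypop window):
#   fullscreen = FullScreen(app_window)
#   fullscreen.show()  # triggers do_show(), which connects the player signals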
|
gpl-3.0
|
elfnor/sverchok
|
nodes/layout/wifi_in.py
|
3
|
4068
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import StringProperty, BoolProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import multi_socket
# Warning: changing this node without modifying the update system might break functionality
# bl_idname and var_name are used by the update system
def name_seq():
for i in range(ord('a'),ord('z')):
yield chr(i)
for i in range(1000):
yield "a.".join(str(i))
class WifiInNode(bpy.types.Node, SverchCustomTreeNode):
''' Wifi Input '''
bl_idname = 'WifiInNode'
bl_label = 'Wifi in'
bl_icon = 'OUTLINER_OB_EMPTY'
def change_var_name(self, context):
# no change
if self.base_name == self.var_name:
return
ng = self.id_data
wifi_in_list = [node for node in ng.nodes
if node.bl_idname == 'WifiInNode']
# verify that set var name isn't used before, if it is reset to previous
for node in wifi_in_list:
if node.name != self.name:
if node.var_name == self.var_name:
self.var_name = self.base_name
return
# name is unique, store it.
self.base_name = self.var_name
if self.inputs: # if we have inputs, rename
for i, s in enumerate(self.inputs):
s.name = "{0}[{1}]".format(self.var_name, i)
else: #create first socket
self.inputs.new('StringsSocket', self.var_name+"[0]")
var_name = StringProperty(name='var_name', update=change_var_name)
base_name = StringProperty(default='')
multi_socket_type = StringProperty(default='StringsSocket')
def draw_buttons(self, context, layout):
layout.prop(self, "var_name", text="Var")
def sv_init(self, context):
ng = self.id_data
var_set = {node.var_name for node in ng.nodes
if node.bl_idname == 'WifiInNode'}
for name in name_seq():
if not name in var_set:
self.var_name = name
return
def copy(self, node):
ng = self.id_data
var_set = {node.var_name for node in ng.nodes
if node.bl_idname == 'WifiInNode'}
for name in name_seq():
if not name in var_set:
self.var_name = name
return
def gen_var_name(self):
#from socket
if self.inputs:
n = self.inputs[0].name.rstrip("[0]")
self.base_name = n
self.var_name = n
else:
ng = self.id_data
var_set = {node.var_name for node in ng.nodes
if node.bl_idname == 'WifiInNode'}
for name in name_seq():
if not name in var_set:
self.var_name = name
return
def update(self):
# ugly hack to get var name sometimes with old layouts
if not self.var_name:
self.gen_var_name()
self.base_name = self.var_name
multi_socket(self, min=1, breck=True)
def register():
bpy.utils.register_class(WifiInNode)
def unregister():
bpy.utils.unregister_class(WifiInNode)
|
gpl-3.0
|
daviwesley/Empire
|
lib/stagers/hop_php.py
|
22
|
2123
|
from lib.common import helpers
class Stager:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Launcher',
'Author': ['@harmj0y'],
'Description': ('Generates a hop.php redirector for an Empire listener.'),
'Comments': [
''
]
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Listener' : {
'Description' : 'Listener to generate stager for.',
'Required' : True,
'Value' : ''
},
'OutFile' : {
'Description' : 'File to output php redirector to.',
'Required' : True,
'Value' : '/tmp/hop.php'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# extract all of our options
listenerID = self.options['Listener']['Value']
# extract out the listener config information
listener = self.mainMenu.listeners.get_listener(listenerID)
if listener:
# extract out the listener config information
name = listener[1]
host = listener[2]
port = listener[3]
certPath = listener[4]
profile = listener[8]
listenerType = listener[-2]
redirectTarget = listener[-1]
resources = profile.split("|")[0]
code = self.mainMenu.stagers.generate_hop_php(host, resources)
return code
else:
print helpers.color("[!] Error in hop.php generation.")
return ""
|
bsd-3-clause
|
darmaa/odoo
|
addons/mrp/report/bom_structure.py
|
66
|
2475
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class bom_structure(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(bom_structure, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'get_children':self.get_children,
})
def get_children(self, object, level=0):
result = []
def _get_rec(object, level):
for l in object:
res = {}
res['name'] = l.name
res['pname'] = l.product_id.name
res['pcode'] = l.product_id.default_code
res['pqty'] = l.product_qty
res['uname'] = l.product_uom.name
res['code'] = l.code
res['level'] = level
result.append(res)
if l.child_complete_ids:
if level<6:
level += 1
_get_rec(l.child_complete_ids,level)
if level>0 and level<6:
level -= 1
return result
children = _get_rec(object,level)
return children
class report_lunchorder(osv.AbstractModel):
_name = 'report.mrp.report_mrpbomstructure'
_inherit = 'report.abstract_report'
    _template = 'mrp.report_mrpbomstructure'
_wrapped_report_class = bom_structure
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
vfuse/nixstatsagent
|
nixstatsagent/plugins/mdstat.py
|
2
|
1272
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import plugins
import json
class Plugin(plugins.BasePlugin):
__name__ = 'mdstat'
def run(self, config):
'''
Monitor software raid status using mdadm
pip install mdstat
'''
data = os.popen('sudo mdjson').read()
results = {}
try:
data = json.loads(data)
except Exception:
return "Could not load mdstat data"
for key, value in data['devices'].items():
device = {}
if(value['active'] is not True):
device['active'] = 0
else:
device['active'] = 1
if(value['read_only'] is not False):
device['read_only'] = 1
else:
device['read_only'] = 0
if(value['resync'] is not None):
device['resync'] = 1
else:
device['resync'] = 0
device['faulty'] = 0
for disk, diskvalue in value['disks'].items():
if diskvalue['faulty'] is not False:
device['faulty'] = device['faulty'] + 1
results[key] = device
return results
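# Illustrative shape of the dict returned by run() (a sketch, not captured output;
# device names depend on the local mdjson data):
#   {'md0': {'active': 1, 'read_only': 0, 'resync': 0, 'faulty': 0}}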
if __name__ == '__main__':
Plugin().execute()
|
bsd-3-clause
|
ryfeus/lambda-packs
|
LightGBM_sklearn_scipy_numpy/source/scipy/odr/tests/test_odr.py
|
14
|
13045
|
from __future__ import division, print_function, absolute_import
# Scipy imports.
import numpy as np
from numpy import pi
from numpy.testing import (assert_array_almost_equal,
assert_equal, assert_warns)
from pytest import raises as assert_raises
from scipy.odr import Data, Model, ODR, RealData, OdrStop, OdrWarning
class TestODR(object):
# Bad Data for 'x'
def test_bad_data(self):
assert_raises(ValueError, Data, 2, 1)
assert_raises(ValueError, RealData, 2, 1)
# Empty Data for 'x'
def empty_data_func(self, B, x):
return B[0]*x + B[1]
def test_empty_data(self):
beta0 = [0.02, 0.0]
linear = Model(self.empty_data_func)
empty_dat = Data([], [])
assert_warns(OdrWarning, ODR,
empty_dat, linear, beta0=beta0)
empty_dat = RealData([], [])
assert_warns(OdrWarning, ODR,
empty_dat, linear, beta0=beta0)
# Explicit Example
def explicit_fcn(self, B, x):
ret = B[0] + B[1] * np.power(np.exp(B[2]*x) - 1.0, 2)
return ret
def explicit_fjd(self, B, x):
eBx = np.exp(B[2]*x)
ret = B[1] * 2.0 * (eBx-1.0) * B[2] * eBx
return ret
def explicit_fjb(self, B, x):
eBx = np.exp(B[2]*x)
res = np.vstack([np.ones(x.shape[-1]),
np.power(eBx-1.0, 2),
B[1]*2.0*(eBx-1.0)*eBx*x])
return res
def test_explicit(self):
explicit_mod = Model(
self.explicit_fcn,
fjacb=self.explicit_fjb,
fjacd=self.explicit_fjd,
meta=dict(name='Sample Explicit Model',
ref='ODRPACK UG, pg. 39'),
)
explicit_dat = Data([0.,0.,5.,7.,7.5,10.,16.,26.,30.,34.,34.5,100.],
[1265.,1263.6,1258.,1254.,1253.,1249.8,1237.,1218.,1220.6,
1213.8,1215.5,1212.])
explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1],
ifixx=[0,0,1,1,1,1,1,1,1,1,1,0])
explicit_odr.set_job(deriv=2)
explicit_odr.set_iprint(init=0, iter=0, final=0)
out = explicit_odr.run()
assert_array_almost_equal(
out.beta,
np.array([1.2646548050648876e+03, -5.4018409956678255e+01,
-8.7849712165253724e-02]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([1.0349270280543437, 1.583997785262061, 0.0063321988657267]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[4.4949592379003039e-01, -3.7421976890364739e-01,
-8.0978217468468912e-04],
[-3.7421976890364739e-01, 1.0529686462751804e+00,
-1.9453521827942002e-03],
[-8.0978217468468912e-04, -1.9453521827942002e-03,
1.6827336938454476e-05]]),
)
# Implicit Example
def implicit_fcn(self, B, x):
return (B[2]*np.power(x[0]-B[0], 2) +
2.0*B[3]*(x[0]-B[0])*(x[1]-B[1]) +
B[4]*np.power(x[1]-B[1], 2) - 1.0)
def test_implicit(self):
implicit_mod = Model(
self.implicit_fcn,
implicit=1,
meta=dict(name='Sample Implicit Model',
ref='ODRPACK UG, pg. 49'),
)
implicit_dat = Data([
[0.5,1.2,1.6,1.86,2.12,2.36,2.44,2.36,2.06,1.74,1.34,0.9,-0.28,
-0.78,-1.36,-1.9,-2.5,-2.88,-3.18,-3.44],
[-0.12,-0.6,-1.,-1.4,-2.54,-3.36,-4.,-4.75,-5.25,-5.64,-5.97,-6.32,
-6.44,-6.44,-6.41,-6.25,-5.88,-5.5,-5.24,-4.86]],
1,
)
implicit_odr = ODR(implicit_dat, implicit_mod,
beta0=[-1.0, -3.0, 0.09, 0.02, 0.08])
out = implicit_odr.run()
assert_array_almost_equal(
out.beta,
np.array([-0.9993809167281279, -2.9310484652026476, 0.0875730502693354,
0.0162299708984738, 0.0797537982976416]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([0.1113840353364371, 0.1097673310686467, 0.0041060738314314,
0.0027500347539902, 0.0034962501532468]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[2.1089274602333052e+00, -1.9437686411979040e+00,
7.0263550868344446e-02, -4.7175267373474862e-02,
5.2515575927380355e-02],
[-1.9437686411979040e+00, 2.0481509222414456e+00,
-6.1600515853057307e-02, 4.6268827806232933e-02,
-5.8822307501391467e-02],
[7.0263550868344446e-02, -6.1600515853057307e-02,
2.8659542561579308e-03, -1.4628662260014491e-03,
1.4528860663055824e-03],
[-4.7175267373474862e-02, 4.6268827806232933e-02,
-1.4628662260014491e-03, 1.2855592885514335e-03,
-1.2692942951415293e-03],
[5.2515575927380355e-02, -5.8822307501391467e-02,
1.4528860663055824e-03, -1.2692942951415293e-03,
2.0778813389755596e-03]]),
)
# Multi-variable Example
def multi_fcn(self, B, x):
if (x < 0.0).any():
raise OdrStop
theta = pi*B[3]/2.
ctheta = np.cos(theta)
stheta = np.sin(theta)
omega = np.power(2.*pi*x*np.exp(-B[2]), B[3])
phi = np.arctan2((omega*stheta), (1.0 + omega*ctheta))
r = (B[0] - B[1]) * np.power(np.sqrt(np.power(1.0 + omega*ctheta, 2) +
np.power(omega*stheta, 2)), -B[4])
ret = np.vstack([B[1] + r*np.cos(B[4]*phi),
r*np.sin(B[4]*phi)])
return ret
def test_multi(self):
multi_mod = Model(
self.multi_fcn,
meta=dict(name='Sample Multi-Response Model',
ref='ODRPACK UG, pg. 56'),
)
multi_x = np.array([30.0, 50.0, 70.0, 100.0, 150.0, 200.0, 300.0, 500.0,
700.0, 1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 10000.0,
15000.0, 20000.0, 30000.0, 50000.0, 70000.0, 100000.0, 150000.0])
multi_y = np.array([
[4.22, 4.167, 4.132, 4.038, 4.019, 3.956, 3.884, 3.784, 3.713,
3.633, 3.54, 3.433, 3.358, 3.258, 3.193, 3.128, 3.059, 2.984,
2.934, 2.876, 2.838, 2.798, 2.759],
[0.136, 0.167, 0.188, 0.212, 0.236, 0.257, 0.276, 0.297, 0.309,
0.311, 0.314, 0.311, 0.305, 0.289, 0.277, 0.255, 0.24, 0.218,
0.202, 0.182, 0.168, 0.153, 0.139],
])
n = len(multi_x)
multi_we = np.zeros((2, 2, n), dtype=float)
multi_ifixx = np.ones(n, dtype=int)
multi_delta = np.zeros(n, dtype=float)
multi_we[0,0,:] = 559.6
multi_we[1,0,:] = multi_we[0,1,:] = -1634.0
multi_we[1,1,:] = 8397.0
for i in range(n):
if multi_x[i] < 100.0:
multi_ifixx[i] = 0
elif multi_x[i] <= 150.0:
pass # defaults are fine
elif multi_x[i] <= 1000.0:
multi_delta[i] = 25.0
elif multi_x[i] <= 10000.0:
multi_delta[i] = 560.0
elif multi_x[i] <= 100000.0:
multi_delta[i] = 9500.0
else:
multi_delta[i] = 144000.0
if multi_x[i] == 100.0 or multi_x[i] == 150.0:
multi_we[:,:,i] = 0.0
multi_dat = Data(multi_x, multi_y, wd=1e-4/np.power(multi_x, 2),
we=multi_we)
multi_odr = ODR(multi_dat, multi_mod, beta0=[4.,2.,7.,.4,.5],
delta0=multi_delta, ifixx=multi_ifixx)
multi_odr.set_job(deriv=1, del_init=1)
out = multi_odr.run()
assert_array_almost_equal(
out.beta,
np.array([4.3799880305938963, 2.4333057577497703, 8.0028845899503978,
0.5101147161764654, 0.5173902330489161]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([0.0130625231081944, 0.0130499785273277, 0.1167085962217757,
0.0132642749596149, 0.0288529201353984]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[0.0064918418231375, 0.0036159705923791, 0.0438637051470406,
-0.0058700836512467, 0.011281212888768],
[0.0036159705923791, 0.0064793789429006, 0.0517610978353126,
-0.0051181304940204, 0.0130726943624117],
[0.0438637051470406, 0.0517610978353126, 0.5182263323095322,
-0.0563083340093696, 0.1269490939468611],
[-0.0058700836512467, -0.0051181304940204, -0.0563083340093696,
0.0066939246261263, -0.0140184391377962],
[0.011281212888768, 0.0130726943624117, 0.1269490939468611,
-0.0140184391377962, 0.0316733013820852]]),
)
# Pearson's Data
# K. Pearson, Philosophical Magazine, 2, 559 (1901)
def pearson_fcn(self, B, x):
return B[0] + B[1]*x
def test_pearson(self):
p_x = np.array([0.,.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4])
p_y = np.array([5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5])
p_sx = np.array([.03,.03,.04,.035,.07,.11,.13,.22,.74,1.])
p_sy = np.array([1.,.74,.5,.35,.22,.22,.12,.12,.1,.04])
p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy)
# Reverse the data to test invariance of results
pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx)
p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit'))
p_odr = ODR(p_dat, p_mod, beta0=[1.,1.])
pr_odr = ODR(pr_dat, p_mod, beta0=[1.,1.])
out = p_odr.run()
assert_array_almost_equal(
out.beta,
np.array([5.4767400299231674, -0.4796082367610305]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([0.3590121690702467, 0.0706291186037444]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[0.0854275622946333, -0.0161807025443155],
[-0.0161807025443155, 0.003306337993922]]),
)
rout = pr_odr.run()
assert_array_almost_equal(
rout.beta,
np.array([11.4192022410781231, -2.0850374506165474]),
)
assert_array_almost_equal(
rout.sd_beta,
np.array([0.9820231665657161, 0.3070515616198911]),
)
assert_array_almost_equal(
rout.cov_beta,
np.array([[0.6391799462548782, -0.1955657291119177],
[-0.1955657291119177, 0.0624888159223392]]),
)
# Lorentz Peak
# The data is taken from one of the undergraduate physics labs I performed.
def lorentz(self, beta, x):
return (beta[0]*beta[1]*beta[2] / np.sqrt(np.power(x*x -
beta[2]*beta[2], 2.0) + np.power(beta[1]*x, 2.0)))
def test_lorentz(self):
l_sy = np.array([.29]*18)
l_sx = np.array([.000972971,.000948268,.000707632,.000706679,
.000706074, .000703918,.000698955,.000456856,
.000455207,.000662717,.000654619,.000652694,
.000000859202,.00106589,.00106378,.00125483, .00140818,.00241839])
l_dat = RealData(
[3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608,
3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982,
3.6562, 3.62498, 3.55525, 3.41886],
[652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122,
957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5],
sx=l_sx,
sy=l_sy,
)
l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak'))
l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8))
out = l_odr.run()
assert_array_almost_equal(
out.beta,
np.array([1.4306780846149925e+03, 1.3390509034538309e-01,
3.7798193600109009e+00]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([7.3621186811330963e-01, 3.5068899941471650e-04,
2.4451209281408992e-04]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[2.4714409064597873e-01, -6.9067261911110836e-05,
-3.1236953270424990e-05],
[-6.9067261911110836e-05, 5.6077531517333009e-08,
3.6133261832722601e-08],
[-3.1236953270424990e-05, 3.6133261832722601e-08,
2.7261220025171730e-08]]),
)
def test_ticket_1253(self):
def linear(c, x):
return c[0]*x+c[1]
c = [2.0, 3.0]
x = np.linspace(0, 10)
y = linear(c, x)
model = Model(linear)
data = Data(x, y, wd=1.0, we=1.0)
job = ODR(data, model, beta0=[1.0, 1.0])
result = job.run()
assert_equal(result.info, 2)
|
mit
|
stclair/wes-cms
|
django/middleware/gzip.py
|
321
|
1455
|
import re
from django.utils.text import compress_string
from django.utils.cache import patch_vary_headers
re_accepts_gzip = re.compile(r'\bgzip\b')
class GZipMiddleware(object):
"""
This middleware compresses content if the browser allows gzip compression.
It sets the Vary header accordingly, so that caches will base their storage
on the Accept-Encoding header.
"""
def process_response(self, request, response):
# It's not worth compressing non-OK or really short responses.
if response.status_code != 200 or len(response.content) < 200:
return response
patch_vary_headers(response, ('Accept-Encoding',))
# Avoid gzipping if we've already got a content-encoding.
if response.has_header('Content-Encoding'):
return response
        # MSIE has issues with gzipped responses of various content types.
if "msie" in request.META.get('HTTP_USER_AGENT', '').lower():
ctype = response.get('Content-Type', '').lower()
if not ctype.startswith("text/") or "javascript" in ctype:
return response
ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
if not re_accepts_gzip.search(ae):
return response
response.content = compress_string(response.content)
response['Content-Encoding'] = 'gzip'
response['Content-Length'] = str(len(response.content))
return response
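# Activation sketch for a project using this middleware (older Django settings.py):
#   MIDDLEWARE_CLASSES = (
#       'django.middleware.gzip.GZipMiddleware',
#       # ... other middleware ...
#   )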
|
bsd-3-clause
|
EvanK/ansible-modules-extras
|
notification/pushover.py
|
59
|
3528
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2012, Jim Richardson <weaselkeeper@gmail.com>
# All rights reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
###
DOCUMENTATION = '''
---
module: pushover
version_added: "2.0"
short_description: Send notifications via U(https://pushover.net)
description:
    - Send notifications via pushover to a subscriber list of devices and email
      addresses. Requires the pushover app on devices.
notes:
- You will require a pushover.net account to use this module. But no account
is required to receive messages.
options:
msg:
description:
- What message you wish to send.
required: true
app_token:
description:
- Pushover issued token identifying your pushover app.
required: true
user_key:
description:
- Pushover issued authentication key for your user.
required: true
pri:
description:
- Message priority (see U(https://pushover.net) for details.)
required: false
author: "Jim Richardson (@weaselkeeper)"
'''
EXAMPLES = '''
- local_action: pushover msg="{{inventory_hostname}} has exploded in flames,
It is now time to panic" app_token=wxfdksl user_key=baa5fe97f2c5ab3ca8f0bb59
'''
import urllib
class Pushover(object):
''' Instantiates a pushover object, use it to send notifications '''
base_uri = 'https://api.pushover.net'
port = 443
def __init__(self, module, user, token):
self.module = module
self.user = user
self.token = token
def run(self, priority, msg):
''' Do, whatever it is, we do. '''
url = '%s:%s/1/messages.json' % (self.base_uri, self.port)
# parse config
options = dict(user=self.user,
token=self.token,
priority=priority,
message=msg)
data = urllib.urlencode(options)
headers = { "Content-type": "application/x-www-form-urlencoded"}
r, info = fetch_url(self.module, url, method='POST', data=data, headers=headers)
if info['status'] != 200:
raise Exception(info)
return r.read()
def main():
module = AnsibleModule(
argument_spec=dict(
msg=dict(required=True),
app_token=dict(required=True, no_log=True),
user_key=dict(required=True, no_log=True),
pri=dict(required=False, default='0', choices=['-2','-1','0','1','2']),
),
)
msg_object = Pushover(module, module.params['user_key'], module.params['app_token'])
try:
response = msg_object.run(module.params['pri'], module.params['msg'])
except:
module.fail_json(msg='Unable to send msg via pushover')
module.exit_json(msg='message sent successfully: %s' % response, changed=False)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
protochron/aurora
|
src/main/python/apache/thermos/monitoring/disk.py
|
8
|
2533
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Sample disk usage under a particular path
This module provides threads which can be used to gather information on the disk utilisation
under a particular path.
"""
import threading
import time
from twitter.common import log
from twitter.common.dirutil import du
from twitter.common.exceptions import ExceptionalThread
from twitter.common.lang import Lockable
class DiskCollectorThread(ExceptionalThread):
""" Thread to calculate aggregate disk usage under a given path using a simple algorithm """
def __init__(self, path):
self.path = path
self.value = None
self.event = threading.Event()
super(DiskCollectorThread, self).__init__()
self.daemon = True
def run(self):
start = time.time()
self.value = du(self.path)
log.debug("DiskCollectorThread: finished collection of %s in %.1fms" % (
self.path, 1000.0 * (time.time() - start)))
self.event.set()
def finished(self):
return self.event.is_set()
class DiskCollector(Lockable):
""" Spawn a background thread to sample disk usage """
def __init__(self, root):
self._root = root
self._thread = None
self._value = 0
super(DiskCollector, self).__init__()
@Lockable.sync
def sample(self):
""" Trigger collection of sample, if not already begun """
if self._thread is None:
self._thread = DiskCollectorThread(self._root)
self._thread.start()
@property
@Lockable.sync
def value(self):
""" Retrieve value of disk usage """
if self._thread is not None and self._thread.finished():
self._value = self._thread.value
self._thread = None
return self._value
@property
@Lockable.sync
def completed_event(self):
""" Return a threading.Event that will block until an in-progress disk collection is complete,
or block indefinitely otherwise. Use with caution! (i.e.: set a timeout) """
if self._thread is not None:
return self._thread.event
else:
return threading.Event()
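# Minimal usage sketch (the path is a placeholder):
#   collector = DiskCollector('/var/lib/example-task')
#   collector.sample()                   # spawns a background du() thread
#   collector.completed_event.wait(5.0)  # optionally bound the wait
#   print(collector.value)               # aggregate bytes used under the path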
|
apache-2.0
|
batxes/4c2vhic
|
SHH_INV_models/SHH_INV_models_final_output_0.2_-0.1_10000/SHH_INV_models30420.py
|
2
|
17571
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((10265.1, -87.5294, -633.58), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((9492.27, 1146.76, 982.519), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((8383.43, 2407.8, 2065.26), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((6956.11, 2805.97, 236.8), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((5284.4, 3087.42, 1115.55), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((5288.6, 2893.89, 3616.15), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((4795.63, 114.467, 4356.61), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((4626.08, -1027.14, 3746.06), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((5480.98, 2232.1, 6691.86), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((4829.64, 1975.37, 7388.41), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((3524.45, 3253.84, 7568.41), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((1907.58, 4463.49, 8007.56), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((3200.62, 5191.34, 8737), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((3955.71, 6546.57, 7746.57), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((2572.27, 7032.25, 8307.17), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((940.706, 8418.25, 9724.27), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((1322.63, 6759.26, 9190.68), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((1224.22, 5246.09, 8806.65), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((399.22, 5649.77, 7720.51), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((1233.34, 6684.6, 8465.81), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((2934.91, 6204.13, 8202.63), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((2138.8, 6235.1, 7547.75), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((2014.4, 5384.08, 8602.93), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((1884.69, 4079.17, 8536.99), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((1769.37, 4647.52, 7055.99), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((2033.36, 5766.96, 7005.94), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((769.31, 5337.96, 7576.1), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((1749.71, 5635.89, 6640.23), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((2319.67, 6829.89, 7034.69), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((2860.7, 7875.87, 7300.86), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((2970.27, 6365.1, 7063.47), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((3811.76, 5489, 6031.42), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((3643.48, 5433.97, 7549.91), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((2371.74, 5941.67, 8181.17), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((1904.57, 5566.94, 8931.04), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((2185.56, 5132.47, 10405.9), (0.7, 0.7, 0.7), 697.612)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((4302.83, 5273.56, 8208.61), (0.7, 0.7, 0.7), 799.808)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((5911.39, 3704.57, 7473.1), (0.7, 0.7, 0.7), 1132.58)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((6312.35, 4426.78, 7447.48), (0.7, 0.7, 0.7), 1011.94)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((7848.86, 4431.55, 6619.3), (0.7, 0.7, 0.7), 782.592)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((8436.32, 3159.93, 7681.02), (0.7, 0.7, 0.7), 856.575)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((8847.89, 2471.89, 8343.03), (1, 0.7, 0), 706.579)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((8200.75, 3013.71, 7494.41), (0.7, 0.7, 0.7), 1015.96)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((7341.79, 4770.24, 5432.34), (0.7, 0.7, 0.7), 1205.72)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((7669.63, 6820.41, 4787.68), (0.7, 0.7, 0.7), 841.939)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((6953.01, 5797.45, 4057.69), (1, 0.7, 0), 806.999)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((7224.34, 6272.93, 3633.91), (0.7, 0.7, 0.7), 958.856)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((5988.07, 6866.51, 2365.69), (0.7, 0.7, 0.7), 952.892)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((6144.08, 7528.03, 2028.92), (0.7, 0.7, 0.7), 809.284)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((7343.98, 7067.18, 2068.59), (0.7, 0.7, 0.7), 709.159)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((5859.94, 8058.7, 2225.08), (0.7, 0.7, 0.7), 859.832)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((4330.57, 7994.72, 3102.47), (0.7, 0.7, 0.7), 800.866)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((5345.24, 9169.11, 4177.99), (0.7, 0.7, 0.7), 949.508)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((6127.45, 8600.72, 5871.24), (0.7, 0.7, 0.7), 891.98)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((4369.78, 7805.87, 5280.74), (0.7, 0.7, 0.7), 890.034)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((4607.46, 8188.23, 3565.38), (0.7, 0.7, 0.7), 804.165)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((3741.85, 9147.98, 3611.98), (0.7, 0.7, 0.7), 826.796)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((4929.58, 10461.3, 4384.41), (0.7, 0.7, 0.7), 1085.8)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((5106.41, 8195.31, 5438.69), (0.7, 0.7, 0.7), 906.997)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((5336.32, 9109.93, 3907.71), (0.7, 0.7, 0.7), 708.694)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((5247.4, 8578.57, 2413.94), (0.7, 0.7, 0.7), 780.223)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((6762.95, 8206.75, 2438.69), (0.7, 0.7, 0.7), 757.424)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((6620.06, 7570.92, 954.66), (0.7, 0.7, 0.7), 817.574)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((7772.85, 7171.05, 2.9646), (0.7, 0.7, 0.7), 782.423)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((6699.93, 6338.05, 1150.96), (0.7, 0.7, 0.7), 906.404)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((6814.49, 7899.59, 2088.01), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((5835.93, 7740.66, 1687.57), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((5706.84, 6060.6, 1128.43), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((6934.83, 7676.97, 504.112), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((6959.59, 7212.5, 1314.18), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((5611.13, 7714.19, 1336.14), (0.7, 0.7, 0.7), 764.488)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
gpl-3.0
|
jamessy/select-sc
|
node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/xcode.py
|
526
|
54812
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import errno
import os
import sys
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
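# Illustrative note: Finalize1 below pins these custom settings project-wide, so in a
# generated project $(INTERMEDIATE_DIR) effectively resolves to
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION) and $(SHARED_INTERMEDIATE_DIR) to
# $(SYMROOT)/DerivedSources/$(CONFIGURATION).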
_library_search_paths_var = 'LIBRARY_SEARCH_PATHS'
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.dylib',
# INTERMEDIATE_DIR is a place for targets to build up intermediate products.
# It is specific to each build environment. It is only guaranteed to exist
# and be constant within the context of a project, corresponding to a single
# input file. Some build environments may allow their intermediate directory
# to be shared on a wider scale, but this is not guaranteed.
'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
'OS': 'mac',
'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)',
'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
# 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
'mac_bundle',
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
'mac_xctest_bundle',
'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
]
# Xcode's standard set of library directories, which don't need to be duplicated
# in LIBRARY_SEARCH_PATHS. This list is not exhaustive, but that's okay.
xcode_standard_library_dirs = frozenset([
'$(SDKROOT)/usr/lib',
'$(SDKROOT)/usr/local/lib',
])
def CreateXCConfigurationList(configuration_names):
xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
if len(configuration_names) == 0:
configuration_names = ['Default']
for configuration_name in configuration_names:
xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
'name': configuration_name})
xccl.AppendProperty('buildConfigurations', xcbc)
xccl.SetProperty('defaultConfigurationName', configuration_names[0])
return xccl
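# Illustrative example (hypothetical configuration names):
#   CreateXCConfigurationList(['Debug', 'Release']) returns an XCConfigurationList
#   holding one XCBuildConfiguration per name, with 'Debug' recorded as
#   defaultConfigurationName; an empty input list falls back to ['Default'].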
class XcodeProject(object):
def __init__(self, gyp_path, path, build_file_dict):
self.gyp_path = gyp_path
self.path = path
self.project = gyp.xcodeproj_file.PBXProject(path=path)
projectDirPath = gyp.common.RelativePath(
os.path.dirname(os.path.abspath(self.gyp_path)),
os.path.dirname(path) or '.')
self.project.SetProperty('projectDirPath', projectDirPath)
self.project_file = \
gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
self.build_file_dict = build_file_dict
# TODO(mark): add destructor that cleans up self.path if created_dir is
# True and things didn't complete successfully. Or do something even
# better with "try"?
self.created_dir = False
try:
os.makedirs(self.path)
self.created_dir = True
except OSError, e:
if e.errno != errno.EEXIST:
raise
def Finalize1(self, xcode_targets, serialize_all_tests):
# Collect a list of all of the build configuration names used by the
# various targets in the file. It is strongly advised that every target in a
# project (even one spread across multiple project files) use the same set of
# configuration names.
configurations = []
for xct in self.project.GetProperty('targets'):
xccl = xct.GetProperty('buildConfigurationList')
xcbcs = xccl.GetProperty('buildConfigurations')
for xcbc in xcbcs:
name = xcbc.GetProperty('name')
if name not in configurations:
configurations.append(name)
# Replace the XCConfigurationList attached to the PBXProject object with
# a new one specifying all of the configuration names used by the various
# targets.
try:
xccl = CreateXCConfigurationList(configurations)
self.project.SetProperty('buildConfigurationList', xccl)
except:
sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
raise
# The need for this setting is explained above where _intermediate_var is
# defined. The comments below about wanting to avoid project-wide build
# settings apply here too, but this needs to be set on a project-wide basis
# so that files relative to the _intermediate_var setting can be displayed
# properly in the Xcode UI.
#
# Note that for configuration-relative files such as anything relative to
# _intermediate_var, for the purposes of UI tree view display, Xcode will
# only resolve the configuration name once, when the project file is
# opened. If the active build configuration is changed, the project file
# must be closed and reopened if it is desired for the tree view to update.
# This is filed as Apple radar 6588391.
xccl.SetBuildSetting(_intermediate_var,
'$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
xccl.SetBuildSetting(_shared_intermediate_var,
'$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
# Set user-specified project-wide build settings and config files. This
# is intended to be used very sparingly. Really, almost everything should
# go into target-specific build settings sections. The project-wide
# settings are only intended to be used in cases where Xcode attempts to
# resolve variable references in a project context as opposed to a target
# context, such as when resolving sourceTree references while building up
# the tree view for UI display.
# Any values set globally are applied to all configurations, then any
# per-configuration values are applied.
for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
xccl.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in self.build_file_dict:
config_ref = self.project.AddOrGetFileInRootGroup(
self.build_file_dict['xcode_config_file'])
xccl.SetBaseConfiguration(config_ref)
build_file_configurations = self.build_file_dict.get('configurations', {})
if build_file_configurations:
for config_name in configurations:
build_file_configuration_named = \
build_file_configurations.get(config_name, {})
if build_file_configuration_named:
xcc = xccl.ConfigurationNamed(config_name)
for xck, xcv in build_file_configuration_named.get('xcode_settings',
{}).iteritems():
xcc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in build_file_configuration_named:
config_ref = self.project.AddOrGetFileInRootGroup(
build_file_configurations[config_name]['xcode_config_file'])
xcc.SetBaseConfiguration(config_ref)
# Sort the targets based on how they appeared in the input.
# TODO(mark): Like a lot of other things here, this assumes internal
# knowledge of PBXProject - in this case, of its "targets" property.
# ordinary_targets are ordinary targets that are already in the project
# file. run_test_targets are the targets that run unittests and should be
# used for the Run All Tests target. support_targets are the action/rule
# targets used by GYP file targets, just kept for the assert check.
ordinary_targets = []
run_test_targets = []
support_targets = []
# targets is full list of targets in the project.
targets = []
# does the project define its own "all" target?
has_custom_all = False
# targets_for_all is the list of ordinary_targets that should be listed
# in this project's "All" target. It includes each non_runtest_target
# that does not have suppress_wildcard set.
targets_for_all = []
for target in self.build_file_dict['targets']:
target_name = target['target_name']
toolset = target['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
toolset)
xcode_target = xcode_targets[qualified_target]
# Make sure that the target being added to the sorted list is already in
# the unsorted list.
assert xcode_target in self.project._properties['targets']
targets.append(xcode_target)
ordinary_targets.append(xcode_target)
if xcode_target.support_target:
support_targets.append(xcode_target.support_target)
targets.append(xcode_target.support_target)
if not int(target.get('suppress_wildcard', False)):
targets_for_all.append(xcode_target)
if target_name.lower() == 'all':
has_custom_all = True
# If this target has a 'run_as' attribute, add its target to the
# targets, and add it to the test targets.
if target.get('run_as'):
# Make a target to run something. It should have one
# dependency, the parent xcode target.
xccl = CreateXCConfigurationList(configurations)
run_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run ' + target_name,
'productName': xcode_target.GetProperty('productName'),
'buildConfigurationList': xccl,
},
parent=self.project)
run_target.AddDependency(xcode_target)
command = target['run_as']
script = ''
if command.get('working_directory'):
script = script + 'cd "%s"\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
command.get('working_directory'))
if command.get('environment'):
script = script + "\n".join(
['export %s="%s"' %
(key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
for (key, val) in command.get('environment').iteritems()]) + "\n"
# Some tests end up using sockets, files on disk, etc. and can get
# confused if more than one test runs at a time. The generator
# flag 'xcode_serialize_all_test_runs' controls whether all tests are
# forced to run serially. It defaults to True. To get serial runs, this
# little bit of python does the same as the linux flock utility to
# make sure only one runs at a time.
command_prefix = ''
if serialize_all_tests:
command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """
# If we were unable to exec for some reason, we want to exit
# with an error, and fixup variable references to be shell
# syntax instead of xcode syntax.
script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
gyp.common.EncodePOSIXShellList(command.get('action')))
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'shellScript': script,
'showEnvVarsInLog': 0,
})
run_target.AppendProperty('buildPhases', ssbp)
# Add the run target to the project file.
targets.append(run_target)
run_test_targets.append(run_target)
xcode_target.test_runner = run_target
# Make sure that the list of targets being replaced is the same length as
# the one replacing it, but allow for the added test runner targets.
assert len(self.project._properties['targets']) == \
len(ordinary_targets) + len(support_targets)
self.project._properties['targets'] = targets
# Get rid of unnecessary levels of depth in groups like the Source group.
self.project.RootGroupsTakeOverOnlyChildren(True)
# Sort the groups nicely. Do this after sorting the targets, because the
# Products group is sorted based on the order of the targets.
self.project.SortGroups()
# Create an "All" target if there's more than one target in this project
# file and the project didn't define its own "All" target. Put a generated
# "All" target first so that people opening up the project for the first
# time will build everything by default.
if len(targets_for_all) > 1 and not has_custom_all:
xccl = CreateXCConfigurationList(configurations)
all_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'All',
},
parent=self.project)
for target in targets_for_all:
all_target.AddDependency(target)
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._properties. It's important to get the "All" target first,
# though.
self.project._properties['targets'].insert(0, all_target)
# The same, but for run_test_targets.
if len(run_test_targets) > 1:
xccl = CreateXCConfigurationList(configurations)
run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'Run All Tests',
},
parent=self.project)
for run_test_target in run_test_targets:
run_all_tests_target.AddDependency(run_test_target)
# Insert after the "All" target, which must exist if there is more than
# one run_test_target.
self.project._properties['targets'].insert(1, run_all_tests_target)
def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
# Finalize2 needs to happen in a separate step because the process of
# updating references to other projects depends on the ordering of targets
# within remote project files. Finalize1 is responsible for sorting duty,
# and once all project files are sorted, Finalize2 can come in and update
# these references.
# To support making a "test runner" target that will run all the tests
# that are direct dependents of any given target, we look for
# xcode_create_dependents_test_runner being set on an Aggregate target,
# and generate a second target that will run the tests runners found under
# the marked target.
for bf_tgt in self.build_file_dict['targets']:
if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
tgt_name = bf_tgt['target_name']
toolset = bf_tgt['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
tgt_name, toolset)
xcode_target = xcode_targets[qualified_target]
if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
# Collect all the run test targets.
all_run_tests = []
pbxtds = xcode_target.GetProperty('dependencies')
for pbxtd in pbxtds:
pbxcip = pbxtd.GetProperty('targetProxy')
dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
if hasattr(dependency_xct, 'test_runner'):
all_run_tests.append(dependency_xct.test_runner)
# Directly depend on all the runners as they depend on the target
# that builds them.
if len(all_run_tests) > 0:
run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run %s Tests' % tgt_name,
'productName': tgt_name,
},
parent=self.project)
for run_test_target in all_run_tests:
run_all_target.AddDependency(run_test_target)
# Insert the test runner after the related target.
idx = self.project._properties['targets'].index(xcode_target)
self.project._properties['targets'].insert(idx + 1, run_all_target)
# Update all references to other projects, to make sure that the lists of
# remote products are complete. Otherwise, Xcode will fill them in when
# it opens the project file, which will result in unnecessary diffs.
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._other_pbxprojects.
for other_pbxproject in self.project._other_pbxprojects.keys():
self.project.AddOrGetProjectReference(other_pbxproject)
self.project.SortRemoteProductReferences()
# Give everything an ID.
self.project_file.ComputeIDs()
# Make sure that no two objects in the project file have the same ID. If
# multiple objects wind up with the same ID, upon loading the file, Xcode
# will only recognize one object (the last one in the file?) and the
# results are unpredictable.
self.project_file.EnsureNoIDCollisions()
def Write(self):
# Write the project file to a temporary location first. Xcode watches for
# changes to the project file and presents a UI sheet offering to reload
# the project when it does change. However, in some cases, especially when
# multiple projects are open or when Xcode is busy, things don't work so
# seamlessly. Sometimes, Xcode is able to detect that a project file has
# changed but can't unload it because something else is referencing it.
# To mitigate this problem, and to avoid even having Xcode present the UI
# sheet when an open project is rewritten for inconsequential changes, the
# project file is written to a temporary file in the xcodeproj directory
# first. The new temporary file is then compared to the existing project
# file, if any. If they differ, the new file replaces the old; otherwise,
# the new project file is simply deleted. Xcode properly detects a file
# being renamed over an open project file as a change and so it remains
# able to present the "project file changed" sheet under this system.
# Writing to a temporary file first also avoids the possible problem of
# Xcode rereading an incomplete project file.
(output_fd, new_pbxproj_path) = \
tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
dir=self.path)
try:
output_file = os.fdopen(output_fd, 'wb')
self.project_file.Print(output_file)
output_file.close()
pbxproj_path = os.path.join(self.path, 'project.pbxproj')
same = False
try:
same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(new_pbxproj_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(new_pbxproj_path, 0666 & ~umask)
os.rename(new_pbxproj_path, pbxproj_path)
except Exception:
# Don't leave turds behind. In fact, if this code was responsible for
# creating the xcodeproj directory, get rid of that too.
os.unlink(new_pbxproj_path)
if self.created_dir:
shutil.rmtree(self.path, True)
raise
def AddSourceToTarget(source, type, pbxp, xct):
# TODO(mark): Perhaps source_extensions and library_extensions can be made a
# little bit fancier.
source_extensions = ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's']
# .o is conceptually more of a "source" than a "library," but Xcode thinks
# of "sources" as things to compile and "libraries" (or "frameworks") as
# things to link with. Adding an object file to an Xcode target's frameworks
# phase works properly.
library_extensions = ['a', 'dylib', 'framework', 'o']
basename = posixpath.basename(source)
(root, ext) = posixpath.splitext(basename)
if ext:
ext = ext[1:].lower()
if ext in source_extensions and type != 'none':
xct.SourcesPhase().AddFile(source)
elif ext in library_extensions and type != 'none':
xct.FrameworksPhase().AddFile(source)
else:
# Files that aren't added to a sources or frameworks build phase can still
# go into the project file, just not as part of a build phase.
pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
xct.ResourcesPhase().AddFile(resource)
def AddHeaderToTarget(header, pbxp, xct, is_public):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
settings = '{ATTRIBUTES = (%s, ); }' % ('Private', 'Public')[is_public]
xct.HeadersPhase().AddFile(header, settings)
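# Illustrative example (hypothetical header path):
#   AddHeaderToTarget('include/foo.h', pbxp, xct, True) files the header into the
#   Headers build phase with '{ATTRIBUTES = (Public, ); }'; passing False marks it
#   Private instead.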
_xcode_variable_re = re.compile('(\$\((.*?)\))')
def ExpandXcodeVariables(string, expansions):
"""Expands Xcode-style $(VARIABLES) in string per the expansions dict.
In some rare cases, it is appropriate to expand Xcode variables when a
project file is generated. For any substring $(VAR) in string, if VAR is a
key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
Any $(VAR) substring in string for which VAR is not a key in the expansions
dict will remain in the returned string.
"""
matches = _xcode_variable_re.findall(string)
if matches is None:
return string
matches.reverse()
for match in matches:
(to_replace, variable) = match
if variable not in expansions:
continue
replacement = expansions[variable]
string = re.sub(re.escape(to_replace), replacement, string)
return string
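# Illustrative example (hypothetical expansions dict):
#   ExpandXcodeVariables('$(INPUT_FILE_BASE).cc and $(UNKNOWN)',
#                        {'INPUT_FILE_BASE': 'foo'})
#   returns 'foo.cc and $(UNKNOWN)' -- variables missing from the dict are left as-is.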
_xcode_define_re = re.compile(r'([\\\"\' ])')
def EscapeXcodeDefine(s):
"""We must escape the defines that we give to XCode so that it knows not to
split on spaces and to respect backslash and quote literals. However, we
must not quote the define, or Xcode will incorrectly intepret variables
especially $(inherited)."""
return re.sub(_xcode_define_re, r'\\\1', s)
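# Illustrative example: EscapeXcodeDefine('NAME="a b"') returns NAME=\"a\ b\" -- each
# quote, backslash and space is prefixed with a backslash so Xcode treats the define
# as a single token.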
def PerformBuild(data, configurations, params):
options = params['options']
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
for config in configurations:
arguments = ['xcodebuild', '-project', xcodeproj_path]
arguments += ['-configuration', config]
print "Building [%s]: %s" % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
parallel_builds = generator_flags.get('xcode_parallel_builds', True)
serialize_all_tests = \
generator_flags.get('xcode_serialize_all_test_runs', True)
project_version = generator_flags.get('xcode_project_version', None)
skip_excluded_files = \
not generator_flags.get('xcode_list_excluded_files', True)
xcode_projects = {}
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
xcode_projects[build_file] = xcp
pbxp = xcp.project
if parallel_builds:
pbxp.SetProperty('attributes',
{'BuildIndependentTargetsInParallel': 'YES'})
if project_version:
xcp.project_file.SetXcodeVersion(project_version)
# Add gyp/gypi files to project
if not generator_flags.get('standalone'):
main_group = pbxp.GetProperty('mainGroup')
build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
main_group.AppendChild(build_group)
for included_file in build_file_dict['included_files']:
build_group.AddOrGetFileByPath(included_file, False)
xcode_targets = {}
xcode_target_to_target_dict = {}
for qualified_target in target_list:
[build_file, target_name, toolset] = \
gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in xcode build (target %s)' %
qualified_target)
configuration_names = [spec['default_configuration']]
for configuration_name in sorted(spec['configurations'].keys()):
if configuration_name not in configuration_names:
configuration_names.append(configuration_name)
xcp = xcode_projects[build_file]
pbxp = xcp.project
# Set up the configurations for the target according to the list of names
# supplied.
xccl = CreateXCConfigurationList(configuration_names)
# Create an XCTarget subclass object for the target. The type with
# "+bundle" appended will be used if the target has "mac_bundle" set.
# loadable_modules not in a mac_bundle are mapped to
# com.googlecode.gyp.xcode.bundle, a pseudo-type that xcode.py interprets
# to create a single-file mh_bundle.
_types = {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.googlecode.gyp.xcode.bundle',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
'executable+bundle': 'com.apple.product-type.application',
'loadable_module+bundle': 'com.apple.product-type.bundle',
'loadable_module+xctest': 'com.apple.product-type.bundle.unit-test',
'shared_library+bundle': 'com.apple.product-type.framework',
}
target_properties = {
'buildConfigurationList': xccl,
'name': target_name,
}
type = spec['type']
is_xctest = int(spec.get('mac_xctest_bundle', 0))
is_bundle = int(spec.get('mac_bundle', 0)) or is_xctest
if type != 'none':
type_bundle_key = type
if is_xctest:
type_bundle_key += '+xctest'
assert type == 'loadable_module', (
'mac_xctest_bundle targets must have type loadable_module '
'(target %s)' % target_name)
elif is_bundle:
type_bundle_key += '+bundle'
xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
try:
target_properties['productType'] = _types[type_bundle_key]
except KeyError, e:
gyp.common.ExceptionAppend(e, "-- unknown product type while "
"writing target %s" % target_name)
raise
else:
xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
assert not is_bundle, (
'mac_bundle targets cannot have type none (target "%s")' %
target_name)
assert not is_xctest, (
'mac_xctest_bundle targets cannot have type none (target "%s")' %
target_name)
target_product_name = spec.get('product_name')
if target_product_name is not None:
target_properties['productName'] = target_product_name
xct = xctarget_type(target_properties, parent=pbxp,
force_outdir=spec.get('product_dir'),
force_prefix=spec.get('product_prefix'),
force_extension=spec.get('product_extension'))
pbxp.AppendProperty('targets', xct)
xcode_targets[qualified_target] = xct
xcode_target_to_target_dict[xct] = spec
spec_actions = spec.get('actions', [])
spec_rules = spec.get('rules', [])
# Xcode has some "issues" with checking dependencies for the "Compile
# sources" step with any source files/headers generated by actions/rules.
# To work around this, if a target is building anything directly (not
# type "none"), then a second target is used to run the GYP actions/rules
# and is made a dependency of this target. This way the work is done
# before the dependency checks for what should be recompiled.
support_xct = None
if type != 'none' and (spec_actions or spec_rules):
support_xccl = CreateXCConfigurationList(configuration_names)
support_target_properties = {
'buildConfigurationList': support_xccl,
'name': target_name + ' Support',
}
if target_product_name:
support_target_properties['productName'] = \
target_product_name + ' Support'
support_xct = \
gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
parent=pbxp)
pbxp.AppendProperty('targets', support_xct)
xct.AddDependency(support_xct)
# Hang the support target off the main target so it can be tested/found
# by the generator during Finalize.
xct.support_target = support_xct
prebuild_index = 0
# Add custom shell script phases for "actions" sections.
for action in spec_actions:
# There's no need to write anything into the script to ensure that the
# output directories already exist, because Xcode will look at the
# declared outputs and automatically ensure that they exist for us.
# Do we have a message to print when this action runs?
message = action.get('message')
if message:
message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
else:
message = ''
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(action['action'])
# Convert Xcode-type variable references to sh-compatible environment
# variable references.
message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
action_string)
script = ''
# Include the optional message
if message_sh:
script += message_sh + '\n'
# Be sure the script runs in exec, and that if exec fails, the script
# exits signalling an error.
script += 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': action['inputs'],
'name': 'Action "' + action['action_name'] + '"',
'outputPaths': action['outputs'],
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# TODO(mark): Should verify that at most one of these is specified.
if int(action.get('process_outputs_as_sources', False)):
for output in action['outputs']:
AddSourceToTarget(output, type, pbxp, xct)
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
for output in action['outputs']:
AddResourceToTarget(output, pbxp, xct)
# tgt_mac_bundle_resources holds the list of bundle resources so
# the rule processing can check against it.
if is_bundle:
tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
else:
tgt_mac_bundle_resources = []
# Add custom shell script phases driving "make" for "rules" sections.
#
# Xcode's built-in rule support is almost powerful enough to use directly,
# but there are a few significant deficiencies that render them unusable.
# There are workarounds for some of its inadequacies, but in aggregate,
# the workarounds added complexity to the generator, and some workarounds
# actually require input files to be crafted more carefully than I'd like.
# Consequently, until Xcode rules are made more capable, "rules" input
# sections will be handled in Xcode output by shell script build phases
# performed prior to the compilation phase.
#
# The following problems with Xcode rules were found. The numbers are
# Apple radar IDs. I hope that these shortcomings are addressed; I really
# liked having the rules handled directly in Xcode during the period that
# I was prototyping this.
#
# 6588600 Xcode compiles custom script rule outputs too soon, compilation
# fails. This occurs when rule outputs from distinct inputs are
# interdependent. The only workaround is to put rules and their
# inputs in a separate target from the one that compiles the rule
# outputs. This requires input file cooperation and it means that
# process_outputs_as_sources is unusable.
# 6584932 Need to declare that custom rule outputs should be excluded from
# compilation. A possible workaround is to lie to Xcode about a
# rule's output, giving it a dummy file it doesn't know how to
# compile. The rule action script would need to touch the dummy.
# 6584839 I need a way to declare additional inputs to a custom rule.
# A possible workaround is a shell script phase prior to
# compilation that touches a rule's primary input files if any
# would-be additional inputs are newer than the output. Modifying
# the source tree - even just modification times - feels dirty.
# 6564240 Xcode "custom script" build rules always dump all environment
# variables. This is a low-priority problem and is not a
# show-stopper.
rules_by_ext = {}
for rule in spec_rules:
rules_by_ext[rule['extension']] = rule
# First, some definitions:
#
# A "rule source" is a file that was listed in a target's "sources"
# list and will have a rule applied to it on the basis of matching the
# rule's "extensions" attribute. Rule sources are direct inputs to
# rules.
#
# Rule definitions may specify additional inputs in their "inputs"
# attribute. These additional inputs are used for dependency tracking
# purposes.
#
# A "concrete output" is a rule output with input-dependent variables
# resolved. For example, given a rule with:
# 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
# if the target's "sources" list contained "one.ext" and "two.ext",
# the "concrete output" for rule input "two.ext" would be "two.cc". If
# a rule specifies multiple outputs, each input file that the rule is
# applied to will have the same number of concrete outputs.
#
# If any concrete outputs are outdated or missing relative to their
# corresponding rule_source or to any specified additional input, the
# rule action must be performed to generate the concrete outputs.
# concrete_outputs_by_rule_source will have an item at the same index
# as the rule['rule_sources'] that it corresponds to. Each item is a
# list of all of the concrete outputs for the rule_source.
concrete_outputs_by_rule_source = []
# concrete_outputs_all is a flat list of all concrete outputs that this
# rule is able to produce, given the known set of input files
# (rule_sources) that apply to it.
concrete_outputs_all = []
# messages & actions are keyed by the same indices as rule['rule_sources']
# and concrete_outputs_by_rule_source. They contain the message and
# action to perform after resolving input-dependent variables. The
# message is optional, in which case None is stored for each rule source.
messages = []
actions = []
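# Illustrative example (hypothetical rule): with rule_sources ['one.ext', 'two.ext']
# and outputs ['$(INPUT_FILE_BASE).cc'], the loop below leaves these parallel lists as
#   concrete_outputs_by_rule_source == [['one.cc'], ['two.cc']]
#   concrete_outputs_all == ['one.cc', 'two.cc']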
for rule_source in rule.get('rule_sources', []):
rule_source_dirname, rule_source_basename = \
posixpath.split(rule_source)
(rule_source_root, rule_source_ext) = \
posixpath.splitext(rule_source_basename)
# These are the same variable names that Xcode uses for its own native
# rule support. Because Xcode's rule engine is not being used, they
# need to be expanded as they are written to the makefile.
rule_input_dict = {
'INPUT_FILE_BASE': rule_source_root,
'INPUT_FILE_SUFFIX': rule_source_ext,
'INPUT_FILE_NAME': rule_source_basename,
'INPUT_FILE_PATH': rule_source,
'INPUT_FILE_DIRNAME': rule_source_dirname,
}
concrete_outputs_for_this_rule_source = []
for output in rule.get('outputs', []):
# Fortunately, Xcode and make both use $(VAR) format for their
# variables, so the expansion is the only transformation necessary.
# Any remaining $(VAR)-type variables in the string can be given
# directly to make, which will pick up the correct settings from
# what Xcode puts into the environment.
concrete_output = ExpandXcodeVariables(output, rule_input_dict)
concrete_outputs_for_this_rule_source.append(concrete_output)
# Add all concrete outputs to the project.
pbxp.AddOrGetFileInRootGroup(concrete_output)
concrete_outputs_by_rule_source.append( \
concrete_outputs_for_this_rule_source)
concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
# TODO(mark): Should verify that at most one of these is specified.
if int(rule.get('process_outputs_as_sources', False)):
for output in concrete_outputs_for_this_rule_source:
AddSourceToTarget(output, type, pbxp, xct)
# If the file came from the mac_bundle_resources list or if the rule
# is marked to process outputs as bundle resource, do so.
was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
for output in concrete_outputs_for_this_rule_source:
AddResourceToTarget(output, pbxp, xct)
# Do we have a message to print when this rule runs?
message = rule.get('message')
if message:
message = gyp.common.EncodePOSIXShellArgument(message)
message = ExpandXcodeVariables(message, rule_input_dict)
messages.append(message)
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(rule['action'])
action = ExpandXcodeVariables(action_string, rule_input_dict)
actions.append(action)
if len(concrete_outputs_all) > 0:
# TODO(mark): There's a possibility for collision here. Consider
# target "t" rule "A_r" and target "t_A" rule "r".
makefile_name = '%s.make' % re.sub(
'[^a-zA-Z0-9_]', '_' , '%s_%s' % (target_name, rule['rule_name']))
makefile_path = os.path.join(xcode_projects[build_file].path,
makefile_name)
# TODO(mark): try/close? Write to a temporary file and swap it only
# if it's got changes?
makefile = open(makefile_path, 'wb')
# make will build the first target in the makefile by default. By
# convention, it's called "all". List all (or at least one)
# concrete output for each rule source as a prerequisite of the "all"
# target.
makefile.write('all: \\\n')
for concrete_output_index in \
xrange(0, len(concrete_outputs_by_rule_source)):
# Only list the first (index [0]) concrete output of each input
# in the "all" target. Otherwise, a parallel make (-j > 1) would
# attempt to process each input multiple times simultaneously.
# Otherwise, "all" could just contain the entire list of
# concrete_outputs_all.
concrete_output = \
concrete_outputs_by_rule_source[concrete_output_index][0]
if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (concrete_output, eol))
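# Illustrative example (hypothetical rule): for rule sources one.ext and two.ext
# with outputs ['$(INPUT_FILE_BASE).cc'], the makefile written above begins with
#   all: \
#       one.cc \
#       two.cc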
for (rule_source, concrete_outputs, message, action) in \
zip(rule['rule_sources'], concrete_outputs_by_rule_source,
messages, actions):
makefile.write('\n')
# Add a rule that declares it can build each concrete output of a
# rule source. Collect the names of the directories that are
# required.
concrete_output_dirs = []
for concrete_output_index in xrange(0, len(concrete_outputs)):
concrete_output = concrete_outputs[concrete_output_index]
if concrete_output_index == 0:
bol = ''
else:
bol = ' '
makefile.write('%s%s \\\n' % (bol, concrete_output))
concrete_output_dir = posixpath.dirname(concrete_output)
if (concrete_output_dir and
concrete_output_dir not in concrete_output_dirs):
concrete_output_dirs.append(concrete_output_dir)
makefile.write(' : \\\n')
# The prerequisites for this rule are the rule source itself and
# the set of additional rule inputs, if any.
prerequisites = [rule_source]
prerequisites.extend(rule.get('inputs', []))
for prerequisite_index in xrange(0, len(prerequisites)):
prerequisite = prerequisites[prerequisite_index]
if prerequisite_index == len(prerequisites) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (prerequisite, eol))
# Make sure that output directories exist before executing the rule
# action.
if len(concrete_output_dirs) > 0:
makefile.write('\t@mkdir -p "%s"\n' %
'" "'.join(concrete_output_dirs))
# The rule message and action have already had the necessary variable
# substitutions performed.
if message:
# Mark it with note: so Xcode picks it up in build output.
makefile.write('\t@echo note: %s\n' % message)
makefile.write('\t%s\n' % action)
makefile.close()
# It might be nice to ensure that needed output directories exist
# here rather than in each target in the Makefile, but that wouldn't
# work if there ever was a concrete output that had an input-dependent
# variable anywhere other than in the leaf position.
# Don't declare any inputPaths or outputPaths. If they're present,
# Xcode will provide a slight optimization by only running the script
# phase if any output is missing or outdated relative to any input.
# Unfortunately, it will also assume that all outputs are touched by
# the script, and if the outputs serve as files in a compilation
# phase, they will be unconditionally rebuilt. Since make might not
# rebuild everything that could be declared here as an output, this
# extra compilation activity is unnecessary. With inputPaths and
# outputPaths not supplied, make will always be called, but it knows
# enough to not do anything when everything is up-to-date.
# To help speed things up, pass -j COUNT to make so it does some work
# in parallel. Don't use ncpus because Xcode will build ncpus targets
# in parallel and if each target happens to have a rules step, there
# would be ncpus^2 things going. With a machine that has 2 quad-core
# Xeons, a build can quickly run out of processes based on
# scheduling/other tasks, and randomly failing builds are no good.
script = \
"""JOB_COUNT="$(/usr/sbin/sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
JOB_COUNT=4
fi
exec xcrun make -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Rule "' + rule['rule_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# Extra rule inputs also go into the project file. Concrete outputs were
# already added when they were computed.
groups = ['inputs', 'inputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for group in groups:
for item in rule.get(group, []):
pbxp.AddOrGetFileInRootGroup(item)
# Add "sources".
for source in spec.get('sources', []):
(source_root, source_extension) = posixpath.splitext(source)
if source_extension[1:] not in rules_by_ext:
# AddSourceToTarget will add the file to a root group if it's not
# already there.
AddSourceToTarget(source, type, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(source)
# Add "mac_bundle_resources" and "mac_framework_private_headers" if
# it's a bundle of any type.
if is_bundle:
for resource in tgt_mac_bundle_resources:
(resource_root, resource_extension) = posixpath.splitext(resource)
if resource_extension[1:] not in rules_by_ext:
AddResourceToTarget(resource, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(resource)
for header in spec.get('mac_framework_private_headers', []):
AddHeaderToTarget(header, pbxp, xct, False)
# Add "mac_framework_headers". These can be valid for both frameworks
# and static libraries.
if is_bundle or type == 'static_library':
for header in spec.get('mac_framework_headers', []):
AddHeaderToTarget(header, pbxp, xct, True)
# Add "copies".
pbxcp_dict = {}
for copy_group in spec.get('copies', []):
dest = copy_group['destination']
if dest[0] not in ('/', '$'):
# Relative paths are relative to $(SRCROOT).
dest = '$(SRCROOT)/' + dest
# Coalesce multiple "copies" sections in the same target with the same
# "destination" property into the same PBXCopyFilesBuildPhase, otherwise
# they'll wind up with ID collisions.
pbxcp = pbxcp_dict.get(dest, None)
if pbxcp is None:
pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
'name': 'Copy to ' + copy_group['destination']
},
parent=xct)
pbxcp.SetDestination(dest)
# TODO(mark): The usual comment about this knowing too much about
# gyp.xcodeproj_file internals applies.
xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
pbxcp_dict[dest] = pbxcp
for file in copy_group['files']:
pbxcp.AddFile(file)
# Excluded files can also go into the project file.
if not skip_excluded_files:
for key in ['sources', 'mac_bundle_resources', 'mac_framework_headers',
'mac_framework_private_headers']:
excluded_key = key + '_excluded'
for item in spec.get(excluded_key, []):
pbxp.AddOrGetFileInRootGroup(item)
# So can "inputs" and "outputs" sections of "actions" groups.
groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for action in spec.get('actions', []):
for group in groups:
for item in action.get(group, []):
# Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
# sources.
if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
pbxp.AddOrGetFileInRootGroup(item)
for postbuild in spec.get('postbuilds', []):
action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
script = 'exec ' + action_string_sh + '\nexit 1\n'
# Make the postbuild step depend on the output of ld or ar from this
# target. Apparently putting the script step after the link step isn't
# sufficient to ensure proper ordering in all cases. With an input
# declared but no outputs, the script step should run every time, as
# desired.
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': ['$(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_PATH)'],
'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
xct.AppendProperty('buildPhases', ssbp)
# Add dependencies before libraries, because adding a dependency may imply
# adding a library. It's preferable to keep dependencies listed first
# during a link phase so that they can override symbols that would
# otherwise be provided by libraries, which will usually include system
# libraries. On some systems, ld is finicky and even requires the
# libraries to be ordered in such a way that unresolved symbols in
# earlier-listed libraries may only be resolved by later-listed libraries.
# The Mac linker doesn't work that way, but other platforms do, and so
# their linker invocations need to be constructed in this way. There's
# no compelling reason for Xcode's linker invocations to differ.
if 'dependencies' in spec:
for dependency in spec['dependencies']:
xct.AddDependency(xcode_targets[dependency])
# The support project also gets the dependencies (in case they are
# needed for the actions/rules to work).
if support_xct:
support_xct.AddDependency(xcode_targets[dependency])
if 'libraries' in spec:
for library in spec['libraries']:
xct.FrameworksPhase().AddFile(library)
# Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
# I wish Xcode handled this automatically.
library_dir = posixpath.dirname(library)
if library_dir not in xcode_standard_library_dirs and (
not xct.HasBuildSetting(_library_search_paths_var) or
library_dir not in xct.GetBuildSetting(_library_search_paths_var)):
xct.AppendBuildSetting(_library_search_paths_var, library_dir)
for configuration_name in configuration_names:
configuration = spec['configurations'][configuration_name]
xcbc = xct.ConfigurationNamed(configuration_name)
for include_dir in configuration.get('mac_framework_dirs', []):
xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
for include_dir in configuration.get('include_dirs', []):
xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
for library_dir in configuration.get('library_dirs', []):
if library_dir not in xcode_standard_library_dirs and (
not xcbc.HasBuildSetting(_library_search_paths_var) or
library_dir not in xcbc.GetBuildSetting(_library_search_paths_var)):
xcbc.AppendBuildSetting(_library_search_paths_var, library_dir)
if 'defines' in configuration:
for define in configuration['defines']:
set_define = EscapeXcodeDefine(define)
xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
if 'xcode_settings' in configuration:
for xck, xcv in configuration['xcode_settings'].iteritems():
xcbc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in configuration:
config_ref = pbxp.AddOrGetFileInRootGroup(
configuration['xcode_config_file'])
xcbc.SetBaseConfiguration(config_ref)
build_files = []
for build_file, build_file_dict in data.iteritems():
if build_file.endswith('.gyp'):
build_files.append(build_file)
for build_file in build_files:
xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
for build_file in build_files:
xcode_projects[build_file].Finalize2(xcode_targets,
xcode_target_to_target_dict)
for build_file in build_files:
xcode_projects[build_file].Write()
|
mit
|
mcheatham/computationalEnvironmentODP
|
bin/kernels.py
|
1
|
3923
|
#!/usr/bin/env python3
from bs4 import BeautifulSoup
from requests import get
from bs4.element import Tag
ignore = {'Lisp Machines, Inc.', 'Symbolics', 'Texas Instruments', 'Xerox'}
levels = {}
levels['Apple Inc.'] = {3}
levels['On S/360, S/370, and successor mainframes'] = {3}
levels['On other hardware platforms'] = {3}
def before(tag1, tag2, startTag):
    if len(tag1) == 0: return False
    if len(tag2) == 0: return True
tempTag = startTag
while tempTag and tempTag.previous_sibling:
tempTag = tempTag.previous_sibling
if isinstance(tempTag, Tag):
if tag1 in tempTag.getText():
return True
elif tag2 in tempTag.getText():
return False
return True
def includeLI(tag):
for p in tag.parents:
# ignores tags in the page's table of contents, navigation header, and footer
if 'id' in p.attrs.keys() and ('toc' in p['id'] or 'mw-navigation' in p['id'] or 'footer' in p['id']):
return False;
# ignores links to external references and wikipedia categories
if 'class' in p.attrs.keys() and ('references' in p['class'] or 'reference' in p['class'] or 'catlinks' in p['class']):
return False;
# ignores navigation links
if 'role' in p.attrs.keys() and 'navigation' in p['role']:
return False;
# ignores the 'See also' links
if tag.parent and tag.parent.find_previous_sibling('h2') and 'See also' in tag.parent.find_previous_sibling('h2').text:
return False;
# ignores the external links
if tag.parent and tag.parent.find_previous_sibling('h2') and 'External links' in tag.parent.find_previous_sibling('h2').text:
return False;
return True;
def includeA(tag):
# ignores tags specified directly in the ignore list
if tag.text in ignore:
return False;
# ignores links to external references and wikipedia categories
p = tag.parent
if p and 'class' in p.attrs.keys() and 'reference' in p['class']:
return False;
    # this page displays operating systems at various levels of specificity, from kernel down to
    # particular distributions in some cases. the script allows the user to specify the correct
    # level(s) of each list to pull using the 'levels' dictionary defined above. the code below
    # ensures that the tag is at an acceptable level. if the level is not specified, top-level
    # items are pulled.
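    # For example (purely hypothetical -- 'Unix-like' is not a heading this script
    # currently relies on), pulling the first- and third-level entries under such a
    # section would look like:
    #     levels['Unix-like'] = {1, 3}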
h4Depth = -1 # -1 because it takes one move to get out of the <a> tag itself
h4Heading = ''
temp = tag
while temp and not temp.find_previous_sibling('h4'):
h4Depth += 1
temp = temp.parent
if temp and temp.find_previous_sibling('h4') and temp.find_previous_sibling('h4').select('span'):
h4Heading = temp.find_previous_sibling('h4').select('span')[0].getText()
h3Depth = -1
h3Heading = ''
temp = tag
while temp and not temp.find_previous_sibling('h3'):
h3Depth += 1
temp = temp.parent
if temp and temp.find_previous_sibling('h3') and temp.find_previous_sibling('h3').select('span'):
h3Heading = temp.find_previous_sibling('h3').select('span')[0].getText()
    if (h4Depth < h3Depth or before(h4Heading, h3Heading, temp)) and h4Heading in levels:
return h4Depth in levels[h4Heading]
elif h3Heading in levels:
return h3Depth in levels[h3Heading];
else:
return h3Depth == 1
baseUrl = 'https://en.wikipedia.org/wiki/List_of_operating_systems'
doc = get(baseUrl).text
soup = BeautifulSoup(doc, 'html.parser')
listItems = soup.select('li')
answers = set()
for i in listItems:
if not includeLI(i): continue
links = i.select('a')
if links and includeA(links[0]) and not links[0].getText() in answers:
answers.add(links[0].getText())
for answer in sorted(answers):
print(answer)
|
mit
|
mgaitan/scipy
|
scipy/signal/tests/test_spectral.py
|
41
|
31224
|
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.testing import assert_raises, assert_approx_equal, \
assert_, run_module_suite, TestCase,\
assert_allclose, assert_array_equal,\
assert_array_almost_equal_nulp, dec
from scipy import signal, fftpack
from scipy._lib._version import NumpyVersion
from scipy.signal import (periodogram, welch, lombscargle, csd, coherence,
spectrogram)
class TestPeriodogram(TestCase):
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
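        # Expected one-sided density for a unit impulse (sketch of the reasoning,
        # assuming the default boxcar window and constant detrending): the spectrum
        # is flat at 2/N = 1/8, the DC bin is zeroed by detrending, and the Nyquist
        # bin is not doubled.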
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_real_onesided_odd(self):
x = np.zeros(15)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8)
q[0] = 0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-15)
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(16, 1.0))
q = np.ones(16)/16.0
q[0] = 0
assert_allclose(p, q)
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, scaling='spectrum')
g, q = periodogram(x, scaling='density')
assert_allclose(f, np.linspace(0, 0.5, 9))
assert_allclose(p, q/16.0)
def test_integer_even(self):
x = np.zeros(16, dtype=int)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_integer_odd(self):
x = np.zeros(15, dtype=int)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8)
q[0] = 0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-15)
def test_integer_twosided(self):
x = np.zeros(16, dtype=int)
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(16, 1.0))
q = np.ones(16)/16.0
q[0] = 0
assert_allclose(p, q)
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
f, p = periodogram(x)
assert_allclose(f, fftpack.fftfreq(16, 1.0))
q = 5.0*np.ones(16)/16.0
q[0] = 0
assert_allclose(p, q)
def test_unk_scaling(self):
assert_raises(ValueError, periodogram, np.zeros(4, np.complex128),
scaling='foo')
def test_nd_axis_m1(self):
x = np.zeros(20, dtype=np.float64)
x = x.reshape((2,1,10))
x[:,:,0] = 1.0
f, p = periodogram(x)
assert_array_equal(p.shape, (2, 1, 6))
assert_array_almost_equal_nulp(p[0,0,:], p[1,0,:], 60)
f0, p0 = periodogram(x[0,0,:])
assert_array_almost_equal_nulp(p0[np.newaxis,:], p[1,:], 60)
def test_nd_axis_0(self):
x = np.zeros(20, dtype=np.float64)
x = x.reshape((10,2,1))
x[0,:,:] = 1.0
f, p = periodogram(x, axis=0)
assert_array_equal(p.shape, (6,2,1))
assert_array_almost_equal_nulp(p[:,0,0], p[:,1,0], 60)
f0, p0 = periodogram(x[:,0,0])
assert_array_almost_equal_nulp(p0, p[:,1,0])
def test_window_external(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, 10, 'hanning')
win = signal.get_window('hanning', 16)
fe, pe = periodogram(x, 10, win)
assert_array_almost_equal_nulp(p, pe)
assert_array_almost_equal_nulp(f, fe)
def test_padded_fft(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x)
fp, pp = periodogram(x, nfft=32)
assert_allclose(f, fp[::2])
assert_allclose(p, pp[::2])
assert_array_equal(pp.shape, (17,))
def test_empty_input(self):
f, p = periodogram([])
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
for shape in [(0,), (3,0), (0,5,2)]:
f, p = periodogram(np.empty(shape))
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_empty_input_other_axis(self):
for shape in [(3,0), (0,5,2)]:
f, p = periodogram(np.empty(shape), axis=1)
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_short_nfft(self):
x = np.zeros(18)
x[0] = 1
f, p = periodogram(x, nfft=16)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_nfft_is_xshape(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, nfft=16)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_real_onesided_even_32(self):
x = np.zeros(16, 'f')
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9, 'f')
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
x = np.zeros(15, 'f')
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8, 'f')
q[0] = 0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-7)
assert_(p.dtype == q.dtype)
@dec.skipif(NumpyVersion(np.__version__) < '1.8.0')
def test_real_twosided_32(self):
x = np.zeros(16, 'f')
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(16, 1.0))
q = np.ones(16, 'f')/16.0
q[0] = 0
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
@dec.skipif(NumpyVersion(np.__version__) < '1.8.0')
def test_complex_32(self):
x = np.zeros(16, 'F')
x[0] = 1.0 + 2.0j
f, p = periodogram(x)
assert_allclose(f, fftpack.fftfreq(16, 1.0))
q = 5.0*np.ones(16, 'f')/16.0
q[0] = 0
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
class TestWelch(TestCase):
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_onesided_odd(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.15958227, 0.24193957, 0.24145224, 0.24100919,
0.24377353])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, scaling='spectrum')
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667,
0.02083333])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_even(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_odd(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.15958227, 0.24193957, 0.24145224, 0.24100919,
0.24377353])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_twosided(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = welch(x, nperseg=8)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556,
0.55555556, 0.55555556, 0.55555556, 0.38194444])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_unk_scaling(self):
assert_raises(ValueError, welch, np.zeros(4, np.complex128),
scaling='foo', nperseg=4)
def test_detrend_linear(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = welch(x, nperseg=10, detrend='linear')
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_no_detrending(self):
x = np.arange(10, dtype=np.float64) + 0.04
f1, p1 = welch(x, nperseg=10, detrend=False)
f2, p2 = welch(x, nperseg=10, detrend=lambda x: x)
assert_allclose(f1, f2, atol=1e-15)
assert_allclose(p1, p2, atol=1e-15)
def test_detrend_external(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = welch(x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_m1(self):
x = np.arange(40, dtype=np.float64) + 0.04
x = x.reshape((2,2,10))
f, p = welch(x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
x = np.rollaxis(x, 2, 0)
f, p = welch(x, nperseg=10, axis=0,
detrend=lambda seg: signal.detrend(seg, axis=0, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_nd_axis_m1(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
f, p = welch(x, nperseg=10)
assert_array_equal(p.shape, (2, 1, 6))
assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13)
f0, p0 = welch(x[0,0,:], nperseg=10)
assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13)
def test_nd_axis_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((10,2,1))
f, p = welch(x, nperseg=10, axis=0)
assert_array_equal(p.shape, (6,2,1))
assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13)
f0, p0 = welch(x[:,0,0], nperseg=10)
assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13)
def test_window_external(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, 10, 'hanning', 8)
win = signal.get_window('hanning', 8)
fe, pe = welch(x, 10, win, 8)
assert_array_almost_equal_nulp(p, pe)
assert_array_almost_equal_nulp(f, fe)
def test_empty_input(self):
f, p = welch([])
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
for shape in [(0,), (3,0), (0,5,2)]:
f, p = welch(np.empty(shape))
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_empty_input_other_axis(self):
for shape in [(3,0), (0,5,2)]:
f, p = welch(np.empty(shape), axis=1)
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_short_data(self):
x = np.zeros(8)
x[0] = 1
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
f, p = welch(x)
f1, p1 = welch(x, nperseg=8)
assert_allclose(f, f1)
assert_allclose(p, p1)
def test_window_long_or_nd(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
assert_raises(ValueError, welch, np.zeros(4), 1,
np.array([1,1,1,1,1]))
assert_raises(ValueError, welch, np.zeros(4), 1,
np.arange(6).reshape((2,3)))
def test_nondefault_noverlap(self):
x = np.zeros(64)
x[::8] = 1
f, p = welch(x, nperseg=16, noverlap=4)
q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5.,
1./6.])
assert_allclose(p, q, atol=1e-12)
def test_bad_noverlap(self):
assert_raises(ValueError, welch, np.zeros(4), 1, 'hanning', 2, 7)
def test_nfft_too_short(self):
assert_raises(ValueError, welch, np.ones(12), nfft=3, nperseg=4)
def test_real_onesided_even_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.15958227, 0.24193957, 0.24145224, 0.24100919,
0.24377353], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
@dec.skipif(NumpyVersion(np.__version__) < '1.8.0')
def test_real_twosided_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.11111111,
0.07638889], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
@dec.skipif(NumpyVersion(np.__version__) < '1.8.0')
def test_complex_32(self):
x = np.zeros(16, 'F')
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = welch(x, nperseg=8)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552,
0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype,
'dtype mismatch, %s, %s' % (p.dtype, q.dtype))
def test_padded_freqs(self):
x = np.zeros(12)
nfft = 24
f = fftpack.fftfreq(nfft, 1.0)[:nfft//2+1]
f[-1] *= -1
fodd, _ = welch(x, nperseg=5, nfft=nfft)
feven, _ = welch(x, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
nfft = 25
f = fftpack.fftfreq(nfft, 1.0)[:(nfft + 1)//2]
fodd, _ = welch(x, nperseg=5, nfft=nfft)
feven, _ = welch(x, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
class TestCSD:
def test_pad_shorter_x(self):
x = np.zeros(8)
y = np.zeros(12)
f = np.linspace(0, 0.5, 7)
c = np.zeros(7,dtype=np.complex128)
f1, c1 = csd(x, y, nperseg=12)
assert_allclose(f, f1)
assert_allclose(c, c1)
def test_pad_shorter_y(self):
x = np.zeros(12)
y = np.zeros(8)
f = np.linspace(0, 0.5, 7)
c = np.zeros(7,dtype=np.complex128)
f1, c1 = csd(x, y, nperseg=12)
assert_allclose(f, f1)
assert_allclose(c, c1)
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_onesided_odd(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.15958227, 0.24193957, 0.24145224, 0.24100919,
0.24377353])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, scaling='spectrum')
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667,
0.02083333])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_even(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_odd(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.15958227, 0.24193957, 0.24145224, 0.24100919,
0.24377353])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_twosided(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = csd(x, x, nperseg=8)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556,
0.55555556, 0.55555556, 0.55555556, 0.38194444])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_unk_scaling(self):
assert_raises(ValueError, csd, np.zeros(4, np.complex128),
np.ones(4, np.complex128), scaling='foo', nperseg=4)
def test_detrend_linear(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = csd(x, x, nperseg=10, detrend='linear')
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_no_detrending(self):
x = np.arange(10, dtype=np.float64) + 0.04
f1, p1 = csd(x, x, nperseg=10, detrend=False)
f2, p2 = csd(x, x, nperseg=10, detrend=lambda x: x)
assert_allclose(f1, f2, atol=1e-15)
assert_allclose(p1, p2, atol=1e-15)
def test_detrend_external(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = csd(x, x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_m1(self):
x = np.arange(40, dtype=np.float64) + 0.04
x = x.reshape((2,2,10))
f, p = csd(x, x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
x = np.rollaxis(x, 2, 0)
f, p = csd(x, x, nperseg=10, axis=0,
detrend=lambda seg: signal.detrend(seg, axis=0, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_nd_axis_m1(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
f, p = csd(x, x, nperseg=10)
assert_array_equal(p.shape, (2, 1, 6))
assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13)
f0, p0 = csd(x[0,0,:], x[0,0,:], nperseg=10)
assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13)
def test_nd_axis_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((10,2,1))
f, p = csd(x, x, nperseg=10, axis=0)
assert_array_equal(p.shape, (6,2,1))
assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13)
f0, p0 = csd(x[:,0,0], x[:,0,0], nperseg=10)
assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13)
def test_window_external(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, 10, 'hanning', 8)
win = signal.get_window('hanning', 8)
fe, pe = csd(x, x, 10, win, 8)
assert_array_almost_equal_nulp(p, pe)
assert_array_almost_equal_nulp(f, fe)
def test_empty_input(self):
f, p = csd([],np.zeros(10))
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
f, p = csd(np.zeros(10),[])
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
for shape in [(0,), (3,0), (0,5,2)]:
f, p = csd(np.empty(shape), np.empty(shape))
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
f, p = csd(np.ones(10), np.empty((5,0)))
assert_array_equal(f.shape, (5,0))
assert_array_equal(p.shape, (5,0))
f, p = csd(np.empty((5,0)), np.ones(10))
assert_array_equal(f.shape, (5,0))
assert_array_equal(p.shape, (5,0))
def test_empty_input_other_axis(self):
for shape in [(3,0), (0,5,2)]:
f, p = csd(np.empty(shape), np.empty(shape), axis=1)
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
f, p = csd(np.empty((10,10,3)), np.zeros((10,0,1)), axis=1)
assert_array_equal(f.shape, (10,0,3))
assert_array_equal(p.shape, (10,0,3))
f, p = csd(np.empty((10,0,1)), np.zeros((10,10,3)), axis=1)
assert_array_equal(f.shape, (10,0,3))
assert_array_equal(p.shape, (10,0,3))
def test_short_data(self):
x = np.zeros(8)
x[0] = 1
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
f, p = csd(x, x)
f1, p1 = csd(x, x, nperseg=8)
assert_allclose(f, f1)
assert_allclose(p, p1)
def test_window_long_or_nd(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1,
np.array([1,1,1,1,1]))
assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1,
np.arange(6).reshape((2,3)))
def test_nondefault_noverlap(self):
x = np.zeros(64)
x[::8] = 1
f, p = csd(x, x, nperseg=16, noverlap=4)
q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5.,
1./6.])
assert_allclose(p, q, atol=1e-12)
def test_bad_noverlap(self):
assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, 'hanning',
2, 7)
def test_nfft_too_short(self):
assert_raises(ValueError, csd, np.ones(12), np.zeros(12), nfft=3,
nperseg=4)
def test_real_onesided_even_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.15958227, 0.24193957, 0.24145224, 0.24100919,
0.24377353], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
@dec.skipif(NumpyVersion(np.__version__) < '1.8.0')
def test_real_twosided_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.11111111,
0.07638889], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
@dec.skipif(NumpyVersion(np.__version__) < '1.8.0')
def test_complex_32(self):
x = np.zeros(16, 'F')
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = csd(x, x, nperseg=8)
assert_allclose(f, fftpack.fftfreq(8, 1.0))
q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552,
0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype,
'dtype mismatch, %s, %s' % (p.dtype, q.dtype))
def test_padded_freqs(self):
x = np.zeros(12)
y = np.ones(12)
nfft = 24
f = fftpack.fftfreq(nfft, 1.0)[:nfft//2+1]
f[-1] *= -1
fodd, _ = csd(x, y, nperseg=5, nfft=nfft)
feven, _ = csd(x, y, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
nfft = 25
f = fftpack.fftfreq(nfft, 1.0)[:(nfft + 1)//2]
fodd, _ = csd(x, y, nperseg=5, nfft=nfft)
feven, _ = csd(x, y, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
class TestCoherence:
def test_identical_input(self):
x = np.random.randn(20)
y = np.copy(x) # So `y is x` -> False
f = np.linspace(0, 0.5, 6)
C = np.ones(6)
f1, C1 = coherence(x, y, nperseg=10)
assert_allclose(f, f1)
assert_allclose(C, C1)
def test_phase_shifted_input(self):
x = np.random.randn(20)
y = -x
f = np.linspace(0, 0.5, 6)
C = np.ones(6)
f1, C1 = coherence(x, y, nperseg=10)
assert_allclose(f, f1)
assert_allclose(C, C1)
class TestSpectrogram:
def test_average_all_segments(self):
x = np.random.randn(1024)
fs = 1.0
window = ('tukey', 0.25)
nperseg = 16
noverlap = 2
f, _, P = spectrogram(x, fs, window, nperseg, noverlap)
fw, Pw = welch(x, fs, window, nperseg, noverlap)
assert_allclose(f, fw)
assert_allclose(np.mean(P, axis=-1), Pw)
class TestLombscargle:
def test_frequency(self):
"""Test if frequency location of peak corresponds to frequency of
generated input signal.
"""
# Input parameters
ampl = 2.
w = 1.
phi = 0.5 * np.pi
nin = 100
nout = 1000
p = 0.7 # Fraction of points to select
# Randomly select a fraction of an array with timesteps
np.random.seed(2353425)
r = np.random.rand(nin)
t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
        # Generate a sine wave for the selected times
x = ampl * np.sin(w*t + phi)
# Define the array of frequencies for which to compute the periodogram
f = np.linspace(0.01, 10., nout)
# Calculate Lomb-Scargle periodogram
P = lombscargle(t, x, f)
# Check if difference between found frequency maximum and input
# frequency is less than accuracy
delta = f[1] - f[0]
assert_(w - f[np.argmax(P)] < (delta/2.))
def test_amplitude(self):
"""Test if height of peak in normalized Lomb-Scargle periodogram
corresponds to amplitude of the generated input signal.
"""
# Input parameters
ampl = 2.
w = 1.
phi = 0.5 * np.pi
nin = 100
nout = 1000
p = 0.7 # Fraction of points to select
# Randomly select a fraction of an array with timesteps
np.random.seed(2353425)
r = np.random.rand(nin)
t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
        # Generate a sine wave for the selected times
x = ampl * np.sin(w*t + phi)
# Define the array of frequencies for which to compute the periodogram
f = np.linspace(0.01, 10., nout)
# Calculate Lomb-Scargle periodogram
pgram = lombscargle(t, x, f)
# Normalize
pgram = np.sqrt(4 * pgram / t.shape[0])
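        # (For a single sinusoid of amplitude A sampled at n points, the raw
        # Lomb-Scargle peak is roughly n * A**2 / 4, so sqrt(4 * pgram / n)
        # rescales the peak to approximately A.)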
        # Check if the peak of the normalized periodogram matches the
        # amplitude of the input signal
assert_approx_equal(np.max(pgram), ampl, significant=2)
def test_wrong_shape(self):
t = np.linspace(0, 1, 1)
x = np.linspace(0, 1, 2)
f = np.linspace(0, 1, 3)
assert_raises(ValueError, lombscargle, t, x, f)
def test_zero_division(self):
t = np.zeros(1)
x = np.zeros(1)
f = np.zeros(1)
assert_raises(ZeroDivisionError, lombscargle, t, x, f)
def test_lombscargle_atan_vs_atan2(self):
# https://github.com/scipy/scipy/issues/3787
# This raised a ZeroDivisionError.
t = np.linspace(0, 10, 1000, endpoint=False)
x = np.sin(4*t)
f = np.linspace(0, 50, 500, endpoint=False) + 0.1
q = lombscargle(t, x, f*2*np.pi)
if __name__ == "__main__":
run_module_suite()
|
bsd-3-clause
|
wagnerand/zamboni
|
mkt/feed/tests/test_views.py
|
3
|
16300
|
# -*- coding: utf-8 -*-
import json
from nose.tools import eq_, ok_
from django.core.urlresolvers import reverse
import mkt.carriers
import mkt.regions
from addons.models import Preview
from mkt.api.tests.test_oauth import RestOAuth
from mkt.collections.constants import COLLECTIONS_TYPE_BASIC
from mkt.collections.models import Collection
from mkt.feed.models import FeedApp, FeedItem
from mkt.site.fixtures import fixture
from mkt.webapps.models import Webapp
class CollectionMixin(object):
collection_data = {
'author': u'My Àuthør',
'collection_type': COLLECTIONS_TYPE_BASIC,
'is_public': True,
'name': {'en-US': u'My Favorite Gamés'},
'slug': u'my-favourite-gamés',
}
def setUp(self):
self.collection = Collection.objects.create(**self.collection_data)
super(CollectionMixin, self).setUp()
class FeedAppMixin(object):
fixtures = fixture('webapp_337141')
def setUp(self):
self.feedapp_data = {
'app': 337141,
'description': {
'en-US': u'pan-fried potatoes'
},
}
self.pullquote_data = {
'pullquote_text': {'en-US': u'The bést!'},
'pullquote_rating': 4,
'pullquote_attribution': {'en-US': u'Jamés Bond'}
}
self.feedapps = []
super(FeedAppMixin, self).setUp()
def create_feedapps(self, n=2, **kwargs):
data = dict(self.feedapp_data)
data.update(kwargs)
if not isinstance(data['app'], Webapp):
data['app'] = Webapp.objects.get(pk=data['app'])
feedapps = [FeedApp.objects.create(**data) for idx in xrange(n)]
self.feedapps.extend(feedapps)
return feedapps
class BaseTestFeedItemViewSet(RestOAuth):
def setUp(self):
super(BaseTestFeedItemViewSet, self).setUp()
self.profile = self.user.get_profile()
def feed_permission(self):
"""
Grant the Feed:Curate permission to the authenticating user.
"""
self.grant_permission(self.profile, 'Feed:Curate')
class TestFeedItemViewSetList(CollectionMixin, BaseTestFeedItemViewSet):
"""
Tests the handling of GET requests to the list endpoint of FeedItemViewSet.
"""
def setUp(self):
super(TestFeedItemViewSetList, self).setUp()
self.url = reverse('api-v2:feeditem-list')
self.item = FeedItem.objects.create(collection=self.collection)
def list(self, client, **kwargs):
res = client.get(self.url, kwargs)
data = json.loads(res.content)
return res, data
def test_list_anonymous(self):
res, data = self.list(self.anon)
eq_(res.status_code, 200)
eq_(data['meta']['total_count'], 1)
eq_(data['objects'][0]['id'], self.item.id)
def test_list_no_permission(self):
res, data = self.list(self.client)
eq_(res.status_code, 200)
eq_(data['meta']['total_count'], 1)
eq_(data['objects'][0]['id'], self.item.id)
def test_list_with_permission(self):
self.feed_permission()
res, data = self.list(self.client)
eq_(res.status_code, 200)
eq_(data['meta']['total_count'], 1)
eq_(data['objects'][0]['id'], self.item.id)
class TestFeedItemViewSetCreate(CollectionMixin, BaseTestFeedItemViewSet):
"""
Tests the handling of POST requests to the list endpoint of FeedItemViewSet.
"""
def setUp(self):
super(TestFeedItemViewSetCreate, self).setUp()
self.url = reverse('api-v2:feeditem-list')
def create(self, client, **kwargs):
res = client.post(self.url, json.dumps(kwargs))
data = json.loads(res.content)
return res, data
def test_create_anonymous(self):
res, data = self.create(self.anon, collection=self.collection.pk)
eq_(res.status_code, 403)
def test_create_no_permission(self):
res, data = self.create(self.client, collection=self.collection.pk)
eq_(res.status_code, 403)
def test_create_with_permission(self):
self.feed_permission()
res, data = self.create(self.client, collection=self.collection.pk,
carrier=mkt.carriers.TELEFONICA.id,
region=mkt.regions.BR.id)
eq_(res.status_code, 201)
eq_(data['collection']['id'], self.collection.pk)
def test_create_no_data(self):
self.feed_permission()
res, data = self.create(self.client)
eq_(res.status_code, 400)
class TestFeedItemViewSetDetail(CollectionMixin, BaseTestFeedItemViewSet):
"""
Tests the handling of GET requests to detail endpoints of FeedItemViewSet.
"""
def setUp(self):
super(TestFeedItemViewSetDetail, self).setUp()
self.item = FeedItem.objects.create(collection=self.collection)
self.url = reverse('api-v2:feeditem-detail',
kwargs={'pk': self.item.pk})
def detail(self, client, **kwargs):
res = client.get(self.url, kwargs)
data = json.loads(res.content)
return res, data
    def test_detail_anonymous(self):
res, data = self.detail(self.anon)
eq_(res.status_code, 200)
eq_(data['id'], self.item.pk)
    def test_detail_no_permission(self):
res, data = self.detail(self.client)
eq_(res.status_code, 200)
eq_(data['id'], self.item.pk)
    def test_detail_with_permission(self):
self.feed_permission()
res, data = self.detail(self.client)
eq_(res.status_code, 200)
eq_(data['id'], self.item.pk)
class TestFeedItemViewSetUpdate(CollectionMixin, BaseTestFeedItemViewSet):
"""
Tests the handling of PATCH requests to detail endpoints of FeedItemViewSet.
"""
def setUp(self):
super(TestFeedItemViewSetUpdate, self).setUp()
self.item = FeedItem.objects.create(collection=self.collection)
self.url = reverse('api-v2:feeditem-detail',
kwargs={'pk': self.item.pk})
def update(self, client, **kwargs):
res = client.patch(self.url, json.dumps(kwargs))
data = json.loads(res.content)
return res, data
def test_update_anonymous(self):
res, data = self.update(self.anon)
eq_(res.status_code, 403)
def test_update_no_permission(self):
res, data = self.update(self.client)
eq_(res.status_code, 403)
def test_update_with_permission(self):
self.feed_permission()
res, data = self.update(self.client, region=mkt.regions.US.id)
eq_(res.status_code, 200)
eq_(data['id'], self.item.pk)
eq_(data['region'], mkt.regions.US.slug)
def test_update_no_items(self):
self.feed_permission()
res, data = self.update(self.client, collection=None)
eq_(res.status_code, 400)
class TestFeedItemViewSetDelete(CollectionMixin, BaseTestFeedItemViewSet):
"""
Tests the handling of DELETE requests to detail endpoints of
FeedItemViewSet.
"""
def setUp(self):
super(TestFeedItemViewSetDelete, self).setUp()
self.item = FeedItem.objects.create(collection=self.collection)
self.url = reverse('api-v2:feeditem-detail',
kwargs={'pk': self.item.pk})
def delete(self, client, **kwargs):
res = client.delete(self.url)
data = json.loads(res.content) if res.content else ''
return res, data
    def test_delete_anonymous(self):
res, data = self.delete(self.anon)
eq_(res.status_code, 403)
    def test_delete_no_permission(self):
res, data = self.delete(self.client)
eq_(res.status_code, 403)
    def test_delete_with_permission(self):
self.feed_permission()
res, data = self.delete(self.client)
eq_(res.status_code, 204)
class BaseTestFeedAppViewSet(FeedAppMixin, RestOAuth):
fixtures = FeedAppMixin.fixtures + RestOAuth.fixtures
def setUp(self):
super(BaseTestFeedAppViewSet, self).setUp()
self.app = Webapp.objects.get(pk=337141)
self.profile = self.user.get_profile()
def feed_permission(self):
"""
Grant the Feed:Curate permission to the authenticating user.
"""
self.grant_permission(self.profile, 'Feed:Curate')
class TestFeedAppViewSetList(BaseTestFeedAppViewSet):
"""
Tests the handling of GET requests to the list endpoint of FeedAppViewSet.
"""
num = 2
def setUp(self):
super(TestFeedAppViewSetList, self).setUp()
self.url = reverse('api-v2:feedapp-list')
self.create_feedapps(self.num)
def list(self, client):
res = client.get(self.url)
data = json.loads(res.content)
return res, data
def _test_list(self, client):
res, data = self.list(client)
eq_(res.status_code, 200)
objects = data['objects']
eq_(data['meta']['total_count'], self.num)
eq_(len(objects), self.num)
self.assertSetEqual([obj['id'] for obj in objects],
[fa.id for fa in self.feedapps])
def test_list_anonymous(self):
self._test_list(self.anon)
def test_list_no_permission(self):
self._test_list(self.client)
def test_list_with_permission(self):
self.feed_permission()
self._test_list(self.client)
class TestFeedAppViewSetCreate(BaseTestFeedAppViewSet):
"""
Tests the handling of POST requests to the list endpoint of FeedAppViewSet.
"""
fixtures = BaseTestFeedAppViewSet.fixtures
def setUp(self):
super(TestFeedAppViewSetCreate, self).setUp()
self.url = reverse('api-v2:feedapp-list')
def create(self, client, **kwargs):
res = client.post(self.url, json.dumps(kwargs))
data = json.loads(res.content)
return res, data
def test_create_anonymous(self):
res, data = self.create(self.anon)
eq_(res.status_code, 403)
def test_create_no_permission(self):
res, data = self.create(self.client, **self.feedapp_data)
eq_(res.status_code, 403)
def test_create_with_permission(self):
self.feed_permission()
res, data = self.create(self.client, **self.feedapp_data)
eq_(res.status_code, 201)
eq_(data['app']['id'], self.feedapp_data['app'])
eq_(data['description'], self.feedapp_data['description'])
return res, data
def test_create_with_preview(self):
preview = Preview.objects.create(addon=self.app, position=0)
self.feedapp_data.update(preview=preview.pk)
res, data = self.test_create_with_permission()
eq_(data['preview']['id'], preview.id)
def test_create_with_pullquote(self):
self.feedapp_data.update(**self.pullquote_data)
res, data = self.test_create_with_permission()
for field, value in self.pullquote_data.iteritems():
eq_(data[field], value)
def test_create_with_pullquote_no_rating(self):
del self.pullquote_data['pullquote_rating']
self.test_create_with_pullquote()
def test_create_with_pullquote_no_text(self):
self.feed_permission()
del self.pullquote_data['pullquote_text']
self.feedapp_data.update(**self.pullquote_data)
res, data = self.create(self.client, **self.feedapp_data)
eq_(res.status_code, 400)
ok_('__all__' in data)
def test_create_with_pullquote_bad_rating_fractional(self):
self.feed_permission()
self.pullquote_data['pullquote_rating'] = 4.5
self.feedapp_data.update(**self.pullquote_data)
res, data = self.create(self.client, **self.feedapp_data)
eq_(res.status_code, 400)
ok_('pullquote_rating' in data)
def test_create_with_pullquote_bad_rating_high(self):
self.feed_permission()
self.pullquote_data['pullquote_rating'] = 6
self.feedapp_data.update(**self.pullquote_data)
res, data = self.create(self.client, **self.feedapp_data)
eq_(res.status_code, 400)
ok_('pullquote_rating' in data)
def test_create_with_pullquote_bad_rating_low(self):
self.feed_permission()
self.pullquote_data['pullquote_rating'] = -1
self.feedapp_data.update(**self.pullquote_data)
res, data = self.create(self.client, **self.feedapp_data)
eq_(res.status_code, 400)
ok_('pullquote_rating' in data)
def test_create_no_data(self):
self.feed_permission()
res, data = self.create(self.client)
eq_(res.status_code, 400)
class TestFeedAppViewSetDetail(BaseTestFeedAppViewSet):
"""
Tests the handling of GET requests to detail endpoints of FeedAppViewSet.
"""
def setUp(self):
super(TestFeedAppViewSetDetail, self).setUp()
self.feedapp = self.create_feedapps(1)[0]
self.url = reverse('api-v2:feedapp-detail',
kwargs={'pk': self.feedapp.pk})
def detail(self, client, **kwargs):
res = client.get(self.url)
data = json.loads(res.content)
return res, data
def _test_detail(self, client):
res, data = self.detail(client)
eq_(res.status_code, 200)
eq_(data['id'], self.feedapp.pk)
eq_(data['url'], self.url)
eq_(data['app']['id'], self.feedapp.app.id)
ok_(not data['preview'])
ok_(not data['pullquote_text'])
def test_detail_anonymous(self):
self._test_detail(self.anon)
def test_detail_no_permission(self):
self._test_detail(self.client)
def test_detail_with_permission(self):
self.feed_permission()
self._test_detail(self.client)
class TestFeedAppViewSetUpdate(BaseTestFeedAppViewSet):
"""
Tests the handling of PATCH requests to detail endpoints of FeedAppViewSet.
"""
fixtures = BaseTestFeedAppViewSet.fixtures
def setUp(self):
super(TestFeedAppViewSetUpdate, self).setUp()
self.feedapp = self.create_feedapps(1)[0]
self.url = reverse('api-v2:feedapp-detail',
kwargs={'pk': self.feedapp.pk})
def update(self, client, **kwargs):
res = client.patch(self.url, json.dumps(kwargs))
data = json.loads(res.content)
return res, data
def test_update_anonymous(self):
res, data = self.update(self.anon)
eq_(res.status_code, 403)
def test_update_no_permission(self):
res, data = self.update(self.client, **self.feedapp_data)
eq_(res.status_code, 403)
def test_update_with_permission(self):
self.feed_permission()
new_description = {
'en-US': u"BastaCorp's famous pan-fried potatoes",
'fr': u'pommes de terre sautées de BastaCorp'
}
res, data = self.update(self.client, description=new_description)
eq_(res.status_code, 200)
eq_(data['description'], new_description)
def test_update_invalid_app(self):
self.feed_permission()
res, data = self.update(self.client, app=1)
eq_(res.status_code, 400)
ok_('app' in data)
def test_update_no_app(self):
self.feed_permission()
res, data = self.update(self.client, app=None)
eq_(res.status_code, 400)
ok_('app' in data)
class TestFeedAppViewSetDelete(BaseTestFeedAppViewSet):
"""
Tests the handling of DELETE requests to detail endpoints of FeedAppViewSet.
"""
def setUp(self):
super(TestFeedAppViewSetDelete, self).setUp()
self.feedapp = self.create_feedapps(1)[0]
self.url = reverse('api-v2:feedapp-detail',
kwargs={'pk': self.feedapp.pk})
def delete(self, client, **kwargs):
res = client.delete(self.url)
data = json.loads(res.content) if res.content else ''
return res, data
def test_delete_anonymous(self):
res, data = self.delete(self.anon)
eq_(res.status_code, 403)
def test_delete_no_permission(self):
res, data = self.delete(self.client)
eq_(res.status_code, 403)
def test_delete_with_permission(self):
self.feed_permission()
res, data = self.delete(self.client)
eq_(res.status_code, 204)
|
bsd-3-clause
|
zstackorg/zstack-woodpecker
|
integrationtest/vm/simulator/iam2/test_iam2_project_affinity_group_cascade_delete.py
|
2
|
2762
|
'''
@author: fangxiao
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.iam2_operations as iam2_ops
import zstackwoodpecker.operations.affinitygroup_operations as ag_ops
import zstackwoodpecker.operations.resource_operations as res_ops
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
affinity_group_inv = None
project_uuid = None
project_operator_uuid = None
def test():
global affinity_group_inv,project_uuid,project_operator_uuid
# 1 create project
project_name = 'test_project6'
project_uuid = iam2_ops.create_iam2_project(project_name).uuid
# 2 create project operator
project_operator_name = 'username6'
project_operator_password = 'b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86'
attributes = [{"name": "__ProjectOperator__", "value": project_uuid}]
project_operator_uuid = iam2_ops.create_iam2_virtual_id(project_operator_name,project_operator_password,attributes=attributes).uuid
# 3 login in project by project operator
iam2_ops.add_iam2_virtual_ids_to_project([project_operator_uuid],project_uuid)
project_operator_session_uuid = iam2_ops.login_iam2_virtual_id(project_operator_name,project_operator_password)
project_login_uuid = iam2_ops.login_iam2_project(project_name,session_uuid=project_operator_session_uuid).uuid
# 4 create affinity group and add vm into affinity group
ag1 = ag_ops.create_affinity_group(name="ag1",policy="antiHard",session_uuid=project_login_uuid)
vm1 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid)
test_obj_dict.add_vm(vm1)
# 5 delete and expunge the project and check the affinity group
iam2_ops.delete_iam2_project(project_uuid)
iam2_ops.expunge_iam2_project(project_uuid)
cond = res_ops.gen_query_conditions("appliance",'=',"CUSTOMER")
affinity_group_inv = res_ops.query_resource(res_ops.AFFINITY_GROUP,cond)
if affinity_group_inv:
        test_util.test_fail(
            "affinity_group [%s] still exists after expunging the project [%s]" % (affinity_group_inv[0].uuid,project_login_uuid))
    # 6 delete the project operator virtual id
iam2_ops.delete_iam2_virtual_id(project_operator_uuid)
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
if affinity_group_inv:
ag_ops.delete_affinity_group(affinity_group_inv[0].uuid)
if project_uuid:
iam2_ops.delete_iam2_project(project_uuid)
iam2_ops.expunge_iam2_project(project_uuid)
if project_operator_uuid:
iam2_ops.delete_iam2_virtual_id(project_operator_uuid)
|
apache-2.0
|
toomoresuch/pysonengine
|
eggs/ipython-0.10.1-py2.6.egg/IPython/external/path.py
|
7
|
32139
|
""" path.py - An object representing a path to a file or directory.
Example:
from IPython.external.path import path
d = path('/home/guido/bin')
for f in d.files('*.py'):
f.chmod(0755)
This module requires Python 2.2 or later.
URL: http://www.jorendorff.com/articles/python/path
Author: Jason Orendorff <jason.orendorff\x40gmail\x2ecom> (and others - see the url!)
Date: 9 Mar 2007
"""
# TODO
# - Tree-walking functions don't avoid symlink loops. Matt Harrison
# sent me a patch for this.
# - Bug in write_text(). It doesn't support Universal newline mode.
# - Better error message in listdir() when self isn't a
# directory. (On Windows, the error message really sucks.)
# - Make sure everything has a good docstring.
# - Add methods for regex find and replace.
# - guess_content_type() method?
# - Perhaps support arguments to touch().
from __future__ import generators
import sys, warnings, os, fnmatch, glob, shutil, codecs
# deprecated in python 2.6
warnings.filterwarnings('ignore', r'.*md5.*')
import md5
__version__ = '2.2'
__all__ = ['path']
# Platform-specific support for path.owner
if os.name == 'nt':
try:
import win32security
except ImportError:
win32security = None
else:
try:
import pwd
except ImportError:
pwd = None
# Pre-2.3 support. Are unicode filenames supported?
_base = str
_getcwd = os.getcwd
try:
if os.path.supports_unicode_filenames:
_base = unicode
_getcwd = os.getcwdu
except AttributeError:
pass
# Pre-2.3 workaround for booleans
try:
True, False
except NameError:
True, False = 1, 0
# Pre-2.3 workaround for basestring.
try:
basestring
except NameError:
basestring = (str, unicode)
# Universal newline support
_textmode = 'r'
if hasattr(file, 'newlines'):
_textmode = 'U'
class TreeWalkWarning(Warning):
pass
class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
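        For example, path('/home/guido') / 'bin' == path('/home/guido/bin').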
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def getcwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
getcwd = classmethod(getcwd)
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
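        For example, path('/home/guido/python.tar.gz').splitext()
        == (path('/home/guido/python.tar'), '.gz').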
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
r""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, '/' or 'C:\\'). The other items in
the list will be strings.
path.path.joinpath(*result) will yield the original path.
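        For example, on Unix, path('/usr/local/lib').splitall()
        == [path('/'), 'usr', 'local', 'lib'].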
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see path.walkdirs).
With the optional 'pattern' argument, this only lists
directories whose names match the given pattern. For
example, d.dirs('build-*').
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see path.walkfiles).
With the optional 'pattern' argument, this only lists files
whose names match the given pattern. For example,
d.files('*.pyc').
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The errors= keyword argument controls behavior when an
error occurs. The default is 'strict', which causes an
exception. The other allowed values are 'warn', which
reports the error via warnings.warn(), and 'ignore'.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
if errors == 'ignore':
isdir = False
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (child, sys.exc_info()[1]),
TreeWalkWarning)
isdir = False
else:
raise
if isdir:
for item in child.walk(pattern, errors):
yield item
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional 'pattern' argument, this yields only
directories whose names match the given pattern. For
example, mydir.walkdirs('*test') yields only directories
with names ending in 'test'.
The errors= keyword argument controls behavior when an
error occurs. The default is 'strict', which causes an
exception. The other allowed values are 'warn', which
reports the error via warnings.warn(), and 'ignore'.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, pattern, limits the results to files
with names that match the pattern. For example,
mydir.walkfiles('*.tmp') yields only files with the .tmp
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
def fnmatch(self, pattern):
""" Return True if self.name matches the given pattern.
pattern - A filename pattern with wildcards,
for example '*.py'.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, path('/users').glob('*/bin/*') returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self):
""" Calculate the md5 hash for this file.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = md5.new()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
return m.digest()
# --- Methods for querying the filesystem.
exists = os.path.exists
isdir = os.path.isdir
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
getatime = os.path.getatime
atime = property(
getatime, None, None,
""" Last access time of the file. """)
getmtime = os.path.getmtime
mtime = property(
getmtime, None, None,
""" Last-modified time of the file. """)
if hasattr(os.path, 'getctime'):
getctime = os.path.getctime
ctime = property(
getctime, None, None,
""" Creation time of the file. """)
getsize = os.path.getsize
size = property(
getsize, None, None,
""" Size of the file, in bytes. """)
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def get_owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
owner = property(
get_owner, None, None,
""" Name of the owner of this file or directory. """)
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
def chmod(self, mode):
os.chmod(self, mode)
if hasattr(os, 'chown'):
def chown(self, uid, gid):
os.chown(self, uid, gid)
def rename(self, new):
os.rename(self, new)
def renames(self, new):
os.renames(self, new)
# --- Create/delete operations on directories
def mkdir(self, mode=0777):
os.mkdir(self, mode)
def makedirs(self, mode=0777):
os.makedirs(self, mode)
def rmdir(self):
os.rmdir(self)
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
def remove(self):
os.remove(self)
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = shutil.copyfile
copymode = shutil.copymode
copystat = shutil.copystat
copy = shutil.copy
copy2 = shutil.copy2
copytree = shutil.copytree
if hasattr(shutil, 'move'):
move = shutil.move
rmtree = shutil.rmtree
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
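# --- Editor's usage sketch (not part of the original module; the glob
# --- patterns and directory layout below are illustrative assumptions).
# --- A minimal demo of the path class defined above, for Python 2.
if __name__ == '__main__':
    d = path(os.getcwd())
    # List the Python sources directly in this directory, with their sizes.
    for f in d.files('*.py'):
        print f.name, f.size
    # Walk the whole tree for text files, skipping unreadable directories.
    for f in d.walkfiles('*.txt', errors='ignore'):
        print f.relpath()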
|
mit
|
kimjaejoong/nova
|
nova/tests/functional/v3/test_floating_ips_bulk.py
|
33
|
3980
|
# Copyright 2014 IBM Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova import context
from nova.tests.functional.v3 import api_sample_base
CONF = cfg.CONF
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('public_interface', 'nova.network.linux_net')
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class FloatingIpsBulkTest(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extension_name = "os-floating-ips-bulk"
# TODO(gmann): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
def _get_flags(self):
f = super(FloatingIpsBulkTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.floating_ips_bulk.Floating_ips_bulk')
return f
def setUp(self):
super(FloatingIpsBulkTest, self).setUp()
pool = CONF.default_floating_pool
interface = CONF.public_interface
self.ip_pool = [
{
'address': "10.10.10.1",
'pool': pool,
'interface': interface,
'host': None
},
{
'address': "10.10.10.2",
'pool': pool,
'interface': interface,
'host': None
},
{
'address': "10.10.10.3",
'pool': pool,
'interface': interface,
'host': "testHost"
},
]
self.compute.db.floating_ip_bulk_create(
context.get_admin_context(), self.ip_pool)
self.addCleanup(self.compute.db.floating_ip_bulk_destroy,
context.get_admin_context(), self.ip_pool)
def test_floating_ips_bulk_list(self):
response = self._do_get('os-floating-ips-bulk')
subs = self._get_regexes()
self._verify_response('floating-ips-bulk-list-resp',
subs, response, 200)
def test_floating_ips_bulk_list_by_host(self):
response = self._do_get('os-floating-ips-bulk/testHost')
subs = self._get_regexes()
self._verify_response('floating-ips-bulk-list-by-host-resp',
subs, response, 200)
def test_floating_ips_bulk_create(self):
response = self._do_post('os-floating-ips-bulk',
'floating-ips-bulk-create-req',
{"ip_range": "192.168.1.0/24",
"pool": CONF.default_floating_pool,
"interface": CONF.public_interface})
subs = self._get_regexes()
self._verify_response('floating-ips-bulk-create-resp', subs,
response, 200)
def test_floating_ips_bulk_delete(self):
response = self._do_put('os-floating-ips-bulk/delete',
'floating-ips-bulk-delete-req',
{"ip_range": "192.168.1.0/24"})
subs = self._get_regexes()
self._verify_response('floating-ips-bulk-delete-resp', subs,
response, 200)
|
apache-2.0
|
suiyuan2009/tensorflow
|
tensorflow/python/platform/logging_test.py
|
211
|
1133
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
class EventLoaderTest(googletest.TestCase):
def test_log(self):
# Just check that logging works without raising an exception.
logging.error("test log message")
if __name__ == "__main__":
googletest.main()
|
apache-2.0
|
dmordom/nipype
|
nipype/interfaces/tests/test_base.py
|
3
|
20220
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import tempfile
import shutil
from nipype.testing import (assert_equal, assert_not_equal, assert_raises,
assert_true, assert_false, with_setup, package_check,
skipif)
import nipype.interfaces.base as nib
from nipype.utils.filemanip import split_filename
from nipype.interfaces.base import Undefined, config
from traits.testing.nose_tools import skip
import traits.api as traits
#test Bunch
def test_bunch():
b = nib.Bunch()
yield assert_equal, b.__dict__,{}
b = nib.Bunch(a=1,b=[2,3])
yield assert_equal, b.__dict__,{'a': 1, 'b': [2,3]}
def test_bunch_attribute():
b = nib.Bunch(a=1,b=[2,3],c=None)
yield assert_equal, b.a ,1
yield assert_equal, b.b, [2,3]
yield assert_equal, b.c, None
def test_bunch_repr():
b = nib.Bunch(b=2,c=3,a=dict(n=1,m=2))
yield assert_equal, repr(b), "Bunch(a={'m': 2, 'n': 1}, b=2, c=3)"
def test_bunch_methods():
b = nib.Bunch(a=2)
b.update(a=3)
newb = b.dictcopy()
yield assert_equal, b.a, 3
yield assert_equal, b.get('a'), 3
yield assert_equal, b.get('badkey', 'otherthing'), 'otherthing'
yield assert_not_equal, b, newb
yield assert_equal, type(dict()), type(newb)
yield assert_equal, newb['a'], 3
def test_bunch_hash():
# NOTE: Since the path to the json file is included in the Bunch,
# the hash will be unique to each machine.
pth = os.path.split(os.path.abspath(__file__))[0]
json_pth = os.path.join(pth, 'realign_json.json')
b = nib.Bunch(infile = json_pth,
otherthing = 'blue',
yat = True)
newbdict, bhash = b._get_bunch_hash()
yield assert_equal, bhash, 'ddcc7b4ec5675df8cf317a48bd1857fa'
# Make sure the hash stored in the json file for `infile` is correct.
jshash = nib.md5()
fp = file(json_pth)
jshash.update(fp.read())
fp.close()
yield assert_equal, newbdict['infile'][0][1], jshash.hexdigest()
yield assert_equal, newbdict['yat'], True
# create a temp file
#global tmp_infile, tmp_dir
#tmp_infile = None
#tmp_dir = None
def setup_file():
#global tmp_infile, tmp_dir
tmp_dir = tempfile.mkdtemp()
tmp_infile = os.path.join(tmp_dir, 'foo.txt')
open(tmp_infile, 'w').writelines('123456789')
return tmp_infile
def teardown_file(tmp_dir):
shutil.rmtree(tmp_dir)
def test_TraitedSpec():
yield assert_true, nib.TraitedSpec().get_hashval()
yield assert_equal, nib.TraitedSpec().__repr__(), '\n\n'
class spec(nib.TraitedSpec):
foo = nib.traits.Int
goo = nib.traits.Float(usedefault=True)
yield assert_equal, spec().foo, Undefined
yield assert_equal, spec().goo, 0.0
specfunc = lambda x : spec(hoo=x)
yield assert_raises, nib.traits.TraitError, specfunc, 1
infields = spec(foo=1)
hashval = ({'foo': 1, 'goo': '0.0000000000'}, 'cb03be1c3182ff941eecea6440c910f0')
yield assert_equal, infields.get_hashval(), hashval
#yield assert_equal, infields.hashval[1], hashval[1]
yield assert_equal, infields.__repr__(), '\nfoo = 1\ngoo = 0.0\n'
@skip
def test_TraitedSpec_dynamic():
from cPickle import dumps, loads
a = nib.BaseTraitedSpec()
a.add_trait('foo', nib.traits.Int)
a.foo = 1
assign_a = lambda : setattr(a, 'foo', 'a')
yield assert_raises, Exception, assign_a
pkld_a = dumps(a)
unpkld_a = loads(pkld_a)
assign_a_again = lambda : setattr(unpkld_a, 'foo', 'a')
yield assert_raises, Exception, assign_a_again
def test_TraitedSpec_logic():
class spec3(nib.TraitedSpec):
_xor_inputs = ('foo', 'bar')
foo = nib.traits.Int(xor = _xor_inputs,
desc = 'foo or bar, not both')
bar = nib.traits.Int(xor = _xor_inputs,
desc = 'bar or foo, not both')
kung = nib.traits.Float(requires = ('foo',),
position = 0,
desc = 'kung foo')
class out3(nib.TraitedSpec):
output = nib.traits.Int
class MyInterface(nib.BaseInterface):
input_spec = spec3
output_spec = out3
myif = MyInterface()
yield assert_raises, TypeError, setattr(myif.inputs, 'kung', 10.0)
myif.inputs.foo = 1
yield assert_equal, myif.inputs.foo, 1
set_bar = lambda : setattr(myif.inputs, 'bar', 1)
yield assert_raises, IOError, set_bar
yield assert_equal, myif.inputs.foo, 1
myif.inputs.kung = 2
yield assert_equal, myif.inputs.kung, 2.0
def test_deprecation():
class DeprecationSpec1(nib.TraitedSpec):
foo = nib.traits.Int(deprecated='0.1')
spec_instance = DeprecationSpec1()
set_foo = lambda : setattr(spec_instance, 'foo', 1)
yield assert_raises, nib.TraitError, set_foo
class DeprecationSpec1numeric(nib.TraitedSpec):
foo = nib.traits.Int(deprecated='0.1')
spec_instance = DeprecationSpec1numeric()
set_foo = lambda : setattr(spec_instance, 'foo', 1)
yield assert_raises, nib.TraitError, set_foo
class DeprecationSpec2(nib.TraitedSpec):
foo = nib.traits.Int(deprecated='100', new_name='bar')
spec_instance = DeprecationSpec2()
set_foo = lambda : setattr(spec_instance, 'foo', 1)
yield assert_raises, nib.TraitError, set_foo
class DeprecationSpec3(nib.TraitedSpec):
foo = nib.traits.Int(deprecated='1000', new_name='bar')
bar = nib.traits.Int()
spec_instance = DeprecationSpec3()
not_raised = True
try:
spec_instance.foo = 1
except nib.TraitError:
not_raised = False
yield assert_true, not_raised
class DeprecationSpec3(nib.TraitedSpec):
foo = nib.traits.Int(deprecated='1000', new_name='bar')
bar = nib.traits.Int()
spec_instance = DeprecationSpec3()
not_raised = True
try:
spec_instance.foo = 1
except nib.TraitError:
not_raised = False
yield assert_true, not_raised
yield assert_equal, spec_instance.foo, Undefined
yield assert_equal, spec_instance.bar, 1
def test_namesource():
tmp_infile = setup_file()
tmpd, nme, ext = split_filename(tmp_infile)
pwd = os.getcwd()
os.chdir(tmpd)
class spec2(nib.CommandLineInputSpec):
moo = nib.File(name_source=['doo'], hash_files=False, argstr="%s",
position=2)
doo = nib.File(exists=True, argstr="%s", position=1)
goo = traits.Int(argstr="%d", position=4)
poo = nib.File(name_source=['goo'], hash_files=False, argstr="%s",position=3)
class TestName(nib.CommandLine):
_cmd = "mycommand"
input_spec = spec2
testobj = TestName()
testobj.inputs.doo = tmp_infile
testobj.inputs.goo = 99
yield assert_true, '%s_generated' % nme in testobj.cmdline
testobj.inputs.moo = "my_%s_template"
yield assert_true, 'my_%s_template' % nme in testobj.cmdline
os.chdir(pwd)
teardown_file(tmpd)
def checknose():
"""check version of nose for known incompatability"""
mod = __import__('nose')
if mod.__versioninfo__[1] <= 11:
return 0
else:
return 1
@skipif(checknose)
def test_TraitedSpec_withFile():
tmp_infile = setup_file()
tmpd, nme = os.path.split(tmp_infile)
yield assert_true, os.path.exists(tmp_infile)
class spec2(nib.TraitedSpec):
moo = nib.File(exists=True)
doo = nib.traits.List(nib.File(exists=True))
infields = spec2(moo=tmp_infile, doo=[tmp_infile])
hashval = infields.get_hashval(hash_method='content')
yield assert_equal, hashval[1], '8c227fb727c32e00cd816c31d8fea9b9'
teardown_file(tmpd)
@skipif(checknose)
def test_TraitedSpec_withNoFileHashing():
tmp_infile = setup_file()
tmpd, nme = os.path.split(tmp_infile)
pwd = os.getcwd()
os.chdir(tmpd)
yield assert_true, os.path.exists(tmp_infile)
class spec2(nib.TraitedSpec):
moo = nib.File(exists=True, hash_files=False)
doo = nib.traits.List(nib.File(exists=True))
infields = spec2(moo=nme, doo=[tmp_infile])
hashval = infields.get_hashval(hash_method='content')
yield assert_equal, hashval[1], '642c326a05add933e9cdc333ce2d0ac2'
class spec3(nib.TraitedSpec):
moo = nib.File(exists=True, name_source="doo")
doo = nib.traits.List(nib.File(exists=True))
infields = spec3(moo=nme, doo=[tmp_infile])
hashval1 = infields.get_hashval(hash_method='content')
class spec4(nib.TraitedSpec):
moo = nib.File(exists=True)
doo = nib.traits.List(nib.File(exists=True))
infields = spec4(moo=nme, doo=[tmp_infile])
hashval2 = infields.get_hashval(hash_method='content')
yield assert_not_equal, hashval1[1], hashval2[1]
os.chdir(pwd)
teardown_file(tmpd)
def test_Interface():
yield assert_equal, nib.Interface.input_spec, None
yield assert_equal, nib.Interface.output_spec, None
yield assert_raises, NotImplementedError, nib.Interface
yield assert_raises, NotImplementedError, nib.Interface.help
yield assert_raises, NotImplementedError, nib.Interface._inputs_help
yield assert_raises, NotImplementedError, nib.Interface._outputs_help
yield assert_raises, NotImplementedError, nib.Interface._outputs
class DerivedInterface(nib.Interface):
def __init__(self):
pass
nif = DerivedInterface()
yield assert_raises, NotImplementedError, nif.run
yield assert_raises, NotImplementedError, nif.aggregate_outputs
yield assert_raises, NotImplementedError, nif._list_outputs
yield assert_raises, NotImplementedError, nif._get_filecopy_info
def test_BaseInterface():
yield assert_equal, nib.BaseInterface.help(), None
yield assert_equal, nib.BaseInterface._get_filecopy_info(), []
class InputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int')
goo = nib.traits.Int(desc='a random int', mandatory=True)
moo = nib.traits.Int(desc='a random int', mandatory=False)
hoo = nib.traits.Int(desc='a random int', usedefault=True)
zoo = nib.File(desc='a file', copyfile=False)
woo = nib.File(desc='a file', copyfile=True)
class OutputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int')
class DerivedInterface(nib.BaseInterface):
input_spec = InputSpec
yield assert_equal, DerivedInterface.help(), None
yield assert_true, 'moo' in ''.join(DerivedInterface._inputs_help())
yield assert_equal, DerivedInterface()._outputs(), None
yield assert_equal, DerivedInterface._get_filecopy_info()[0]['key'], 'woo'
yield assert_true, DerivedInterface._get_filecopy_info()[0]['copy']
yield assert_equal, DerivedInterface._get_filecopy_info()[1]['key'], 'zoo'
yield assert_false, DerivedInterface._get_filecopy_info()[1]['copy']
yield assert_equal, DerivedInterface().inputs.foo, Undefined
yield assert_raises, ValueError, DerivedInterface()._check_mandatory_inputs
yield assert_equal, DerivedInterface(goo=1)._check_mandatory_inputs(), None
yield assert_raises, ValueError, DerivedInterface().run
yield assert_raises, NotImplementedError, DerivedInterface(goo=1).run
class DerivedInterface2(DerivedInterface):
output_spec = OutputSpec
def _run_interface(self, runtime):
return runtime
yield assert_equal, DerivedInterface2.help(), None
yield assert_equal, DerivedInterface2()._outputs().foo, Undefined
yield assert_raises, NotImplementedError, DerivedInterface2(goo=1).run
nib.BaseInterface.input_spec = None
yield assert_raises, Exception, nib.BaseInterface
def test_input_version():
class InputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int', min_ver='0.9')
class DerivedInterface1(nib.BaseInterface):
input_spec = InputSpec
obj = DerivedInterface1()
not_raised = True
try:
obj._check_version_requirements(obj.inputs)
except:
not_raised = False
yield assert_true, not_raised
config.set('execution', 'stop_on_unknown_version', True)
try:
obj._check_version_requirements(obj.inputs)
except:
not_raised = False
yield assert_false, not_raised
config.set_default_config()
class InputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int', min_ver='0.9')
class DerivedInterface1(nib.BaseInterface):
input_spec = InputSpec
_version = '0.8'
obj = DerivedInterface1()
obj.inputs.foo = 1
yield assert_raises, Exception, obj._check_version_requirements
class InputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int', min_ver='0.9')
class DerivedInterface1(nib.BaseInterface):
input_spec = InputSpec
_version = '0.10'
obj = DerivedInterface1()
not_raised = True
try:
obj._check_version_requirements(obj.inputs)
except:
not_raised = False
yield assert_true, not_raised
class InputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int', min_ver='0.9')
class DerivedInterface1(nib.BaseInterface):
input_spec = InputSpec
_version = '0.9'
obj = DerivedInterface1()
obj.inputs.foo = 1
not_raised = True
try:
obj._check_version_requirements(obj.inputs)
except:
not_raised = False
yield assert_true, not_raised
class InputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int', max_ver='0.7')
class DerivedInterface2(nib.BaseInterface):
input_spec = InputSpec
_version = '0.8'
obj = DerivedInterface2()
obj.inputs.foo = 1
yield assert_raises, Exception, obj._check_version_requirements
class InputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int', max_ver='0.9')
class DerivedInterface1(nib.BaseInterface):
input_spec = InputSpec
_version = '0.9'
obj = DerivedInterface1()
obj.inputs.foo = 1
not_raised = True
try:
obj._check_version_requirements(obj.inputs)
except:
not_raised = False
yield assert_true, not_raised
def test_output_version():
class InputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int')
class OutputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int', min_ver='0.9')
class DerivedInterface1(nib.BaseInterface):
input_spec = InputSpec
output_spec = OutputSpec
_version = '0.10'
obj = DerivedInterface1()
yield assert_equal, obj._check_version_requirements(obj._outputs()), []
class InputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int')
class OutputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int', min_ver='0.11')
class DerivedInterface1(nib.BaseInterface):
input_spec = InputSpec
output_spec = OutputSpec
_version = '0.10'
obj = DerivedInterface1()
yield assert_equal, obj._check_version_requirements(obj._outputs()), ['foo']
class InputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int')
class OutputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int', min_ver='0.11')
class DerivedInterface1(nib.BaseInterface):
input_spec = InputSpec
output_spec = OutputSpec
_version = '0.10'
def _run_interface(self, runtime):
return runtime
def _list_outputs(self):
return {'foo': 1}
obj = DerivedInterface1()
yield assert_raises, KeyError, obj.run
def test_Commandline():
yield assert_raises, Exception, nib.CommandLine
ci = nib.CommandLine(command='which')
yield assert_equal, ci.cmd, 'which'
yield assert_equal, ci.inputs.args, Undefined
ci2 = nib.CommandLine(command='which', args='ls')
yield assert_equal, ci2.cmdline, 'which ls'
ci3 = nib.CommandLine(command='echo')
ci3.inputs.environ = {'MYENV' : 'foo'}
res = ci3.run()
yield assert_equal, res.runtime.environ['MYENV'], 'foo'
yield assert_equal, res.outputs, None
class CommandLineInputSpec1(nib.CommandLineInputSpec):
foo = nib.traits.Str(argstr='%s', desc='a str')
goo = nib.traits.Bool(argstr='-g', desc='a bool', position=0)
hoo = nib.traits.List(argstr='-l %s', desc='a list')
moo = nib.traits.List(argstr='-i %d...', desc='a repeated list',
position=-1)
noo = nib.traits.Int(argstr='-x %d', desc='an int')
roo = nib.traits.Str(desc='not on command line')
soo = nib.traits.Bool(argstr="-soo")
nib.CommandLine.input_spec = CommandLineInputSpec1
ci4 = nib.CommandLine(command='cmd')
ci4.inputs.foo = 'foo'
ci4.inputs.goo = True
ci4.inputs.hoo = ['a', 'b']
ci4.inputs.moo = [1, 2, 3]
ci4.inputs.noo = 0
ci4.inputs.roo = 'hello'
ci4.inputs.soo = False
cmd = ci4._parse_inputs()
yield assert_equal, cmd[0], '-g'
yield assert_equal, cmd[-1], '-i 1 -i 2 -i 3'
yield assert_true, 'hello' not in ' '.join(cmd)
yield assert_true, '-soo' not in ' '.join(cmd)
ci4.inputs.soo = True
cmd = ci4._parse_inputs()
yield assert_true, '-soo' in ' '.join(cmd)
class CommandLineInputSpec2(nib.CommandLineInputSpec):
foo = nib.File(argstr='%s', desc='a str', genfile=True)
nib.CommandLine.input_spec = CommandLineInputSpec2
ci5 = nib.CommandLine(command='cmd')
yield assert_raises, NotImplementedError, ci5._parse_inputs
class DerivedClass(nib.CommandLine):
input_spec = CommandLineInputSpec2
def _gen_filename(self, name):
return 'filename'
ci6 = DerivedClass(command='cmd')
yield assert_equal, ci6._parse_inputs()[0], 'filename'
nib.CommandLine.input_spec = nib.CommandLineInputSpec
def test_Commandline_environ():
from nipype import config
config.set_default_config()
ci3 = nib.CommandLine(command='echo')
res = ci3.run()
yield assert_equal, res.runtime.environ['DISPLAY'], ':1'
config.set('execution', 'display_variable', ':3')
res = ci3.run()
yield assert_false, 'DISPLAY' in ci3.inputs.environ
yield assert_equal, res.runtime.environ['DISPLAY'], ':3'
ci3.inputs.environ = {'DISPLAY' : ':2'}
res = ci3.run()
yield assert_equal, res.runtime.environ['DISPLAY'], ':2'
def test_CommandLine_output():
tmp_infile = setup_file()
tmpd, name = os.path.split(tmp_infile)
pwd = os.getcwd()
os.chdir(tmpd)
yield assert_true, os.path.exists(tmp_infile)
ci = nib.CommandLine(command='ls -l')
ci.inputs.terminal_output = 'allatonce'
res = ci.run()
yield assert_equal, res.runtime.merged, ''
yield assert_true, name in res.runtime.stdout
ci = nib.CommandLine(command='ls -l')
ci.inputs.terminal_output = 'file'
res = ci.run()
yield assert_true, 'stdout.nipype' in res.runtime.stdout
ci = nib.CommandLine(command='ls -l')
ci.inputs.terminal_output = 'none'
res = ci.run()
yield assert_equal, res.runtime.stdout, ''
ci = nib.CommandLine(command='ls -l')
res = ci.run()
yield assert_true, 'stdout.nipype' in res.runtime.stdout
os.chdir(pwd)
teardown_file(tmpd)
def test_global_CommandLine_output():
tmp_infile = setup_file()
tmpd, name = os.path.split(tmp_infile)
pwd = os.getcwd()
os.chdir(tmpd)
ci = nib.CommandLine(command='ls -l')
res = ci.run()
yield assert_true, name in res.runtime.stdout
yield assert_true, os.path.exists(tmp_infile)
nib.CommandLine.set_default_terminal_output('allatonce')
ci = nib.CommandLine(command='ls -l')
res = ci.run()
yield assert_equal, res.runtime.merged, ''
yield assert_true, name in res.runtime.stdout
nib.CommandLine.set_default_terminal_output('file')
ci = nib.CommandLine(command='ls -l')
res = ci.run()
yield assert_true, 'stdout.nipype' in res.runtime.stdout
nib.CommandLine.set_default_terminal_output('none')
ci = nib.CommandLine(command='ls -l')
res = ci.run()
yield assert_equal, res.runtime.stdout, ''
os.chdir(pwd)
teardown_file(tmpd)
|
bsd-3-clause
|
yantrabuddhi/atomspace
|
tests/cython/guile/test_pattern.py
|
3
|
2608
|
from unittest import TestCase
from opencog.atomspace import AtomSpace, TruthValue, Atom
from opencog.atomspace import types, is_a, get_type, get_type_name
from opencog.scheme_wrapper import load_scm, scheme_eval, scheme_eval_h
# We are poking atoms into this from the scm files, so we want
# them to still be there, later.
shared_space = AtomSpace()
class SchemeTest(TestCase):
def setUp(self):
global shared_space
self.space = shared_space
def tearDown(self):
pass
# Load several different scheme files, containing atom type
# declarations, and utilities. They should load just fine.
# These don't actually put any atoms into the atomspace.
def test_a_load_core_types(self):
scheme_eval(self.space, "(use-modules (opencog))")
# Load a file that results in atoms placed in the atomspace.
# Make sure the loaded atom is what we think it is.
def test_b_load_file(self):
status = load_scm(self.space, "tests/cython/guile/basic_unify.scm")
self.assertTrue(status)
a1 = self.space.add_node(types.ConceptNode, "hello")
self.assertTrue(a1)
# Make sure the truth value is what's in the SCM file.
expected = TruthValue(0.5, 0.5)
self.assertEquals(a1.tv, expected)
# print a1.tv, expected
# Run some basic evaluation tests
def test_c_eval(self):
basic = scheme_eval_h(self.space,
"(ConceptNode \"whatever\" (stv 0.5 0.5))")
a1 = self.space.add_node(types.ConceptNode, "whatever")
self.assertTrue(a1)
# Make sure the truth value is what's in the SCM file.
expected = TruthValue(0.5, 0.5)
self.assertEquals(a1.tv, expected)
# Actually, the atoms overall should compare.
self.assertEquals(a1, basic)
# Do it again, from a define in the scm file.
again = scheme_eval_h(self.space, "wobbly")
a2 = self.space.add_node(types.ConceptNode, "wobbly")
self.assertTrue(a2)
self.assertEquals(a2, again)
# Run the pattern-matcher/unifier/query-engine.
def test_unifier(self):
scheme_eval(self.space, "(use-modules (opencog query))")
question = scheme_eval_h(self.space, "find-animals")
self.assertTrue(question)
print "\nThe question is:"
print question
answer = scheme_eval_h(self.space, "(cog-bind find-animals)")
self.assertTrue(answer)
print "\nThe answer is:"
print answer
self.assertEqual(answer.type, types.SetLink)
self.assertEqual(answer.arity, 3)
|
agpl-3.0
|
ShinyROM/android_external_chromium_org
|
build/android/PRESUBMIT.py
|
59
|
1751
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for android buildbot.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
_DELETIONS_ONLY_FILES = (
'build/android/findbugs_filter/findbugs_known_bugs.txt',
)
def _CheckDeletionsOnlyFiles(input_api, output_api):
"""Check that a certain listed files only have deletions.
"""
warnings = []
for f in input_api.AffectedFiles():
if f.LocalPath() in _DELETIONS_ONLY_FILES:
if f.ChangedContents():
warnings.append(f.LocalPath())
results = []
if warnings:
results.append(output_api.PresubmitPromptWarning(
'Following files should only contain deletions.', warnings))
return results
def CommonChecks(input_api, output_api):
output = []
def J(*dirs):
"""Returns a path relative to presubmit directory."""
return input_api.os_path.join(input_api.PresubmitLocalPath(), *dirs)
output.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
white_list=[r'PRESUBMIT\.py$', r'buildbot/.*\.py$'],
extra_paths_list=[
J(), J('..', '..', 'third_party', 'android_testrunner'),
J('buildbot')]))
output.extend(input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, J('buildbot', 'tests')))
output.extend(_CheckDeletionsOnlyFiles(input_api, output_api))
return output
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CommonChecks(input_api, output_api)
|
bsd-3-clause
|
rlefevre1/hpp-rbprm-corba
|
script/tools/quaternion.py
|
12
|
15064
|
# Copyright (c) 2013 CNRS
# Author: Jorrit T'Hooft
#
# This file is part of hpp-corbaserver.
# hpp-corbaserver is free software: you can redistribute it
# and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# hpp-corbaserver is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Lesser Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with
# hpp-corbaserver. If not, see
# <http://www.gnu.org/licenses/>.
import numpy as np
from numpy import linalg
class Quaternion (object):
"""
Quaternion class :
------------------
A quaternion has a scalar part and a vector part.
In this class the quaternion is represented as an array of 4 elements :
- the first element is the scalar part
- the next 3 elements represents the vector part
    One can access the array directly through the attribute "array"
e.g. q1=Quaternion(1,0,0,0) --> q1.array
    A quaternion can be instantiated with 1, 2 or 4 elements
(see : __init__() for more information).
It can also return a rotation vector, a rotation matrix, or a SO3
(see the methods : to...() for more information).
"""
def __init__(self,*args):
"""
        Instantiation of the quaternion with 1, 2 or 4 arguments :
-----------------------------------------------------------
This creates a 4-sized array (self.array) representing the quaternion
with the first element representing the scalar part
and the 3 others the vector part.
With 4 arguments :
------------------
- the first one is used as the scalar part,
the other three as the vector part.
With 2 arguments :
------------------
- the 1-sized argument is used as the scalar part,
the 3-sized argument is used as the vector part.
With 1 argument :
-----------------
- if it is a quaternion it will create a copy of this quaternion.
- if it is a scalar, the scalar will be used as the scalar part
and the vector part will be set at (0,0,0).
- if it is an array, matrix, tuple or list of 4 elements,
the first element is used as the scalar part
and the rest as the vector part.
- if it is an array, matrix, tuple or list of 3 elements,
the 3 elements are interpreted as a rotation vector,
this creates a quaternion representing the same rotation.
        - if it is an array, matrix, tuple or list convertible to a
          2-dimensional array, with at least (3*3) elements,
the upper left (3*3) elements are interpreted as a rotation matrix,
this creates a quaternion representing the same rotation.
- if it is an instance of SO3, quaternion is built from rotation
matrix.
With 0 arguments :
------------------
        If no argument is given, then the quaternion is set by default
        with the scalar part set to 1 and the vector part to (0,0,0).
(this is the neutral element for multiplication in quaternion space)
To create a quaternion from Roll, Pitch, Yaw angles :
-----------------------------------------------------
        first instantiate a quaternion and then use the method fromRPY()
        to change its values to the desired ones.
e.g. : quat().fromRPY(R,P,Y)
"""
error=False
if len(args)==0: # By default, if no argument is given
self.array=np.array([1.,0.,0.,0.])
elif len (args) == 4: # From 4 elements
if np.array(args).size==4:
self.array = np.double(np.array (args))
else:
error=True
elif len (args) == 1:
if type(args[0])==Quaternion: # From a Quaternion
self.array=args[0].array.copy()
elif np.array(args[0]).size==1: # From one sized element, this element will be the scalar part, the vector part will be set at (0,0,0)
self.array=np.double(np.hstack([np.array(args[0]),np.array([0,0,0])]))
elif np.array(args[0]).size==4 and max(np.array(args[0]).shape)==4: # From an array, matrix, tuple or list of 4 elements
self.array = np.double(np.array(args[0])).reshape(4,)
elif np.array(args[0]).size==3 and max(np.array(args[0]).shape)==3: # From an array, matrix, tuple or list of 3 elements interpreted as a rotation vector
rV=np.double(np.array(args[0])).reshape(3,)
alpha=np.double(linalg.norm(rV))
if alpha !=0:
e=rV/alpha
else:
e=rV
self.array=np.hstack([np.cos(alpha/2.),np.sin(alpha/2.)*e])
        elif len(np.array(args[0]).shape)==2 and np.array(args[0]).shape[0]>=3 and np.array(args[0]).shape[1]>=3: # From an array, matrix, tuple or list convertible to a 2-dimensional array with at least (3*3) elements, interpreted as a rotation matrix
rM=np.double(np.array(args[0])[:3,:3])
selec=np.zeros(4)
selec[0]=1+rM[0,0]+rM[1,1]+rM[2,2]
selec[1]=1+rM[0,0]-rM[1,1]-rM[2,2]
selec[2]=1-rM[0,0]+rM[1,1]-rM[2,2]
selec[3]=1-rM[0,0]-rM[1,1]+rM[2,2]
param=selec.argmax()
if selec[param]>0:
q=np.zeros(4)
if param==0:
q[0]=np.sqrt(selec[param])
q[1]=(rM[2,1]-rM[1,2])/q[0]
q[2]=(rM[0,2]-rM[2,0])/q[0]
q[3]=(rM[1,0]-rM[0,1])/q[0]
self.array=q*0.5
# print '--1--V3'
elif param==1:
q[1]=np.sqrt(selec[param])
q[0]=(rM[2,1]-rM[1,2])/q[1]
q[2]=(rM[1,0]+rM[0,1])/q[1]
q[3]=(rM[0,2]+rM[2,0])/q[1]
self.array=q*0.5
# print '--2--V3'
elif param==2:
q[2]=np.sqrt(selec[param])
q[0]=(rM[0,2]-rM[2,0])/q[2]
q[1]=(rM[1,0]+rM[0,1])/q[2]
q[3]=(rM[2,1]+rM[1,2])/q[2]
self.array=q*0.5
# print '--3--V3'
elif param==3:
q[3]=np.sqrt(selec[param])
q[0]=(rM[1,0]-rM[0,1])/q[3]
q[1]=(rM[0,2]+rM[2,0])/q[3]
q[2]=(rM[2,1]+rM[1,2])/q[3]
self.array=q*0.5
# print '--4--V3'
else:
error=True
else:
error=True
elif len(args)==2: # From a scalar part (1 element) and a vector part (3 elements)
arg0=np.double(np.array(args[0]))
arg1=np.double(np.array(args[1]))
if arg0.size==1 and arg1.size==3:
self.array=np.zeros(4)
self.array[0]=arg0
self.array[1:4]=arg1[:]
elif arg0.size==3 and arg1.size==1:
self.array=np.zeros(4)
self.array[0]=arg1
self.array[1:4]=arg0[:]
else:
error=True
else:
error=True
if error==False and self.array.shape!=(4,):
del self.array
error=True
if error:
raise TypeError ("Impossible to instanciate the Quaternion object with the given arguments")
def __str__(self):
"""
String representation of the quaternion.
"""
aff='[ '
aff+=str(self.array [0])+' + '
aff+=str(self.array [1])+' i + '
aff+=str(self.array [2])+' j + '
aff+=str(self.array [3])+' k ]'
return aff
def __neg__(self):
"""
        Returns a quaternion whose elements are the opposite of the original's,
(this opposite quaternion represents the same rotation).
"""
return Quaternion(-self.array)
def __add__(self,other):
"""
        If other is not a quaternion it is cast to a quaternion;
        the elements are then added one to one.
"""
if type(other)!=Quaternion:
q2=Quaternion(other)
else:
q2=other
return Quaternion(self.array+q2.array)
def __sub__(self,other):
"""
        If other is not a quaternion it is cast to a quaternion;
        the elements are then subtracted one to one.
"""
if type(other)!=Quaternion:
q2=Quaternion(other)
else:
q2=other
return Quaternion(self.array-q2.array)
def __mul__(self,other):
"""
        If other is not a quaternion it is cast to a quaternion,
the result of the quaternion multiplication is returned.
"""
if type(other)!=Quaternion:
q2=Quaternion(other)
else:
q2=other
qr=np.zeros(4)
qr[0]=self.array[0]*q2.array[0]-np.vdot(self.array[1:],q2.array[1:])
qr[1:4]=np.cross(self.array[1:4],q2.array[1:4])+self.array[0]*q2.array[1:4]+q2.array[0]*self.array[1:4]
return Quaternion(qr)
def __rmul__(self,other):
"""
        other is cast to a quaternion,
the result of the quaternion multiplication is returned.
"""
return Quaternion(other)*self
def transform (self, v):
"""
apply rotation to a vector
"""
u = np.array (self.array [1:4])
s = self.array [0]
return 2*u.dot(v)*u + (s*s - u.dot(u))*v + 2*s*np.cross(u, v)
def __abs__(self):
"""
Returns the norm of the quaternion.
"""
return np.double(linalg.norm(self.array))
def conjugate(self):
"""
Returns the conjugate of the quaternion.
"""
return Quaternion(self.array[0],-self.array[1:4])
def inv(self):
"""
Returns the inverse of the quaternion.
"""
return Quaternion(self.conjugate().array/(abs(self)**2))
def __div__(self,other):
"""
        If other is not a quaternion it is cast to a quaternion,
the result of the quaternion multiplication with the inverse of other
is returned.
"""
if type(other)!=Quaternion:
q2=Quaternion(other)
else:
q2=other
return self*q2.inv()
def __pow__(self,n):
"""
Returns quaternion**n with quaternion**0 = Quaternion(1,0,0,0).
"""
r=Quaternion()
for i in range(n):
r=r*self
return r
def normalize (self):
"""
Changes the values of the quaternion to make it a unit quaternion
representing the same rotation as the original one
and returns the updated version.
"""
        self.array /= abs(self)
return self
def normalized (self):
"""
Returns the unit quaternion representation of the quaternion
without changing the original.
"""
qr=Quaternion(self)
qr.normalize()
return qr
def toRotationMatrix(self):
"""
Returns a (3*3) array (rotation matrix)
representing the same rotation as the (normalized) quaternion.
"""
q=self.normalized().array
rm=np.zeros((3,3))
rm[0,0]=1-2*(q[2]**2+q[3]**2)
rm[0,1]=2*q[1]*q[2]-2*q[0]*q[3]
rm[0,2]=2*q[1]*q[3]+2*q[0]*q[2]
rm[1,0]=2*q[1]*q[2]+2*q[0]*q[3]
rm[1,1]=1-2*(q[1]**2+q[3]**2)
rm[1,2]=2*q[2]*q[3]-2*q[0]*q[1]
rm[2,0]=2*q[1]*q[3]-2*q[0]*q[2]
rm[2,1]=2*q[2]*q[3]+2*q[0]*q[1]
rm[2,2]=1-2*(q[1]**2+q[2]**2)
return rm
def toRotationVector(self):
"""
Returns a 3-sized array (rotation vector)
representing the same rotation as the (normalized) quaternion.
"""
q=self.normalized().array
rV=np.zeros(3)
alpha=2*np.arccos(q[0])
if linalg.norm(q[1:4])!=0:
rV=alpha*q[1:4]/linalg.norm(q[1:4])
return rV
def copy(self):
"""
Returns a copy of the quaternion.
"""
return Quaternion(self)
def toRPY(self):
"""
        Returns a 3-sized array representing the same rotation
as the (normalized) quaternion. With :
- the first element representing the Roll,
- the second the Pitch
- the third the Yaw
Where Roll Pitch and Yaw are the angles so that the rotation
with the quaternion represents the same rotation as :
- A rotation of R (Roll) about the original x-axis,
followed by a rotation of P (Pitch) about the original y-axis,
followed by a rotation of Y (Yaw) about the original z-axis.
- Or otherwise a rotation of Y about the original z-axis,
followed by a rotation of P about the new y-axis,
followed by a rotation of R about the new x-axis.
"""
q=self.normalized().array
r=np.arctan2(2*(q[0]*q[1]+q[2]*q[3]),1-2*(q[1]**2+q[2]**2))
        p=np.arctan2(2*(q[0]*q[2]-q[3]*q[1]),np.sqrt((2*(q[0]*q[1]+q[2]*q[3]))**2+(1-2*(q[1]**2+q[2]**2))**2)) # We can use arcsin but arctan2 is more robust
y=np.arctan2(2*(q[0]*q[3]+q[1]*q[2]),1-2*(q[2]**2+q[3]**2))
return np.array([r,p,y])
def fromRPY(self,R,P,Y):
"""
Set the values of the quaternion to the values of a unit quaternion
representing the same rotation as the one performed by Roll Pitch Yaw :
- A rotation of R (Roll) about the original x-axis,
followed by a rotation of P (Pitch) about the original y-axis,
followed by a rotation of Y (Yaw) about the original z-axis.
- Or otherwise a rotation of Y about the original z-axis,
followed by a rotation of P about the new y-axis,
followed by a rotation of R about the new x-axis.
"""
r=R/2.
p=P/2.
y=Y/2.
self.array[0]=np.cos(r)*np.cos(p)*np.cos(y)+np.sin(r)*np.sin(p)*np.sin(y)
self.array[1]=np.sin(r)*np.cos(p)*np.cos(y)-np.cos(r)*np.sin(p)*np.sin(y)
self.array[2]=np.cos(r)*np.sin(p)*np.cos(y)+np.sin(r)*np.cos(p)*np.sin(y)
self.array[3]=np.cos(r)*np.cos(p)*np.sin(y)-np.sin(r)*np.sin(p)*np.cos(y)
return self.normalize()
def toTuple (self):
"""
        Return the quaternion as a tuple of floats starting with the real part.
"""
return tuple (self.array)
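# --- Editor's usage sketch (not part of the original file; the numeric
# --- values are arbitrary assumptions). Shows how the Quaternion class
# --- above composes rotations and converts between representations.
if __name__ == '__main__':
    q1 = Quaternion().fromRPY(0.1, 0.2, 0.3)       # from Roll/Pitch/Yaw angles
    q2 = Quaternion([0., 0., np.pi / 2.])          # from a rotation vector
    q = (q1 * q2).normalized()                     # compose the two rotations
    print q
    print q.toRotationMatrix()                     # same rotation as a 3*3 matrix
    print q.transform(np.array([1., 0., 0.]))      # rotate a vector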
|
lgpl-3.0
|
dariox2/CADL
|
test/testyida6b.py
|
1
|
4901
|
#
# test shuffle_batch - 6b
#
# generates a pair of files (color+bn)
# pending: make the tuple match
#
print("Loading tensorflow...")
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
from libs import utils
import datetime
tf.set_random_seed(1)
def create_input_pipeline_yida(files1, files2, batch_size, n_epochs, shape, crop_shape=None,
crop_factor=1.0, n_threads=1, seed=None):
producer1 = tf.train.string_input_producer(
files1, capacity=len(files1), shuffle=False)
producer2 = tf.train.string_input_producer(
files2, capacity=len(files2), shuffle=False)
    # We need something which can open the files and read their contents.
reader = tf.WholeFileReader()
# We pass the filenames to this object which can read the file's contents.
# This will create another queue running which dequeues the previous queue.
keys1, vals1 = reader.read(producer1)
keys2, vals2 = reader.read(producer2)
# And then have to decode its contents as we know it is a jpeg image
imgs1 = tf.image.decode_jpeg(vals1, channels=3)
imgs2 = tf.image.decode_jpeg(vals2, channels=3)
# We have to explicitly define the shape of the tensor.
# This is because the decode_jpeg operation is still a node in the graph
# and doesn't yet know the shape of the image. Future operations however
# need explicit knowledge of the image's shape in order to be created.
imgs1.set_shape(shape)
imgs2.set_shape(shape)
    # Next we'll resize and centrally crop the image to the requested crop_shape.
    # This operation requires explicit knowledge of the image's shape.
if shape[0] > shape[1]:
rsz_shape = [int(shape[0] / shape[1] * crop_shape[0] / crop_factor),
int(crop_shape[1] / crop_factor)]
else:
rsz_shape = [int(crop_shape[0] / crop_factor),
int(shape[1] / shape[0] * crop_shape[1] / crop_factor)]
rszs1 = tf.image.resize_images(imgs1, rsz_shape[0], rsz_shape[1])
rszs2 = tf.image.resize_images(imgs2, rsz_shape[0], rsz_shape[1])
crops1 = (tf.image.resize_image_with_crop_or_pad(
rszs1, crop_shape[0], crop_shape[1])
if crop_shape is not None
else imgs1)
crops2 = (tf.image.resize_image_with_crop_or_pad(
rszs2, crop_shape[0], crop_shape[1])
if crop_shape is not None
else imgs2)
# Now we'll create a batch generator that will also shuffle our examples.
# We tell it how many it should have in its buffer when it randomly
# permutes the order.
min_after_dequeue = len(files1) // 5
# The capacity should be larger than min_after_dequeue, and determines how
# many examples are prefetched. TF docs recommend setting this value to:
# min_after_dequeue + (num_threads + a small safety margin) * batch_size
capacity = min_after_dequeue + (n_threads + 1) * batch_size
# Randomize the order and output batches of batch_size.
batch = tf.train.shuffle_batch([crops1, crops2],
enqueue_many=False,
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
num_threads=n_threads,
#seed=seed,
)#shapes=(64,64,3))
# alternatively, we could use shuffle_batch_join to use multiple reader
# instances, or set shuffle_batch's n_threads to higher than 1.
return batch
def CELEByida(path):
fs = [os.path.join(path, f)
for f in os.listdir(path) if f.endswith('.jpg')]
fs=sorted(fs)
return fs
print("Loading celebrities...")
from libs.datasets import CELEB
files1 = CELEByida("../session-1/img_align_celeba/") # only 100
files2 = CELEByida("../session-1/img_align_celeba_n/") # only 100
from libs.dataset_utils import create_input_pipeline
batch_size = 8
n_epochs = 3
input_shape = [218, 178, 3]
crop_shape = [64, 64, 3]
crop_factor = 0.8
seed=15
batch1 = create_input_pipeline_yida(
files1=files1, files2=files2,
batch_size=batch_size,
n_epochs=n_epochs,
crop_shape=crop_shape,
crop_factor=crop_factor,
shape=input_shape,
seed=seed)
mntg=[]
sess = tf.Session()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
batres = sess.run(batch1)
batch_xs1=np.array(batres[0])
batch_xs2=np.array(batres[1])
for i in range(0,len(batch_xs1)):
img=batch_xs1[i] / 255.0
mntg.append(img)
img=batch_xs2[i] / 255.0
mntg.append(img)
TID=datetime.date.today().strftime("%Y%m%d")+"_"+datetime.datetime.now().time().strftime("%H%M%S")
m=utils.montage(mntg, saveto="montage_"+TID+".png")
# mntg[0]=color
# mntg[1]=b/n
plt.figure(figsize=(5, 5))
plt.imshow(m)
plt.show()
# eop
|
apache-2.0
|
sandeepkoduri/GAE-html-to-pdf
|
libs/PIL/TarIO.py
|
21
|
1241
|
#
# The Python Imaging Library.
# $Id$
#
# read files from within a tar file
#
# History:
# 95-06-18 fl Created
# 96-05-28 fl Open files in binary mode
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1995-96.
#
# See the README file for information on usage and redistribution.
#
from PIL import ContainerIO
##
# A file object that provides read access to a given member of a TAR
# file.
class TarIO(ContainerIO.ContainerIO):
def __init__(self, tarfile, file):
"""
Create file object.
:param tarfile: Name of TAR file.
:param file: Name of member file.
"""
fh = open(tarfile, "rb")
while True:
s = fh.read(512)
if len(s) != 512:
raise IOError("unexpected end of tar file")
name = s[:100].decode('utf-8')
i = name.find('\0')
if i == 0:
raise IOError("cannot find subfile")
if i > 0:
name = name[:i]
size = int(s[124:135], 8)
if file == name:
break
fh.seek((size + 511) & (~511), 1)
# Open region
ContainerIO.ContainerIO.__init__(self, fh, fh.tell(), size)
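# --- Usage sketch (added; not part of the original module) ---
# TarIO provides a read-only file object for a single member of a tar archive,
# which can then be handed to PIL's Image.open. Archive and member names below
# are placeholders:
#
#   from PIL import Image, TarIO
#   fp = TarIO.TarIO("archive.tar", "image.jpg")
#   im = Image.open(fp)
#   im.show()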
|
mit
|
luisgg/iteexe
|
nevow/zomnesrv.py
|
14
|
3211
|
import time
from nevow import wsgi
from twisted.internet import protocol, reactor
from twisted.protocols import basic
from twisted.python import log
IN_KEY = 'STDIN_FILENAME='
IN_KEY_LEN = len(IN_KEY)
class ZomneProtocol(basic.NetstringReceiver):
def connectionMade(self):
self.environ = {}
def stringReceived(self, data):
key, value = data.split('=', 1)
self.environ[key] = value
if data.startswith(IN_KEY):
filenm = data[IN_KEY_LEN:]
self.stdin = open(filenm).read()
# WSGI variables
self.environ['wsgi.version'] = (1,0)
self.environ['wsgi.multithread'] = False
self.environ['wsgi.multiprocess'] = False
if self.environ.get('HTTPS','off') in ('on','1'):
self.environ['wsgi.url_scheme'] = 'https'
else:
self.environ['wsgi.url_scheme'] = 'http'
# print "ENV", self.environ
result = self.factory.application(self.environ, self.start_response)
for data in result:
if data:
self.write(data)
## We got everything, let's render the request
self.transport.loseConnection()
self.factory.log('%s - - %s "%s" %d %s "%s" "%s"' % (
self.environ['REMOTE_ADDR'],
time.strftime("[%d/%b/%Y:%H:%M:%S +0000]", time.gmtime()),
'%s %s %s' % (
self.environ['REQUEST_METHOD'],
self.environ['REQUEST_URI'],
self.environ['SERVER_PROTOCOL']),
self.responseCode,
self.sentLength or "-",
self.environ.get('HTTP_REFERER', ''),
self.environ.get('HTTP_USER_AGENT', '')))
sentLength = 0
def write(self, what):
self.sentLength += len(what)
self.transport.write(what)
def start_response(self, status, headers, exc_info=None):
self.responseCode = int(status.split()[0])
self.transport.write("Status: %s\r\n" % (status, ))
for key, value in headers:
self.transport.write("%s: %s\r\n" % (key, value))
self.transport.write("\r\n")
return self.write
class NotificationProtocol(protocol.Protocol):
def connectionMade(self):
self.transport.loseConnection()
class NotificationFactory(protocol.ClientFactory):
protocol = NotificationProtocol
class ZomneFactory(protocol.Factory):
def __init__(self, root, logfile=None, prefixURL=None):
"""`prefixURL` is used by WSGI apps. wsgi.py stores it in appRootURL.
It is the HTTP url for the nevow.cgi script"""
if logfile is not None:
self.log = open(logfile, 'a')
if prefixURL:
self.application = wsgi.createWSGIApplication(root, prefixURL)
else:
self.application = wsgi.createWSGIApplication(root)
protocol = ZomneProtocol
def startFactory(self):
"""Tell the other end that we are done starting up.
"""
reactor.connectUNIX('zomne_startup_complete.socket', NotificationFactory())
def log(self, msg):
log.msg(msg)
|
gpl-2.0
|
xinjiguaike/edx-platform
|
common/lib/xmodule/xmodule/capa_module.py
|
37
|
9775
|
"""Implements basics of Capa, including class CapaModule."""
import json
import logging
import sys
import re
from lxml import etree
from pkg_resources import resource_string
import dogstats_wrapper as dog_stats_api
from .capa_base import CapaMixin, CapaFields, ComplexEncoder
from capa import responsetypes
from .progress import Progress
from xmodule.util.misc import escape_html_characters
from xmodule.x_module import XModule, module_attr, DEPRECATION_VSCOMPAT_EVENT
from xmodule.raw_module import RawDescriptor
from xmodule.exceptions import NotFoundError, ProcessingError
log = logging.getLogger("edx.courseware")
class CapaModule(CapaMixin, XModule):
"""
An XModule implementing LonCapa format problems, implemented by way of
capa.capa_problem.LoncapaProblem
CapaModule.__init__ takes the same arguments as xmodule.x_module:XModule.__init__
"""
icon_class = 'problem'
js = {
'coffee': [
resource_string(__name__, 'js/src/capa/display.coffee'),
resource_string(__name__, 'js/src/javascript_loader.coffee'),
],
'js': [
resource_string(__name__, 'js/src/collapsible.js'),
resource_string(__name__, 'js/src/capa/imageinput.js'),
resource_string(__name__, 'js/src/capa/schematic.js'),
]
}
js_module_name = "Problem"
css = {'scss': [resource_string(__name__, 'css/capa/display.scss')]}
def __init__(self, *args, **kwargs):
"""
Accepts the same arguments as xmodule.x_module:XModule.__init__
"""
super(CapaModule, self).__init__(*args, **kwargs)
def handle_ajax(self, dispatch, data):
"""
This is called by courseware.module_render, to handle an AJAX call.
`data` is request.POST.
Returns a json dictionary:
{ 'progress_changed' : True/False,
'progress' : 'none'/'in_progress'/'done',
<other request-specific values here > }
"""
handlers = {
'hint_button': self.hint_button,
'problem_get': self.get_problem,
'problem_check': self.check_problem,
'problem_reset': self.reset_problem,
'problem_save': self.save_problem,
'problem_show': self.get_answer,
'score_update': self.update_score,
'input_ajax': self.handle_input_ajax,
'ungraded_response': self.handle_ungraded_response
}
_ = self.runtime.service(self, "i18n").ugettext
generic_error_message = _(
"We're sorry, there was an error with processing your request. "
"Please try reloading your page and trying again."
)
not_found_error_message = _(
"The state of this problem has changed since you loaded this page. "
"Please refresh your page."
)
if dispatch not in handlers:
return 'Error: {} is not a known capa action'.format(dispatch)
before = self.get_progress()
try:
result = handlers[dispatch](data)
except NotFoundError as err:
_, _, traceback_obj = sys.exc_info() # pylint: disable=redefined-outer-name
raise ProcessingError(not_found_error_message), None, traceback_obj
except Exception as err:
_, _, traceback_obj = sys.exc_info() # pylint: disable=redefined-outer-name
raise ProcessingError(generic_error_message), None, traceback_obj
after = self.get_progress()
result.update({
'progress_changed': after != before,
'progress_status': Progress.to_js_status_str(after),
'progress_detail': Progress.to_js_detail_str(after),
})
return json.dumps(result, cls=ComplexEncoder)
class CapaDescriptor(CapaFields, RawDescriptor):
"""
Module implementing problems in the LON-CAPA format,
as implemented by capa.capa_problem
"""
INDEX_CONTENT_TYPE = 'CAPA'
module_class = CapaModule
has_score = True
show_in_read_only_mode = True
template_dir_name = 'problem'
mako_template = "widgets/problem-edit.html"
js = {'coffee': [resource_string(__name__, 'js/src/problem/edit.coffee')]}
js_module_name = "MarkdownEditingDescriptor"
css = {
'scss': [
resource_string(__name__, 'css/editor/edit.scss'),
resource_string(__name__, 'css/problem/edit.scss')
]
}
# The capa format specifies that what we call max_attempts in the code
# is the attribute `attempts`. This will do that conversion
metadata_translations = dict(RawDescriptor.metadata_translations)
metadata_translations['attempts'] = 'max_attempts'
@classmethod
def filter_templates(cls, template, course):
"""
Filter template that contains 'latex' from templates.
Show them only if use_latex_compiler is set to True in
course settings.
"""
return 'latex' not in template['template_id'] or course.use_latex_compiler
def get_context(self):
_context = RawDescriptor.get_context(self)
_context.update({
'markdown': self.markdown,
'enable_markdown': self.markdown is not None,
'enable_latex_compiler': self.use_latex_compiler,
})
return _context
# VS[compat]
# TODO (cpennington): Delete this method once all fall 2012 courses are being
# edited in the cms
@classmethod
def backcompat_paths(cls, path):
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:capa_descriptor_backcompat_paths"]
)
return [
'problems/' + path[8:],
path[8:],
]
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(CapaDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([
CapaDescriptor.due,
CapaDescriptor.graceperiod,
CapaDescriptor.force_save_button,
CapaDescriptor.markdown,
CapaDescriptor.text_customization,
CapaDescriptor.use_latex_compiler,
])
return non_editable_fields
@property
def problem_types(self):
""" Low-level problem type introspection for content libraries filtering by problem type """
tree = etree.XML(self.data)
registered_tags = responsetypes.registry.registered_tags()
return set([node.tag for node in tree.iter() if node.tag in registered_tags])
def index_dictionary(self):
"""
Return dictionary prepared with module content and type for indexing.
"""
xblock_body = super(CapaDescriptor, self).index_dictionary()
# Removing solutions and hints, as well as script and style
capa_content = re.sub(
re.compile(
r"""
<solution>.*?</solution> |
<script>.*?</script> |
<style>.*?</style> |
<[a-z]*hint.*?>.*?</[a-z]*hint>
""",
re.DOTALL |
re.VERBOSE),
"",
self.data
)
capa_content = escape_html_characters(capa_content)
capa_body = {
"capa_content": capa_content,
"display_name": self.display_name,
}
if "content" in xblock_body:
xblock_body["content"].update(capa_body)
else:
xblock_body["content"] = capa_body
xblock_body["content_type"] = self.INDEX_CONTENT_TYPE
xblock_body["problem_types"] = list(self.problem_types)
return xblock_body
def has_support(self, view, functionality):
"""
Override the XBlock.has_support method to return appropriate
value for the multi-device functionality.
Returns whether the given view has support for the given functionality.
"""
if functionality == "multi_device":
return self.lcp.has_multi_device_support
else:
return False
# Proxy to CapaModule for access to any of its attributes
answer_available = module_attr('answer_available')
check_button_name = module_attr('check_button_name')
check_button_checking_name = module_attr('check_button_checking_name')
check_problem = module_attr('check_problem')
choose_new_seed = module_attr('choose_new_seed')
closed = module_attr('closed')
get_answer = module_attr('get_answer')
get_problem = module_attr('get_problem')
get_problem_html = module_attr('get_problem_html')
get_state_for_lcp = module_attr('get_state_for_lcp')
handle_input_ajax = module_attr('handle_input_ajax')
hint_button = module_attr('hint_button')
handle_problem_html_error = module_attr('handle_problem_html_error')
handle_ungraded_response = module_attr('handle_ungraded_response')
is_attempted = module_attr('is_attempted')
is_correct = module_attr('is_correct')
is_past_due = module_attr('is_past_due')
is_submitted = module_attr('is_submitted')
lcp = module_attr('lcp')
make_dict_of_responses = module_attr('make_dict_of_responses')
new_lcp = module_attr('new_lcp')
publish_grade = module_attr('publish_grade')
rescore_problem = module_attr('rescore_problem')
reset_problem = module_attr('reset_problem')
save_problem = module_attr('save_problem')
set_state_from_lcp = module_attr('set_state_from_lcp')
should_show_check_button = module_attr('should_show_check_button')
should_show_reset_button = module_attr('should_show_reset_button')
should_show_save_button = module_attr('should_show_save_button')
update_score = module_attr('update_score')
|
agpl-3.0
|
d8ahazard/shooter-cm9-deviltoast
|
scripts/build-all.py
|
1250
|
9474
|
#! /usr/bin/env python
# Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/qsd*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/apq*_defconfig'):
names[os.path.basename(n)[:-10]] = n
return names
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
build = Builder(log_name)
result = build.run(['make', 'O=%s' % dest_dir] + make_command)
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" % (target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
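# --- Illustrative invocations (added; target names are placeholders) ---
#   python build-all.py --list                              # show available targets
#   python build-all.py all                                 # build every target
#   python build-all.py -j 8 msm8960                        # build one target with 8 make jobs
#   python build-all.py --updateconfigs='CONFIG_USE_THING=y' all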
|
gpl-2.0
|
dentaku65/pelisalacarta
|
python/main-classic/channels/enlacia.py
|
4
|
14634
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para enlacia
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os,sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
__channel__ = "enlacia"
__category__ = "F,S,D"
__type__ = "generic"
__title__ = "enlacia"
__language__ = "ES"
DEBUG = config.get_setting("debug")
SITE = "http://www.enlacia.com"
def isGeneric():
return True
def mainlist(item):
logger.info("[enlacia.py] mainlist")
itemlist = []
data = scrapertools.cache_page(SITE)
data = scrapertools.get_match(data,'<div class="submenu wrap">(.*?)<a href="http://www.tripledeseo.com"')
patron = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for path,title in matches:
itemlist.append( Item(channel=__channel__, title=title , action="categorias", url=SITE+path ,fanart="http://pelisalacarta.mimediacenter.info/fanart/enlacia.jpg"))
itemlist.append( Item(channel=__channel__, title="Fichas: Todas las fichas" ,action="listadofichas", url=SITE+"/fichas" ,fanart="http://pelisalacarta.mimediacenter.info/fanart/enlacia.jpg", extra="ver etiquetas") )
itemlist.append( Item(channel=__channel__, title="Etiquetas: Todas las categorías" ,action="etiquetas", url=SITE+"/tag/" ,fanart="http://pelisalacarta.mimediacenter.info/fanart/enlacia.jpg") )
itemlist.append( Item(channel=__channel__, title="Buscar: Todas las categorías" ,action="search" ,fanart="http://pelisalacarta.mimediacenter.info/fanart/enlacia.jpg") )
return itemlist
def categorias(item):
logger.info("[enlacia.py] categorias")
itemlist = []
itemlist.append( Item(channel=__channel__, title=item.title+" - Nuevo-antiguo" ,action="listadofichas" ,url=item.url ,fanart="http://pelisalacarta.mimediacenter.info/fanart/enlacia.jpg") )
itemlist.append( Item(channel=__channel__, title=item.title+" - A-Z" ,action="listadofichas" ,url=item.url+"/orden:nombre" ,fanart="http://pelisalacarta.mimediacenter.info/fanart/enlacia.jpg") )
itemlist.append( Item(channel=__channel__, title=item.title+" - Nuevo-antiguo [Completo]" ,action="completo" ,url=item.url ,fanart="http://pelisalacarta.mimediacenter.info/fanart/enlacia.jpg") )
itemlist.append( Item(channel=__channel__, title=item.title+" - A-Z [Completo]" ,action="completo" ,url=item.url+"/orden:nombre" ,fanart="http://pelisalacarta.mimediacenter.info/fanart/enlacia.jpg") )
return itemlist
def etiquetas(item):
logger.info("[enlacia.py] etiquetas")
itemlist = []
data = scrapertools.cache_page(item.url)
patron = '<a href="(/tag/[^"]+)">([^<]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for path,title in matches:
itemlist.append( Item(channel=__channel__, title=title+" - Nuevo-antiguo" ,action="listadofichas" ,url=SITE+path ,fanart="http://pelisalacarta.mimediacenter.info/fanart/enlacia.jpg", extra="ver etiquetas") )
itemlist.append( Item(channel=__channel__, title=title+" - A-Z" ,action="listadofichas" ,url=SITE+path+"/orden:nombre" ,fanart="http://pelisalacarta.mimediacenter.info/fanart/enlacia.jpg", extra="ver etiquetas") )
return itemlist
# Because the function is called "search", the launcher asks for a text to search and adds it as a parameter
def search(item,texto,categoria=""):
logger.info("[enlacia.py] "+item.url+" search "+texto)
itemlist = []
url = item.url
texto = texto.replace(" ","+")
logger.info("categoria: "+categoria+" url: "+url)
try:
item.url = "http://www.enlacia.com/busqueda/%s"
item.url = item.url % texto
item.extra = "ver etiquetas"
itemlist.extend(completo(item))
return itemlist
# The exception is caught so the global search engine is not interrupted if a channel fails
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return []
def listadofichas(item):
logger.info("[enlacia.py] listadofichas")
itemlist = []
### Listing ###
data = scrapertools.cache_page(item.url)
listado = scrapertools.get_match(data,'<h2>Listado de fichas</h2>(.*?)</div></div></div>')
patron = '<a href="([^"]+)" class="ficha ficha2"><img src="([^"]+)" border="0" alt="([^"]+)"/>'
patron+= '.*?<span class="categoria">([^<]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(listado)
for path,thumbnail,title,categoria in matches:
item_extra = item.extra
if item.extra == "ver etiquetas": title = "[COLOR blue]"+categoria+":[/COLOR] "+title
itemlist.append( Item(channel=__channel__, title=title , action="temporadas", url=SITE+path, thumbnail=SITE+"/"+thumbnail.replace('.jpg','_g.jpg'), fanart="http://pelisalacarta.mimediacenter.info/fanart/enlacia.jpg", show=title, extra=item_extra) )
### Pagination ###
try:
pagina_actual = scrapertools.get_match(data, '<span class="pagina pag_actual">([^<]+)</span>')
pagina_siguiente = scrapertools.get_match(data, '<a href="([^"]+)" class="pagina pag_sig">[^<]+</a>')
pagina_final = scrapertools.get_match(data, 'class="pagina">([^<]+)</a><a href="[^"]+" class="pagina pag_sig">')
print "### pagina_siguiente: %s" % pagina_siguiente
#if pagina_actual != pagina_final:
if pagina_siguiente != "":
if "tag/" in pagina_siguiente: pagina_siguiente = "/"+pagina_siguiente
itemlist.append( Item(channel=__channel__, title=">> Página siguiente", action="listadofichas", url=SITE+pagina_siguiente, fanart="http://pelisalacarta.mimediacenter.info/fanart/enlacia.jpg", extra=item_extra) )
except: pass
return itemlist
# Builds the listing of all pages together
def completo(item):
logger.info("[enlacia.py] completo")
itemlist = []
# Keep the value in case these are tags, so 'listadofichas' can see it
item_extra = item.extra
# Read the entries
items_programas = listadofichas(item)
salir = False
while not salir:
# Get the URL of the next page
ultimo_item = items_programas[ len(items_programas)-1 ]
# Intermediate pages
if ultimo_item.action=="listadofichas":
# Remove the "Next page" element
ultimo_item = items_programas.pop()
# Add the page entries to the full list
itemlist.extend( items_programas )
# Load the next page
ultimo_item.extra = item_extra
items_programas = listadofichas(ultimo_item)
# Last page
else:
# Add to the full list and exit
itemlist.extend( items_programas )
salir = True
return itemlist
def play(item):
logger.info("[enlacia.py] play")
itemlist=[]
# Look for the video
#data = scrapertools.cache_page(item.url)
#videoitemlist = servertools.find_video_items(data=data)
videoitemlist = servertools.find_video_items(data=item.url)
i=1
for videoitem in videoitemlist:
if not "favicon" in videoitem.url:
videoitem.title = "Mirror %d%s" % (i,videoitem.title)
videoitem.fulltitle = item.fulltitle
videoitem.channel = __channel__
videoitem.show = item.show
itemlist.append(videoitem)
i=i+1
return itemlist
def temporadas(item):
logger.info("[enlacia.py] temporadas")
itemlist = []
# Load the page
data = scrapertools.cache_page(item.url)
if '<div class="mensaje error">No hay vídeos disponibles</div>' in data:
itemlist.append( Item(channel=__channel__, title="No hay vídeos disponibles", folder=False) )
elif '<div class="fficha-temporadas">' not in data:
### step 1: mostrar_temporada.php ###
id = scrapertools.get_match(data, "<script type=.text/javascript.>mostrar_temporada.'([^']+)'.;</script>")
path = "/ajax/mostrar_temporada.php?id="+id
data = scrapertools.cache_page(SITE+path)
### step 2: mostrar_capitulo.php ###
id = scrapertools.get_match(data, '<div id="item-([^"]+)" class="item ">')
path = "/ajax/mostrar_capitulo.php?id="+id
item.url = SITE+path
itemlist.extend( findvideos(item))
elif '<span class="temporadas">Temporadas:</span>' not in data:
### step 1: mostrar_temporada.php ###
id = scrapertools.get_match(data, "<script type=.text/javascript.>mostrar_temporada.'([^']+)'.;</script>")
temporada = scrapertools.get_match(data, '<span class="temporadas">([^<]+)</span>')
path = "/ajax/mostrar_temporada.php?id="+id
item.title = "Temporada: "+temporada
item.url = SITE+path
itemlist.extend( episodios(item))
else:
### step 1: mostrar_temporada.php ###
patron = '<a id="temp-([^"]+)" class="temp" href="[^"]+">([^<]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for id,temporada in matches:
path = "/ajax/mostrar_temporada.php?id="+id
if temporada == "Extra": temporada = "Temporada: 0"
item.title = "Temporada: "+temporada
item.url = SITE+path
itemlist.extend( episodios(item))
return itemlist
def episodios(item):
logger.info("[enlacia.py] episodios")
itemlist = []
### step 2: mostrar_capitulo.php ###
data = scrapertools.cache_page(item.url)
logger.info("data="+data)
patron = '<a class="nombre " href="javascript:mostrar_capitulo.([^\)]+).;">([^<]+)</a>'
#patron = '<div id="item-([^"]+)" class="item ">'
matches = re.compile(patron,re.DOTALL).findall(data)
for id,capitulo in matches:
path = "/ajax/mostrar_capitulo.php?id="+id
#logger.info("title="+item.title+", capitulo="+capitulo)
#title=Temporada: Temporada 1, capitulo=Capítulo: 1
nombre_temporada = item.title.replace("Temporada: Temporada ","")
nombre_episodio = capitulo.replace("Capítulo: ","")
if len(nombre_episodio)==1:
nombre_episodio="0"+nombre_episodio
itemlist.append( Item(channel=__channel__, title=nombre_temporada+"x"+nombre_episodio , action="findvideos", url=SITE+path, thumbnail=item.thumbnail, fanart="http://pelisalacarta.mimediacenter.info/fanart/enlacia.jpg", show=item.show) )
return itemlist
def findvideos(item):
logger.info("[enlacia.py] findvideos")
itemlist = []
### step 3: ver_video.php ###
data = scrapertools.cache_page(item.url)
patron = '<div id="vitem-([^"]+)" class="vitem">.*?'
#patron+= '<img src="..([^"]+)" alt="Tipo enlace"/>.*?'
patron+= '<img src="..([^"]+)" alt="Servidor"/>.*?'
patron+= '<div class="info-idioma"><img src="/images/idiomas/(.).png" />.*?'
patron+= '<div class="info-calidad">([^<]+)</div>'
# List of servers
matches = re.compile(patron,re.DOTALL).findall(data)
#for id,tipo,thumbnail,idioma_id,calidad in matches:
for id,thumbnail,idioma_id,calidad in matches:
path = "/ajax/ver_video.php?id_video="+id
#tipo_id = tipo.replace('/images/tipos/','').split('.')[0]
servidor_id = thumbnail.replace('/images/servidores/','').split('.')[0]
#if servidor_id != "999" and servidor_id != "0":
title = servidores(servidor_id)+" ("+idiomas(idioma_id)+") ("+calidad+")"
#title = tipos(tipo_id)+" en "+servidores(servidor_id)+" ("+idiomas(idioma_id)+") ("+calidad+")"
if id != "0":
# Parts
data = scrapertools.cache_page(SITE+path)
patron = '<a href="([^"]+)" target="_blank">'
matches = re.compile(patron,re.DOTALL).findall(data)
partes = len(matches)
i = 1
for url in matches:
print "### url: "+url
parte = ""
if partes > 1: parte = " [partes: %s/%s]" %(i,partes)
itemlist.append( Item(channel=item.channel, title=title+parte, action="play", url=url, thumbnail=SITE+thumbnail, show=item.show, fulltitle=item.show, folder=False) )
i = i+1
return itemlist
def tipos(id):
lista = {'0':'tipo desconocido', '1':'Emule', '2':'uTorrent', '3':'Descargar', '4':'Ver'}
return lista[id]
def idiomas(id):
lista = {'0':'idioma desconocido', '1':'Español', '2':'Latino', '3':'VOS', '4':'VO', '5':'Catalán'}
return lista[id]
def servidores(id):
lista = {'/images/hdsponsor':'hd', '0':'servidor desconocido', '1':'emule', '2':'bittorrent', '3':'elitetorrent', '4':'youtube', '5':'mitele', '6':'lasextaon', '7':'antena3', '8':'rapidshare', '9':'fileserve', '10':'rtvees', '11':'uploaded', '12':'mediafire', '13':'mtv', '14':'letitbit', '15':'allmyvideos', '16':'vidxden', '17':'depositfiles', '18':'bitshare', '19':'filepost', '20':'turbobit', '21':'wupload', '22':'modovideo', '23':'divxstage', '24':'oneficher', '25':'zippyshare', '26':'fooget', '27':'vk', '28':'freakshare', '29':'filedino', '30':'shareonline', '31':'putlocker', '32':'filebox', '33':'jumbofiles', '34':'shragle', '35':'stagevu', '36':'easybytez', '37':'shareflare', '38':'bulletupload', '39':'filevelocity', '40':'videobam', '41':'uploadstation', '42':'uploaz', '43':'glumbo', '44':'rapidgator', '45':'fileserving', '46':'filefactory', '47':'gigasize', '48':'refile', '49':'vimple', '50':'videoweed', '51':'veevr', '52':'vipfile', '53':'crtvg', '54':'vimeo', '55':'fiberupload', '56':'moevideos', '57':'novamov', '58':'uploading', '59':'fileflyer', '60':'nowvideo', '61':'bayfiles', '62':'piratebay', '63':'movshare', '64':'bupload', '65':'sharpfile', '66':'uploadjet', '67':'henchfile', '68':'uloadto', '69':'z47upload', '70':'muchshare', '71':'magnovideo', '72':'streamcloud', '73':'cloudzer', '74':'playedto', '75':'allbox4', '76':'mega', '77':'videomega', '78':'vidspot', '79':'nowvideo', '80':'upfiles', '81':'uploadable', '82':'streaminto', '83':'nowdownload', '84':'filemonkey', '85':'uploadable', '86':'shockshare', '87':'oleup', '999':'servidor desconocido'}
return lista[id]
|
gpl-3.0
|
CartoDB/cartoframes
|
cartoframes/viz/layout.py
|
1
|
9384
|
from . import constants
from .map import Map
from .html import HTMLLayout
from ..utils.utils import get_center, get_credentials
from ..utils.metrics import send_metrics
from .kuviz import KuvizPublisher
class Layout:
"""Create a layout of visualizations in order to compare them.
Args:
maps (list of :py:class:`Map <cartoframes.viz.Map>`): List of
maps. Zero or more of :py:class:`Map <cartoframes.viz.Map>`.
n_size (number, optional): Number of columns of the layout
m_size (number, optional): Number of rows of the layout
viewport (dict, optional): Properties for display of the maps viewport.
Keys can be `bearing` or `pitch`.
is_static (boolean, optional): Default is False, so all the maps in each visualization
are interactive. To render them as static images for performance reasons,
set `is_static` to True.
map_height (number, optional): Height in pixels for each visualization.
Default is 250.
full_height (boolean, optional): When a layout visualization is published, it
will fit the screen height. Otherwise, each visualization height will be
`map_height`. Default True.
Raises:
ValueError: if the input elements are not instances of :py:class:`Map <cartoframes.viz.Map>`.
Examples:
Basic usage.
>>> Layout([
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account'))
>>> ])
Display a 2x2 layout.
>>> Layout([
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account'))
>>> ], 2, 2)
Custom Titles.
>>> Layout([
... Map(Layer('table_in_your_account'), title="Visualization 1 custom title"),
... Map(Layer('table_in_your_account'), title="Visualization 2 custom title"),
>>> ])
Viewport.
>>> Layout([
... Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account'))
>>> ], viewport={ 'zoom': 2 })
>>> Layout([
... Map(Layer('table_in_your_account'), viewport={ 'zoom': 0.5 }),
... Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account'))
>>> ], viewport={ 'zoom': 2 })
Create a static layout.
>>> Layout([
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account'))
>>> ], is_static=True)
"""
def __init__(self,
maps,
n_size=None,
m_size=None,
viewport=None,
map_height=250,
full_height=True,
is_static=False,
**kwargs):
self._maps = maps
self._layout = _init_layout(self._maps, is_static, viewport)
self._n_size = n_size if n_size is not None else len(self._layout)
self._m_size = m_size if m_size is not None else constants.DEFAULT_LAYOUT_M_SIZE
self._viewport = viewport
self._is_static = is_static
self._map_height = map_height
self._full_height = full_height
self._publisher = None
self._carto_vl_path = kwargs.get('_carto_vl_path', None)
self._airship_path = kwargs.get('_airship_path', None)
def _repr_html_(self):
self._html_layout = HTMLLayout()
map_height = '100%' if self._full_height else '{}px'.format(self._map_height)
self._html_layout.set_content(
maps=self._layout,
size=['100%', self._map_height * self._m_size],
n_size=self._n_size,
m_size=self._m_size,
is_static=self._is_static,
map_height=map_height,
full_height=self._full_height,
_carto_vl_path=self._carto_vl_path,
_airship_path=self._airship_path
)
return self._html_layout.html
@send_metrics('map_published')
def publish(self, name, password, credentials=None, if_exists='fail', maps_api_key=None):
"""Publish the layout visualization as a CARTO custom visualization.
Args:
name (str): The visualization name on CARTO.
password (str): By setting it, your visualization will be protected by
password. When someone tries to show the visualization, the password
will be requested. To disable password you must set it to None.
credentials (:py:class:`Credentials <cartoframes.auth.Credentials>`, optional):
A Credentials instance. If not provided, the credentials will be automatically
obtained from the default credentials if available. It is used to create the
publication and also to save local data (if exists) into your CARTO account.
if_exists (str, optional): 'fail' or 'replace'. Behavior in case a publication with
the same name already exists in your account. Default is 'fail'.
maps_api_key (str, optional): The Maps API key used for private datasets.
Example:
Publishing the map visualization.
>>> tlayout = Layout([
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account'))
>>> ])
>>> tlayout.publish('Custom Map Title', password=None)
"""
_credentials = get_credentials(credentials)
layers = []
for viz_map in self._maps:
for layer in viz_map.layers:
layers.append(layer)
self._publisher = _get_publisher(_credentials)
self._publisher.set_layers(layers, maps_api_key)
html = self._get_publication_html()
return self._publisher.publish(html, name, password, if_exists)
def update_publication(self, name, password, if_exists='fail'):
"""Update the published layout visualization.
Args:
name (str): The visualization name on CARTO.
password (str): By setting it, your visualization will be protected by
password. Set it to `None` to make the visualization public.
if_exists (str, optional): 'fail' or 'replace'. Behavior in case a publication with the same name already
exists in your account. Default is 'fail'.
Raises:
PublishError: if the map has not been published yet.
"""
html = self._get_publication_html()
return self._publisher.update(html, name, password, if_exists)
def _get_publication_html(self):
if not self._publisher:
_credentials = get_credentials(None)
self._publisher = _get_publisher(_credentials)
html_layout = HTMLLayout('templates/viz/main_layout.html.j2')
layers = self._publisher.get_layers()
layer_index = 0
for viz_map in self._maps:
for layer in viz_map.layers:
layer.credentials = layers[layer_index].credentials
layer_index += 1
maps = _init_layout(self._maps, self._is_static, self._viewport)
map_height = '100%' if self._full_height else '{}px'.format(self._map_height)
html_layout.set_content(
maps=maps,
size=['100%', self._map_height * self._m_size],
n_size=self._n_size,
m_size=self._m_size,
is_static=self._is_static,
is_embed=True,
map_height=map_height
)
return html_layout.html
def _init_layout(maps, is_static, viewport):
layout = []
for map_index, viz in enumerate(maps):
if not isinstance(viz, Map):
raise ValueError('All the elements in the Layout should be an instance of Map.')
viz.is_static = _get_is_static(viz.is_static, is_static)
viz.viewport = _get_viewport(viz.viewport, viewport)
viz.camera = _get_camera(viz.viewport)
for layer in viz.layers:
layer.map_index = map_index
layer.reset_ui(viz)
layout.append(viz.get_content())
return layout
def _get_viewport(map_settings_viewport, layout_viewport):
if map_settings_viewport is not None:
return map_settings_viewport
return layout_viewport
def _get_camera(viewport):
camera = None
if viewport is not None:
camera = {
'center': get_center(viewport),
'zoom': viewport.get('zoom'),
'bearing': viewport.get('bearing'),
'pitch': viewport.get('pitch')
}
return camera
def _get_is_static(map_settings_is_static, layout_is_static):
if map_settings_is_static is not None:
return map_settings_is_static
return layout_is_static
def _get_publisher(credentials):
return KuvizPublisher(credentials)
|
bsd-3-clause
|
xsthunder/acm
|
util/quickpow.py
|
2
|
1827
|
from math import *
def qp(x,p,mod=0):#quickpow
if(type(x) != type(2) or type(p)!=type(2) or type(mod)!=type(2)):
print("qp :err type%s %s %s"%(type(x),type(p),type(mod)))
return
b = 1
while(p>0):
if(p&1==1):
b*=x
p>>=1
x*=x
if(mod>0):
b%=mod
x%=mod
return b
def isp(p):#is prime
if p <=1:
return False
if p == 2:
return True
for i in range(2,int(sqrt(p))+1):
if(p%i==0):
return False
return True
def getpfac(x):#get prime factors
if(type(x)!=type(2)):
print("getpfac:err %d",type(x))
if(x<0):
x= -x
mp = {}
for i in range(2,int(sqrt(x))+1):
if x%i == 0:
mp[i]=1
x/=i
while(x%i==0 and x>0):
x/=i
x = int(x)
i = int(i)
mp[i]+=1
if(x>1):
mp[x]=1
return mp
def getp(x):
if(type(x)!=type(2)):
print("getp:err %d",type(x))
return []
if(x<=1):
return []
if(x>=1e7):
getp("getp:%d too large"%x)
return []
mp = {}
l = []
for i in range(2,x+1):
if i in mp:
continue
tmp = i
l.append(tmp)
while(tmp<=x):
mp[tmp] = 1
tmp+=i
return l
def ispr(g,p):#if primitive root
if(type(g) is not type(2) or type(p) is not type(2)):
print("isPR:err type%s %s"%(type(x),type(p)))
return
if(not isp(p)):
print("p:%d is not prime"%p)
return False
cnt1 = 0
l = list(getpfac(p-1).keys())
for i in l:
if(qp(g,i,p)==1):
return False
return True
#print(ispr(3,998244353))
#print(getpfac(998244353-1))
print((getp(int(20000))))
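# --- Illustrative checks (added; not part of the original script) ---
# qp implements binary exponentiation, optionally reducing modulo `mod` at
# every squaring/multiplication step:
#   qp(3, 10)            -> 59049     (3 ** 10)
#   qp(3, 10, 1000)      -> 49        (3 ** 10 % 1000)
#   qp(2, 20, 10**9 + 7) -> 1048576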
|
mit
|
obulpathi/poppy
|
scripts/providers/akamai/purge_status.py
|
3
|
2406
|
# Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ConfigParser
import json
import os
import pprint
import requests
import sys
from akamai.edgegrid import EdgeGridAuth
def main(args):
if len(args) != 4:
print("usage: python purge_status.py [env] [url] [purge_id]")
print(
"example : python purge_status.py [prod|test] "
"http://blog.mysite.com/ 2424ada3-d964-11c4-8621-731adc86695c")
sys.exit(2)
env = args[1]
purge_url = args[2]
purge_id = args[3]
config_parser = ConfigParser.RawConfigParser()
config_path = os.path.expanduser('~/.poppy/akamai.conf')
config_parser.read(config_path)
print("")
print("")
print("Fetching purge status")
akamai_purge_status(env, config_parser, purge_url, purge_id)
print("")
print("")
def edge_session(env, config):
s = requests.Session()
s.auth = EdgeGridAuth(
# This is akamai credential
client_token=config.get(env, 'ccu_api_client_token'),
client_secret=config.get(env, 'ccu_api_client_secret'),
access_token=config.get(env, 'ccu_api_access_token'))
return s
def akamai_purge_status(env, config, purge_url, purge_id):
purge_base_url = config.get(env, 'ccu_api_base_url')
purge_status_url = ('{0}ccu/v2/purges/{1}'
.format(purge_base_url, purge_id))
print ("Purge URL: " + purge_url)
print ("Purge ID: " + purge_id)
data = {
'objects': [purge_url]
}
s = edge_session(env, config)
response = s.get(
purge_status_url,
data=json.dumps(data),
headers={'Content-type': 'application/json', 'Accept': 'text/plain'})
print("Status: {0}".format(response.status_code))
pprint.pprint(response.headers)
pprint.pprint(response.json())
if __name__ == "__main__":
main(sys.argv)
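# --- Illustrative configuration sketch (added; values are placeholders) ---
# The script expects ~/.poppy/akamai.conf with one section per environment, e.g.:
#
#   [prod]
#   ccu_api_base_url = https://akab-xxxxxxxx.luna.akamaiapis.net/
#   ccu_api_client_token = <client token>
#   ccu_api_client_secret = <client secret>
#   ccu_api_access_token = <access token>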
|
apache-2.0
|
jazkarta/edx-platform
|
common/djangoapps/django_locale/middleware.py
|
81
|
3736
|
# TODO: This file is imported from the stable Django 1.8 branch. Remove this file
# and re-import this middleware from Django once the codebase is upgraded. [PLAT-671]
# pylint: disable=invalid-name, missing-docstring
"This is the locale selecting middleware that will look at accept headers"
from django.conf import settings
from django.core.urlresolvers import (
LocaleRegexURLResolver, get_resolver, get_script_prefix, is_valid_path,
)
from django.http import HttpResponseRedirect
from django.utils import translation
from django.utils.cache import patch_vary_headers
# Override the Django 1.4 implementation with the 1.8 implementation
from django_locale.trans_real import get_language_from_request
class LocaleMiddleware(object):
"""
This is a very simple middleware that parses a request
and decides what translation object to install in the current
thread context. This allows pages to be dynamically
translated to the language the user desires (if the language
is available, of course).
"""
response_redirect_class = HttpResponseRedirect
def __init__(self):
self._is_language_prefix_patterns_used = False
for url_pattern in get_resolver(None).url_patterns:
if isinstance(url_pattern, LocaleRegexURLResolver):
self._is_language_prefix_patterns_used = True
break
def process_request(self, request):
check_path = self.is_language_prefix_patterns_used()
# This call is broken in Django 1.4:
# https://github.com/django/django/blob/stable/1.4.x/django/utils/translation/trans_real.py#L399
# (we override parse_accept_lang_header to a fixed version in dark_lang.middleware)
language = get_language_from_request(
request, check_path=check_path)
translation.activate(language)
request.LANGUAGE_CODE = translation.get_language()
def process_response(self, request, response):
language = translation.get_language()
language_from_path = translation.get_language_from_path(request.path_info)
if (response.status_code == 404 and not language_from_path
and self.is_language_prefix_patterns_used()):
urlconf = getattr(request, 'urlconf', None)
language_path = '/%s%s' % (language, request.path_info)
path_valid = is_valid_path(language_path, urlconf)
if (not path_valid and settings.APPEND_SLASH
and not language_path.endswith('/')):
path_valid = is_valid_path("%s/" % language_path, urlconf)
if path_valid:
script_prefix = get_script_prefix()
language_url = "%s://%s%s" % (
request.scheme,
request.get_host(),
# insert language after the script prefix and before the
# rest of the URL
request.get_full_path().replace(
script_prefix,
'%s%s/' % (script_prefix, language),
1
)
)
return self.response_redirect_class(language_url)
if not (self.is_language_prefix_patterns_used()
and language_from_path):
patch_vary_headers(response, ('Accept-Language',))
if 'Content-Language' not in response:
response['Content-Language'] = language
return response
def is_language_prefix_patterns_used(self):
"""
Returns `True` if the `LocaleRegexURLResolver` is used
at root level of the urlpatterns, else it returns `False`.
"""
return self._is_language_prefix_patterns_used
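# --- Usage sketch (added; not part of the original file) ---
# This class is intended as a drop-in replacement for Django's own
# LocaleMiddleware in a Django 1.4 project's settings, e.g. something like:
#
#   MIDDLEWARE_CLASSES = (
#       'django.contrib.sessions.middleware.SessionMiddleware',
#       'django_locale.middleware.LocaleMiddleware',
#       'django.middleware.common.CommonMiddleware',
#       # ...
#   )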
|
agpl-3.0
|
micbou/ycmd
|
ycmd/request_validation.py
|
5
|
2598
|
# Copyright (C) 2014 Google Inc.
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from ycmd.responses import ServerError
# Throws an exception if request doesn't have all the required fields.
# TODO: Accept a request_type param so that we can also verify missing
# command_arguments and completer_target fields if necessary.
def EnsureRequestValid( request_json ):
required_fields = { 'line_num', 'column_num', 'filepath', 'file_data' }
missing = { x for x in required_fields if x not in request_json }
if 'filepath' not in missing and 'file_data' not in missing:
missing.update( _MissingFieldsForFileData( request_json ) )
if not missing:
return True
message = '\n'.join( _FieldMissingMessage( field ) for field in missing )
raise ServerError( message )
def _FieldMissingMessage( field ):
return 'Request missing required field: {0}'.format( field )
def _FilepathInFileDataSpec( request_json ):
return 'file_data["{0}"]'.format( request_json[ 'filepath' ] )
def _SingleFileDataFieldSpec( request_json, field ):
return '{0}["{1}"]'.format( _FilepathInFileDataSpec( request_json ), field )
def _MissingFieldsForFileData( request_json ):
missing = set()
data_for_file = request_json[ 'file_data' ].get( request_json[ 'filepath' ] )
if data_for_file:
required_data = [ 'filetypes', 'contents' ]
for required in required_data:
if required not in data_for_file:
missing.add( _SingleFileDataFieldSpec( request_json, required ) )
filetypes = data_for_file.get( 'filetypes', [] )
if not filetypes:
missing.add( '{0}[0]'.format(
_SingleFileDataFieldSpec( request_json, 'filetypes' ) ) )
else:
missing.add( _FilepathInFileDataSpec( request_json ) )
return missing
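# --- Illustrative sketch (added; not part of ycmd) ---
# A request carrying all required fields validates; dropping one of them makes
# EnsureRequestValid raise ServerError. Paths and contents are placeholders.
#
#   request = {
#     'line_num': 1,
#     'column_num': 1,
#     'filepath': '/tmp/example.py',
#     'file_data': {
#       '/tmp/example.py': { 'contents': 'pass', 'filetypes': [ 'python' ] }
#     }
#   }
#   EnsureRequestValid( request )      # returns True
#   del request[ 'line_num' ]
#   EnsureRequestValid( request )      # raises ServerError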
|
gpl-3.0
|
alphacsc/alphacsc
|
examples/csc/plot_lfp_data.py
|
1
|
3791
|
"""
==============================
CSC to learn LFP spiking atoms
==============================
Here, we show how CSC can be used to learn spiking
atoms from Local Field Potential (LFP) data [1].
[1] Hitziger, Sebastian, et al.
Adaptive Waveform Learning: A Framework for Modeling Variability in
Neurophysiological Signals. IEEE Transactions on Signal Processing (2017).
"""
###############################################################################
# First, let us fetch the data (~14 MB)
import os
from mne.utils import _fetch_file
url = ('https://github.com/hitziger/AWL/raw/master/Experiments/data/'
'LFP_data_contiguous_1250_Hz.mat')
fname = './LFP_data_contiguous_1250_Hz.mat'
if not os.path.exists(fname):
_fetch_file(url, fname)
###############################################################################
# It is a mat file, so we use scipy to load it
from scipy import io
data = io.loadmat(fname)
X, sfreq = data['X'].T, float(data['sfreq'])
###############################################################################
# And now let us look at the data
import numpy as np
import matplotlib.pyplot as plt
start, stop = 11000, 15000
times = np.arange(start, stop) / sfreq
plt.plot(times, X[0, start:stop], color='b')
plt.xlabel('Time (s)')
plt.ylabel(r'$\mu$ V')
plt.xlim([9., 12.])
###############################################################################
# and filter it using a convenient function from MNE. This will remove low
# frequency drifts, but we keep the high frequencies
from mne.filter import filter_data
X = filter_data(
X.astype(np.float64), sfreq, l_freq=1, h_freq=None, fir_design='firwin')
###############################################################################
# Now, we define the parameters of our model.
reg = 6.0
n_times = 2500
n_times_atom = 350
n_trials = 100
n_atoms = 3
n_iter = 60
###############################################################################
# Let's stick to one random state for now, but if you want to learn how to
# select the random state, consult :ref:`this example
# <sphx_glr_auto_examples_plot_simulate_randomstate.py>`.
random_state = 10
###############################################################################
# Now, we epoch the trials
overlap = 0
starts = np.arange(0, X.shape[1] - n_times, n_times - overlap)
stops = np.arange(n_times, X.shape[1], n_times - overlap)
X_new = []
for idx, (start, stop) in enumerate(zip(starts, stops)):
if idx >= n_trials:
break
X_new.append(X[0, start:stop])
X_new = np.vstack(X_new)
del X
###############################################################################
# We remove the mean and scale to unit variance.
X_new -= np.mean(X_new)
X_new /= np.std(X_new)
###############################################################################
# The convolutions can result in edge artifacts at the edges of the trials.
# Therefore, we discount the contributions from the edges by windowing the
# trials.
from numpy import hamming
X_new *= hamming(n_times)[None, :]
###############################################################################
# Of course, in a data-limited setting we want to use as much of the data as
# possible. If this is the case, you can set `overlap` to non-zero (for example
# half the epoch length).
#
# Now, we run regular CSC since the trials are not too noisy
from alphacsc import learn_d_z
pobj, times, d_hat, z_hat, reg = learn_d_z(X_new, n_atoms, n_times_atom,
reg=reg, n_iter=n_iter,
random_state=random_state, n_jobs=1)
###############################################################################
# Let's look at the atoms now.
plt.figure()
plt.plot(d_hat.T)
plt.show()
|
bsd-3-clause
|
TNT-Samuel/Coding-Projects
|
DNS Server/Source/Lib/_pyio.py
|
5
|
91130
|
"""
Python implementation of the io module.
"""
import os
import abc
import codecs
import errno
import stat
import sys
# Import _thread instead of threading to reduce startup cost
from _thread import allocate_lock as Lock
if sys.platform in {'win32', 'cygwin'}:
from msvcrt import setmode as _setmode
else:
_setmode = None
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
valid_seek_flags = {0, 1, 2} # Hardwired values
if hasattr(os, 'SEEK_HOLE') :
valid_seek_flags.add(os.SEEK_HOLE)
valid_seek_flags.add(os.SEEK_DATA)
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't want
# to inherit the C implementations.
# Rebind for compatibility
BlockingIOError = BlockingIOError
def open(file, mode="r", buffering=-1, encoding=None, errors=None,
newline=None, closefd=True, opener=None):
r"""Open file and return a stream. Raise OSError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file is
opened. It defaults to 'r' which means open for reading in text mode. Other
common values are 'w' for writing (truncating the file if it already
exists), 'x' for exclusive creation of a new file, and 'a' for appending
(which on some Unix systems, means that all writes append to the end of the
file regardless of the current seek position). In text mode, if encoding is
not specified the encoding used is platform dependent. (For reading and
writing raw bytes use binary mode and leave encoding unspecified.) The
available modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'x' create a new file and open it for writing
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (deprecated)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation. The 'x' mode implies 'w' and
    raises a `FileExistsError` if the file already exists.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
'U' mode is deprecated and will raise an exception in future versions
of Python. It has no effect in Python 3. Use newline to control
universal newlines mode.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the str name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline is a string controlling how universal newlines works (it only
applies to text mode). It can be None, '', '\n', '\r', and '\r\n'. It works
as follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
    closefd is a bool. If closefd is False, the underlying file descriptor will
be kept open when the file is closed. This does not work when a file name is
given and must be True in that case.
The newly created file is non-inheritable.
A custom opener can be used by passing a callable as *opener*. The
underlying file descriptor for the file object is then obtained by calling
*opener* with (*file*, *flags*). *opener* must return an open file
descriptor (passing os.open as *opener* results in functionality similar to
passing None).
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, int):
file = os.fspath(file)
if not isinstance(file, (str, bytes, int)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, str):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, int):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, str):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("axrwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
creating = "x" in modes
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if creating or writing or appending or updating:
raise ValueError("mode U cannot be combined with 'x', 'w', 'a', or '+'")
import warnings
warnings.warn("'U' mode is deprecated",
DeprecationWarning, 2)
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if creating + reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (creating or reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
raw = FileIO(file,
(creating and "x" or "") +
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd, opener=opener)
result = raw
try:
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (OSError, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return result
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif creating or writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
result = buffer
if binary:
return result
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
result = text
text.mode = mode
return result
except:
result.close()
raise
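# A minimal, hypothetical usage sketch of the parameters documented in open()
# above. It is kept in a function that is never called here, and the file name
# 'example.txt' is purely an assumption for illustration.
def _open_usage_sketch():
    # Text mode with an explicit encoding; newline=None (the default) means
    # '\n' written by the program is translated to os.linesep on disk.
    with open('example.txt', 'w', encoding='utf-8', newline=None) as fp:
        fp.write('first line\n')
    # Binary mode takes no encoding/newline arguments; with default buffering
    # this returns a BufferedReader, while buffering=0 would return the raw
    # FileIO object directly.
    with open('example.txt', 'rb') as f:
        return f.read()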
class DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ):
return (
"open(file, mode='r', buffering=-1, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dbm.dumb does).
See initstdio() in Python/pylifecycle.c.
"""
__doc__ = DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
# In normal operation, both `UnsupportedOperation`s should be bound to the
# same object.
try:
UnsupportedOperation = io.UnsupportedOperation
except AttributeError:
class UnsupportedOperation(OSError, ValueError):
pass
class IOBase(metaclass=abc.ABCMeta):
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise UnsupportedOperation when operations they do not support are
called.
The basic type used for binary data read from or written to a file is
bytes. Other bytes-like objects are accepted as method arguments too. In
some cases (such as readinto), a writable object is required. Text I/O
classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise OSError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
    with open('spam.txt', 'w') as fp:
fp.write('Spam and eggs!')
"""
### Internal ###
def _unsupported(self, name):
"""Internal: raise an OSError exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset pos. Argument pos is
interpreted relative to the position indicated by whence. Values
for whence are ints:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Some operating systems / file systems could provide additional values.
Return an int indicating the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return an int indicating the current stream position."""
return self.seek(0, 1)
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
try:
self.flush()
finally:
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return a bool indicating whether object supports random access.
If False, seek(), tell() and truncate() will raise OSError.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not seekable
"""
if not self.seekable():
raise UnsupportedOperation("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return a bool indicating whether object was opened for reading.
If False, read() will raise OSError.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not readable
"""
if not self.readable():
raise UnsupportedOperation("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return a bool indicating whether object was opened for writing.
If False, write() and truncate() will raise OSError.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not writable
"""
if not self.writable():
raise UnsupportedOperation("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise a ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self): # That's a forward reference
"""Context management protocol. Returns self (an instance of IOBase)."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor (an int) if one exists.
An OSError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return a bool indicating whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, size=-1):
r"""Read and return a line of bytes from the stream.
If size is specified, at most size bytes will be read.
Size should be an int.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
# For backwards compatibility, a (slowish) readline().
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if size >= 0:
n = min(n, size)
return n
else:
def nreadahead():
return 1
if size is None:
size = -1
else:
try:
size_index = size.__index__
except AttributeError:
raise TypeError(f"{size!r} is not an integer")
else:
size = size_index()
res = bytearray()
while size < 0 or len(res) < size:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
self._checkClosed()
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is None or hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
io.IOBase.register(IOBase)
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
def read(self, size=-1):
"""Read and return up to size bytes, where size is an int.
Returns an empty bytes object on EOF, or None if the object is
set not to block and has no data to read.
"""
if size is None:
size = -1
if size < 0:
return self.readall()
b = bytearray(size.__index__())
n = self.readinto(b)
if n is None:
return None
del b[n:]
return bytes(b)
def readall(self):
"""Read until EOF, using multiple read() call."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
if res:
return bytes(res)
else:
# b'' or None
return data
def readinto(self, b):
"""Read bytes into a pre-allocated bytes-like object b.
Returns an int representing the number of bytes read (0 for EOF), or
None if the object is set not to block and has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than the
length of b in bytes.
"""
self._unsupported("write")
io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)
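# A minimal, hypothetical sketch (not part of the original module): as the
# comment above RawIOBase.read() explains, a subclass only needs to provide
# readinto() to get read() and readall() for free. The tiny in-memory stream
# below demonstrates that; it is defined but never instantiated here.
class _SketchRawStream(RawIOBase):
    """A made-up raw stream that serves bytes from a fixed buffer."""
    def __init__(self, payload=b"hello raw world"):
        self._payload = payload
        self._offset = 0
    def readable(self):
        return True
    def readinto(self, b):
        # Copy at most len(b) bytes into the caller's buffer and report how
        # many were written; returning 0 signals EOF to the inherited read().
        chunk = self._payload[self._offset:self._offset + len(b)]
        b[:len(chunk)] = chunk
        self._offset += len(chunk)
        return len(chunk)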
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, size=-1):
"""Read and return up to size bytes, where size is an int.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def read1(self, size=-1):
"""Read up to size bytes with at most one read() system call,
where size is an int.
"""
self._unsupported("read1")
def readinto(self, b):
"""Read bytes into a pre-allocated bytes-like object b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
return self._readinto(b, read1=False)
def readinto1(self, b):
"""Read bytes into buffer *b*, using at most one system call
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
return self._readinto(b, read1=True)
def _readinto(self, b, read1):
if not isinstance(b, memoryview):
b = memoryview(b)
b = b.cast('B')
if read1:
data = self.read1(len(b))
else:
data = self.read(len(b))
n = len(data)
b[:n] = data
return n
def write(self, b):
"""Write the given bytes buffer to the IO stream.
Return the number of bytes written, which is always the length of b
in bytes.
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
def detach(self):
"""
Separate the underlying raw stream from the buffer and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
self._unsupported("detach")
io.BufferedIOBase.register(BufferedIOBase)
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self._raw = raw
### Positioning ###
def seek(self, pos, whence=0):
new_position = self.raw.seek(pos, whence)
if new_position < 0:
raise OSError("seek() returned an invalid position")
return new_position
def tell(self):
pos = self.raw.tell()
if pos < 0:
raise OSError("tell() returned an invalid position")
return pos
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
if self.closed:
raise ValueError("flush on closed file")
self.raw.flush()
def close(self):
if self.raw is not None and not self.closed:
try:
# may raise BlockingIOError or BrokenPipeError etc
self.flush()
finally:
self.raw.close()
def detach(self):
if self.raw is None:
raise ValueError("raw stream already detached")
self.flush()
raw = self._raw
self._raw = None
return raw
### Inquiries ###
def seekable(self):
return self.raw.seekable()
@property
def raw(self):
return self._raw
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
def __getstate__(self):
raise TypeError("can not serialize a '{0}' object"
.format(self.__class__.__name__))
def __repr__(self):
modname = self.__class__.__module__
clsname = self.__class__.__qualname__
try:
name = self.name
except Exception:
return "<{}.{}>".format(modname, clsname)
else:
return "<{}.{} name={!r}>".format(modname, clsname, name)
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf += initial_bytes
self._buffer = buf
self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
return self.__dict__.copy()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def getbuffer(self):
"""Return a readable and writable view of the buffer.
"""
if self.closed:
raise ValueError("getbuffer on closed file")
return memoryview(self._buffer)
def close(self):
self._buffer.clear()
super().close()
def read(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
else:
try:
size_index = size.__index__
except AttributeError:
raise TypeError(f"{size!r} is not an integer")
else:
size = size_index()
if size < 0:
size = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + size)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, size=-1):
"""This is the same as read.
"""
return self.read(size)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with memoryview(b) as view:
n = view.nbytes # Size of any bytes-like object
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos_index = pos.__index__
except AttributeError:
raise TypeError(f"{pos!r} is not an integer")
else:
pos = pos_index()
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("unsupported whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
else:
try:
pos_index = pos.__index__
except AttributeError:
raise TypeError(f"{pos!r} is not an integer")
else:
pos = pos_index()
if pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return pos
def readable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def writable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
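# A minimal, hypothetical usage sketch for BytesIO (defined above). It is kept
# in a function that is never called here, so nothing runs at import time.
def _bytesio_usage_sketch():
    buf = BytesIO(b"abc")
    buf.seek(0, 2)              # seek to the end (whence == 2)
    buf.write(b"def")           # append; the in-memory buffer grows
    view = buf.getbuffer()      # writable memoryview over the same buffer
    view[0:1] = b"A"            # mutate the underlying bytearray in place
    del view                    # release the export before anything resizes it
    return buf.getvalue()       # b"Abcdef"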
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
A buffer for a readable, sequential BaseRawIO object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
if not raw.readable():
raise OSError('"raw" argument must be readable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = Lock()
def readable(self):
return self.raw.readable()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, size=None):
"""Read size bytes.
Returns exactly size bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If size is negative, read until EOF or until read() would
block.
"""
if size is not None and size < -1:
raise ValueError("invalid number of bytes to read")
with self._read_lock:
return self._read_unlocked(size)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
if hasattr(self.raw, 'readall'):
chunk = self.raw.readall()
if chunk is None:
return buf[pos:] or None
else:
return buf[pos:] + chunk
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
chunk = self.raw.read()
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
chunk = self.raw.read(wanted)
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
# n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, size=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(size)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want or have <= 0:
to_read = self.buffer_size - have
current = self.raw.read(to_read)
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, size=-1):
"""Reads up to size bytes, with at most one read() system call."""
# Returns up to size bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if size < 0:
size = self.buffer_size
if size == 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(size, len(self._read_buf) - self._read_pos))
# Implementing readinto() and readinto1() is not strictly necessary (we
# could rely on the base class that provides an implementation in terms of
# read() and read1()). We do it anyway to keep the _pyio implementation
# similar to the io implementation (which implements the methods for
# performance reasons).
def _readinto(self, buf, read1):
"""Read data into *buf* with at most one system call."""
# Need to create a memoryview object of type 'b', otherwise
# we may not be able to assign bytes to it, and slicing it
# would create a new object.
if not isinstance(buf, memoryview):
buf = memoryview(buf)
if buf.nbytes == 0:
return 0
buf = buf.cast('B')
written = 0
with self._read_lock:
while written < len(buf):
# First try to read from internal buffer
avail = min(len(self._read_buf) - self._read_pos, len(buf))
if avail:
buf[written:written+avail] = \
self._read_buf[self._read_pos:self._read_pos+avail]
self._read_pos += avail
written += avail
if written == len(buf):
break
                # If remaining space in the caller's buffer is larger than the
                # internal buffer, read directly into the caller's buffer
if len(buf) - written > self.buffer_size:
n = self.raw.readinto(buf[written:])
if not n:
break # eof
written += n
# Otherwise refill internal buffer - unless we're
# in read1 mode and already got some data
elif not (read1 and written):
if not self._peek_unlocked(1):
break # eof
# In readinto1 mode, return as soon as we have some data
if read1 and written:
break
return written
def tell(self):
return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = _BufferedIOMixin.seek(self, pos, whence)
self._reset_read_buf()
return pos
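# A minimal, hypothetical sketch of BufferedReader semantics: peek() exposes
# buffered bytes without consuming them, while read() serves data from the
# internal buffer when it can. BytesIO stands in for the raw stream purely for
# illustration; the helper is defined but never called at import time.
def _bufferedreader_usage_sketch():
    reader = BufferedReader(BytesIO(b"spam and eggs"), buffer_size=4)
    head = reader.peek(2)       # buffered bytes, position does not advance
    first = reader.read(4)      # b"spam" -- consumed from the internal buffer
    rest = reader.read()        # reads the remainder until EOF
    return head, first, rest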
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
if not raw.writable():
raise OSError('"raw" argument must be writable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._write_buf = bytearray()
self._write_lock = Lock()
def writable(self):
return self.raw.writable()
def write(self, b):
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with self._write_lock:
if self.closed:
raise ValueError("write to closed file")
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer. (This may
# raise BlockingIOError with characters_written == 0.)
self._flush_unlocked()
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.buffer_size:
# We've hit the buffer_size. We have to accept a partial
# write and cut back our buffer.
overage = len(self._write_buf) - self.buffer_size
written -= overage
self._write_buf = self._write_buf[:self.buffer_size]
raise BlockingIOError(e.errno, e.strerror, written)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush on closed file")
while self._write_buf:
try:
n = self.raw.write(self._write_buf)
except BlockingIOError:
raise RuntimeError("self.raw should implement RawIOBase: it "
"should not raise BlockingIOError")
if n is None:
raise BlockingIOError(
errno.EAGAIN,
"write could not complete without blocking", 0)
if n > len(self._write_buf) or n < 0:
raise OSError("write() returned incorrect number of bytes")
del self._write_buf[:n]
def tell(self):
return _BufferedIOMixin.tell(self) + len(self._write_buf)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._write_lock:
self._flush_unlocked()
return _BufferedIOMixin.seek(self, pos, whence)
def close(self):
with self._write_lock:
if self.raw is None or self.closed:
return
# We have to release the lock and call self.flush() (which will
# probably just re-take the lock) in case flush has been overridden in
# a subclass or the user set self.flush to something. This is the same
# behavior as the C implementation.
try:
# may raise BlockingIOError or BrokenPipeError etc
self.flush()
finally:
with self._write_lock:
self.raw.close()
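# A minimal, hypothetical sketch of BufferedWriter behaviour: small writes are
# collected in the internal buffer and only pushed to the raw stream once the
# buffer fills up or flush()/close() is called. BytesIO stands in for the raw
# stream purely for illustration; the helper is never called at import time.
def _bufferedwriter_usage_sketch():
    sink = BytesIO()
    writer = BufferedWriter(sink, buffer_size=64)
    writer.write(b"hello ")            # stays in the write buffer
    before_flush = sink.getvalue()     # b"" -- nothing reached the sink yet
    writer.write(b"world")
    writer.flush()                     # pushes the buffered bytes into the sink
    return before_flush, sink.getvalue()   # (b"", b"hello world")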
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer, buffer_size=DEFAULT_BUFFER_SIZE):
"""Constructor.
The arguments are two RawIO instances.
"""
if not reader.readable():
raise OSError('"reader" argument must be readable.')
if not writer.writable():
raise OSError('"writer" argument must be writable.')
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size)
def read(self, size=-1):
if size is None:
size = -1
return self.reader.read(size)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, size=0):
return self.reader.peek(size)
def read1(self, size=-1):
return self.reader.read1(size)
def readinto1(self, b):
return self.reader.readinto1(b)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
try:
self.writer.close()
finally:
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
self.flush()
if self._read_buf:
# Undo read ahead.
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
if pos < 0:
raise OSError("seek() returned invalid position")
return pos
def tell(self):
if self._write_buf:
return BufferedWriter.tell(self)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
return BufferedWriter.truncate(self, pos)
def read(self, size=None):
if size is None:
size = -1
self.flush()
return BufferedReader.read(self, size)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, size=0):
self.flush()
return BufferedReader.peek(self, size)
def read1(self, size=-1):
self.flush()
return BufferedReader.read1(self, size)
def readinto1(self, b):
self.flush()
return BufferedReader.readinto1(self, b)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
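# A minimal, hypothetical sketch of BufferedRandom: reads and writes share one
# file position, and switching direction flushes or rewinds the internal
# buffers automatically. BytesIO stands in for a seekable raw stream purely
# for illustration; the helper is defined but never called here.
def _bufferedrandom_usage_sketch():
    stream = BufferedRandom(BytesIO(b"0123456789"))
    head = stream.read(4)        # b"0123"
    stream.write(b"AB")          # overwrites positions 4 and 5
    stream.seek(0)
    return head, stream.read()   # (b"0123", b"0123AB6789")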
class FileIO(RawIOBase):
_fd = -1
_created = False
_readable = False
_writable = False
_appending = False
_seekable = None
_closefd = True
def __init__(self, file, mode='r', closefd=True, opener=None):
"""Open a file. The mode can be 'r' (default), 'w', 'x' or 'a' for reading,
writing, exclusive creation or appending. The file will be created if it
doesn't exist when opened for writing or appending; it will be truncated
when opened for writing. A FileExistsError will be raised if it already
exists when opened for creating. Opening a file for creating implies
writing so this mode behaves in a similar way to 'w'. Add a '+' to the mode
to allow simultaneous reading and writing. A custom opener can be used by
passing a callable as *opener*. The underlying file descriptor for the file
object is then obtained by calling opener with (*name*, *flags*).
*opener* must return an open file descriptor (passing os.open as *opener*
results in functionality similar to passing None).
"""
if self._fd >= 0:
# Have to close the existing file first.
try:
if self._closefd:
os.close(self._fd)
finally:
self._fd = -1
if isinstance(file, float):
raise TypeError('integer argument expected, got float')
if isinstance(file, int):
fd = file
if fd < 0:
raise ValueError('negative file descriptor')
else:
fd = -1
if not isinstance(mode, str):
raise TypeError('invalid mode: %s' % (mode,))
if not set(mode) <= set('xrwab+'):
raise ValueError('invalid mode: %s' % (mode,))
if sum(c in 'rwax' for c in mode) != 1 or mode.count('+') > 1:
raise ValueError('Must have exactly one of create/read/write/append '
'mode and at most one plus')
if 'x' in mode:
self._created = True
self._writable = True
flags = os.O_EXCL | os.O_CREAT
elif 'r' in mode:
self._readable = True
flags = 0
elif 'w' in mode:
self._writable = True
flags = os.O_CREAT | os.O_TRUNC
elif 'a' in mode:
self._writable = True
self._appending = True
flags = os.O_APPEND | os.O_CREAT
if '+' in mode:
self._readable = True
self._writable = True
if self._readable and self._writable:
flags |= os.O_RDWR
elif self._readable:
flags |= os.O_RDONLY
else:
flags |= os.O_WRONLY
flags |= getattr(os, 'O_BINARY', 0)
noinherit_flag = (getattr(os, 'O_NOINHERIT', 0) or
getattr(os, 'O_CLOEXEC', 0))
flags |= noinherit_flag
owned_fd = None
try:
if fd < 0:
if not closefd:
raise ValueError('Cannot use closefd=False with file name')
if opener is None:
fd = os.open(file, flags, 0o666)
else:
fd = opener(file, flags)
if not isinstance(fd, int):
raise TypeError('expected integer from opener')
if fd < 0:
raise OSError('Negative file descriptor')
owned_fd = fd
if not noinherit_flag:
os.set_inheritable(fd, False)
self._closefd = closefd
fdfstat = os.fstat(fd)
try:
if stat.S_ISDIR(fdfstat.st_mode):
raise IsADirectoryError(errno.EISDIR,
os.strerror(errno.EISDIR), file)
except AttributeError:
                # Ignore the AttributeError if stat.S_ISDIR or errno.EISDIR
# don't exist.
pass
self._blksize = getattr(fdfstat, 'st_blksize', 0)
if self._blksize <= 1:
self._blksize = DEFAULT_BUFFER_SIZE
if _setmode:
# don't translate newlines (\r\n <=> \n)
_setmode(fd, os.O_BINARY)
self.name = file
if self._appending:
# For consistent behaviour, we explicitly seek to the
# end of file (otherwise, it might be done only on the
# first write()).
os.lseek(fd, 0, SEEK_END)
except:
if owned_fd is not None:
os.close(owned_fd)
raise
self._fd = fd
def __del__(self):
if self._fd >= 0 and self._closefd and not self.closed:
import warnings
warnings.warn('unclosed file %r' % (self,), ResourceWarning,
stacklevel=2, source=self)
self.close()
def __getstate__(self):
raise TypeError("cannot serialize '%s' object", self.__class__.__name__)
def __repr__(self):
class_name = '%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)
if self.closed:
return '<%s [closed]>' % class_name
try:
name = self.name
except AttributeError:
return ('<%s fd=%d mode=%r closefd=%r>' %
(class_name, self._fd, self.mode, self._closefd))
else:
return ('<%s name=%r mode=%r closefd=%r>' %
(class_name, name, self.mode, self._closefd))
def _checkReadable(self):
if not self._readable:
raise UnsupportedOperation('File not open for reading')
def _checkWritable(self, msg=None):
if not self._writable:
raise UnsupportedOperation('File not open for writing')
def read(self, size=None):
"""Read at most size bytes, returned as bytes.
        Only makes one system call, so less data may be returned than requested.
In non-blocking mode, returns None if no data is available.
Return an empty bytes object at EOF.
"""
self._checkClosed()
self._checkReadable()
if size is None or size < 0:
return self.readall()
try:
return os.read(self._fd, size)
except BlockingIOError:
return None
def readall(self):
"""Read all data from the file, returned as bytes.
In non-blocking mode, returns as much as is immediately available,
or None if no data is available. Return an empty bytes object at EOF.
"""
self._checkClosed()
self._checkReadable()
bufsize = DEFAULT_BUFFER_SIZE
try:
pos = os.lseek(self._fd, 0, SEEK_CUR)
end = os.fstat(self._fd).st_size
if end >= pos:
bufsize = end - pos + 1
except OSError:
pass
result = bytearray()
while True:
if len(result) >= bufsize:
bufsize = len(result)
bufsize += max(bufsize, DEFAULT_BUFFER_SIZE)
n = bufsize - len(result)
try:
chunk = os.read(self._fd, n)
except BlockingIOError:
if result:
break
return None
if not chunk: # reached the end of the file
break
result += chunk
return bytes(result)
def readinto(self, b):
"""Same as RawIOBase.readinto()."""
m = memoryview(b).cast('B')
data = self.read(len(m))
n = len(data)
m[:n] = data
return n
def write(self, b):
"""Write bytes b to file, return number written.
Only makes one system call, so not all of the data may be written.
The number of bytes actually written is returned. In non-blocking mode,
returns None if the write would block.
"""
self._checkClosed()
self._checkWritable()
try:
return os.write(self._fd, b)
except BlockingIOError:
return None
def seek(self, pos, whence=SEEK_SET):
"""Move to new file position.
Argument offset is a byte count. Optional argument whence defaults to
SEEK_SET or 0 (offset from start of file, offset should be >= 0); other values
are SEEK_CUR or 1 (move relative to current position, positive or negative),
and SEEK_END or 2 (move relative to end of file, usually negative, although
many platforms allow seeking beyond the end of a file).
Note that not all file objects are seekable.
"""
if isinstance(pos, float):
raise TypeError('an integer is required')
self._checkClosed()
return os.lseek(self._fd, pos, whence)
def tell(self):
"""tell() -> int. Current file position.
Can raise OSError for non seekable files."""
self._checkClosed()
return os.lseek(self._fd, 0, SEEK_CUR)
def truncate(self, size=None):
"""Truncate the file to at most size bytes.
Size defaults to the current file position, as returned by tell().
The current file position is changed to the value of size.
"""
self._checkClosed()
self._checkWritable()
if size is None:
size = self.tell()
os.ftruncate(self._fd, size)
return size
def close(self):
"""Close the file.
A closed file cannot be used for further I/O operations. close() may be
called more than once without error.
"""
if not self.closed:
try:
if self._closefd:
os.close(self._fd)
finally:
super().close()
def seekable(self):
"""True if file supports random-access."""
self._checkClosed()
if self._seekable is None:
try:
self.tell()
except OSError:
self._seekable = False
else:
self._seekable = True
return self._seekable
def readable(self):
"""True if file was opened in a read mode."""
self._checkClosed()
return self._readable
def writable(self):
"""True if file was opened in a write mode."""
self._checkClosed()
return self._writable
def fileno(self):
"""Return the underlying file descriptor (an integer)."""
self._checkClosed()
return self._fd
def isatty(self):
"""True if the file is connected to a TTY device."""
self._checkClosed()
return os.isatty(self._fd)
@property
def closefd(self):
"""True if the file descriptor will be closed by close()."""
return self._closefd
@property
def mode(self):
"""String giving the file mode"""
if self._created:
if self._readable:
return 'xb+'
else:
return 'xb'
elif self._appending:
if self._readable:
return 'ab+'
else:
return 'ab'
elif self._readable:
if self._writable:
return 'rb+'
else:
return 'rb'
else:
return 'wb'
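# A minimal, hypothetical FileIO sketch: an existing file descriptor can be
# wrapped without giving FileIO ownership of it by passing closefd=False, and
# a custom opener can adjust the flags passed to os.open(). The file name
# 'example.bin' and the O_NOFOLLOW flag are illustrative assumptions; the
# helper is defined but never called here.
def _fileio_usage_sketch():
    def nofollow_opener(path, flags):
        # Hypothetical opener: refuse to traverse symlinks where supported.
        return os.open(path, flags | getattr(os, 'O_NOFOLLOW', 0))
    with FileIO('example.bin', 'wb', opener=nofollow_opener) as f:
        f.write(b'\x00\x01')
    fd = os.open('example.bin', os.O_RDONLY)
    try:
        # closefd=False: closing the FileIO leaves fd usable for the caller.
        with FileIO(fd, 'rb', closefd=False) as f:
            data = f.read()
    finally:
        os.close(fd)
    return data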
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def read(self, size=-1):
"""Read at most size characters from stream, where size is an int.
Read from underlying buffer until we have size characters or we hit EOF.
If size is negative or omitted, read until EOF.
Returns a string.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream and returning an int."""
self._unsupported("write")
def truncate(self, pos=None):
"""Truncate size to pos, where pos is an int."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
def detach(self):
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
self._unsupported("detach")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
@property
def errors(self):
"""Error setting of the decoder or encoder.
Subclasses should override."""
return None
io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
r"""Codec used when reading a file in universal newlines mode. It wraps
another incremental decoder, translating \r\n and \r into \n. It also
records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
# decode input (with the eventual \r from a previous pass)
if self.decoder is None:
output = input
else:
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
if self.decoder is None:
buf = b""
flag = 0
else:
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
if self.decoder is not None:
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
if self.decoder is not None:
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
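# A minimal, hypothetical sketch of IncrementalNewlineDecoder: fed chunk by
# chunk, it holds back a trailing '\r' until it knows whether a '\n' follows,
# so a '\r\n' split across chunks is still translated into a single '\n'. The
# helper is defined but never called at import time.
def _newline_decoder_sketch():
    base = codecs.getincrementaldecoder('utf-8')()
    dec = IncrementalNewlineDecoder(base, translate=True)
    out = dec.decode(b"one\r")           # 'one' -- the trailing '\r' is held back
    out += dec.decode(b"\ntwo\rthree")   # the pending '\r\n' comes out as one '\n'
    out += dec.decode(b"", final=True)
    return out, dec.newlines             # ('one\ntwo\nthree', ('\r', '\r\n'))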
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding(False).
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
handling of line endings. If it is None, universal newlines is
enabled. With this enabled, on input, the lines endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
default line separator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 2048
# The write_through argument has no effect here since this
# implementation always writes through. The argument is present only
# so that the signature can match the signature of the C version.
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False, write_through=False):
self._check_newline(newline)
if encoding is None:
try:
encoding = os.device_encoding(buffer.fileno())
except (AttributeError, UnsupportedOperation):
pass
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding(False)
if not isinstance(encoding, str):
raise ValueError("invalid encoding: %r" % encoding)
if not codecs.lookup(encoding)._is_text_encoding:
msg = ("%r is not a text encoding; "
"use codecs.open() to handle arbitrary codecs")
raise LookupError(msg % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, str):
raise ValueError("invalid errors: %r" % errors)
self._buffer = buffer
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
self._has_read1 = hasattr(self.buffer, 'read1')
self._configure(encoding, errors, newline,
line_buffering, write_through)
def _check_newline(self, newline):
if newline is not None and not isinstance(newline, str):
raise TypeError("illegal newline type: %r" % (type(newline),))
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
def _configure(self, encoding=None, errors=None, newline=None,
line_buffering=False, write_through=False):
self._encoding = encoding
self._errors = errors
self._encoder = None
self._decoder = None
self._b2cratio = 0.0
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._line_buffering = line_buffering
self._write_through = write_through
# don't write a BOM in the middle of a file
if self._seekable and self.writable():
position = self.buffer.tell()
if position != 0:
try:
self._get_encoder().setstate(0)
except LookupError:
# Sometimes the encoder doesn't exist
pass
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
def __repr__(self):
result = "<{}.{}".format(self.__class__.__module__,
self.__class__.__qualname__)
try:
name = self.name
except Exception:
pass
else:
result += " name={0!r}".format(name)
try:
mode = self.mode
except Exception:
pass
else:
result += " mode={0!r}".format(mode)
return result + " encoding={0!r}>".format(self.encoding)
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
@property
def write_through(self):
return self._write_through
@property
def buffer(self):
return self._buffer
def reconfigure(self, *,
encoding=None, errors=None, newline=Ellipsis,
line_buffering=None, write_through=None):
"""Reconfigure the text stream with new parameters.
This also flushes the stream.
"""
if (self._decoder is not None
and (encoding is not None or errors is not None
or newline is not Ellipsis)):
raise UnsupportedOperation(
"It is not possible to set the encoding or newline of stream "
"after the first read")
if errors is None:
if encoding is None:
errors = self._errors
else:
errors = 'strict'
elif not isinstance(errors, str):
raise TypeError("invalid errors: %r" % errors)
if encoding is None:
encoding = self._encoding
else:
if not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if newline is Ellipsis:
newline = self._readnl
self._check_newline(newline)
if line_buffering is None:
line_buffering = self.line_buffering
if write_through is None:
write_through = self.write_through
self.flush()
self._configure(encoding, errors, newline,
line_buffering, write_through)
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
if self.buffer is not None and not self.closed:
try:
self.flush()
finally:
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
'Write data, where s is a str'
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, str):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
"""
# The return value is True unless EOF was reached. The decoded
# string is placed in self._decoded_chars (replacing its previous
# value). The entire input chunk is sent to the decoder, though
# some of it may remain buffered in the decoder, yet to be
# converted.
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
if self._has_read1:
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
else:
input_chunk = self.buffer.read(self._CHUNK_SIZE)
eof = not input_chunk
decoded_chars = self._decoder.decode(input_chunk, eof)
self._set_decoded_chars(decoded_chars)
if decoded_chars:
self._b2cratio = len(input_chunk) / len(self._decoded_chars)
else:
self._b2cratio = 0.0
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
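# A worked example of the cookie layout (illustrative comment, not in the
# original): with position=10 and dec_flags=1, _pack_cookie() yields
# 10 | (1 << 64), and _unpack_cookie() then recovers (10, 1, 0, 0, 0).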
def tell(self):
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if not self._telling:
raise OSError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Fast search for an acceptable start point, close to our
# current pos.
# Rationale: calling decoder.decode() has a large overhead
# regardless of chunk size; we want the number of such calls to
# be O(1) in most situations (common decoders, non-crazy input).
# Actually, it will be exactly 1 for fixed-size codecs (all
# 8-bit codecs, also UTF-16 and UTF-32).
skip_bytes = int(self._b2cratio * chars_to_skip)
skip_back = 1
assert skip_bytes <= len(next_input)
while skip_bytes > 0:
decoder.setstate((b'', dec_flags))
# Decode up to tentative start point
n = len(decoder.decode(next_input[:skip_bytes]))
if n <= chars_to_skip:
b, d = decoder.getstate()
if not b:
# Before pos and no bytes buffered in decoder => OK
dec_flags = d
chars_to_skip -= n
break
# Skip back by buffered amount and reset heuristic
skip_bytes -= len(b)
skip_back = 1
else:
# We're too far ahead, skip back a bit
skip_bytes -= skip_back
skip_back = skip_back * 2
else:
skip_bytes = 0
decoder.setstate((b'', dec_flags))
# Note our initial start point.
start_pos = position + skip_bytes
start_flags = dec_flags
if chars_to_skip == 0:
# We haven't moved from the start point.
return self._pack_cookie(start_pos, start_flags)
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
bytes_fed = 0
need_eof = 0
# Chars decoded since `start_pos`
chars_decoded = 0
for i in range(skip_bytes, len(next_input)):
bytes_fed += 1
chars_decoded += len(decoder.decode(next_input[i:i+1]))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise OSError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
return self.buffer.truncate(pos)
def detach(self):
if self.buffer is None:
raise ValueError("buffer is already detached")
self.flush()
buffer = self._buffer
self._buffer = None
return buffer
def seek(self, cookie, whence=0):
def _reset_encoder(position):
"""Reset the encoder (merely useful for proper BOM handling)"""
try:
encoder = self._encoder or self._get_encoder()
except LookupError:
# Sometimes the encoder doesn't exist
pass
else:
if position != 0:
encoder.setstate(0)
else:
encoder.reset()
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if whence == 1: # seek relative to current position
if cookie != 0:
raise UnsupportedOperation("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
if whence == 2: # seek relative to end of file
if cookie != 0:
raise UnsupportedOperation("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, 2)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
_reset_encoder(position)
return position
if whence != 0:
raise ValueError("unsupported whence (%r)" % (whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if cookie == 0 and self._decoder:
self._decoder.reset()
elif self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise OSError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
_reset_encoder(cookie)
return cookie
def read(self, size=None):
self._checkReadable()
if size is None:
size = -1
else:
try:
size_index = size.__index__
except AttributeError:
raise TypeError(f"{size!r} is not an integer")
else:
size = size_index()
decoder = self._decoder or self._get_decoder()
if size < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have size characters to return.
eof = False
result = self._get_decoded_chars(size)
while len(result) < size and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(size - len(result))
return result
def __next__(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, size=None):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
else:
try:
size_index = size.__index__
except AttributeError:
raise TypeError(f"{size!r} is not an integer")
else:
size = size_index()
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
# Make the decoder if it doesn't already exist.
if not self._decoder:
self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if size >= 0 and len(line) >= size:
endpos = size # reached length size
break
# No line ending seen yet - get more data
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if size >= 0 and endpos > size:
endpos = size # don't exceed size
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
class StringIO(TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
The initial_value argument sets the value of object. The newline
argument is like the one of TextIOWrapper's constructor.
"""
def __init__(self, initial_value="", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding="utf-8",
errors="surrogatepass",
newline=newline)
# Issue #5645: make universal newlines semantics the same as in the
# C version, even under Windows.
if newline is None:
self._writetranslate = False
if initial_value is not None:
if not isinstance(initial_value, str):
raise TypeError("initial_value must be str or None, not {0}"
.format(type(initial_value).__name__))
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
decoder = self._decoder or self._get_decoder()
old_state = decoder.getstate()
decoder.reset()
try:
return decoder.decode(self.buffer.getvalue(), final=True)
finally:
decoder.setstate(old_state)
def __repr__(self):
# TextIOWrapper tells the encoding in its repr. In StringIO,
# that's an implementation detail.
return object.__repr__(self)
@property
def errors(self):
return None
@property
def encoding(self):
return None
def detach(self):
# This doesn't make sense on StringIO.
self._unsupported("detach")
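# A minimal usage sketch (illustrative, not part of the original module),
# assuming the BytesIO class defined earlier in this file. It exercises the
# StringIO round-trip and the TextIOWrapper.reconfigure() behaviour shown above.
if __name__ == "__main__":
    sio = StringIO("alpha\nbeta\n")
    assert sio.readline() == "alpha\n"
    sio.seek(0)
    assert sio.getvalue() == "alpha\nbeta\n"
    wrapper = TextIOWrapper(BytesIO(), encoding="utf-8")
    wrapper.reconfigure(line_buffering=True)  # flushes, then re-applies settings
    wrapper.write("hello\n")  # with line buffering, this write is flushed at once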
|
gpl-3.0
|
b0ttl3z/SickRage
|
lib/imdb/parser/http/searchPersonParser.py
|
76
|
3764
|
"""
parser.http.searchPersonParser module (imdb package).
This module provides the HTMLSearchPersonParser class (and the
search_person_parser instance), used to parse the results of a search
for a given person.
E.g., when searching for the name "Mel Gibson", the parsed page would be:
http://akas.imdb.com/find?q=Mel+Gibson&nm=on&mx=20
Copyright 2004-2013 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
from imdb.utils import analyze_name, build_name
from utils import Extractor, Attribute, analyze_imdbid
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
def _cleanName(n):
"""Clean the name in a title tag."""
if not n:
return u''
n = n.replace('Filmography by type for', '') # FIXME: temporary.
return n
class DOMBasicPersonParser(DOMBasicMovieParser):
"""Simply get the name of a person and the imdbID.
It's used by the DOMHTMLSearchPersonParser class to return a result
for a direct match (when a search on IMDb results in a single
person, the web server sends the person page directly)."""
_titleFunct = lambda self, x: analyze_name(_cleanName(x), canonical=1)
_reAKASp = re.compile(r'(?:aka|birth name) (<em>")(.*?)"(<br>|<\/em>|<\/td>)',
re.I | re.M)
class DOMHTMLSearchPersonParser(DOMHTMLSearchMovieParser):
"""Parse the html page that the IMDb web server shows when the
"new search system" is used, for persons."""
_BaseParser = DOMBasicPersonParser
_notDirectHitTitle = '<title>find - imdb'
_titleBuilder = lambda self, x: build_name(x, canonical=True)
_linkPrefix = '/name/nm'
_attrs = [Attribute(key='data',
multi=True,
path={
'link': "./a[1]/@href",
'name': "./a[1]/text()",
'index': "./text()[1]",
'akas': ".//div[@class='_imdbpyAKA']/text()"
},
postprocess=lambda x: (
analyze_imdbid(x.get('link') or u''),
analyze_name((x.get('name') or u'') + \
(x.get('index') or u''),
canonical=1), x.get('akas')
))]
extractors = [Extractor(label='search',
path="//td[@class='result_text']/a[starts-with(@href, '/name/nm')]/..",
attrs=_attrs)]
def preprocess_string(self, html_string):
if self._notDirectHitTitle in html_string[:10240].lower():
html_string = _reAKASp.sub(
r'\1<div class="_imdbpyAKA">\2::</div>\3',
html_string)
return DOMHTMLSearchMovieParser.preprocess_string(self, html_string)
_OBJECTS = {
'search_person_parser': ((DOMHTMLSearchPersonParser,),
{'kind': 'person', '_basic_parser': DOMBasicPersonParser})
}
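# An illustrative sketch (not part of the original module): this parser is not
# usually driven directly; the imdb package dispatches to search_person_parser
# through an IMDb access object, roughly:
#
#     from imdb import IMDb
#     ia = IMDb('http')
#     for person in ia.search_person('Mel Gibson'):
#         print person.personID, person['name']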
|
gpl-3.0
|
israeleriston/scientific-week
|
backend/venv/lib/python3.5/site-packages/sqlalchemy/orm/interfaces.py
|
27
|
22073
|
# orm/interfaces.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Contains various base classes used throughout the ORM.
Defines some key base classes prominent within the internals,
as well as the now-deprecated ORM extension classes.
Other than the deprecated extensions, this module and the
classes within are mostly private, though some attributes
are exposed when inspecting mappings.
"""
from __future__ import absolute_import
from .. import util
from ..sql import operators
from .base import (ONETOMANY, MANYTOONE, MANYTOMANY,
EXT_CONTINUE, EXT_STOP, NOT_EXTENSION)
from .base import (InspectionAttr, InspectionAttrInfo, _MappedAttribute)
import collections
from .. import inspect
from . import path_registry
# imported later
MapperExtension = SessionExtension = AttributeExtension = None
__all__ = (
'AttributeExtension',
'EXT_CONTINUE',
'EXT_STOP',
'ONETOMANY',
'MANYTOMANY',
'MANYTOONE',
'NOT_EXTENSION',
'LoaderStrategy',
'MapperExtension',
'MapperOption',
'MapperProperty',
'PropComparator',
'SessionExtension',
'StrategizedProperty',
)
class MapperProperty(_MappedAttribute, InspectionAttr, util.MemoizedSlots):
"""Represent a particular class attribute mapped by :class:`.Mapper`.
The most common occurrences of :class:`.MapperProperty` are the
mapped :class:`.Column`, which is represented in a mapping as
an instance of :class:`.ColumnProperty`,
and a reference to another class produced by :func:`.relationship`,
represented in the mapping as an instance of
:class:`.RelationshipProperty`.
"""
__slots__ = (
'_configure_started', '_configure_finished', 'parent', 'key',
'info'
)
cascade = frozenset()
"""The set of 'cascade' attribute names.
This collection is checked before the 'cascade_iterator' method is called.
The collection typically only applies to a RelationshipProperty.
"""
is_property = True
"""Part of the InspectionAttr interface; states this object is a
mapper property.
"""
def _memoized_attr_info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.InspectionAttr`.
The dictionary is generated when first accessed. Alternatively,
it can be specified as a constructor argument to the
:func:`.column_property`, :func:`.relationship`, or :func:`.composite`
functions.
.. versionadded:: 0.8 Added support for .info to all
:class:`.MapperProperty` subclasses.
.. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
available on extension types via the
:attr:`.InspectionAttrInfo.info` attribute, so that it can apply
to a wider variety of ORM and extension constructs.
.. seealso::
:attr:`.QueryableAttribute.info`
:attr:`.SchemaItem.info`
"""
return {}
def setup(self, context, entity, path, adapter, **kwargs):
"""Called by Query for the purposes of constructing a SQL statement.
Each MapperProperty associated with the target mapper processes the
statement referenced by the query context, adding columns and/or
criterion as appropriate.
"""
def create_row_processor(self, context, path,
mapper, result, adapter, populators):
"""Produce row processing functions and append to the given
set of populators lists.
"""
def cascade_iterator(self, type_, state, visited_instances=None,
halt_on=None):
"""Iterate through instances related to the given instance for
a particular 'cascade', starting with this MapperProperty.
Return an iterator of 3-tuples (instance, mapper, state).
Note that the 'cascade' collection on this MapperProperty is
checked first for the given type before cascade_iterator is called.
This method typically only applies to RelationshipProperty.
"""
return iter(())
def set_parent(self, parent, init):
"""Set the parent mapper that references this MapperProperty.
This method is overridden by some subclasses to perform extra
setup when the mapper is first known.
"""
self.parent = parent
def instrument_class(self, mapper):
"""Hook called by the Mapper to the property to initiate
instrumentation of the class attribute managed by this
MapperProperty.
The MapperProperty here will typically call out to the
attributes module to set up an InstrumentedAttribute.
This step is the first of two steps to set up an InstrumentedAttribute,
and is called early in the mapper setup process.
The second step is typically the init_class_attribute step,
called from StrategizedProperty via the post_instrument_class()
hook. This step assigns additional state to the InstrumentedAttribute
(specifically the "impl") which has been determined after the
MapperProperty has determined what kind of persistence
management it needs to do (e.g. scalar, object, collection, etc).
"""
def __init__(self):
self._configure_started = False
self._configure_finished = False
def init(self):
"""Called after all mappers are created to assemble
relationships between mappers and perform other post-mapper-creation
initialization steps.
"""
self._configure_started = True
self.do_init()
self._configure_finished = True
@property
def class_attribute(self):
"""Return the class-bound descriptor corresponding to this
:class:`.MapperProperty`.
This is basically a ``getattr()`` call::
return getattr(self.parent.class_, self.key)
I.e. if this :class:`.MapperProperty` were named ``addresses``,
and the class to which it is mapped is ``User``, this sequence
is possible::
>>> from sqlalchemy import inspect
>>> mapper = inspect(User)
>>> addresses_property = mapper.attrs.addresses
>>> addresses_property.class_attribute is User.addresses
True
>>> User.addresses.property is addresses_property
True
"""
return getattr(self.parent.class_, self.key)
def do_init(self):
"""Perform subclass-specific initialization post-mapper-creation
steps.
This is a template method called by the ``MapperProperty``
object's init() method.
"""
def post_instrument_class(self, mapper):
"""Perform instrumentation adjustments that need to occur
after init() has completed.
The given Mapper is the Mapper invoking the operation, which
may not be the same Mapper as self.parent in an inheritance
scenario; however, Mapper will always at least be a sub-mapper of
self.parent.
This method is typically used by StrategizedProperty, which delegates
it to LoaderStrategy.init_class_attribute() to perform final setup
on the class-bound InstrumentedAttribute.
"""
def merge(self, session, source_state, source_dict, dest_state,
dest_dict, load, _recursive, _resolve_conflict_map):
"""Merge the attribute represented by this ``MapperProperty``
from source to destination object.
"""
def __repr__(self):
return '<%s at 0x%x; %s>' % (
self.__class__.__name__,
id(self), getattr(self, 'key', 'no key'))
class PropComparator(operators.ColumnOperators):
r"""Defines SQL operators for :class:`.MapperProperty` objects.
SQLAlchemy allows for operators to
be redefined at both the Core and ORM level. :class:`.PropComparator`
is the base class of operator redefinition for ORM-level operations,
including those of :class:`.ColumnProperty`,
:class:`.RelationshipProperty`, and :class:`.CompositeProperty`.
.. note:: With the advent of Hybrid properties introduced in SQLAlchemy
0.7, as well as Core-level operator redefinition in
SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator`
instances is extremely rare. See :ref:`hybrids_toplevel` as well
as :ref:`types_operators`.
User-defined subclasses of :class:`.PropComparator` may be created. The
built-in Python comparison and math operator methods, such as
:meth:`.operators.ColumnOperators.__eq__`,
:meth:`.operators.ColumnOperators.__lt__`, and
:meth:`.operators.ColumnOperators.__add__`, can be overridden to provide
new operator behavior. The custom :class:`.PropComparator` is passed to
the :class:`.MapperProperty` instance via the ``comparator_factory``
argument. In each case,
the appropriate subclass of :class:`.PropComparator` should be used::
# definition of custom PropComparator subclasses
from sqlalchemy.orm.properties import \
ColumnProperty,\
CompositeProperty,\
RelationshipProperty
class MyColumnComparator(ColumnProperty.Comparator):
def __eq__(self, other):
return self.__clause_element__() == other
class MyRelationshipComparator(RelationshipProperty.Comparator):
def any(self, expression):
"define the 'any' operation"
# ...
class MyCompositeComparator(CompositeProperty.Comparator):
def __gt__(self, other):
"redefine the 'greater than' operation"
return sql.and_(*[a>b for a, b in
zip(self.__clause_element__().clauses,
other.__composite_values__())])
# application of custom PropComparator subclasses
from sqlalchemy.orm import column_property, relationship, composite
from sqlalchemy import Column, String
class SomeMappedClass(Base):
some_column = column_property(Column("some_column", String),
comparator_factory=MyColumnComparator)
some_relationship = relationship(SomeOtherClass,
comparator_factory=MyRelationshipComparator)
some_composite = composite(
Column("a", String), Column("b", String),
comparator_factory=MyCompositeComparator
)
Note that for column-level operator redefinition, it's usually
simpler to define the operators at the Core level, using the
:attr:`.TypeEngine.comparator_factory` attribute. See
:ref:`types_operators` for more detail.
See also:
:class:`.ColumnProperty.Comparator`
:class:`.RelationshipProperty.Comparator`
:class:`.CompositeProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__slots__ = 'prop', 'property', '_parententity', '_adapt_to_entity'
def __init__(self, prop, parentmapper, adapt_to_entity=None):
self.prop = self.property = prop
self._parententity = adapt_to_entity or parentmapper
self._adapt_to_entity = adapt_to_entity
def __clause_element__(self):
raise NotImplementedError("%r" % self)
def _query_clause_element(self):
return self.__clause_element__()
def adapt_to_entity(self, adapt_to_entity):
"""Return a copy of this PropComparator which will use the given
:class:`.AliasedInsp` to produce corresponding expressions.
"""
return self.__class__(self.prop, self._parententity, adapt_to_entity)
@property
def _parentmapper(self):
"""legacy; this is renamed to _parententity to be
compatible with QueryableAttribute."""
return inspect(self._parententity).mapper
@property
def adapter(self):
"""Produce a callable that adapts column expressions
to suit an aliased version of this comparator.
"""
if self._adapt_to_entity is None:
return None
else:
return self._adapt_to_entity._adapt_element
@property
def info(self):
return self.property.info
@staticmethod
def any_op(a, b, **kwargs):
return a.any(b, **kwargs)
@staticmethod
def has_op(a, b, **kwargs):
return a.has(b, **kwargs)
@staticmethod
def of_type_op(a, class_):
return a.of_type(class_)
def of_type(self, class_):
r"""Redefine this object in terms of a polymorphic subclass.
Returns a new PropComparator from which further criterion can be
evaluated.
e.g.::
query.join(Company.employees.of_type(Engineer)).\
filter(Engineer.name=='foo')
:param \class_: a class or mapper indicating that criterion will be
against this specific subclass.
"""
return self.operate(PropComparator.of_type_op, class_)
def any(self, criterion=None, **kwargs):
r"""Return true if this collection contains any member that meets the
given criterion.
The usual implementation of ``any()`` is
:meth:`.RelationshipProperty.Comparator.any`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.any_op, criterion, **kwargs)
def has(self, criterion=None, **kwargs):
r"""Return true if this element references a member which meets the
given criterion.
The usual implementation of ``has()`` is
:meth:`.RelationshipProperty.Comparator.has`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.has_op, criterion, **kwargs)
class StrategizedProperty(MapperProperty):
"""A MapperProperty which uses selectable strategies to affect
loading behavior.
There is a single strategy selected by default. Alternate
strategies can be selected at Query time through the usage of
``StrategizedOption`` objects via the Query.options() method.
The mechanics of StrategizedProperty are used for every Query
invocation for every mapped attribute participating in that Query,
to determine first how the attribute will be rendered in SQL
and secondly how the attribute will retrieve a value from a result
row and apply it to a mapped object. The routines here are very
performance-critical.
"""
__slots__ = (
'_strategies', 'strategy',
'_wildcard_token', '_default_path_loader_key'
)
strategy_wildcard_key = None
def _memoized_attr__wildcard_token(self):
return ("%s:%s" % (
self.strategy_wildcard_key, path_registry._WILDCARD_TOKEN), )
def _memoized_attr__default_path_loader_key(self):
return (
"loader",
("%s:%s" % (
self.strategy_wildcard_key, path_registry._DEFAULT_TOKEN), )
)
def _get_context_loader(self, context, path):
load = None
# use EntityRegistry.__getitem__()->PropRegistry here so
# that the path is stated in terms of our base
search_path = dict.__getitem__(path, self)
# search among: exact match, "attr.*", "default" strategy
# if any.
for path_key in (
search_path._loader_key,
search_path._wildcard_path_loader_key,
search_path._default_path_loader_key
):
if path_key in context.attributes:
load = context.attributes[path_key]
break
return load
def _get_strategy(self, key):
try:
return self._strategies[key]
except KeyError:
cls = self._strategy_lookup(*key)
self._strategies[key] = self._strategies[
cls] = strategy = cls(self, key)
return strategy
def setup(
self, context, entity, path, adapter, **kwargs):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.setup_query(context, entity, path, loader, adapter, **kwargs)
def create_row_processor(
self, context, path, mapper,
result, adapter, populators):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.create_row_processor(
context, path, loader,
mapper, result, adapter, populators)
def do_init(self):
self._strategies = {}
self.strategy = self._get_strategy(self.strategy_key)
def post_instrument_class(self, mapper):
if not self.parent.non_primary and \
not mapper.class_manager._attr_has_impl(self.key):
self.strategy.init_class_attribute(mapper)
_all_strategies = collections.defaultdict(dict)
@classmethod
def strategy_for(cls, **kw):
def decorate(dec_cls):
# ensure each subclass of the strategy has its
# own _strategy_keys collection
if '_strategy_keys' not in dec_cls.__dict__:
dec_cls._strategy_keys = []
key = tuple(sorted(kw.items()))
cls._all_strategies[cls][key] = dec_cls
dec_cls._strategy_keys.append(key)
return dec_cls
return decorate
@classmethod
def _strategy_lookup(cls, *key):
for prop_cls in cls.__mro__:
if prop_cls in cls._all_strategies:
strategies = cls._all_strategies[prop_cls]
try:
return strategies[key]
except KeyError:
pass
raise Exception("can't locate strategy for %s %s" % (cls, key))
class MapperOption(object):
"""Describe a modification to a Query."""
propagate_to_loaders = False
"""if True, indicate this option should be carried along
to "secondary" Query objects produced during lazy loads
or refresh operations.
"""
def process_query(self, query):
"""Apply a modification to the given :class:`.Query`."""
def process_query_conditionally(self, query):
"""same as process_query(), except that this option may not
apply to the given query.
This is typically used during a lazy load or scalar refresh
operation to propagate options stated in the original Query to the
new Query being used for the load. It occurs for those options that
specify propagate_to_loaders=True.
"""
self.process_query(query)
class LoaderStrategy(object):
"""Describe the loading behavior of a StrategizedProperty object.
The ``LoaderStrategy`` interacts with the querying process in three
ways:
* it controls the configuration of the ``InstrumentedAttribute``
placed on a class to handle the behavior of the attribute. this
may involve setting up class-level callable functions to fire
off a select operation when the attribute is first accessed
(i.e. a lazy load)
* it processes the ``QueryContext`` at statement construction time,
where it can modify the SQL statement that is being produced.
For example, simple column attributes will add their represented
column to the list of selected columns, a joined eager loader
may establish join clauses to add to the statement.
* It produces "row processor" functions at result fetching time.
These "row processor" functions populate a particular attribute
on a particular mapped instance.
"""
__slots__ = 'parent_property', 'is_class_level', 'parent', 'key', \
'strategy_key', 'strategy_opts'
def __init__(self, parent, strategy_key):
self.parent_property = parent
self.is_class_level = False
self.parent = self.parent_property.parent
self.key = self.parent_property.key
self.strategy_key = strategy_key
self.strategy_opts = dict(strategy_key)
def init_class_attribute(self, mapper):
pass
def setup_query(self, context, entity, path, loadopt, adapter, **kwargs):
"""Establish column and other state for a given QueryContext.
This method fulfills the contract specified by MapperProperty.setup().
StrategizedProperty delegates its setup() method
directly to this method.
"""
def create_row_processor(self, context, path, loadopt, mapper,
result, adapter, populators):
"""Establish row processing functions for a given QueryContext.
This method fulfills the contract specified by
MapperProperty.create_row_processor().
StrategizedProperty delegates its create_row_processor() method
directly to this method.
"""
def __str__(self):
return str(self.parent_property)
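# An illustrative sketch (not part of this module): concrete loader strategies
# register themselves against a StrategizedProperty subclass through the
# strategy_for() decorator defined above, e.g. in sqlalchemy.orm.strategies:
#
#     @properties.ColumnProperty.strategy_for(instrument=True, deferred=False)
#     class ColumnLoader(LoaderStrategy):
#         ...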
|
mit
|
acmihal/Cinnamon
|
files/usr/lib/cinnamon-looking-glass/lookingglass_proxy.py
|
12
|
3941
|
from gi.repository import Gio, Gtk, GObject, Gdk, Pango, GLib
LG_DBUS_NAME = "org.Cinnamon.LookingGlass"
LG_DBUS_PATH = "/org/Cinnamon/LookingGlass"
class LookingGlassProxy:
def __init__ (self):
self._signals = []
self._statusChangeCallbacks = []
self._proxy = None
Gio.bus_watch_name(Gio.BusType.SESSION, LG_DBUS_NAME, Gio.BusNameWatcherFlags.NONE, self._onConnect, self._onDisconnect)
def addStatusChangeCallback(self, callback):
self._statusChangeCallbacks.append(callback)
def refreshStatus(self):
if self._proxy is not None:
self._setStatus(True)
else:
self._setStatus(False)
def getIsReady(self):
return self._proxy is not None
def connect(self, name, callback):
self._signals.append((name, callback))
def _onSignal(self, proxy, sender_name, signal_name, params):
for name, callback in self._signals:
if signal_name == name:
callback(*params)
def _setStatus(self, state):
for callback in self._statusChangeCallbacks:
callback(state)
def _onConnect(self, connection, name, owner):
if self._proxy:
return
self._initProxy()
def _onDisconnect(self, connection, name):
self._proxy = None
self._setStatus(False)
def _initProxy(self):
try:
self._proxy = Gio.DBusProxy.new_for_bus( Gio.BusType.SESSION, Gio.DBusProxyFlags.NONE, None,
LG_DBUS_NAME, LG_DBUS_PATH, LG_DBUS_NAME, None, self._onProxyReady, None)
except GLib.Error as e:  # dbus is never imported here; Gio raises GLib.Error
print(e)
self._proxy = None
def _onProxyReady(self, object, result, data=None):
self._proxy = Gio.DBusProxy.new_for_bus_finish(result)
self._proxy.connect("g-signal", self._onSignal)
self._setStatus(True)
# Proxy Methods:
def Eval(self, code):
if self._proxy:
try:
self._proxy.Eval('(s)', code)
except:
pass
def GetResults(self):
if self._proxy:
try:
return self._proxy.GetResults('()')
except:
pass
return (False, "")
def AddResult(self, code):
if self._proxy:
try:
self._proxy.AddResult('(s)', code)
except:
pass
def GetErrorStack(self):
if self._proxy:
try:
return self._proxy.GetErrorStack('()')
except:
pass
return (False, "")
def GetMemoryInfo(self):
if self._proxy:
try:
return self._proxy.GetMemoryInfo('()')
except:
pass
return (False, 0, {})
def FullGc(self):
if self._proxy:
try:
self._proxy.FullGc('()')
except:
pass
def Inspect(self, code):
if self._proxy:
try:
return self._proxy.Inspect('(s)', code)
except:
pass
return (False, "")
def GetLatestWindowList(self):
if self._proxy:
try:
return self._proxy.GetLatestWindowList('()')
except:
pass
return (False, "")
def StartInspector(self):
if self._proxy:
try:
self._proxy.StartInspector('()')
except:
pass
def GetExtensionList(self):
if self._proxy:
try:
return self._proxy.GetExtensionList('()')
except:
pass
return (False, "")
def ReloadExtension(self, uuid):
if self._proxy:
try:
return self._proxy.ReloadExtension('(s)', uuid)
except:
pass
return (False, "")
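# A minimal usage sketch (illustrative, not part of the original file), showing
# how the proxy might be wired into a caller; widget integration is omitted.
#
#     def on_status(online):
#         print("looking glass online: %s" % online)
#     proxy = LookingGlassProxy()
#     proxy.addStatusChangeCallback(on_status)
#     if proxy.getIsReady():
#         proxy.Eval("global.log('hello from the proxy')")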
|
gpl-2.0
|
AlessioCasco/gandi-dyndns
|
gandi-dyndns.py
|
1
|
9763
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
gandi-dyndns
@author: AlessioCasco
"""
from bottle import route, run, request, response
from optparse import OptionParser
import logging as log
import xmlrpclib
import json
import sys
import re
gandi_fqdn_ip = {}
@route('/ping', method=['GET', 'POST'])
def ping():
'''Function for monitoring/ping'''
response.headers['Server'] = 'gandi-dyndns'
response.status = 200
return('I\'am alive!\n')
@route('/nic_update', method=['GET', 'POST'])
def gandi_dyndns():
'''Main function'''
response.headers['Server'] = 'gandi-dyndns'
# dictionary gandi_fqdn_ip, has fqdn:ip key:value from all the legit requests
global gandi_fqdn_ip
# dictionary new_fqdn_ip, has fqdn:ip key:value from the current request
new_fqdn_ip = {}
# define the action to perform into the gandi_api function
action = ''
try:
fqdn, new_ip, fqdn_match = fetch_parameters()
except TypeError:
response.status = 400
return
# create new dictionary with the info we got from the webserver
new_fqdn_ip[fqdn] = new_ip
# check if we need to fetch the ip from gandi
try:
if new_fqdn_ip[fqdn] != gandi_fqdn_ip[fqdn]:
log.debug('Received IP differs from the one saved on Gandi, will update it')
action = 'update'
gandi_fqdn_ip = gandi_api(new_fqdn_ip, gandi_fqdn_ip, fqdn, fqdn_match, action)
return
except KeyError:
log.debug('Do not know the current Gandi IP for fqdn %s, will fetch it' % fqdn)
try:
action = 'fetch'
gandi_fqdn_ip = gandi_api(new_fqdn_ip, gandi_fqdn_ip, fqdn, fqdn_match, action)
if new_fqdn_ip[fqdn] != gandi_fqdn_ip[fqdn]:
action = 'update'
gandi_fqdn_ip = gandi_api(new_fqdn_ip, gandi_fqdn_ip, fqdn, fqdn_match, action)
return
except ValueError:
response.status = 404
return
log.debug('Nothing to do, received IP is the same as the one configured on Gandi for %s' % fqdn)
return
def fetch_parameters():
'''Fetch parameters from the GET request'''
new_ip = ''
method = request.environ.get('REQUEST_METHOD')
# check for missing parameters
if not request.params.ip and not request.params.fqdn:
log.error('Received malformed request, both parameters (fqdn & ip) are missing. Got: \"%s\"' % request.url)
return
elif not request.params.ip:
new_ip = request.environ.get('REMOTE_ADDR')
log.debug('IP parameter is missing, will use client source one: %s' % new_ip)
elif not request.params.fqdn:
log.error('Received malformed request, fqdn parameter is missing. Got: \"%s\"' % request.url)
return
if not new_ip:
new_ip = request.params.ip
fqdn = request.params.fqdn
# check if parameters have correct informations
fqdn_match = re.match(r'^([a-zA-Z0-9][a-zA-Z0-9-]{1,61})\.([a-zA-Z0-9][a-zA-Z0-9-]{1,61}\.[a-zA-Z]{2,}$)', fqdn)
ip_match = re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', new_ip)
priv_ip_match = re.match(r'^(?:10|127|172\.(?:1[6-9]|2[0-9]|3[01])|192\.168)\..*', new_ip)
if not fqdn_match and not ip_match:
log.error('Received invalid values on both parameters. Got fqdn:\"%s\" & IP: %s' % (fqdn, new_ip))
return
elif not ip_match:
log.error('Received invalid ip value. Got %s' % new_ip)
return
elif priv_ip_match:
log.error('Received IP is not a public one. Got %s' % new_ip)
return
elif not fqdn_match:
log.error('Received invalid fqdn value. Got \"%s\"' % fqdn)
return
log.debug('Received %s request: fqdn:\"%s\" & IP: %s' % (method, fqdn, new_ip))
return fqdn, new_ip, fqdn_match
def gandi_api(new_fqdn_ip, gandi_fqdn_ip, fqdn, fqdn_match, action):
'''Function for managing the Gandi API'''
# define some variables about gandi
api = xmlrpclib.ServerProxy('https://rpc.gandi.net/xmlrpc/')
apikey = config['apikey']
hostname = (fqdn_match.group(1))
domain = (fqdn_match.group(2))
# check if the domain is managed by the apikey provided
if not (api.domain.list(apikey, {'~fqdn': domain})):
log.error('Apikey provided does not manage %s domain' % domain)
raise ValueError('Apikey provided does not manage %s domain' % domain)
# check available zones
zones = api.domain.zone.list(apikey)
for zone in zones:
if (zone['name']) == domain:
zone_id = zone['id']
log.debug('Zone id %s found, for domain %s' % (zone_id, domain))
break
else:
log.error('Could not find a zone file called %s; you must have a zone with the same name as the domain you want to manage' % domain)
raise ValueError('Could not find a zone file called %s; you must have a zone with the same name as the domain you want to manage' % domain)
# check if we have to fetch the gandi api
if action == 'fetch':
# check & retrieve informations from recods in zone
records = api.domain.zone.record.list(apikey, zone_id, 0)
for record in records:
if (record['name'] == hostname and record['type'].lower() == 'a'):
# add fqdn/ip to the gandi_fqdn_ip dictionary
gandi_fqdn_ip[fqdn] = record['value']
log.debug('DNS \'A\' record found for subdomain \'%s\' having value %s' % (hostname, gandi_fqdn_ip[fqdn]))
break
else:
log.error('Unable to find a DNS \'A\' record for subdomain \'%s\'' % hostname)
raise ValueError('Unable to find a DNS \'A\' record for subdomain \'%s\'' % hostname)
return gandi_fqdn_ip
# check if we have to update the ip
elif action == 'update':
# create a new zone from the existing one
zone_version = api.domain.zone.version.new(apikey, zone_id)
log.debug('New zone created, new version: %s' % zone_version)
# delete the A record from the new version
api.domain.zone.record.delete(apikey, zone_id, zone_version, {"type": ["A"], "name": [hostname]})
log.debug('Deleted \'A\' record from new zone version %s' % zone_version)
# add the A record we want
new_record = api.domain.zone.record.add(apikey, zone_id, zone_version, {"type": "A", "name": hostname, "value": new_fqdn_ip[fqdn], "ttl": 300})
log.debug('New \'A\' record added as follow: %s' % new_record)
# activate the new zone version
if api.domain.zone.version.set(apikey, zone_id, zone_version):
log.info('New IP %s for fqdn %s updated successfully.' % (new_fqdn_ip[fqdn], fqdn))
else:
log.error('Unable to update IP %s for fqdn %s' % (new_fqdn_ip[fqdn], fqdn))
return
# update gandi_fqdn_ip with the value just saved in the new zone version
gandi_fqdn_ip[fqdn] = new_fqdn_ip[fqdn]
return gandi_fqdn_ip
def init_application():
def get_options():
'''Load options from the command line'''
default_config = "config.json"
parser = OptionParser(usage="usage: %prog [options]")
parser.add_option(
"-c",
"--config",
dest="configfile",
default=default_config,
help='Config file relative or absolute path. Default is %s' % default_config)
(options, args) = parser.parse_args()
if options.configfile is not None:
options.configfile = options.configfile.strip(' ')
return options
def read_config_file(configfile):
'''Loads the config file from disk'''
try:
with open(configfile) as f:
config = validate_config(json.load(f))
return config
# catch if file doesn't exist
except IOError:
print('Config file %s not found' % configfile)
sys.exit(1)
# catch if json file is not formatted correctly
except ValueError:
print('Json file is not formatted properly')
sys.exit(1)
def validate_config(raw_config):
'''Checks the config file.'''
# check if required parameters are present inside the config
if 'port' not in raw_config or 'bind' not in raw_config or 'apikey' not in raw_config or 'logging' not in raw_config:
print('Config file has missing parameters')
sys.exit(1)
else:
return raw_config
def configure_logging(config):
'''Configure logging'''
if config['logging']['log_enable'] == "false":
log.disable(log.CRITICAL)
return
elif config['logging']['log_enable'] == "true":
try:
log.basicConfig(
format='%(asctime)-15s [%(levelname)s] %(message)s',
filename=config['logging']['log_file'],
level=config['logging']['log_level'])
except ValueError:
print('Log level is not set with a correct value, check the README.md for the full list')
sys.exit(1)
except IOError:
print('Unable to create the log file, check if gandi-dyndns has write permissions')
sys.exit(1)
return
else:
print('Bad config file, log_enable is not set to a correct value; (true|false) are the only two options')
sys.exit(1)
options = get_options()
config = read_config_file(options.configfile)
configure_logging(config)
return config
if __name__ == "__main__":
config = init_application()
# init webserver
run(host=config["bind"], port=config["port"], quiet=True)
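# Example requests (illustrative, not part of the original script); host and
# port depend on the values in the config file.
#
#     curl 'http://localhost:8080/ping'
#     curl 'http://localhost:8080/nic_update?fqdn=home.example.com&ip=203.0.113.10'
#
# When the ip parameter is omitted, the client's source address is used, as
# handled by fetch_parameters() above.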
|
mit
|
wooyek/flask-social-blueprint
|
example/sqla/website/settings/base.py
|
5
|
2741
|
# coding=utf-8
# Created 2014 by Janusz Skonieczny
import logging
import os
SRC_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# ============================================================================
# Flask settings
# http://flask.pocoo.org/docs/config/#configuring-from-files
# ============================================================================
SECRET_KEY = '47e585de7f22984d5ee291c2f31412384bfc32d0'
FLASH_MESSAGES = True
# Flask-SQLAlchemy
# http://pythonhosted.org/Flask-SQLAlchemy/config.html
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(SRC_DIR, "db.sqlite")
SQLALCHEMY_ECHO = False # Doubles log statements, investigate
# Flask-Login
# https://flask-login.readthedocs.org/en/latest/#protecting-views
LOGIN_DISABLED = False
# Flask-Security
# http://pythonhosted.org/Flask-Security/configuration.html
SECURITY_PASSWORD_SALT = "abc"
# SECURITY_PASSWORD_HASH = "bcrypt" # requires py-bcrypt
# SECURITY_PASSWORD_HASH = "pbkdf2_sha512"
SECURITY_PASSWORD_HASH = "plaintext"
SECURITY_EMAIL_SENDER = "support@example.com"
SECURITY_CONFIRMABLE = True
SECURITY_REGISTERABLE = True
SECURITY_RECOVERABLE = True
SECURITY_CHANGEABLE = True
SECURITY_CONFIRM_SALT = "570be5f24e690ce5af208244f3e539a93b6e4f05"
SECURITY_REMEMBER_SALT = "de154140385c591ea771dcb3b33f374383e6ea47"
SECURITY_DEFAULT_REMEMBER_ME = True
# Set secret keys for CSRF protection
CSRF_SESSION_KEY = '8a7474974efcf76896aa84eea9cbe016bbc08828'
CSRF_ENABLED = True
# Flask-Babel
# http://pythonhosted.org/Flask-Babel/
BABEL_DEFAULT_LOCALE = "en"
BABEL_DEFAULT_TIMEZONE = "UTC"
# Flask-Mail
# http://pythonhosted.org/Flask-Mail/
SERVER_EMAIL = 'Flask-SocialBlueprint <support@example.com>'
# Flask-SocialBlueprint
# https://github.com/wooyek/flask-social-blueprint
SOCIAL_BLUEPRINT = {
# https://developers.facebook.com/apps/
"flask_social_blueprint.providers.Facebook": {
# App ID
'consumer_key': '197…',
# App Secret
'consumer_secret': 'c956c1…'
},
# https://apps.twitter.com/app/new
"flask_social_blueprint.providers.Twitter": {
# Your access token from API Keys tab
'consumer_key': 'bkp…',
# access token secret
'consumer_secret': 'pHUx…'
},
# https://console.developers.google.com/project
"flask_social_blueprint.providers.Google": {
# Client ID
'consumer_key': '797….apps.googleusercontent.com',
# Client secret
'consumer_secret': 'bDG…'
},
# https://github.com/settings/applications/new
"flask_social_blueprint.providers.Github": {
# Client ID
'consumer_key': '6f6…',
# Client Secret
'consumer_secret': '1a9…'
},
}
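# An illustrative note (not part of the original settings module): these values
# are typically pulled into the Flask app with something like
# app.config.from_object("website.settings.base"); the placeholder keys above
# must be replaced with live provider credentials.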
|
mit
|
KokareIITP/django
|
tests/str/tests.py
|
149
|
1245
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from unittest import skipIf
from django.test import TestCase
from django.utils import six
from .models import Article, InternationalArticle
class SimpleTests(TestCase):
@skipIf(six.PY3, "tests a __str__ method returning unicode under Python 2")
def test_basic(self):
a = Article.objects.create(
headline=b'Parrot programs in Python',
pub_date=datetime.datetime(2005, 7, 28)
)
self.assertEqual(str(a), str('Parrot programs in Python'))
self.assertEqual(repr(a), str('<Article: Parrot programs in Python>'))
def test_international(self):
a = InternationalArticle.objects.create(
headline='Girl wins €12.500 in lottery',
pub_date=datetime.datetime(2005, 7, 28)
)
if six.PY3:
self.assertEqual(str(a), 'Girl wins €12.500 in lottery')
else:
# On Python 2, the default str() output will be the UTF-8 encoded
# output of __unicode__() -- or __str__() when the
# python_2_unicode_compatible decorator is used.
self.assertEqual(str(a), b'Girl wins \xe2\x82\xac12.500 in lottery')
|
bsd-3-clause
|
digideskio/merchant
|
billing/models/paylane_models.py
|
7
|
1253
|
# -*- coding: utf-8 -*-
# vim:tabstop=4:expandtab:sw=4:softtabstop=4
from django.db import models
class PaylaneTransaction(models.Model):
transaction_date = models.DateTimeField(auto_now_add=True)
amount = models.FloatField()
customer_name = models.CharField(max_length=200)
customer_email = models.CharField(max_length=200)
product = models.CharField(max_length=200)
success = models.BooleanField(default=False)
error_code = models.IntegerField(default=0)
error_description = models.CharField(max_length=300, blank=True)
acquirer_error = models.CharField(max_length=40, blank=True)
acquirer_description = models.CharField(max_length=300, blank=True)
def __unicode__(self):
return u'Transaction for %s (%s)' % (self.customer_name, self.customer_email)
class Meta:
app_label = __name__.split(".")[0]
class PaylaneAuthorization(models.Model):
sale_authorization_id = models.BigIntegerField(db_index=True)
first_authorization = models.BooleanField(default=False)
transaction = models.OneToOneField(PaylaneTransaction)
def __unicode__(self):
return u'Authorization: %s' % (self.sale_authorization_id)
class Meta:
app_label = __name__.split(".")[0]
|
bsd-3-clause
|
st135yle/django-site
|
dbenv/lib/python3.4/site-packages/django/contrib/gis/geos/prototypes/threadsafe.py
|
46
|
2880
|
import threading
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import (
CONTEXT_PTR, error_h, lgeos, notice_h,
)
class GEOSContextHandle(GEOSBase):
"""
Python object representing a GEOS context handle.
"""
ptr_type = CONTEXT_PTR
destructor = lgeos.finishGEOS_r
def __init__(self):
# Initializing the context handler for this thread with
# the notice and error handler.
self.ptr = lgeos.initGEOS_r(notice_h, error_h)
# Defining a thread-local object and creating an instance
# to hold a reference to GEOSContextHandle for this thread.
class GEOSContext(threading.local):
handle = None
thread_context = GEOSContext()
class GEOSFunc(object):
"""
Class that serves as a wrapper for GEOS C Functions, and will
use thread-safe function variants when available.
"""
def __init__(self, func_name):
try:
# GEOS thread-safe function signatures end with '_r', and
# take an additional context handle parameter.
self.cfunc = getattr(lgeos, func_name + '_r')
self.threaded = True
# Create a reference here to thread_context so it's not
# garbage-collected before an attempt to call this object.
self.thread_context = thread_context
except AttributeError:
# Otherwise, use usual function.
self.cfunc = getattr(lgeos, func_name)
self.threaded = False
def __call__(self, *args):
if self.threaded:
# If a context handle does not exist for this thread, initialize one.
if not self.thread_context.handle:
self.thread_context.handle = GEOSContextHandle()
# Call the threaded GEOS routine with pointer of the context handle
# as the first argument.
return self.cfunc(self.thread_context.handle.ptr, *args)
else:
return self.cfunc(*args)
def __str__(self):
return self.cfunc.__name__
# argtypes property
def _get_argtypes(self):
return self.cfunc.argtypes
def _set_argtypes(self, argtypes):
if self.threaded:
new_argtypes = [CONTEXT_PTR]
new_argtypes.extend(argtypes)
self.cfunc.argtypes = new_argtypes
else:
self.cfunc.argtypes = argtypes
argtypes = property(_get_argtypes, _set_argtypes)
# restype property
def _get_restype(self):
return self.cfunc.restype
def _set_restype(self, restype):
self.cfunc.restype = restype
restype = property(_get_restype, _set_restype)
# errcheck property
def _get_errcheck(self):
return self.cfunc.errcheck
def _set_errcheck(self, errcheck):
self.cfunc.errcheck = errcheck
errcheck = property(_get_errcheck, _set_errcheck)
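# An illustrative sketch (not part of this module): the GEOS prototype modules
# build their ctypes bindings on top of GEOSFunc, roughly:
#
#     geos_area = GEOSFunc('GEOSArea')
#     geos_area.argtypes = [GEOM_PTR, POINTER(c_double)]
#     geos_area.restype = c_int
#
# GEOM_PTR, POINTER and c_int here stand in for the ctypes declarations used by
# the real prototypes package.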
|
mit
|
socialsweethearts/django-allauth
|
allauth/socialaccount/providers/foursquare/views.py
|
71
|
1366
|
import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .provider import FoursquareProvider
class FoursquareOAuth2Adapter(OAuth2Adapter):
provider_id = FoursquareProvider.id
access_token_url = 'https://foursquare.com/oauth2/access_token'
# Issue ?? -- this one authenticates over and over again...
# authorize_url = 'https://foursquare.com/oauth2/authorize'
authorize_url = 'https://foursquare.com/oauth2/authenticate'
profile_url = 'https://api.foursquare.com/v2/users/self'
def complete_login(self, request, app, token, **kwargs):
# Foursquare needs a version number for their API requests as documented here https://developer.foursquare.com/overview/versioning
resp = requests.get(self.profile_url,
params={'oauth_token': token.token, 'v': '20140116'})
extra_data = resp.json()['response']['user']
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth2_login = OAuth2LoginView.adapter_view(FoursquareOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(FoursquareOAuth2Adapter)
|
mit
|
frouty/odoogoeen
|
openerp/addons/base/ir/wizard/__init__.py
|
64
|
1073
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard_menu
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
MattsFleaMarket/python-for-android
|
python3-alpha/python3-src/Lib/test/test_file.py
|
59
|
11895
|
import sys
import os
import unittest
from array import array
from weakref import proxy
import io
import _pyio as pyio
from test.support import TESTFN, run_unittest
from collections import UserList
class AutoFileTests(unittest.TestCase):
# file tests for which a test file is automatically set up
def setUp(self):
self.f = self.open(TESTFN, 'wb')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write(b'teststring')
self.assertEqual(self.f.tell(), p.tell())
self.f.close()
self.f = None
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testAttributes(self):
# verify expected attributes exist
f = self.f
f.name # merely shouldn't blow up
f.mode # ditto
f.closed # ditto
def testReadinto(self):
# verify readinto
self.f.write(b'12')
self.f.close()
a = array('b', b'x'*10)
self.f = self.open(TESTFN, 'rb')
n = self.f.readinto(a)
self.assertEqual(b'12', a.tobytes()[:n])
def testReadinto_text(self):
# verify readinto refuses text files
a = array('b', b'x'*10)
self.f.close()
self.f = self.open(TESTFN, 'r')
if hasattr(self.f, "readinto"):
self.assertRaises(TypeError, self.f.readinto, a)
def testWritelinesUserList(self):
# verify writelines with instance sequence
l = UserList([b'1', b'2'])
self.f.writelines(l)
self.f.close()
self.f = self.open(TESTFN, 'rb')
buf = self.f.read()
self.assertEqual(buf, b'12')
def testWritelinesIntegers(self):
# verify writelines with integers
self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
def testWritelinesIntegersUserList(self):
# verify writelines with integers in UserList
l = UserList([1,2,3])
self.assertRaises(TypeError, self.f.writelines, l)
def testWritelinesNonString(self):
# verify writelines with non-string object
class NonString:
pass
self.assertRaises(TypeError, self.f.writelines,
[NonString(), NonString()])
def testErrors(self):
f = self.f
self.assertEqual(f.name, TESTFN)
self.assertTrue(not f.isatty())
self.assertTrue(not f.closed)
if hasattr(f, "readinto"):
self.assertRaises((IOError, TypeError), f.readinto, "")
f.close()
self.assertTrue(f.closed)
def testMethods(self):
methods = [('fileno', ()),
('flush', ()),
('isatty', ()),
('__next__', ()),
('read', ()),
('write', (b"",)),
('readline', ()),
('readlines', ()),
('seek', (0,)),
('tell', ()),
('write', (b"",)),
('writelines', ([],)),
('__iter__', ()),
]
methods.append(('truncate', ()))
# __exit__ should close the file
self.f.__exit__(None, None, None)
self.assertTrue(self.f.closed)
for methodname, args in methods:
method = getattr(self.f, methodname)
# should raise on closed file
self.assertRaises(ValueError, method, *args)
# file is closed, __exit__ shouldn't do anything
self.assertEqual(self.f.__exit__(None, None, None), None)
# it must also return None if an exception was given
try:
1/0
except:
self.assertEqual(self.f.__exit__(*sys.exc_info()), None)
def testReadWhenWriting(self):
self.assertRaises(IOError, self.f.read)
class CAutoFileTests(AutoFileTests):
open = io.open
class PyAutoFileTests(AutoFileTests):
open = staticmethod(pyio.open)
class OtherFileTests(unittest.TestCase):
def testModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+"):
try:
f = self.open(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
def testStdin(self):
# This causes the interpreter to exit on OSF1 v5.1.
if sys.platform != 'osf1V5':
self.assertRaises((IOError, ValueError), sys.stdin.seek, -1)
else:
print((
' Skipping sys.stdin.seek(-1), it may crash the interpreter.'
' Test manually.'), file=sys.__stdout__)
self.assertRaises((IOError, ValueError), sys.stdin.truncate)
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = self.open(TESTFN, bad_mode)
except ValueError as msg:
if msg.args[0] != 0:
s = str(msg)
if TESTFN in s or bad_mode not in s:
self.fail("bad error message for invalid mode: %s" % s)
# if msg.args[0] == 0, we're probably on Windows where there may be
# no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testSetBufferSize(self):
# make sure that explicitly setting the buffer size doesn't cause
# misbehaviour especially with repeated close() calls
for s in (-1, 0, 1, 512):
try:
f = self.open(TESTFN, 'wb', s)
f.write(str(s).encode("ascii"))
f.close()
f.close()
f = self.open(TESTFN, 'rb', s)
d = int(f.read().decode("ascii"))
f.close()
f.close()
except IOError as msg:
self.fail('error setting buffer size %d: %s' % (s, str(msg)))
self.assertEqual(d, s)
def testTruncateOnWindows(self):
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
os.unlink(TESTFN)
f = self.open(TESTFN, 'wb')
try:
f.write(b'12345678901') # 11 bytes
f.close()
f = self.open(TESTFN,'rb+')
data = f.read(5)
if data != b'12345':
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
finally:
f.close()
os.unlink(TESTFN)
def testIteration(self):
# Test the complex interaction when mixing file-iteration and the
# various read* methods.
dataoffset = 16384
filler = b"ham\n"
assert not dataoffset % len(filler), \
"dataoffset must be multiple of len(filler)"
nchunks = dataoffset // len(filler)
testlines = [
b"spam, spam and eggs\n",
b"eggs, spam, ham and spam\n",
b"saussages, spam, spam and eggs\n",
b"spam, ham, spam and eggs\n",
b"spam, spam, spam, spam, spam, ham, spam\n",
b"wonderful spaaaaaam.\n"
]
methods = [("readline", ()), ("read", ()), ("readlines", ()),
("readinto", (array("b", b" "*100),))]
try:
# Prepare the testfile
bag = self.open(TESTFN, "wb")
bag.write(filler * nchunks)
bag.writelines(testlines)
bag.close()
# Test for appropriate errors mixing read* and iteration
for methodname, args in methods:
f = self.open(TESTFN, 'rb')
if next(f) != filler:
self.fail, "Broken testfile"
meth = getattr(f, methodname)
meth(*args) # This simply shouldn't fail
f.close()
# Test to see if harmless (by accident) mixing of read* and
# iteration still works. This depends on the size of the internal
# iteration buffer (currently 8192,) but we can test it in a
# flexible manner. Each line in the bag o' ham is 4 bytes
# ("h", "a", "m", "\n"), so 4096 lines of that should get us
# exactly on the buffer boundary for any power-of-2 buffersize
# between 4 and 16384 (inclusive).
f = self.open(TESTFN, 'rb')
for i in range(nchunks):
next(f)
testline = testlines.pop(0)
try:
line = f.readline()
except ValueError:
self.fail("readline() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("readline() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
buf = array("b", b"\x00" * len(testline))
try:
f.readinto(buf)
except ValueError:
self.fail("readinto() after next() with supposedly empty "
"iteration-buffer failed anyway")
line = buf.tobytes()
if line != testline:
self.fail("readinto() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
try:
line = f.read(len(testline))
except ValueError:
self.fail("read() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("read() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
try:
lines = f.readlines()
except ValueError:
self.fail("readlines() after next() with supposedly empty "
"iteration-buffer failed anyway")
if lines != testlines:
self.fail("readlines() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
f.close()
# Reading after iteration hit EOF shouldn't hurt either
f = self.open(TESTFN, 'rb')
try:
for line in f:
pass
try:
f.readline()
f.readinto(buf)
f.read()
f.readlines()
except ValueError:
self.fail("read* failed after next() consumed file")
finally:
f.close()
finally:
os.unlink(TESTFN)
class COtherFileTests(OtherFileTests):
open = io.open
class PyOtherFileTests(OtherFileTests):
open = staticmethod(pyio.open)
def test_main():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
try:
run_unittest(CAutoFileTests, PyAutoFileTests,
COtherFileTests, PyOtherFileTests)
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
test_main()
|
apache-2.0
|
florian-dacosta/OpenUpgrade
|
addons/hr_timesheet/wizard/__init__.py
|
381
|
1079
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_sign_in_out
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
gappleto97/Senior-Project
|
common/peers.py
|
1
|
16088
|
from multiprocessing import Queue
import multiprocessing, os, pickle, select, socket, sys, time, rsa, traceback
from common.safeprint import safeprint
from common.bounty import *
global ext_port
global ext_ip
global port
global myPriv
global myPub
global propQueue
ext_port = -1
ext_ip = ""
port = 44565
myPub, myPriv = rsa.newkeys(1024)
propQueue = multiprocessing.Queue()
seedlist = [("127.0.0.1", 44565), ("localhost", 44565),
("10.132.80.128", 44565)]
peerlist = [("24.10.111.111", 44565)]
remove = []
bounties = []
# constants
peers_file = "data" + os.sep + "peerlist.pickle"
key_request = "Key Request".encode('utf-8')
close_signal = "Close Signal".encode("utf-8")
peer_request = "Requesting Peers".encode("utf-8")
bounty_request = "Requesting Bounties".encode("utf-8")
incoming_bounties = "Incoming Bounties".encode("utf-8")
incoming_bounty = "Incoming Bounty".encode("utf-8")
valid_signal = "Bounty was valid".encode("utf-8")
invalid_signal = "Bounty was invalid".encode("utf-8")
end_of_message = "End of message".encode("utf-8")
sig_length = len(max(
close_signal, peer_request, bounty_request, incoming_bounties,
incoming_bounty, valid_signal, invalid_signal, key=len))
def pad(string):
return string + " ".encode('utf-8') * (sig_length - (((len(string) - 1) % sig_length) + 1))
close_signal = pad(close_signal)
peer_request = pad(peer_request)
bounty_request = pad(bounty_request)
incoming_bounties = pad(incoming_bounties)
incoming_bounty = pad(incoming_bounty)
valid_signal = pad(valid_signal)
invalid_signal = pad(invalid_signal)
end_of_message = pad(end_of_message)
signals = [close_signal, peer_request, bounty_request, incoming_bounty, valid_signal, invalid_signal]
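# Illustrative check (uses only the helpers defined above): pad() right-pads each signal with spaces
# so its length is an exact multiple of sig_length, which lets recv() compare fixed-size packets
# against these constants regardless of the original message length.
assert len(pad("ping".encode('utf-8'))) % sig_length == 0 and len(close_signal) % sig_length == 0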
def send(msg, conn, key):
while key is None:
safeprint("Key not found. Requesting key")
conn.send(key_request)
try:
key = pickle.loads(conn.recv(1024))
key = rsa.PublicKey(key[0], key[1])
safeprint("Key received")
except EOFError:
continue
if not isinstance(msg, type("a".encode('utf-8'))):
msg = msg.encode('utf-8')
x = 0
while x < len(msg) - 117:
conn.sendall(rsa.encrypt(msg[x:x+117], key))
x += 117
conn.sendall(rsa.encrypt(msg[x:], key))
conn.sendall(rsa.encrypt(end_of_message, key))
return key
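# Sketch of the chunking above (assumption: 1024-bit keys as generated at module load): PKCS#1 v1.5
# padding uses 11 of the 128 bytes per block, so at most 117 plaintext bytes fit into each
# rsa.encrypt() call; a 300-byte message therefore goes out as blocks of 117, 117 and 66 bytes,
# followed by the encrypted end_of_message marker.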
def recv(conn):
received = "".encode('utf-8')
a = ""
try:
while True:
a = conn.recv(128)
if a == key_request:
safeprint("Key requested. Sending key")
conn.sendall(pickle.dumps((myPriv.n, myPriv.e), 0))
continue
a = rsa.decrypt(a, myPriv)
safeprint("Packet = " + str(a), verbosity=3)
if a == end_of_message:
return received
received += a
except rsa.pkcs1.DecryptionError as error:
safeprint("Decryption error---Content: " + str(a))
return "".encode('utf-8')
def get_lan_ip():
"""Retrieves the LAN ip. Expanded from http://stackoverflow.com/a/28950776"""
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('8.8.8.8', 23))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
def getFromFile():
"""Load peerlist from a file"""
if os.path.exists(peers_file):
try:
peerlist.extend(pickle.load(open(peers_file, "rb")))
trimPeers()
except:
safeprint("Could not load peerlist from file")
def saveToFile():
"""Save peerlist to a file"""
if not os.path.exists(peers_file.split(os.sep)[0]):
os.mkdir(peers_file.split(os.sep)[0])
pickle.dump(peerlist[:], open(peers_file, "wb"), 0)
def getFromSeeds():
"""Make peer requests to each address on the seedlist"""
for seed in seedlist:
safeprint(seed, verbosity=1)
peerlist.extend(requestPeerlist(seed))
time.sleep(1)
def requestPeerlist(address):
"""Request the peerlist of another node. Currently has additional test commands"""
conn = socket.socket()
conn.settimeout(5)
safeprint(address, verbosity=1)
try:
conn.connect(address)
key = send(peer_request, conn, None)
received = recv(conn)
safeprint(pickle.loads(received), verbosity=2)
if recv(conn) == peer_request:
handlePeerRequest(conn, False, key=key, received=pickle.loads(received))
recv(conn)
conn.close()
return pickle.loads(received)
except Exception as error:
safeprint("Failed:" + str(type(error)))
safeprint(error)
remove.extend([address])
return []
def requestBounties(address):
"""Request the bountylist of another node"""
conn = socket.socket()
conn.settimeout(5)
safeprint(address, verbosity=1)
try:
conn.connect(address)
key = send(bounty_request, conn, None)
received = recv(conn)
if recv(conn) == bounty_request:
handleBountyRequest(conn, False, key=key, received=pickle.loads(received))
safeprint(recv(conn))
conn.close()
addBounties(pickle.loads(received))
except Exception as error:
safeprint("Failed:" + str(type(error)))
safeprint(error)
remove.extend([address])
def initializePeerConnections(newPort, newip, newport):
"""Populate the peer list from a previous session, seeds, and from the peer list if its size is less than 12. Then save this new list to a file"""
port = newPort # Does this affect the global variable?
ext_ip = newip # Does this affect the global variable?
ext_port = newport # Does this affect the global variable?
safeprint([ext_ip, ext_port])
getFromFile()
safeprint("peers fetched from file", verbosity=1)
getFromSeeds()
safeprint("peers fetched from seedlist", verbosity=1)
trimPeers()
if len(peerlist) < 12:
safeprint(len(peerlist))
newlist = []
for peer in peerlist:
newlist.extend(requestPeerlist(peer))
peerlist.extend(newlist)
trimPeers()
safeprint("getting bounties from peers and seeds", verbosity=1)
for peer in peerlist[:] + seedlist[:]:
requestBounties(peer)
safeprint("peer network extended", verbosity=1)
saveToFile()
safeprint("peer network saved to file", verbosity=1)
safeprint(peerlist)
safeprint([ext_ip, ext_port])
def trimPeers():
"""Trim the peerlist to a single set, and remove any that were marked as erroneous before"""
temp = list(set(peerlist[:]))
for peer in remove:
try:
del temp[temp.index(peer)]
except:
continue
del remove[:]
del peerlist[:]
peerlist.extend(temp)
def listen(port, outbound, q, v, serv):
"""BLOCKING function which should only be run in a daemon thread. Listens and responds to other nodes"""
if serv:
from server.bounty import verify, addBounty
server = socket.socket()
server.bind(("0.0.0.0", port))
server.listen(10)
server.settimeout(5)
if sys.version_info[0] < 3 and sys.platform == "win32":
server.setblocking(True)
global ext_ip, ext_port
if outbound is True:
safeprint("UPnP mode is disabled")
else:
safeprint("UPnP mode is enabled")
if not portForward(port):
outbound = True
safeprint([outbound, ext_ip, ext_port])
q.put([outbound, ext_ip, ext_port])
while v.value: # is True is implicit
safeprint("listening on " + str(get_lan_ip()) + ":" + str(port), verbosity=3)
if not outbound:
safeprint("forwarded from " + ext_ip + ":" + str(ext_port), verbosity=3)
try:
conn, addr = server.accept()
server.setblocking(True)
conn.setblocking(True)
safeprint("connection accepted")
packet = recv(conn)
safeprint("Received: " + packet.decode(), verbosity=3)
key = None
if packet == peer_request:
key = handlePeerRequest(conn, True, key=key)
elif packet == bounty_request:
key = handleBountyRequest(conn, True, key=key)
elif packet == incoming_bounty:
key = handleIncomingBounty(conn, key=key)
send(close_signal, conn, key)
conn.close()
server.settimeout(5)
safeprint("connection closed")
except Exception as error:
safeprint("Failed: " + str(type(error)))
safeprint(error)
traceback.print_exc()
def handlePeerRequest(conn, exchange, key=None, received=[]):
"""Given a socket, send the proper messages to complete a peer request"""
if ext_port != -1:
unfiltered = peerlist[:] + [((ext_ip, ext_port), myPub.n, myPub.e)]
    else:
        unfiltered = peerlist[:]
filtered = list(set(unfiltered) - set(received))
safeprint("Unfiltered: " + str(unfiltered), verbosity=3)
safeprint("Filtered: " + str(filtered), verbosity=3)
toSend = pickle.dumps(filtered, 0)
safeprint("Sending")
key = send(toSend, conn, key)
if exchange:
send(peer_request, conn, key)
received = recv(conn)
safeprint("Received exchange", verbosity=1)
safeprint(pickle.loads(received), verbosity=3)
peerlist.extend(pickle.loads(received))
trimPeers()
return key
def handleBountyRequest(conn, exchange, key=None, received=[]):
"""Given a socket, send the proper messages to complete a bounty request"""
unfiltered = getBountyList()
filtered = list(set(unfiltered) - set(received))
toSend = pickle.dumps(filtered, 0)
safeprint("Sending")
key = send(toSend, conn, key)
if exchange:
send(bounty_request, conn, key)
received = recv(conn)
safeprint("Received exchange")
try:
safeprint(pickle.loads(received), verbosity=2)
bounties = pickle.loads(received)
valids = addBounties(bounties)
toSend = []
for i in range(len(bounties)):
if valids[i] >= 0: # If the bounty is valid and not a duplicate, add it to propagation list
toSend.append(bounties[i])
propQueue.put((incoming_bounties, toSend))
except Exception as error:
safeprint("Could not add bounties")
safeprint(type(error))
traceback.print_exc()
# later add function to request without charity bounties
return key
def handleIncomingBounty(conn, key=None):
"""Given a socket, store an incoming bounty & report it valid or invalid"""
received = recv(conn)
safeprint("Adding bounty: " + received.decode())
try:
valid = addBounty(received)
if valid >= -1: # If valid, even if a duplicate, send valid signal
safeprint("Sending valid signal")
send(valid_signal, conn, key)
if valid >= 0: # If valid and not already received, propagate
propQueue.put((incoming_bounty, received))
else:
send(invalid_signal, conn, key)
except Exception as error:
send(invalid_signal, conn, key)
safeprint("Incoming failed: " + str(type(error)))
safeprint(error)
traceback.print_exc()
return key
def propagate(tup):
try:
conn = socket.socket()
address = tup[1]
conn.connect(address)
key = send(incoming_bounty, conn, None)
send(pickle.dumps(tup[0], 0), conn, key)
recv(conn)
conn.close()
except socket.error as Error:
safeprint("Connection to " + str(address) + " failed; cannot propagate")
def portForward(port):
"""Attempt to forward a port on your router to the specified local port. Prints lots of debug info."""
try:
import miniupnpc
u = miniupnpc.UPnP(None, None, 200, port)
# Begin Debug info
safeprint('inital(default) values :')
safeprint(' discoverdelay' + str(u.discoverdelay))
safeprint(' lanaddr' + str(u.lanaddr))
safeprint(' multicastif' + str(u.multicastif))
safeprint(' minissdpdsocket' + str(u.minissdpdsocket))
safeprint('Discovering... delay=%ums' % u.discoverdelay)
safeprint(str(u.discover()) + 'device(s) detected')
# End Debug info
u.selectigd()
global ext_ip
ext_ip = u.externalipaddress()
safeprint("external ip is: " + str(ext_ip))
for i in range(0, 20):
try:
safeprint("Port forward try: " + str(i), verbosity=1)
if u.addportmapping(port+i, 'TCP', get_lan_ip(), port, 'Bounty Net', ''):
global ext_port
ext_port = port + i
safeprint("External port is " + str(ext_port))
return True
except Exception as error:
safeprint("Failed: " + str(type(error)))
safeprint(error)
except Exception as error:
safeprint("Failed: " + str(type(error)))
safeprint(error)
return False
def listenp(port, v):
"""BLOCKING function which should only be run in a daemon thread. Listens and responds to other nodes"""
import time
while v.value: # is True is implicit
safeprint("listenp-ing", verbosity=3)
try:
while propQueue.empty() and v.value:
time.sleep(0.01)
packet = propQueue.get()
safeprint("Received: " + str(packet), verbosity=3)
if packet[0] == incoming_bounty:
bounty = pickle.loads(packet[1])
if bounty.isValid():
from multiprocessing.pool import ThreadPool
ThreadPool().map(propagate, [(bounty, x) for x in peerlist[:]])
elif packet[0] == incoming_bounties:
for bounty in packet[1]:
if bounty.isValid():
from multiprocessing.pool import ThreadPool
ThreadPool().map(propagate, [(bounty, x) for x in peerlist[:]])
safeprint("Packet processed")
except Exception as error:
safeprint("Failed: " + str(type(error)))
safeprint(error)
def sync(items):
if items.get('config'):
from common import settings
settings.config = items.get('config')
if items.get('peerList'):
global peerlist
        peerlist = items.get('peerList')
if items.get('bountyList'):
from common import bounty
bounty.bountyList = items.get('bountyList')
if items.get('bountyLock'):
from common import bounty
bounty.bountyLock = items.get('bountyLock')
if items.get('propQueue'):
global propQueue
propQueue = items.get('propQueue')
class listener(multiprocessing.Process): # pragma: no cover
"""A class to deal with the listener method"""
def __init__(self, port, outbound, q, v, serv):
multiprocessing.Process.__init__(self)
self.outbound = outbound
self.port = port
self.q = q
self.v = v
self.serv = serv
def run(self):
safeprint("listener started")
sync(self.items)
listen(self.port, self.outbound, self.q, self.v, self.serv)
safeprint("listener stopped")
class propagator(multiprocessing.Process): # pragma: no cover
"""A class to deal with the listener method"""
def __init__(self, port, v):
multiprocessing.Process.__init__(self)
self.port = port
self.v = v
def run(self):
safeprint("propagator started")
sync(self.items)
listenp(self.port, self.v)
safeprint("propagator stopped")
|
mit
|
remb0/CouchPotatoServer
|
libs/guessit/transfo/post_process.py
|
4
|
2521
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from guessit.patterns import subtitle_exts
import logging
log = logging.getLogger(__name__)
def process(mtree):
# 1- try to promote language to subtitle language where it makes sense
for node in mtree.nodes():
if 'language' not in node.guess:
continue
def promote_subtitle():
# pylint: disable=W0631
node.guess.set('subtitleLanguage', node.guess['language'],
confidence=node.guess.confidence('language'))
del node.guess['language']
# - if we matched a language in a file with a sub extension and that
# the group is the last group of the filename, it is probably the
# language of the subtitle
# (eg: 'xxx.english.srt')
if (mtree.node_at((-1,)).value.lower() in subtitle_exts and
node == mtree.leaves()[-2]):
promote_subtitle()
# - if a language is in an explicit group just preceded by "st",
# it is a subtitle language (eg: '...st[fr-eng]...')
try:
idx = node.node_idx
previous = mtree.node_at((idx[0], idx[1] - 1)).leaves()[-1]
if previous.value.lower()[-2:] == 'st':
promote_subtitle()
except IndexError:
pass
# 2- ", the" at the end of a series title should be prepended to it
for node in mtree.nodes():
if 'series' not in node.guess:
continue
series = node.guess['series']
lseries = series.lower()
if lseries[-4:] == ',the':
node.guess['series'] = 'The ' + series[:-4]
if lseries[-5:] == ', the':
node.guess['series'] = 'The ' + series[:-5]
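# Illustrative outcome of step 2 (hypothetical title): a guessed series of "Simpsons, The" becomes
# "The Simpsons"; the variant without a space, "Simpsons,The", is handled by the first branch above.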
|
gpl-3.0
|
crazy-canux/django
|
tests/gis_tests/utils.py
|
327
|
1377
|
from unittest import skip
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
def no_backend(test_func, backend):
"Use this decorator to disable test on specified backend."
if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1] == backend:
@skip("This test is skipped on '%s' backend" % backend)
def inner():
pass
return inner
else:
return test_func
# Decorators to disable entire test functions for specific
# spatial backends.
def no_oracle(func):
return no_backend(func, 'oracle')
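# Example usage (illustrative, hypothetical test name): the decorators above skip a test on one
# backend only, e.g.
#
#   @no_oracle
#   def test_extent(self):
#       ...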
# Shortcut booleans to omit only portions of tests.
_default_db = settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1]
oracle = _default_db == 'oracle'
postgis = _default_db == 'postgis'
mysql = _default_db == 'mysql'
spatialite = _default_db == 'spatialite'
# MySQL spatial indices can't handle NULL geometries.
gisfield_may_be_null = not mysql
if oracle and 'gis' in settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE']:
from django.contrib.gis.db.backends.oracle.models import OracleSpatialRefSys as SpatialRefSys
elif postgis:
from django.contrib.gis.db.backends.postgis.models import PostGISSpatialRefSys as SpatialRefSys
elif spatialite:
from django.contrib.gis.db.backends.spatialite.models import SpatialiteSpatialRefSys as SpatialRefSys
else:
SpatialRefSys = None
|
bsd-3-clause
|
fuhongliang/erpnext
|
erpnext/controllers/status_updater.py
|
6
|
10062
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, comma_or
from frappe import msgprint, _, throw
from frappe.model.document import Document
def validate_status(status, options):
if status not in options:
frappe.throw(_("Status must be one of {0}").format(comma_or(options)))
status_map = {
"Lead": [
["Converted", "has_customer"],
["Opportunity", "has_opportunity"],
],
"Opportunity": [
["Lost", "eval:self.status=='Lost'"],
["Quotation", "has_quotation"],
["Converted", "has_ordered_quotation"]
],
"Quotation": [
["Draft", None],
["Submitted", "eval:self.docstatus==1"],
["Lost", "eval:self.status=='Lost'"],
["Ordered", "has_sales_order"],
["Cancelled", "eval:self.docstatus==2"],
],
"Sales Order": [
["Draft", None],
["Submitted", "eval:self.docstatus==1"],
["Stopped", "eval:self.status=='Stopped'"],
["Cancelled", "eval:self.docstatus==2"],
],
"Delivery Note": [
["Draft", None],
["Submitted", "eval:self.docstatus==1"],
["Cancelled", "eval:self.docstatus==2"],
],
"Purchase Receipt": [
["Draft", None],
["Submitted", "eval:self.docstatus==1"],
["Cancelled", "eval:self.docstatus==2"],
]
}
class StatusUpdater(Document):
"""
Updates the status of the calling records
Delivery Note: Update Delivered Qty, Update Percent and Validate over delivery
Sales Invoice: Update Billed Amt, Update Percent and Validate over billing
Installation Note: Update Installed Qty, Update Percent Qty and Validate over installation
"""
def update_prevdoc_status(self):
self.update_qty()
self.validate_qty()
def set_status(self, update=False):
if self.is_new():
return
if self.doctype in status_map:
_status = self.status
sl = status_map[self.doctype][:]
sl.reverse()
for s in sl:
if not s[1]:
self.status = s[0]
break
elif s[1].startswith("eval:"):
if eval(s[1][5:]):
self.status = s[0]
break
elif getattr(self, s[1])():
self.status = s[0]
break
if self.status != _status and self.status not in ("Submitted", "Cancelled"):
self.add_comment("Label", _(self.status))
if update:
frappe.db.set_value(self.doctype, self.name, "status", self.status)
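    # Illustrative walk-through (hypothetical document): for a Sales Order the reversed status_map is
    # scanned entry by entry, so docstatus == 2 yields "Cancelled", a submitted order whose status is
    # "Stopped" stays "Stopped", any other docstatus == 1 becomes "Submitted", and everything else
    # falls through to "Draft".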
def validate_qty(self):
"""Validates qty at row level"""
self.tolerance = {}
self.global_tolerance = None
for args in self.status_updater:
if "target_ref_field" not in args:
# if target_ref_field is not specified, the programmer does not want to validate qty / amount
continue
# get unique transactions to update
for d in self.get_all_children():
if d.doctype == args['source_dt'] and d.get(args["join_field"]):
args['name'] = d.get(args['join_field'])
# get all qty where qty > target_field
item = frappe.db.sql("""select item_code, `{target_ref_field}`,
`{target_field}`, parenttype, parent from `tab{target_dt}`
where `{target_ref_field}` < `{target_field}`
and name=%s and docstatus=1""".format(**args),
args['name'], as_dict=1)
if item:
item = item[0]
item['idx'] = d.idx
item['target_ref_field'] = args['target_ref_field'].replace('_', ' ')
if not item[args['target_ref_field']]:
msgprint(_("Note: System will not check over-delivery and over-booking for Item {0} as quantity or amount is 0").format(item.item_code))
elif args.get('no_tolerance'):
item['reduce_by'] = item[args['target_field']] - item[args['target_ref_field']]
if item['reduce_by'] > .01:
msgprint(_("Allowance for over-{0} crossed for Item {1}")
.format(args["overflow_type"], item.item_code))
throw(_("{0} must be reduced by {1} or you should increase overflow tolerance")
.format(_(item.target_ref_field.title()), item["reduce_by"]))
else:
self.check_overflow_with_tolerance(item, args)
def check_overflow_with_tolerance(self, item, args):
"""
    Checks if there is an overflow, considering a relaxation tolerance
"""
# check if overflow is within tolerance
tolerance, self.tolerance, self.global_tolerance = get_tolerance_for(item['item_code'],
self.tolerance, self.global_tolerance)
overflow_percent = ((item[args['target_field']] - item[args['target_ref_field']]) /
item[args['target_ref_field']]) * 100
if overflow_percent - tolerance > 0.01:
item['max_allowed'] = flt(item[args['target_ref_field']] * (100+tolerance)/100)
item['reduce_by'] = item[args['target_field']] - item['max_allowed']
msgprint(_("Allowance for over-{0} crossed for Item {1}.")
.format(args["overflow_type"], item["item_code"]))
throw(_("{0} must be reduced by {1} or you should increase overflow tolerance")
.format(_(item["target_ref_field"].title()), item["reduce_by"]))
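    # Worked example (hypothetical numbers): with target_ref_field = 100, target_field = 105 and a 3%
    # tolerance, overflow_percent = (105 - 100) / 100 * 100 = 5 > 3, max_allowed = 100 * 103 / 100 = 103,
    # so the user is asked to reduce the row by 105 - 103 = 2 or raise the tolerance.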
def update_qty(self, change_modified=True):
"""Updates qty or amount at row level
:param change_modified: If true, updates `modified` and `modified_by` for target parent doc
"""
for args in self.status_updater:
# condition to include current record (if submit or no if cancel)
            if self.docstatus == 1:
                args['cond'] = ' or parent="%s"' % self.name.replace('"', '\\"')
            else:
                args['cond'] = ' and parent!="%s"' % self.name.replace('"', '\\"')
args['set_modified'] = ''
if change_modified:
args['set_modified'] = ', modified = now(), modified_by = "{0}"'\
.format(frappe.db.escape(frappe.session.user))
self._update_children(args)
if "percent_join_field" in args:
self._update_percent_field(args)
def _update_children(self, args):
"""Update quantities or amount in child table"""
for d in self.get_all_children():
if d.doctype != args['source_dt']:
continue
# updates qty in the child table
args['detail_id'] = d.get(args['join_field'])
args['second_source_condition'] = ""
if args.get('second_source_dt') and args.get('second_source_field') \
and args.get('second_join_field'):
if not args.get("second_source_extra_cond"):
args["second_source_extra_cond"] = ""
args['second_source_condition'] = """ + ifnull((select sum(%(second_source_field)s)
from `tab%(second_source_dt)s`
where `%(second_join_field)s`="%(detail_id)s"
and (`tab%(second_source_dt)s`.docstatus=1) %(second_source_extra_cond)s), 0) """ % args
if args['detail_id']:
if not args.get("extra_cond"): args["extra_cond"] = ""
frappe.db.sql("""update `tab%(target_dt)s`
set %(target_field)s = (select sum(%(source_field)s)
from `tab%(source_dt)s` where `%(join_field)s`="%(detail_id)s"
and (docstatus=1 %(cond)s) %(extra_cond)s) %(second_source_condition)s
where name='%(detail_id)s'""" % args)
def _update_percent_field(self, args):
"""Update percent field in parent transaction"""
unique_transactions = set([d.get(args['percent_join_field']) for d in self.get_all_children(args['source_dt'])])
for name in unique_transactions:
if not name:
continue
args['name'] = name
# update percent complete in the parent table
if args.get('target_parent_field'):
frappe.db.sql("""update `tab%(target_parent_dt)s`
set %(target_parent_field)s = (select sum(if(%(target_ref_field)s >
ifnull(%(target_field)s, 0), %(target_field)s,
%(target_ref_field)s))/sum(%(target_ref_field)s)*100
from `tab%(target_dt)s` where parent="%(name)s") %(set_modified)s
where name='%(name)s'""" % args)
# update field
if args.get('status_field'):
frappe.db.sql("""update `tab%(target_parent_dt)s`
set %(status_field)s = if(ifnull(%(target_parent_field)s,0)<0.001,
'Not %(keyword)s', if(%(target_parent_field)s>=99.99,
'Fully %(keyword)s', 'Partly %(keyword)s'))
where name='%(name)s'""" % args)
if args.get("set_modified"):
frappe.get_doc(args["target_parent_dt"], name).notify_update()
def update_billing_status_for_zero_amount_refdoc(self, ref_dt):
ref_fieldname = ref_dt.lower().replace(" ", "_")
zero_amount_refdoc = []
all_zero_amount_refdoc = frappe.db.sql_list("""select name from `tab%s`
where docstatus=1 and base_net_total = 0""" % ref_dt)
for item in self.get("items"):
if item.get(ref_fieldname) \
and item.get(ref_fieldname) in all_zero_amount_refdoc \
and item.get(ref_fieldname) not in zero_amount_refdoc:
zero_amount_refdoc.append(item.get(ref_fieldname))
if zero_amount_refdoc:
self.update_biling_status(zero_amount_refdoc, ref_dt, ref_fieldname)
def update_biling_status(self, zero_amount_refdoc, ref_dt, ref_fieldname):
for ref_dn in zero_amount_refdoc:
ref_doc_qty = flt(frappe.db.sql("""select sum(ifnull(qty, 0)) from `tab%s Item`
where parent=%s""" % (ref_dt, '%s'), (ref_dn))[0][0])
billed_qty = flt(frappe.db.sql("""select sum(ifnull(qty, 0))
from `tab%s Item` where %s=%s and docstatus=1""" %
(self.doctype, ref_fieldname, '%s'), (ref_dn))[0][0])
per_billed = ((ref_doc_qty if billed_qty > ref_doc_qty else billed_qty)\
/ ref_doc_qty)*100
frappe.db.set_value(ref_dt, ref_dn, "per_billed", per_billed)
if frappe.get_meta(ref_dt).get_field("billing_status"):
if per_billed < 0.001: billing_status = "Not Billed"
elif per_billed >= 99.99: billing_status = "Fully Billed"
else: billing_status = "Partly Billed"
frappe.db.set_value(ref_dt, ref_dn, "billing_status", billing_status)
def get_tolerance_for(item_code, item_tolerance={}, global_tolerance=None):
"""
    Returns the tolerance for the item; if not set, returns the global tolerance
"""
if item_tolerance.get(item_code):
return item_tolerance[item_code], item_tolerance, global_tolerance
tolerance = flt(frappe.db.get_value('Item',item_code,'tolerance') or 0)
if not tolerance:
if global_tolerance == None:
global_tolerance = flt(frappe.db.get_value('Stock Settings', None, 'tolerance'))
tolerance = global_tolerance
item_tolerance[item_code] = tolerance
return tolerance, item_tolerance, global_tolerance
|
agpl-3.0
|
Curso-OpenShift/Formulario
|
OverFlow/ProjectFormulario/env/lib/python2.7/site-packages/django/db/backends/postgresql/client.py
|
47
|
2120
|
import os
import subprocess
from django.core.files.temp import NamedTemporaryFile
from django.db.backends.base.client import BaseDatabaseClient
from django.utils.six import print_
def _escape_pgpass(txt):
"""
Escape a fragment of a PostgreSQL .pgpass file.
"""
return txt.replace('\\', '\\\\').replace(':', '\\:')
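# For example (illustrative): the literal password  my:pa\ss  must be written to .pgpass as
# my\:pa\\ss, i.e. _escape_pgpass('my:pa\\ss') == 'my\\:pa\\\\ss', so that libpq does not read the
# colon or the backslash as field separators.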
class DatabaseClient(BaseDatabaseClient):
executable_name = 'psql'
@classmethod
def runshell_db(cls, conn_params):
args = [cls.executable_name]
host = conn_params.get('host', '')
port = conn_params.get('port', '')
dbname = conn_params.get('database', '')
user = conn_params.get('user', '')
passwd = conn_params.get('password', '')
if user:
args += ['-U', user]
if host:
args += ['-h', host]
if port:
args += ['-p', str(port)]
args += [dbname]
temp_pgpass = None
try:
if passwd:
# Create temporary .pgpass file.
temp_pgpass = NamedTemporaryFile(mode='w+')
try:
print_(
_escape_pgpass(host) or '*',
str(port) or '*',
_escape_pgpass(dbname) or '*',
_escape_pgpass(user) or '*',
_escape_pgpass(passwd),
file=temp_pgpass,
sep=':',
flush=True,
)
os.environ['PGPASSFILE'] = temp_pgpass.name
except UnicodeEncodeError:
# If the current locale can't encode the data, we let
# the user input the password manually.
pass
subprocess.call(args)
finally:
if temp_pgpass:
temp_pgpass.close()
if 'PGPASSFILE' in os.environ: # unit tests need cleanup
del os.environ['PGPASSFILE']
def runshell(self):
DatabaseClient.runshell_db(self.connection.get_connection_params())
|
gpl-3.0
|
yanchen036/tensorflow
|
tensorflow/contrib/remote_fused_graph/pylib/python/ops/remote_fused_graph_ops_test.py
|
96
|
2585
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.remote_fused_graph_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# pylint: disable=unused-import,wildcard-import, line-too-long
from tensorflow.contrib.remote_fused_graph.pylib.python.ops import remote_fused_graph_ops
# pylint: enable=unused-import,wildcard-import,line-too-long
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class RemoteFusedGraphExecuteTest(test_util.TensorFlowTestCase):
"""Tests for RemoteFusedGraphExecute op."""
def testBuild(self):
graph = graph_pb2.GraphDef()
node = graph.node.add()
node.name = "a"
node.op = "op0"
node = graph.node.add()
node.name = "b"
node.op = "op1"
inputs = [ops.convert_n_to_tensor([1], dtypes.int64)]
output_types = [np.int64, np.int64]
graph_input_node_names = ["a"]
graph_output_node_names = ["a", "b"]
executor_name = ""
serialized_executor_parameters = b""
default_graph_input_tensor_type_shapes = [[dtypes.int64, [1]]]
default_graph_output_tensor_type_shapes = [[dtypes.int64, [1]],
[dtypes.int64, [1]]]
output_nodes = remote_fused_graph_ops.remote_fused_graph_execute(
inputs, output_types, graph, graph_input_node_names,
graph_output_node_names, executor_name, serialized_executor_parameters,
default_graph_input_tensor_type_shapes,
default_graph_output_tensor_type_shapes)
self.assertEqual(2, len(output_nodes))
for output_node in output_nodes:
with self.test_session(use_gpu=False):
output_node.eval()
if __name__ == "__main__":
googletest.main()
|
apache-2.0
|
arangodb/arangodb
|
3rdParty/V8/v7.9.317/test/wasm-spec-tests/testcfg.py
|
1
|
1810
|
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from testrunner.local import testsuite
from testrunner.objects import testcase
proposal_flags = [{
'name': 'reference-types',
'flags': ['--experimental-wasm-anyref',
'--no-experimental-wasm-bulk-memory']
},
{
'name': 'bulk-memory-operations',
'flags': ['--experimental-wasm-bulk-memory']
},
{
'name': 'js-types',
'flags': ['--experimental-wasm-type-reflection',
'--no-experimental-wasm-bulk-memory']
},
{
'name': 'JS-BigInt-integration',
'flags': ['--experimental-wasm-bigint']
},
]
class TestLoader(testsuite.JSTestLoader):
pass
class TestSuite(testsuite.TestSuite):
def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(*args, **kwargs)
self.test_root = os.path.join(self.root, "tests")
self._test_loader.test_root = self.test_root
def _test_loader_class(self):
return TestLoader
def _test_class(self):
return TestCase
class TestCase(testcase.D8TestCase):
def _get_files_params(self):
return [os.path.join(self.suite.test_root, self.path + self._get_suffix())]
def _get_source_flags(self):
for proposal in proposal_flags:
if os.sep.join(['proposals', proposal['name']]) in self.path:
return proposal['flags']
return []
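  # Illustrative (hypothetical path): a test whose path contains "proposals/js-types" runs with that
  # proposal's flags (--experimental-wasm-type-reflection, bulk memory disabled); tests outside the
  # proposals tree get no extra flags.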
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
|
apache-2.0
|
hayderimran7/zulip
|
bots/jabber_mirror.py
|
42
|
2049
|
#!/usr/bin/env python
# Copyright (C) 2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import subprocess
import os
import traceback
import signal
from zulip import RandomExponentialBackoff
def die(signal, frame):
# We actually want to exit, so run os._exit (so as not to be caught and restarted)
os._exit(1)
signal.signal(signal.SIGINT, die)
args = [os.path.join(os.path.dirname(sys.argv[0]), "jabber_mirror_backend.py")]
args.extend(sys.argv[1:])
backoff = RandomExponentialBackoff(timeout_success_equivalent=300)
while backoff.keep_going():
print "Starting Jabber mirroring bot"
try:
ret = subprocess.call(args)
except:
traceback.print_exc()
else:
if ret == 2:
# Don't try again on initial configuration errors
sys.exit(ret)
backoff.fail()
print ""
print ""
print "ERROR: The Jabber mirroring bot is unable to continue mirroring Jabber."
print "Please contact support@zulip.com if you need assistence."
print ""
sys.exit(1)
|
apache-2.0
|
pascalmouret/treeio-achievements
|
achievements/views.py
|
1
|
13734
|
"""
Here are the functions which actually prepare the data and render the pages.
Most of the functions here are very similar since tree.io is, more or less, following
the CRUD (Create, Retrieve, Update, Delete) pattern.
The only special things are the MassForms, which are quite common in tree.io; I only
adapted the code to fit my purposes.
Also: The forms.py file is in many ways more important since all forms are defined there.
"""
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from treeio.core.models import User
from treeio.core.rendering import render_to_response
from treeio.core.decorators import treeio_login_required, handle_response_format
from achievements.forms import MassActionUserForm, MassActionUserAchievementsForm, MassActionAchievementsForm, \
PrototypeForm, AchievementForm
from achievements.models import Prototype, Achievement
def _get_default_context(request, type):
"""
This function generates a context with a prepared massform.
Arguments:
request -- a Django Request object
type -- the type of MassForm you want
"""
context = {}
massform = type(request.user.get_profile())
context.update({'massform': massform})
return context
def _process_mass_form(f):
"""
    This decorator checks whether a mass-form type was received, which one it is, and reacts accordingly (read: saves).
    By excluding this, the views themselves get a bit less crowded. And it is the way it is in every other module
as well.
Arguments:
f -- the function that is decorated
"""
def wrap(request, *args, **kwargs):
"""
        Checks first which MassForm we are dealing with, then checks if the user has the necessary permission.
If that all checks out, execute the save() action.
Arguments:
request -- the Django-request
*args -- catch args to pass them on afterwards
**kwargs -- catch kwargs to pass them on afterwards
"""
user = request.user.get_profile()
# check for massform and check permission
if 'massform' in request.POST and request.user.get_profile().is_admin(module_name='achievements'):
for key in request.POST:
if 'mass-user' in key:
try:
user = User.objects.get(pk=request.POST[key])
form = MassActionUserForm(request.user.get_profile(), request.POST, instance=user)
if form.is_valid():
form.save()
except Exception:
pass
if 'mass-achievement' in key:
try:
prototype = Prototype.objects.get(pk=request.POST[key])
form = MassActionAchievementsForm(request.user.get_profile(), request.POST, instance=prototype)
if form.is_valid():
form.save()
except Exception:
pass
if 'mass-userachievement' in key:
try:
achievement = Achievement.objects.get(pk=request.POST[key])
form = MassActionUserAchievementsForm(request.user.get_profile(),
request.POST, instance=achievement)
if form.is_valid():
form.save()
except Exception:
pass
return f(request, *args, **kwargs)
wrap.__doc__ = f.__doc__
wrap.__name__ = f.__name__
return wrap
@handle_response_format
@treeio_login_required
@_process_mass_form
def index(request, response_format='html'):
"""
This view displays a list of user, with their achievements (icons). Has a MassForm.
Arguments:
request -- a Django Request object
response_format -- defines which format the response should be
"""
users = User.objects.all()
context = _get_default_context(request, MassActionUserForm)
context.update({'users': users})
return render_to_response('achievements/index', context, context_instance=RequestContext(request),
response_format=response_format)
@handle_response_format
@treeio_login_required
@_process_mass_form
def user(request, user_id, response_format='html'):
"""
This just displays one user and his achievements. Has a MassForm.
Arguments:
request -- a Django Request object
user_id -- the id of the requested User object
response_format -- defines which format the response should be
"""
user = User.objects.get(pk=user_id)
achievements = Achievement.objects.filter(user=user)
context = _get_default_context(request, MassActionUserAchievementsForm)
context.update({'u': user, 'achievements': achievements})
return render_to_response('achievements/user', context, context_instance=RequestContext(request),
response_format=response_format)
@handle_response_format
@treeio_login_required
@_process_mass_form
def prototypes(request, response_format='html'):
"""
Gives an overview over all available Achievements, with the description. Has a MassForm.
Arguments:
request -- a Django Request object
response_format -- defines which format the response should be
"""
prototypes = Prototype.objects.filter(trash=False)
context = _get_default_context(request, MassActionAchievementsForm)
context.update({'protos': prototypes})
return render_to_response('achievements/prototypes', context, context_instance=RequestContext(request),
response_format=response_format)
@handle_response_format
@treeio_login_required
def prototype_add(request, response_format='html'):
"""
This delivers a view to create a new Prototype.
Arguments:
request -- a Django Request object
response_format -- defines which format the response should be
"""
if request.POST:
if not 'cancel' in request.POST:
form = PrototypeForm(request.user.get_profile(), request.POST, files=request.FILES)
if form.is_valid():
prototype = form.save() # TODO: saver
return HttpResponseRedirect(reverse('achievements_prototype_detail', args=[prototype.id]))
else:
return HttpResponseRedirect(reverse('achievements_prototypes'))
else:
form = PrototypeForm(request.user)
return render_to_response('achievements/prototype_form', {'form': form},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def prototype_edit(request, prototype_id, response_format='html'):
"""
Opens a form to edit a Prototype.
Arguments:
request -- a Django Request object
prototype_id -- the id of the requested Prototype object
response_format -- defines which format the response should be
"""
prototype = get_object_or_404(Prototype, pk=prototype_id)
if not request.user.get_profile().has_permission(prototype, mode='w'):
return HttpResponseRedirect(reverse('achievements_prototype_detail', args=[prototype.id]))
if request.POST:
if not 'cancel' in request.POST:
form = PrototypeForm(request.user.get_profile(), request.POST, files=request.FILES, instance=prototype)
if form.is_valid():
prototype = form.save()
return HttpResponseRedirect(reverse('achievements_prototype_detail', args=[prototype.id]))
else:
return HttpResponseRedirect(reverse('achievements_prototypes'))
else:
form = PrototypeForm(request.user, instance=prototype)
return render_to_response('achievements/prototype_form', {'form': form},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def prototype_detail(request, prototype_id, response_format='html'):
"""
Opens a simple overview for one Prototype.
Arguments:
request -- a Django Request object
prototype_id -- the id of the requested Prototype object
response_format -- defines which format the response should be
"""
prototype = get_object_or_404(Prototype, pk=prototype_id)
return render_to_response('achievements/prototype_detail', {'prototype': prototype},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def prototype_delete(request, prototype_id, response_format='html'):
"""
Simply deletes a Prototype and redirects to the list. If the permissions are alright, of course.
Arguments:
request -- a Django Request object
prototype_id -- the id of the requested Prototype object
response_format -- defines which format the response should be
"""
prototype = get_object_or_404(Prototype, pk=prototype_id)
if request.user.get_profile().has_permission(Prototype, mode='w'):
prototype.delete()
else:
return HttpResponseRedirect(reverse('achievements_prototype_detail', args=[prototype.id]))
return HttpResponseRedirect(reverse('achievements_prototypes'))
@handle_response_format
@treeio_login_required
def achievement_add(request, response_format='html'):
"""
Opens an empty form for a new Achievement.
Arguments:
request -- a Django Request object
response_format -- defines which format the response should be
"""
if request.POST:
if not 'cancel' in request.POST:
form = AchievementForm(request.user.get_profile(), request.POST, files=request.FILES)
if form.is_valid():
achievement = form.save() # TODO: saver
return HttpResponseRedirect(reverse('achievements_achievement_detail', args=[achievement.id]))
else:
return HttpResponseRedirect(reverse('achievements'))
else:
form = AchievementForm(request.user)
return render_to_response('achievements/achievement_form', {'form': form},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def achievement_edit(request, achievement_id, response_format='html'):
"""
Opens a form to edit a specific Achievement.
Arguments:
request -- a Django Request object
achievement_id -- the id of the requested Achievement object
response_format -- defines which format the response should be
"""
achievement = get_object_or_404(Achievement, pk=achievement_id)
if request.POST:
if not 'cancel' in request.POST:
form = AchievementForm(request.user.get_profile(), request.POST, files=request.FILES, instance=achievement)
if form.is_valid():
achievement = form.save() # TODO: saver
return HttpResponseRedirect(reverse('achievements_achievement_detail', args=[achievement.id]))
else:
return HttpResponseRedirect(reverse('achievements'))
else:
form = AchievementForm(request.user, instance=achievement)
return render_to_response('achievements/achievement_form', {'form': form},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def achievement_detail(request, achievement_id, response_format='html'):
"""
Opens a simple overview for one Achievement.
Arguments:
request -- a Django Request object
achievement_id -- the id of the requested Achievement object
response_format -- defines which format the response should be
"""
achievement = get_object_or_404(Achievement, pk=achievement_id)
return render_to_response('achievements/achievement_detail', {'achievement': achievement},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def achievement_delete(request, achievement_id, response_format='html'):
"""
Simply deletes a Achievement and redirects to the list. If the permissions are alright, of course.
Arguments:
request -- a Django Request object
achievement_id -- the id of the requested Achievement object
response_format -- defines which format the response should be
"""
achievement = get_object_or_404(Achievement, pk=achievement_id)
if request.user.get_profile().has_permission(Prototype, mode='w'):
achievement.delete()
else:
return HttpResponseRedirect(reverse('achievements_achievement_detail', args=[achievement.id]))
return HttpResponseRedirect(reverse('achievements'))
@handle_response_format
@treeio_login_required
def widget_achievement_stream(request, response_format='html'):
"""
Gets the last three Achievements and gives them to the widget template. This will be rendered as the Widget.
Arguments:
request -- a Django Request object
response_format -- defines which format the response should be
"""
achievements = Achievement.objects.all()[:3]
return render_to_response('achievements/widgets/newest', {'achievements': achievements},
context_instance=RequestContext(request), response_format=response_format)
|
bsd-2-clause
|
Aristocles/CouchPotatoServer
|
libs/guessit/transfo/guess_weak_episodes_rexps.py
|
94
|
2184
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import Guess
from guessit.transfo import SingleNodeGuesser
from guessit.patterns import weak_episode_rexps
import re
import logging
log = logging.getLogger(__name__)
def guess_weak_episodes_rexps(string, node):
if 'episodeNumber' in node.root.info:
return None, None
for rexp, span_adjust in weak_episode_rexps:
match = re.search(rexp, string, re.IGNORECASE)
if match:
metadata = match.groupdict()
span = (match.start() + span_adjust[0],
match.end() + span_adjust[1])
epnum = int(metadata['episodeNumber'])
if epnum > 100:
season, epnum = epnum // 100, epnum % 100
# episodes which have a season > 25 are most likely errors
# (Simpsons is at 23!)
if season > 25:
continue
return Guess({ 'season': season,
'episodeNumber': epnum },
confidence=0.6, raw=string[span[0]:span[1]]), span
else:
return Guess(metadata, confidence=0.3, raw=string[span[0]:span[1]]), span
return None, None
guess_weak_episodes_rexps.use_node = True
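# Illustrative split performed above (hypothetical filename): a bare number such as "723" is read as
# episodeNumber 723 and converted to season 7, episode 23 (723 // 100 and 723 % 100), while a value
# implying a season above 25 (e.g. 2612 -> season 26) is discarded as a probable false positive.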
def process(mtree):
SingleNodeGuesser(guess_weak_episodes_rexps, 0.6, log).process(mtree)
|
gpl-3.0
|
chiefspace/udemy-rest-api
|
udemy_rest_api_section6/env/lib/python3.4/site-packages/jinja2/defaults.py
|
130
|
1323
|
# -*- coding: utf-8 -*-
"""
jinja2.defaults
~~~~~~~~~~~~~~~
Jinja default filters and tags.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2._compat import range_type
from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner
# defaults for the parser / lexer
BLOCK_START_STRING = '{%'
BLOCK_END_STRING = '%}'
VARIABLE_START_STRING = '{{'
VARIABLE_END_STRING = '}}'
COMMENT_START_STRING = '{#'
COMMENT_END_STRING = '#}'
LINE_STATEMENT_PREFIX = None
LINE_COMMENT_PREFIX = None
TRIM_BLOCKS = False
LSTRIP_BLOCKS = False
NEWLINE_SEQUENCE = '\n'
KEEP_TRAILING_NEWLINE = False
# default filters, tests and namespace
from jinja2.filters import FILTERS as DEFAULT_FILTERS
from jinja2.tests import TESTS as DEFAULT_TESTS
DEFAULT_NAMESPACE = {
'range': range_type,
'dict': dict,
'lipsum': generate_lorem_ipsum,
'cycler': Cycler,
'joiner': Joiner
}
# default policies
DEFAULT_POLICIES = {
'compiler.ascii_str': True,
'urlize.rel': 'noopener',
'urlize.target': None,
'truncate.leeway': 5,
'json.dumps_function': None,
'json.dumps_kwargs': {'sort_keys': True},
}
# export all constants
__all__ = tuple(x for x in locals().keys() if x.isupper())
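# Note (illustrative, not part of the original module): these values are only fallbacks; a Jinja2
# Environment can override any of them per instance, e.g.
#   from jinja2 import Environment
#   env = Environment(block_start_string='<%', block_end_string='%>')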
|
gpl-2.0
|
sgarrity/bedrock
|
tests/functional/newsletter/test_newsletter_landing.py
|
4
|
2552
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
from selenium.common.exceptions import TimeoutException
from pages.newsletter.developer import DeveloperNewsletterPage
from pages.newsletter.firefox import FirefoxNewsletterPage
from pages.newsletter.mozilla import MozillaNewsletterPage
@pytest.mark.nondestructive
@pytest.mark.parametrize('page_class', [FirefoxNewsletterPage, MozillaNewsletterPage])
def test_default_values(page_class, base_url, selenium):
page = page_class(selenium, base_url).open()
assert '' == page.email
assert 'United States' == page.country
assert 'English' == page.language
assert page.html_format_selected
assert not page.text_format_selected
assert not page.privacy_policy_accepted
assert page.is_privacy_policy_link_displayed
@pytest.mark.nondestructive
def test_default_values_developer_newsletter(base_url, selenium):
page = DeveloperNewsletterPage(selenium, base_url).open()
assert '' == page.email
assert 'United States' == page.country
assert page.html_format_selected
assert not page.text_format_selected
assert not page.privacy_policy_accepted
assert page.is_privacy_policy_link_displayed
@pytest.mark.nondestructive
@pytest.mark.parametrize('page_class', [FirefoxNewsletterPage, MozillaNewsletterPage])
def test_successful_sign_up(page_class, base_url, selenium):
page = page_class(selenium, base_url).open()
page.type_email('success@example.com')
page.select_country('United Kingdom')
page.select_language('Deutsch')
page.select_text_format()
page.accept_privacy_policy()
page.click_sign_me_up()
assert page.sign_up_successful
@pytest.mark.nondestructive
def test_successful_sign_up_developer_newsletter(base_url, selenium):
page = DeveloperNewsletterPage(selenium, base_url).open()
page.type_email('success@example.com')
page.select_country('United Kingdom')
page.select_text_format()
page.accept_privacy_policy()
page.click_sign_me_up()
assert page.sign_up_successful
@pytest.mark.nondestructive
@pytest.mark.parametrize('page_class', [DeveloperNewsletterPage, FirefoxNewsletterPage, MozillaNewsletterPage])
def test_sign_up_fails_when_missing_required_fields(page_class, base_url, selenium):
page = page_class(selenium, base_url).open()
with pytest.raises(TimeoutException):
page.click_sign_me_up()
|
mpl-2.0
|
magnushiie/geopy
|
geopy/geocoders/photon.py
|
11
|
8382
|
"""
:class:`.Photon` geocoder.
"""
from geopy.compat import urlencode, string_compare
from geopy.geocoders.base import (
Geocoder,
DEFAULT_FORMAT_STRING,
DEFAULT_TIMEOUT,
DEFAULT_SCHEME
)
from geopy.location import Location
from geopy.util import logger
__all__ = ("Photon", )
class Photon(Geocoder): # pylint: disable=W0223
"""
Geocoder using Photon geocoding service (data based on OpenStreetMap and
service provided by Komoot on https://photon.komoot.de).
Documentation at https://github.com/komoot/photon
"""
def __init__(
self,
format_string=DEFAULT_FORMAT_STRING,
scheme=DEFAULT_SCHEME,
timeout=DEFAULT_TIMEOUT,
proxies=None,
domain='photon.komoot.de'
): # pylint: disable=R0913
"""
Initialize a Photon/Komoot geocoder which aims to let you "search as
you type with OpenStreetMap". No API Key is needed by this platform.
:param string format_string: String containing '%s' where
the string to geocode should be interpolated before querying
the geocoder. For example: '%s, Mountain View, CA'. The default
is just '%s'.
:param string scheme: Use 'https' or 'http' as the API URL's scheme.
Default is https. Note that SSL connections' certificates are not
verified.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception.
:param dict proxies: If specified, routes this geocoder's requests
through the specified proxy. E.g., {"https": "192.0.2.0"}. For
more information, see documentation on
:class:`urllib2.ProxyHandler`.
:param string domain: Should be the localized Photon domain to
connect to. The default is 'photon.komoot.de', but you
can change it to a domain of your own.
"""
super(Photon, self).__init__(
format_string, scheme, timeout, proxies
)
self.domain = domain.strip('/')
self.api = "%s://%s/api" % (self.scheme, self.domain)
self.reverse_api = "%s://%s/reverse" % (self.scheme, self.domain)
def geocode(
self,
query,
exactly_one=True,
timeout=None,
location_bias=None,
language=False,
osm_tag=None
): # pylint: disable=W0221
"""
Geocode a location query.
:param string query: The address or query you wish to geocode.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param location_bias: The coordinates to use as the location bias.
:type location_bias: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s"
:param string language: Preferred language in which to return results.
:param osm_tag: The expression to filter (include/exclude) by key and/
or value, str as 'key:value' or list/set of str if multiple filters
are required, e.g. ['key:!val', '!key', ':!value']
"""
params = {
'q': self.format_string % query
}
if exactly_one:
params['limit'] = 1
if language:
params['lang'] = language
if location_bias:
try:
lat, lon = [x.strip() for x
in self._coerce_point_to_string(location_bias)
.split(',')]
params['lon'] = lon
params['lat'] = lat
except ValueError:
raise ValueError(("Location bias must be a"
" coordinate pair or Point"))
if osm_tag:
if isinstance(osm_tag, string_compare):
params['osm_tag'] = osm_tag
else:
try:
params['osm_tag'] = '&osm_tag='.join(osm_tag)
except ValueError:
raise ValueError(
"osm_tag must be a string expression or "
"a set/list of string expressions"
)
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout),
exactly_one
)
def reverse(
self,
query,
exactly_one=True,
timeout=None,
language=False,
osm_tag=None
): # pylint: disable=W0221
"""
Returns a reverse geocoded location.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s"
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param string language: Preferred language in which to return results.
:param osm_tag: The expression to filter (include/exclude) by key and/
or value, str as 'key:value' or list/set of str if multiple filters
are required, e.g. ['key:!val', '!key', ':!value']
"""
try:
lat, lon = [x.strip() for x in
self._coerce_point_to_string(query).split(',')]
except ValueError:
raise ValueError("Must be a coordinate pair or Point")
params = {
'lat': lat,
'lon': lon,
}
if exactly_one:
params['limit'] = 1
if language:
params['lang'] = language
if osm_tag:
if isinstance(osm_tag, string_compare):
params['osm_tag'] = osm_tag
else:
try:
params['osm_tag'] = '&osm_tag='.join(osm_tag)
except ValueError:
raise ValueError(("osm_tag must be a string expression or "
"a set/list of string expressions"))
url = "?".join((self.reverse_api, urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
)
@classmethod
def _parse_json(cls, resources, exactly_one=True):
"""
Parse display name, latitude, and longitude from a JSON response.
"""
if not len(resources): # pragma: no cover
return None
if exactly_one:
return cls.parse_resource(resources['features'][0])
else:
return [cls.parse_resource(resource) for resource
in resources['features']]
@classmethod
def parse_resource(cls, resource):
"""
Return location and coordinates tuple from dict.
"""
name_elements = ['name', 'housenumber', 'street',
'postcode', 'street', 'city',
'state', 'country']
name = [resource.get(k) for k
in name_elements if resource.get(k)]
location = ', '.join(name)
latitude = resource['geometry']['coordinates'][1] or None
longitude = resource['geometry']['coordinates'][0] or None
if latitude and longitude:
latitude = float(latitude)
longitude = float(longitude)
return Location(location, (latitude, longitude), resource)
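# Usage sketch (illustrative only; requires network access to the Photon service,
# and the address strings below are arbitrary examples):
#
#   from geopy.geocoders import Photon
#   geolocator = Photon()
#   location = geolocator.geocode("Invalidenstrasse 117, Berlin")
#   if location:
#       print(location.address, location.latitude, location.longitude)
#   place = geolocator.reverse("52.509, 13.376")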
|
mit
|
javierTerry/odoo
|
addons/base_report_designer/plugin/openerp_report_designer/bin/script/ConvertFieldsToBraces.py
|
384
|
2324
|
#########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import unohelper
import string
import re
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from LoginTest import *
database="test"
uid = 3
class ConvertFieldsToBraces( unohelper.Base, XJobExecutor ):
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
LoginTest()
if not loginstatus and __name__=="package":
exit(1)
self.aReportSyntex=[]
self.getFields()
def getFields(self):
desktop=getDesktop()
doc = desktop.getCurrentComponent()
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
oPar.getAnchor().Text.insertString(oPar.getAnchor(),oPar.Items[1],False)
oPar.dispose()
if __name__<>"package":
ConvertFieldsToBraces(None)
else:
g_ImplementationHelper.addImplementation( ConvertFieldsToBraces, "org.openoffice.openerp.report.convertFB", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
emonty/ansible
|
lib/ansible/plugins/action/gather_facts.py
|
14
|
5775
|
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import time
from ansible import constants as C
from ansible.executor.module_common import get_action_args_with_defaults
from ansible.plugins.action import ActionBase
from ansible.utils.vars import combine_vars
class ActionModule(ActionBase):
def _get_module_args(self, fact_module, task_vars):
mod_args = self._task.args.copy()
# deal with 'setup specific arguments'
if fact_module != 'setup':
# network facts modules must support gather_subset
if self._connection._load_name not in ('network_cli', 'httpapi', 'netconf'):
subset = mod_args.pop('gather_subset', None)
if subset not in ('all', ['all']):
self._display.warning('Ignoring subset(%s) for %s' % (subset, fact_module))
timeout = mod_args.pop('gather_timeout', None)
if timeout is not None:
self._display.warning('Ignoring timeout(%s) for %s' % (timeout, fact_module))
fact_filter = mod_args.pop('filter', None)
if fact_filter is not None:
self._display.warning('Ignoring filter(%s) for %s' % (fact_filter, fact_module))
# Strip out keys with ``None`` values, effectively mimicking ``omit`` behavior
# This ensures we don't pass a ``None`` value as an argument expecting a specific type
mod_args = dict((k, v) for k, v in mod_args.items() if v is not None)
# handle module defaults
mod_args = get_action_args_with_defaults(fact_module, mod_args, self._task.module_defaults, self._templar)
return mod_args
def _combine_task_result(self, result, task_result):
filtered_res = {
'ansible_facts': task_result.get('ansible_facts', {}),
'warnings': task_result.get('warnings', []),
'deprecations': task_result.get('deprecations', []),
}
return combine_vars(result, filtered_res)
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = True
result = super(ActionModule, self).run(tmp, task_vars)
result['ansible_facts'] = {}
modules = C.config.get_config_value('FACTS_MODULES', variables=task_vars)
parallel = task_vars.pop('ansible_facts_parallel', self._task.args.pop('parallel', None))
if 'smart' in modules:
connection_map = C.config.get_config_value('CONNECTION_FACTS_MODULES', variables=task_vars)
network_os = self._task.args.get('network_os', task_vars.get('ansible_network_os', task_vars.get('ansible_facts', {}).get('network_os')))
modules.extend([connection_map.get(network_os or self._connection._load_name, 'setup')])
modules.pop(modules.index('smart'))
failed = {}
skipped = {}
if parallel is False or (len(modules) == 1 and parallel is None):
# serially execute each module
for fact_module in modules:
# just one module, no need for fancy async
mod_args = self._get_module_args(fact_module, task_vars)
res = self._execute_module(module_name=fact_module, module_args=mod_args, task_vars=task_vars, wrap_async=False)
if res.get('failed', False):
failed[fact_module] = res
elif res.get('skipped', False):
skipped[fact_module] = res
else:
result = self._combine_task_result(result, res)
self._remove_tmp_path(self._connection._shell.tmpdir)
else:
# do it async
jobs = {}
for fact_module in modules:
mod_args = self._get_module_args(fact_module, task_vars)
self._display.vvvv("Running %s" % fact_module)
jobs[fact_module] = (self._execute_module(module_name=fact_module, module_args=mod_args, task_vars=task_vars, wrap_async=True))
while jobs:
for module in jobs:
poll_args = {'jid': jobs[module]['ansible_job_id'], '_async_dir': os.path.dirname(jobs[module]['results_file'])}
res = self._execute_module(module_name='async_status', module_args=poll_args, task_vars=task_vars, wrap_async=False)
if res.get('finished', 0) == 1:
if res.get('failed', False):
failed[module] = res
elif res.get('skipped', False):
skipped[module] = res
else:
result = self._combine_task_result(result, res)
del jobs[module]
break
else:
time.sleep(0.1)
else:
time.sleep(0.5)
if skipped:
result['msg'] = "The following modules were skipped: %s\n" % (', '.join(skipped.keys()))
result['skipped_modules'] = skipped
if len(skipped) == len(modules):
result['skipped'] = True
if failed:
result['failed'] = True
result['msg'] = "The following modules failed to execute: %s\n" % (', '.join(failed.keys()))
result['failed_modules'] = failed
# tell executor facts were gathered
result['ansible_facts']['_ansible_facts_gathered'] = True
# hack to keep --verbose from showing all the setup module result
result['_ansible_verbose_override'] = True
return result
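# Playbook-side sketch (illustrative, not part of this module): this action backs
# the implicit fact-gathering step; parallel collection can be requested through
# the 'ansible_facts_parallel' variable read above, e.g.
#
#   - hosts: all
#     gather_facts: true
#     vars:
#       ansible_facts_parallel: true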
|
gpl-3.0
|
ruddra/django-oscar
|
oscar/templatetags/history_tags.py
|
6
|
1843
|
import urlparse
from django import template
from django.db.models import get_model
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import resolve, Resolver404
from oscar.apps.customer import history
Site = get_model('sites', 'Site')
register = template.Library()
@register.inclusion_tag('customer/history/recently_viewed_products.html',
takes_context=True)
def recently_viewed_products(context):
"""
Inclusion tag listing the most recently viewed products
"""
request = context['request']
products = history.get(request)
return {'products': products,
'request': request}
@register.assignment_tag(takes_context=True)
def get_back_button(context):
"""
Show a back button, with a custom title for certain URLs (for example
'Back to search results'). No back button is shown if the user came
from another site.
"""
request = context.get('request', None)
if not request:
raise Exception('Cannot get request from context')
referrer = request.META.get('HTTP_REFERER', None)
if not referrer:
return None
try:
url = urlparse.urlparse(referrer)
except:
return None
if request.get_host() != url.netloc:
try:
Site.objects.get(domain=url.netloc)
except Site.DoesNotExist:
# Came from somewhere else, don't show back button:
return None
try:
match = resolve(url.path)
except Resolver404:
return None
# This dict can be extended to link back to other browsing pages
titles = {
'search:search': _('Back to search results'),
}
title = titles.get(match.view_name, None)
if title is None:
return None
return {'url': referrer, 'title': unicode(title), 'match': match}
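# Template usage sketch (illustrative, not part of the original module): with this
# tag library loaded, the tags above are used roughly as
#
#   {% load history_tags %}
#   {% recently_viewed_products %}
#   {% get_back_button as back_button %}
#   {% if back_button %}<a href="{{ back_button.url }}">{{ back_button.title }}</a>{% endif %}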
|
bsd-3-clause
|
jeffbaumes/jeffbaumes-vtk
|
Examples/Medical/Python/Medical3.py
|
13
|
7546
|
#!/usr/bin/env python
# This example reads a volume dataset, extracts two isosurfaces that
# represent the skin and bone, creates three orthogonal planes
# (saggital, axial, coronal), and displays them.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the renderer, the render window, and the interactor. The
# renderer draws into the render window, the interactor enables mouse-
# and keyboard-based interaction with the scene.
aRenderer = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(aRenderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# The following reader is used to read a series of 2D slices (images)
# that compose the volume. The slice dimensions are set, and the
# pixel spacing. The data Endianness must also be specified. The reader
# uses the FilePrefix in combination with the slice number to construct
# filenames using the format FilePrefix.%d. (In this case the FilePrefix
# is the root name of the file: quarter.)
v16 = vtk.vtkVolume16Reader()
v16.SetDataDimensions(64, 64)
v16.SetDataByteOrderToLittleEndian()
v16.SetFilePrefix(VTK_DATA_ROOT + "/Data/headsq/quarter")
v16.SetImageRange(1, 93)
v16.SetDataSpacing(3.2, 3.2, 1.5)
# An isosurface, or contour value of 500 is known to correspond to the
# skin of the patient. Once generated, a vtkPolyDataNormals filter
# is used to create normals for smooth surface shading during rendering.
# The triangle stripper is used to create triangle strips from the
# isosurface; these render much faster on many systems.
skinExtractor = vtk.vtkContourFilter()
skinExtractor.SetInputConnection(v16.GetOutputPort())
skinExtractor.SetValue(0, 500)
skinNormals = vtk.vtkPolyDataNormals()
skinNormals.SetInputConnection(skinExtractor.GetOutputPort())
skinNormals.SetFeatureAngle(60.0)
skinStripper = vtk.vtkStripper()
skinStripper.SetInputConnection(skinNormals.GetOutputPort())
skinMapper = vtk.vtkPolyDataMapper()
skinMapper.SetInputConnection(skinStripper.GetOutputPort())
skinMapper.ScalarVisibilityOff()
skin = vtk.vtkActor()
skin.SetMapper(skinMapper)
skin.GetProperty().SetDiffuseColor(1, .49, .25)
skin.GetProperty().SetSpecular(.3)
skin.GetProperty().SetSpecularPower(20)
# An isosurface, or contour value of 1150 is known to correspond to the
# bone of the patient. Once generated, a vtkPolyDataNormals filter
# is used to create normals for smooth surface shading during rendering.
# The triangle stripper is used to create triangle strips from the
# isosurface; these render much faster on many systems.
boneExtractor = vtk.vtkContourFilter()
boneExtractor.SetInputConnection(v16.GetOutputPort())
boneExtractor.SetValue(0, 1150)
boneNormals = vtk.vtkPolyDataNormals()
boneNormals.SetInputConnection(boneExtractor.GetOutputPort())
boneNormals.SetFeatureAngle(60.0)
boneStripper = vtk.vtkStripper()
boneStripper.SetInputConnection(boneNormals.GetOutputPort())
boneMapper = vtk.vtkPolyDataMapper()
boneMapper.SetInputConnection(boneStripper.GetOutputPort())
boneMapper.ScalarVisibilityOff()
bone = vtk.vtkActor()
bone.SetMapper(boneMapper)
bone.GetProperty().SetDiffuseColor(1, 1, .9412)
# An outline provides context around the data.
outlineData = vtk.vtkOutlineFilter()
outlineData.SetInputConnection(v16.GetOutputPort())
mapOutline = vtk.vtkPolyDataMapper()
mapOutline.SetInputConnection(outlineData.GetOutputPort())
outline = vtk.vtkActor()
outline.SetMapper(mapOutline)
outline.GetProperty().SetColor(0, 0, 0)
# Now we are creating three orthogonal planes passing through the
# volume. Each plane uses a different texture map and therefore has
# different coloration.
# Start by creating a black/white lookup table.
bwLut = vtk.vtkLookupTable()
bwLut.SetTableRange(0, 2000)
bwLut.SetSaturationRange(0, 0)
bwLut.SetHueRange(0, 0)
bwLut.SetValueRange(0, 1)
bwLut.Build()
# Now create a lookup table that consists of the full hue circle (from
# HSV).
hueLut = vtk.vtkLookupTable()
hueLut.SetTableRange(0, 2000)
hueLut.SetHueRange(0, 1)
hueLut.SetSaturationRange(1, 1)
hueLut.SetValueRange(1, 1)
hueLut.Build()
# Finally, create a lookup table with a single hue but having a range
# in the saturation of the hue.
satLut = vtk.vtkLookupTable()
satLut.SetTableRange(0, 2000)
satLut.SetHueRange(.6, .6)
satLut.SetSaturationRange(0, 1)
satLut.SetValueRange(1, 1)
satLut.Build()
# Create the first of the three planes. The filter vtkImageMapToColors
# maps the data through the corresponding lookup table created above.
# The vtkImageActor is a type of vtkProp and conveniently displays an
# image on a single quadrilateral plane. It does this using texture
# mapping and as a result is quite fast. (Note: the input image has to
# be unsigned char values, which the vtkImageMapToColors produces.)
# Note also that by specifying the DisplayExtent, the pipeline
# requests data of this extent and the vtkImageMapToColors only
# processes a slice of data.
saggitalColors = vtk.vtkImageMapToColors()
saggitalColors.SetInputConnection(v16.GetOutputPort())
saggitalColors.SetLookupTable(bwLut)
saggital = vtk.vtkImageActor()
saggital.SetInput(saggitalColors.GetOutput())
saggital.SetDisplayExtent(32, 32, 0, 63, 0, 92)
# Create the second (axial) plane of the three planes. We use the same
# approach as before except that the extent differs.
axialColors = vtk.vtkImageMapToColors()
axialColors.SetInputConnection(v16.GetOutputPort())
axialColors.SetLookupTable(hueLut)
axial = vtk.vtkImageActor()
axial.SetInput(axialColors.GetOutput())
axial.SetDisplayExtent(0, 63, 0, 63, 46, 46)
# Create the third (coronal) plane of the three planes. We use the same
# approach as before except that the extent differs.
coronalColors = vtk.vtkImageMapToColors()
coronalColors.SetInputConnection(v16.GetOutputPort())
coronalColors.SetLookupTable(satLut)
coronal = vtk.vtkImageActor()
coronal.SetInput(coronalColors.GetOutput())
coronal.SetDisplayExtent(0, 63, 32, 32, 0, 92)
# It is convenient to create an initial view of the data. The FocalPoint
# and Position form a vector direction. Later on (ResetCamera() method)
# this vector is used to position the camera to look at the data in
# this direction.
aCamera = vtk.vtkCamera()
aCamera.SetViewUp(0, 0, -1)
aCamera.SetPosition(0, 1, 0)
aCamera.SetFocalPoint(0, 0, 0)
aCamera.ComputeViewPlaneNormal()
# Actors are added to the renderer.
aRenderer.AddActor(outline)
aRenderer.AddActor(saggital)
aRenderer.AddActor(axial)
aRenderer.AddActor(coronal)
#aRenderer.AddActor(axial)
#aRenderer.AddActor(coronal)
aRenderer.AddActor(skin)
aRenderer.AddActor(bone)
# Turn off bone for this example.
bone.VisibilityOff()
# Set skin to semi-transparent.
skin.GetProperty().SetOpacity(0.5)
# An initial camera view is created. The Dolly() method moves
# the camera towards the FocalPoint, thereby enlarging the image.
aRenderer.SetActiveCamera(aCamera)
aRenderer.ResetCamera()
aCamera.Dolly(1.5)
# Set a background color for the renderer and set the size of the
# render window (expressed in pixels).
aRenderer.SetBackground(1, 1, 1)
renWin.SetSize(640, 480)
# Note that when camera movement occurs (as it does in the Dolly()
# method), the clipping planes often need adjusting. Clipping planes
# consist of two planes: near and far along the view direction. The
# near plane clips out objects in front of the plane the far plane
# clips out objects behind the plane. This way only what is drawn
# between the planes is actually rendered.
aRenderer.ResetCameraClippingRange()
# Interact with the data.
iren.Initialize()
renWin.Render()
iren.Start()
|
bsd-3-clause
|
hynek/pyopenssl
|
doc/conf.py
|
4
|
8243
|
# -*- coding: utf-8 -*-
#
# pyOpenSSL documentation build configuration file, created by
# sphinx-quickstart on Sat Jul 16 07:12:22 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import codecs
import os
import re
import sys
HERE = os.path.abspath(os.path.dirname(__file__))
def read_file(*parts):
"""
Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "ascii") as f:
return f.read()
def find_version(*file_paths):
version_file = read_file(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
DOC_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(DOC_DIR, "..")))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyOpenSSL'
authors = u"The pyOpenSSL developers"
copyright = u"2001 " + authors
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = find_version("..", "src", "OpenSSL", "version.py")
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyOpenSSLdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pyOpenSSL.tex', u'pyOpenSSL Documentation',
authors, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyopenssl', u'pyOpenSSL Documentation',
[authors], 1)
]
intersphinx_mapping = {
"https://docs.python.org/3": None,
"https://cryptography.io/en/latest/": None,
}
|
apache-2.0
|
apixandru/intellij-community
|
python/lib/Lib/site-packages/django/utils/tree.py
|
310
|
5778
|
"""
A class for storing a tree graph. Primarily used for filter constructs in the
ORM.
"""
from django.utils.copycompat import deepcopy
class Node(object):
"""
A single internal node in the tree graph. A Node should be viewed as a
connection (the root) with the children being either leaf nodes or other
Node instances.
"""
# Standard connector type. Clients usually won't use this at all and
# subclasses will usually override the value.
default = 'DEFAULT'
def __init__(self, children=None, connector=None, negated=False):
"""
Constructs a new Node. If no connector is given, the default will be
used.
Warning: You probably don't want to pass in the 'negated' parameter. It
is NOT the same as constructing a node and calling negate() on the
result.
"""
self.children = children and children[:] or []
self.connector = connector or self.default
self.subtree_parents = []
self.negated = negated
# We need this because of django.db.models.query_utils.Q. Q. __init__() is
# problematic, but it is a natural Node subclass in all other respects.
def _new_instance(cls, children=None, connector=None, negated=False):
"""
This is called to create a new instance of this class when we need new
Nodes (or subclasses) in the internal code in this class. Normally, it
just shadows __init__(). However, subclasses with an __init__ signature
that is not an extension of Node.__init__ might need to implement this
method to allow a Node to create a new instance of them (if they have
any extra setting up to do).
"""
obj = Node(children, connector, negated)
obj.__class__ = cls
return obj
_new_instance = classmethod(_new_instance)
def __str__(self):
if self.negated:
return '(NOT (%s: %s))' % (self.connector, ', '.join([str(c) for c
in self.children]))
return '(%s: %s)' % (self.connector, ', '.join([str(c) for c in
self.children]))
def __deepcopy__(self, memodict):
"""
Utility method used by copy.deepcopy().
"""
obj = Node(connector=self.connector, negated=self.negated)
obj.__class__ = self.__class__
obj.children = deepcopy(self.children, memodict)
obj.subtree_parents = deepcopy(self.subtree_parents, memodict)
return obj
def __len__(self):
"""
The size of a node is the number of children it has.
"""
return len(self.children)
def __nonzero__(self):
"""
For truth value testing.
"""
return bool(self.children)
def __contains__(self, other):
"""
Returns True if 'other' is a direct child of this instance.
"""
return other in self.children
def add(self, node, conn_type):
"""
Adds a new node to the tree. If the conn_type is the same as the root's
current connector type, the node is added to the first level.
Otherwise, the whole tree is pushed down one level and a new root
connector is created, connecting the existing tree and the new node.
"""
if node in self.children and conn_type == self.connector:
return
if len(self.children) < 2:
self.connector = conn_type
if self.connector == conn_type:
if isinstance(node, Node) and (node.connector == conn_type or
len(node) == 1):
self.children.extend(node.children)
else:
self.children.append(node)
else:
obj = self._new_instance(self.children, self.connector,
self.negated)
self.connector = conn_type
self.children = [obj, node]
def negate(self):
"""
Negate the sense of the root connector. This reorganises the children
so that the current node has a single child: a negated node containing
all the previous children. This slightly odd construction makes adding
new children behave more intuitively.
Interpreting the meaning of this negate is up to client code. This
method is useful for implementing "not" arrangements.
"""
self.children = [self._new_instance(self.children, self.connector,
not self.negated)]
self.connector = self.default
def start_subtree(self, conn_type):
"""
Sets up internal state so that new nodes are added to a subtree of the
current node. The conn_type specifies how the sub-tree is joined to the
existing children.
"""
if len(self.children) == 1:
self.connector = conn_type
elif self.connector != conn_type:
self.children = [self._new_instance(self.children, self.connector,
self.negated)]
self.connector = conn_type
self.negated = False
self.subtree_parents.append(self.__class__(self.children,
self.connector, self.negated))
self.connector = self.default
self.negated = False
self.children = []
def end_subtree(self):
"""
Closes off the most recently unmatched start_subtree() call.
This puts the current state into a node of the parent tree and returns
the current instances state to be the parent.
"""
obj = self.subtree_parents.pop()
node = self.__class__(self.children, self.connector)
self.connector = obj.connector
self.negated = obj.negated
self.children = obj.children
self.children.append(node)
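# Usage sketch (illustrative, not part of Django): building and combining nodes.
#
#   n = Node(['a', 'b'], connector='AND')
#   n.add('c', 'OR')      # pushes the existing AND tree down and ORs 'c' with it
#   str(n)                # -> '(OR: (AND: a, b), c)'
#   n.negate()            # wraps the children in a single negated child node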
|
apache-2.0
|
inspyration/odoo
|
addons/portal/wizard/share_wizard.py
|
158
|
9630
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
UID_ROOT = 1
SHARED_DOCS_MENU = "Documents"
SHARED_DOCS_CHILD_MENU = "Shared Documents"
class share_wizard_portal(osv.TransientModel):
"""Inherited share wizard to automatically create appropriate
menus in the selected portal upon sharing with a portal group."""
_inherit = "share.wizard"
def _user_type_selection(self, cr, uid, context=None):
selection = super(share_wizard_portal, self)._user_type_selection(cr, uid, context=context)
selection.extend([('existing',_('Users you already shared with')),
('groups',_('Existing Groups (e.g Portal Groups)'))])
return selection
_columns = {
'user_ids': fields.many2many('res.users', 'share_wizard_res_user_rel', 'share_id', 'user_id', 'Existing users', domain=[('share', '=', True)]),
'group_ids': fields.many2many('res.groups', 'share_wizard_res_group_rel', 'share_id', 'group_id', 'Existing groups', domain=[('share', '=', False)]),
}
def _check_preconditions(self, cr, uid, wizard_data, context=None):
if wizard_data.user_type == 'existing':
self._assert(wizard_data.user_ids,
_('Please select at least one user to share with'),
context=context)
elif wizard_data.user_type == 'groups':
self._assert(wizard_data.group_ids,
_('Please select at least one group to share with'),
context=context)
return super(share_wizard_portal, self)._check_preconditions(cr, uid, wizard_data, context=context)
def _create_or_get_submenu_named(self, cr, uid, parent_menu_id, menu_name, context=None):
if not parent_menu_id:
return
Menus = self.pool.get('ir.ui.menu')
parent_menu = Menus.browse(cr, uid, parent_menu_id) # No context
menu_id = None
max_seq = 10
for child_menu in parent_menu.child_id:
max_seq = max(max_seq, child_menu.sequence)
if child_menu.name == menu_name:
menu_id = child_menu.id
break
if not menu_id:
# not found, create it
menu_id = Menus.create(cr, UID_ROOT,
{'name': menu_name,
'parent_id': parent_menu.id,
'sequence': max_seq + 10, # at the bottom
})
return menu_id
def _sharing_root_menu_id(self, cr, uid, portal, context=None):
"""Create or retrieve root ID of sharing menu in portal menu
:param portal: browse_record of portal, constructed with a context WITHOUT language
"""
parent_menu_id = self._create_or_get_submenu_named(cr, uid, portal.parent_menu_id.id, SHARED_DOCS_MENU, context=context)
if parent_menu_id:
child_menu_id = self._create_or_get_submenu_named(cr, uid, parent_menu_id, SHARED_DOCS_CHILD_MENU, context=context)
return child_menu_id
def _create_shared_data_menu(self, cr, uid, wizard_data, portal, context=None):
"""Create sharing menus in portal menu according to share wizard options.
:param wizard_data: browse_record of share.wizard
:param portal: browse_record of portal, constructed with a context WITHOUT language
"""
root_menu_id = self._sharing_root_menu_id(cr, uid, portal, context=context)
if not root_menu_id:
# no specific parent menu, cannot create the sharing menu at all.
return
# Create the shared action and menu
action_def = self._shared_action_def(cr, uid, wizard_data, context=None)
action_id = self.pool.get('ir.actions.act_window').create(cr, UID_ROOT, action_def)
menu_data = {'name': action_def['name'],
'sequence': 10,
'action': 'ir.actions.act_window,'+str(action_id),
'parent_id': root_menu_id,
'icon': 'STOCK_JUSTIFY_FILL'}
menu_id = self.pool.get('ir.ui.menu').create(cr, UID_ROOT, menu_data)
return menu_id
def _create_share_users_group(self, cr, uid, wizard_data, context=None):
# Override of super() to handle the possibly selected "existing users"
# and "existing groups".
# In both cases, we call super() to create the share group, but when
# sharing with existing groups, we will later delete it, and copy its
# access rights and rules to the selected groups.
super_result = super(share_wizard_portal,self)._create_share_users_group(cr, uid, wizard_data, context=context)
# For sharing with existing groups, we don't create a share group, instead we'll
# alter the rules of the groups so they can see the shared data
if wizard_data.group_ids:
# get the list of portals and the related groups to install their menus.
res_groups = self.pool.get('res.groups')
all_portal_group_ids = res_groups.search(cr, UID_ROOT, [('is_portal', '=', True)])
# populate result lines with the users of each group and
# setup the menu for portal groups
for group in wizard_data.group_ids:
if group.id in all_portal_group_ids:
self._create_shared_data_menu(cr, uid, wizard_data, group.id, context=context)
for user in group.users:
new_line = {'user_id': user.id,
'newly_created': False}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
elif wizard_data.user_ids:
# must take care of existing users, by adding them to the new group, which is super_result[0],
# and adding the shortcut
selected_user_ids = [x.id for x in wizard_data.user_ids]
self.pool.get('res.users').write(cr, UID_ROOT, selected_user_ids, {'groups_id': [(4, super_result[0])]})
self._setup_action_and_shortcut(cr, uid, wizard_data, selected_user_ids, make_home=False, context=context)
# populate the result lines for existing users too
for user in wizard_data.user_ids:
new_line = { 'user_id': user.id,
'newly_created': False}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
return super_result
def copy_share_group_access_and_delete(self, cr, wizard_data, share_group_id, context=None):
# In the case of sharing with existing groups, the strategy is to copy
# access rights and rules from the share group to the selected groups,
# so that the temporary share group can then be deleted.
if not wizard_data.group_ids: return
Groups = self.pool.get('res.groups')
Rules = self.pool.get('ir.rule')
Rights = self.pool.get('ir.model.access')
share_group = Groups.browse(cr, UID_ROOT, share_group_id)
share_rule_ids = [r.id for r in share_group.rule_groups]
for target_group in wizard_data.group_ids:
# Link the rules to the group. This is appropriate because as of
# v6.1, the algorithm for combining them will OR the rules, hence
# extending the visible data.
Rules.write(cr, UID_ROOT, share_rule_ids, {'groups': [(4,target_group.id)]})
_logger.debug("Linked sharing rules from temporary sharing group to group %s", target_group)
# Copy the access rights. This is appropriate too because
# groups have the UNION of all permissions granted by their
# access right lines.
for access_line in share_group.model_access:
Rights.copy(cr, UID_ROOT, access_line.id, default={'group_id': target_group.id})
_logger.debug("Copied access rights from temporary sharing group to group %s", target_group)
# finally, delete it after removing its users
Groups.write(cr, UID_ROOT, [share_group_id], {'users': [(6,0,[])]})
Groups.unlink(cr, UID_ROOT, [share_group_id])
_logger.debug("Deleted temporary sharing group %s", share_group_id)
def _finish_result_lines(self, cr, uid, wizard_data, share_group_id, context=None):
super(share_wizard_portal,self)._finish_result_lines(cr, uid, wizard_data, share_group_id, context=context)
self.copy_share_group_access_and_delete(cr, wizard_data, share_group_id, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/django/contrib/gis/db/backends/postgis/const.py
|
528
|
1484
|
"""
PostGIS to GDAL conversion constant definitions
"""
# Lookup to convert pixel type values from GDAL to PostGIS
GDAL_TO_POSTGIS = [None, 4, 6, 5, 8, 7, 10, 11, None, None, None, None]
# Lookup to convert pixel type values from PostGIS to GDAL
POSTGIS_TO_GDAL = [1, 1, 1, 3, 1, 3, 2, 5, 4, None, 6, 7, None, None]
# Struct pack structure for raster header, the raster header has the
# following structure:
#
# Endianness, PostGIS raster version, number of bands, scale, origin,
# skew, srid, width, and height.
#
# Scale, origin, and skew have x and y values. PostGIS currently uses
# a fixed endianness (1) and there is only one version (0).
POSTGIS_HEADER_STRUCTURE = 'B H H d d d d d d i H H'
# Lookup values to convert GDAL pixel types to struct characters. This is
# used to pack and unpack the pixel values of PostGIS raster bands.
GDAL_TO_STRUCT = [
None, 'B', 'H', 'h', 'L', 'l', 'f', 'd',
None, None, None, None,
]
# Size of the packed value in bytes for different numerical types.
# This is needed to cut chunks of band data out of PostGIS raster strings
# when decomposing them into GDALRasters.
# See https://docs.python.org/3/library/struct.html#format-characters
STRUCT_SIZE = {
'b': 1, # Signed char
'B': 1, # Unsigned char
'?': 1, # _Bool
'h': 2, # Short
'H': 2, # Unsigned short
'i': 4, # Integer
'I': 4, # Unsigned Integer
'l': 4, # Long
'L': 4, # Unsigned Long
'f': 4, # Float
'd': 8, # Double
}
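# Illustrative check (not part of the original module): on common platforms the
# sizes above mirror Python's struct module, e.g.
#
#   import struct
#   assert struct.calcsize('H') == STRUCT_SIZE['H'] == 2
#   assert struct.calcsize('d') == STRUCT_SIZE['d'] == 8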
|
gpl-3.0
|
JerryLead/spark
|
examples/src/main/python/ml/decision_tree_classification_example.py
|
123
|
3003
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Decision Tree Classification Example.
"""
from __future__ import print_function
# $example on$
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("DecisionTreeClassificationExample")\
.getOrCreate()
# $example on$
# Load the data stored in LIBSVM format as a DataFrame.
data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
# Index labels, adding metadata to the label column.
# Fit on whole dataset to include all labels in index.
labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(data)
# Automatically identify categorical features, and index them.
# We specify maxCategories so features with > 4 distinct values are treated as continuous.
featureIndexer =\
VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a DecisionTree model.
dt = DecisionTreeClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures")
# Chain indexers and tree in a Pipeline
pipeline = Pipeline(stages=[labelIndexer, featureIndexer, dt])
# Train model. This also runs the indexers.
model = pipeline.fit(trainingData)
# Make predictions.
predictions = model.transform(testData)
# Select example rows to display.
predictions.select("prediction", "indexedLabel", "features").show(5)
# Select (prediction, true label) and compute test error
evaluator = MulticlassClassificationEvaluator(
labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictions)
print("Test Error = %g " % (1.0 - accuracy))
treeModel = model.stages[2]
# summary only
print(treeModel)
# $example off$
spark.stop()
|
apache-2.0
|
mrosenbladt/yaml-cpp.new-api
|
test/gmock-1.7.0/test/gmock_leak_test.py
|
779
|
4384
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests that leaked mock objects can be caught be Google Mock."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gmock_test_utils
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_leak_test_')
TEST_WITH_EXPECT_CALL = [PROGRAM_PATH, '--gtest_filter=*ExpectCall*']
TEST_WITH_ON_CALL = [PROGRAM_PATH, '--gtest_filter=*OnCall*']
TEST_MULTIPLE_LEAKS = [PROGRAM_PATH, '--gtest_filter=*MultipleLeaked*']
environ = gmock_test_utils.environ
SetEnvVar = gmock_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gmock_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
class GMockLeakTest(gmock_test_utils.TestCase):
def testCatchesLeakedMockByDefault(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL,
env=environ).exit_code)
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_ON_CALL,
env=environ).exit_code)
def testDoesNotCatchLeakedMockWhenDisabled(self):
self.assertEquals(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
['--gmock_catch_leaked_mocks=0'],
env=environ).exit_code)
self.assertEquals(
0,
gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
['--gmock_catch_leaked_mocks=0'],
env=environ).exit_code)
def testCatchesLeakedMockWhenEnabled(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
['--gmock_catch_leaked_mocks'],
env=environ).exit_code)
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
['--gmock_catch_leaked_mocks'],
env=environ).exit_code)
def testCatchesLeakedMockWhenEnabledWithExplictFlagValue(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
['--gmock_catch_leaked_mocks=1'],
env=environ).exit_code)
def testCatchesMultipleLeakedMocks(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_MULTIPLE_LEAKS +
['--gmock_catch_leaked_mocks'],
env=environ).exit_code)
if __name__ == '__main__':
gmock_test_utils.Main()
|
mit
|
thecodinghub/news-for-good
|
news/Lib/fnmatch.py
|
27
|
3166
|
"""Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case into account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import os
import posixpath
import re
import functools
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = os.path.normcase(name)
pat = os.path.normcase(pat)
return fnmatchcase(name, pat)
@functools.lru_cache(maxsize=256, typed=True)
def _compile_pattern(pat):
if isinstance(pat, bytes):
pat_str = str(pat, 'ISO-8859-1')
res_str = translate(pat_str)
res = bytes(res_str, 'ISO-8859-1')
else:
res = translate(pat)
return re.compile(res).match
def filter(names, pat):
"""Return the subset of the list NAMES that match PAT."""
result = []
pat = os.path.normcase(pat)
match = _compile_pattern(pat)
if os.path is posixpath:
# normcase on posix is NOP. Optimize it away from the loop.
for name in names:
if match(name):
result.append(name)
else:
for name in names:
if match(os.path.normcase(name)):
result.append(name)
return result
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
match = _compile_pattern(pat)
return match(name) is not None
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i+1
if c == '*':
res = res + '.*'
elif c == '?':
res = res + '.'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j+1
if j < n and pat[j] == ']':
j = j+1
while j < n and pat[j] != ']':
j = j+1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\','\\\\')
i = j+1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return r'(?s:%s)\Z' % res
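# Illustrative examples (not part of the original module):
#
#   fnmatch('foo.txt', '*.txt')        # -> True
#   fnmatchcase('FOO.txt', '*.TXT')    # -> False (no case normalisation)
#   translate('*.txt')                 # -> r'(?s:.*\.txt)\Z'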
|
bsd-3-clause
|
codeforamerica/skillcamp
|
ENV/lib/python2.7/site-packages/gunicorn/http/unreader.py
|
153
|
2024
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
from gunicorn import six
# Classes that can undo reading data from
# a given type of data source.
class Unreader(object):
def __init__(self):
self.buf = six.BytesIO()
def chunk(self):
raise NotImplementedError()
def read(self, size=None):
if size is not None and not isinstance(size, six.integer_types):
raise TypeError("size parameter must be an int or long.")
if size is not None:
if size == 0:
return b""
if size < 0:
size = None
self.buf.seek(0, os.SEEK_END)
if size is None and self.buf.tell():
ret = self.buf.getvalue()
self.buf = six.BytesIO()
return ret
if size is None:
d = self.chunk()
return d
while self.buf.tell() < size:
chunk = self.chunk()
if not len(chunk):
ret = self.buf.getvalue()
self.buf = six.BytesIO()
return ret
self.buf.write(chunk)
data = self.buf.getvalue()
self.buf = six.BytesIO()
self.buf.write(data[size:])
return data[:size]
def unread(self, data):
self.buf.seek(0, os.SEEK_END)
self.buf.write(data)
class SocketUnreader(Unreader):
def __init__(self, sock, max_chunk=8192):
super(SocketUnreader, self).__init__()
self.sock = sock
self.mxchunk = max_chunk
def chunk(self):
return self.sock.recv(self.mxchunk)
class IterUnreader(Unreader):
def __init__(self, iterable):
super(IterUnreader, self).__init__()
self.iter = iter(iterable)
def chunk(self):
if not self.iter:
return b""
try:
return six.next(self.iter)
except StopIteration:
self.iter = None
return b""
|
mit
|
Friday811/tf-tacyt
|
authorization/Response.py
|
2
|
2184
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This library offers an API to use Tacyt in a python environment.
Copyright (C) 2015 Eleven Paths
'''
import json
from authorization import Error
class Response(object):
'''
This class models a response from any of the endpoints in the Tacyt API.
It consists of a "data" and an "error" elements. Although normally only one of them will be
present, they are not mutually exclusive, since errors can be non fatal, and therefore a response
could have valid information in the data field and at the same time inform of an error.
'''
    def __init__(self, data=None, error=None, json_string=None):
        '''
        @param json_string a JSON string received from one of the methods of the Tacyt API
'''
self.data = data
self.error = error
if json_string is not None:
json_object = json.loads(json_string)
if "data" in json_object:
self.data = json_object["data"]
else:
self.data = ""
if "error" in json_object:
self.error = Error.Error(json_object["error"])
else:
self.error = ""
def get_data(self):
'''
@return JsonObject the data part of the API response
'''
return self.data
def set_data(self, data):
'''
@param $data the data to include in the API response
'''
self.data = json.loads(data)
def get_error(self):
'''
@return Error the error part of the API response, consisting of an error code and an error message
'''
return self.error
def set_error(self, error):
'''
@param $error an error to include in the API response
'''
self.error = Error.Error(error)
def to_json(self):
'''
@return a Json object with the data and error parts set if they exist
'''
json_response = {}
if hasattr(self, "data"):
json_response["data"] = self.data
if hasattr(self, "error"):
json_response["error"] = self.error
return json_response
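# Illustrative usage (a hedged sketch with a made-up payload, not part of the
# original file): parsing a raw API response into its data and error parts.
#
#   r = Response(json_string='{"data": {"result": 1}}')
#   r.get_data()    # {'result': 1}
#   r.get_error()   # ''  (no "error" key in the payload)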
|
lgpl-2.1
|
hzy001/ansible
|
v1/ansible/runner/connection_plugins/accelerate.py
|
109
|
15527
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import base64
import socket
import struct
import time
from ansible.callbacks import vvv, vvvv
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.runner.connection_plugins.ssh import Connection as SSHConnection
from ansible.runner.connection_plugins.paramiko_ssh import Connection as ParamikoConnection
from ansible import utils
from ansible import constants
# the chunk size to read and send, assuming mtu 1500 and
# leaving room for base64 (+33%) encoding and header (8 bytes)
# ((1400-8)/4)*3) = 1044
# which leaves room for the TCP/IP header. We set this to a
# multiple of the value to speed up file reads.
CHUNK_SIZE=1044*20
class Connection(object):
''' raw socket accelerated connection '''
def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
self.runner = runner
self.host = host
self.context = None
self.conn = None
self.user = user
self.key = utils.key_for_hostname(host)
self.port = port[0]
self.accport = port[1]
self.is_connected = False
self.has_pipelining = False
self.become_methods_supported=['sudo']
if not self.port:
self.port = constants.DEFAULT_REMOTE_PORT
elif not isinstance(self.port, int):
self.port = int(self.port)
if not self.accport:
self.accport = constants.ACCELERATE_PORT
elif not isinstance(self.accport, int):
self.accport = int(self.accport)
if self.runner.original_transport == "paramiko":
self.ssh = ParamikoConnection(
runner=self.runner,
host=self.host,
port=self.port,
user=self.user,
password=password,
private_key_file=private_key_file
)
else:
self.ssh = SSHConnection(
runner=self.runner,
host=self.host,
port=self.port,
user=self.user,
password=password,
private_key_file=private_key_file
)
if not getattr(self.ssh, 'shell', None):
self.ssh.shell = utils.plugins.shell_loader.get('sh')
# attempt to work around shared-memory funness
if getattr(self.runner, 'aes_keys', None):
utils.AES_KEYS = self.runner.aes_keys
def _execute_accelerate_module(self):
args = "password=%s port=%s minutes=%d debug=%d ipv6=%s" % (
base64.b64encode(self.key.__str__()),
str(self.accport),
constants.ACCELERATE_DAEMON_TIMEOUT,
int(utils.VERBOSITY),
self.runner.accelerate_ipv6,
)
if constants.ACCELERATE_MULTI_KEY:
args += " multi_key=yes"
inject = dict(password=self.key)
if getattr(self.runner, 'accelerate_inventory_host', False):
inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.runner.accelerate_inventory_host))
else:
inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.host))
vvvv("attempting to start up the accelerate daemon...")
self.ssh.connect()
tmp_path = self.runner._make_tmp_path(self.ssh)
return self.runner._execute_module(self.ssh, tmp_path, 'accelerate', args, inject=inject)
def connect(self, allow_ssh=True):
''' activates the connection object '''
try:
if not self.is_connected:
wrong_user = False
tries = 3
self.conn = socket.socket()
self.conn.settimeout(constants.ACCELERATE_CONNECT_TIMEOUT)
vvvv("attempting connection to %s via the accelerated port %d" % (self.host,self.accport))
while tries > 0:
try:
self.conn.connect((self.host,self.accport))
break
except socket.error:
vvvv("connection to %s failed, retrying..." % self.host)
time.sleep(0.1)
tries -= 1
if tries == 0:
vvv("Could not connect via the accelerated connection, exceeded # of tries")
raise AnsibleError("FAILED")
elif wrong_user:
vvv("Restarting daemon with a different remote_user")
raise AnsibleError("WRONG_USER")
self.conn.settimeout(constants.ACCELERATE_TIMEOUT)
if not self.validate_user():
# the accelerated daemon was started with a
# different remote_user. The above command
# should have caused the accelerate daemon to
# shutdown, so we'll reconnect.
wrong_user = True
except AnsibleError, e:
if allow_ssh:
if "WRONG_USER" in e:
vvv("Switching users, waiting for the daemon on %s to shutdown completely..." % self.host)
time.sleep(5)
vvv("Falling back to ssh to startup accelerated mode")
res = self._execute_accelerate_module()
if not res.is_successful():
raise AnsibleError("Failed to launch the accelerated daemon on %s (reason: %s)" % (self.host,res.result.get('msg')))
return self.connect(allow_ssh=False)
else:
raise AnsibleError("Failed to connect to %s:%s" % (self.host,self.accport))
self.is_connected = True
return self
def send_data(self, data):
packed_len = struct.pack('!Q',len(data))
return self.conn.sendall(packed_len + data)
def recv_data(self):
header_len = 8 # size of a packed unsigned long long
data = b""
try:
vvvv("%s: in recv_data(), waiting for the header" % self.host)
while len(data) < header_len:
d = self.conn.recv(header_len - len(data))
if not d:
vvvv("%s: received nothing, bailing out" % self.host)
return None
data += d
vvvv("%s: got the header, unpacking" % self.host)
data_len = struct.unpack('!Q',data[:header_len])[0]
data = data[header_len:]
vvvv("%s: data received so far (expecting %d): %d" % (self.host,data_len,len(data)))
while len(data) < data_len:
d = self.conn.recv(data_len - len(data))
if not d:
vvvv("%s: received nothing, bailing out" % self.host)
return None
vvvv("%s: received %d bytes" % (self.host, len(d)))
data += d
vvvv("%s: received all of the data, returning" % self.host)
return data
except socket.timeout:
raise AnsibleError("timed out while waiting to receive data")
def validate_user(self):
'''
        Checks the remote uid of the accelerated daemon against the
        one specified for this play, and causes the daemon to exit
        if they don't match.
'''
vvvv("%s: sending request for validate_user" % self.host)
data = dict(
mode='validate_user',
username=self.user,
)
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("Failed to send command to %s" % self.host)
vvvv("%s: waiting for validate_user response" % self.host)
while True:
# we loop here while waiting for the response, because a
# long running command may cause us to receive keepalive packets
# ({"pong":"true"}) rather than the response we want.
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self.host)
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
if "pong" in response:
# it's a keepalive, go back to waiting
vvvv("%s: received a keepalive packet" % self.host)
continue
else:
vvvv("%s: received the validate_user response: %s" % (self.host, response))
break
if response.get('failed'):
return False
else:
return response.get('rc') == 0
def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the remote host '''
if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
            raise AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
if executable == "":
executable = constants.DEFAULT_EXECUTABLE
if self.runner.become and sudoable:
cmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe)
vvv("EXEC COMMAND %s" % cmd)
data = dict(
mode='command',
cmd=cmd,
tmp_path=tmp_path,
executable=executable,
)
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("Failed to send command to %s" % self.host)
while True:
# we loop here while waiting for the response, because a
# long running command may cause us to receive keepalive packets
# ({"pong":"true"}) rather than the response we want.
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self.host)
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
if "pong" in response:
# it's a keepalive, go back to waiting
vvvv("%s: received a keepalive packet" % self.host)
continue
else:
vvvv("%s: received the response" % self.host)
break
return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
if not os.path.exists(in_path):
raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
fd = file(in_path, 'rb')
fstat = os.stat(in_path)
try:
vvv("PUT file is %d bytes" % fstat.st_size)
last = False
while fd.tell() <= fstat.st_size and not last:
vvvv("file position currently %ld, file size is %ld" % (fd.tell(), fstat.st_size))
data = fd.read(CHUNK_SIZE)
if fd.tell() >= fstat.st_size:
last = True
data = dict(mode='put', data=base64.b64encode(data), out_path=out_path, last=last)
if self.runner.become:
data['user'] = self.runner.become_user
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("failed to send the file to %s" % self.host)
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self.host)
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
if response.get('failed',False):
raise AnsibleError("failed to put the file in the requested location")
finally:
fd.close()
vvvv("waiting for final response after PUT")
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self.host)
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
if response.get('failed',False):
raise AnsibleError("failed to put the file in the requested location")
def fetch_file(self, in_path, out_path):
''' save a remote file to the specified path '''
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
data = dict(mode='fetch', in_path=in_path)
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("failed to initiate the file fetch with %s" % self.host)
fh = open(out_path, "w")
try:
bytes = 0
while True:
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self.host)
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
if response.get('failed', False):
raise AnsibleError("Error during file fetch, aborting")
out = base64.b64decode(response['data'])
fh.write(out)
bytes += len(out)
# send an empty response back to signify we
# received the last chunk without errors
data = utils.jsonify(dict())
data = utils.encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("failed to send ack during file fetch")
if response.get('last', False):
break
finally:
# we don't currently care about this final response,
# we just receive it and drop it. It may be used at some
# point in the future or we may just have the put/fetch
# operations not send back a final response at all
response = self.recv_data()
vvv("FETCH wrote %d bytes to %s" % (bytes, out_path))
fh.close()
def close(self):
''' terminate the connection '''
# Be a good citizen
try:
self.conn.close()
except:
pass
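# Wire-format note (illustrative, not part of the original plugin): every
# message exchanged above is length-prefixed with an 8-byte big-endian
# unsigned long long, i.e. send_data() writes struct.pack('!Q', len(data))
# followed by the encrypted payload, and recv_data() first reads those 8
# header bytes, then loops until exactly that many payload bytes arrive.
#
#   struct.pack('!Q', 5)     # '\x00\x00\x00\x00\x00\x00\x00\x05' (8 bytes)
#   struct.unpack('!Q', '\x00\x00\x00\x00\x00\x00\x00\x05')[0]    # 5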
|
gpl-3.0
|
ramiro/scrapy
|
tests/test_http_cookies.py
|
38
|
2178
|
from six.moves.urllib.parse import urlparse
from unittest import TestCase
from scrapy.http import Request, Response
from scrapy.http.cookies import WrappedRequest, WrappedResponse
class WrappedRequestTest(TestCase):
def setUp(self):
self.request = Request("http://www.example.com/page.html", \
headers={"Content-Type": "text/html"})
self.wrapped = WrappedRequest(self.request)
def test_get_full_url(self):
self.assertEqual(self.wrapped.get_full_url(), self.request.url)
def test_get_host(self):
self.assertEqual(self.wrapped.get_host(), urlparse(self.request.url).netloc)
def test_get_type(self):
self.assertEqual(self.wrapped.get_type(), urlparse(self.request.url).scheme)
def test_is_unverifiable(self):
self.assertFalse(self.wrapped.is_unverifiable())
def test_is_unverifiable2(self):
self.request.meta['is_unverifiable'] = True
self.assertTrue(self.wrapped.is_unverifiable())
def test_get_origin_req_host(self):
self.assertEqual(self.wrapped.get_origin_req_host(), 'www.example.com')
def test_has_header(self):
self.assertTrue(self.wrapped.has_header('content-type'))
self.assertFalse(self.wrapped.has_header('xxxxx'))
def test_get_header(self):
self.assertEqual(self.wrapped.get_header('content-type'), 'text/html')
self.assertEqual(self.wrapped.get_header('xxxxx', 'def'), 'def')
def test_header_items(self):
self.assertEqual(self.wrapped.header_items(), [('Content-Type', ['text/html'])])
def test_add_unredirected_header(self):
self.wrapped.add_unredirected_header('hello', 'world')
self.assertEqual(self.request.headers['hello'], 'world')
class WrappedResponseTest(TestCase):
def setUp(self):
self.response = Response("http://www.example.com/page.html",
headers={"Content-TYpe": "text/html"})
self.wrapped = WrappedResponse(self.response)
def test_info(self):
self.assert_(self.wrapped.info() is self.wrapped)
def test_getheaders(self):
self.assertEqual(self.wrapped.getheaders('content-type'), ['text/html'])
|
bsd-3-clause
|