text | repo_name | path | language | license | size | score
---|---|---|---|---|---|---
stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses (1 value) | stringclasses (15 values) | int64 6–947k | float64 0–0.34
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from tuskar.templates import namespace
class NamespaceTests(unittest.TestCase):
def test_apply_template_namespace(self):
namespaced = namespace.apply_template_namespace('test-ns', 'test-name')
self.assertEqual(namespaced, 'test-ns::test-name')
self.assertTrue(namespace.matches_template_namespace('test-ns',
namespaced))
def test_remove_template_namespace(self):
stripped = namespace.remove_template_namespace('test-ns::test-name')
self.assertEqual(stripped, 'test-name')
def test_matches_template_namespace(self):
value = 'test-ns::test-name'
self.assertTrue(namespace.matches_template_namespace('test-ns', value))
self.assertFalse(namespace.matches_template_namespace('fake', value))
def test_apply_resource_alias_namespace(self):
namespaced = namespace.apply_resource_alias_namespace('compute')
self.assertEqual(namespaced, 'Tuskar::compute')
def test_remove_resource_alias_namespace(self):
stripped = namespace.remove_resource_alias_namespace(
'Tuskar::controller')
self.assertEqual(stripped, 'controller')
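# --- Editor's note: a minimal, hedged sketch of the helpers under test,
# --- inferred only from the assertions above; this is NOT the actual
# --- tuskar.templates.namespace implementation.
TEMPLATE_SEPARATOR = '::'
RESOURCE_ALIAS_PREFIX = 'Tuskar'

def sketch_apply_template_namespace(ns, name):
    # 'test-ns' + 'test-name' -> 'test-ns::test-name'
    return ns + TEMPLATE_SEPARATOR + name

def sketch_remove_template_namespace(value):
    # 'test-ns::test-name' -> 'test-name'
    return value.split(TEMPLATE_SEPARATOR, 1)[1]

def sketch_matches_template_namespace(ns, value):
    # True only when the value carries the given namespace prefix
    return value.startswith(ns + TEMPLATE_SEPARATOR)

def sketch_apply_resource_alias_namespace(alias):
    # 'compute' -> 'Tuskar::compute'
    return RESOURCE_ALIAS_PREFIX + TEMPLATE_SEPARATOR + alias

def sketch_remove_resource_alias_namespace(value):
    # 'Tuskar::controller' -> 'controller'
    return value.split(TEMPLATE_SEPARATOR, 1)[1]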
| rdo-management/tuskar | tuskar/tests/templates/test_namespace.py | Python | apache-2.0 | 1,765 | 0 |
from unittest import TestCase
from mock import Mock
from cloudshell.cp.openstack.domain.services.nova.nova_instance_service import NovaInstanceService
import cloudshell.cp.openstack.domain.services.nova.nova_instance_service as test_nova_instance_service
from cloudshell.cp.openstack.common.driver_helper import CloudshellDriverHelper
from cloudshell.cp.openstack.models.exceptions import CommandCancellationException, InstanceErrorStateException
class TestNovaInstanceService(TestCase):
def setUp(self):
instance_waiter = Mock()
instance_waiter.wait = Mock()
instance_waiter.ACTIVE = 'ACTIVE'
self.instance_service = NovaInstanceService(instance_waiter=instance_waiter)
self.mock_logger = Mock()
self.openstack_session = Mock()
def test_instance_create_empty_openstack_session(self):
test_name = 'test'
result = self.instance_service.create_instance(openstack_session=None,
name=test_name,
reservation=Mock(),
cp_resource_model=Mock(),
deploy_req_model=Mock(),
cancellation_context=Mock(),
logger=self.mock_logger)
self.assertEqual(result, None)
def test_instance_create_success(self):
test_name = 'test'
CloudshellDriverHelper.get_uuid = Mock(return_value='1234')
test_uniq_name = 'test-1234'
mock_client2 = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client2)
mock_image = Mock()
mock_flavor = Mock()
mock_client2.images.find = Mock(return_value=mock_image)
mock_client2.flavors.find = Mock(return_value=mock_flavor)
mock_deploy_req_model = Mock()
mock_deploy_req_model.affinity_group_id = ''
mock_deploy_req_model.availability_zone = 'test-avail-zone'
test_nova_instance_service.udev_rules_sh_str = 'test_userdata'
mock_cp_resource_model = Mock()
mock_cp_resource_model.qs_mgmt_os_net_uuid = '1234'
mock_cancellation_context = Mock()
mock_client2.servers = Mock()
mocked_inst = Mock()
mock_client2.servers.create = Mock(return_value=mocked_inst)
mock_qnet_dict = {'net-id': mock_cp_resource_model.qs_mgmt_os_net_uuid}
result = self.instance_service.create_instance(openstack_session=self.openstack_session,
name=test_name,
reservation=Mock(),
cp_resource_model=mock_cp_resource_model,
deploy_req_model=mock_deploy_req_model,
cancellation_context=mock_cancellation_context,
logger=self.mock_logger)
mock_client2.servers.create.assert_called_with(name=test_uniq_name,
image=mock_image,
flavor=mock_flavor,
availability_zone='test-avail-zone',
userdata='test_userdata',
nics=[mock_qnet_dict])
self.assertEqual(result, mocked_inst)
self.instance_service.instance_waiter.wait.assert_called_with(mocked_inst,
state=self.instance_service.instance_waiter.ACTIVE,
cancellation_context=mock_cancellation_context,
logger=self.mock_logger)
def test_instance_create_cancellation_called(self):
test_name = 'test'
CloudshellDriverHelper.get_uuid = Mock(return_value='1234')
test_uniq_name = 'test-1234'
mock_client2 = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client2)
mock_image = Mock()
mock_flavor = Mock()
mock_client2.images.find = Mock(return_value=mock_image)
mock_client2.flavors.find = Mock(return_value=mock_flavor)
mock_cp_resource_model = Mock()
mock_cp_resource_model.qs_mgmt_os_net_uuid = '1234'
mock_cancellation_context = Mock()
mock_client2.servers = Mock()
mocked_inst = Mock()
mock_client2.servers.create = Mock(return_value=mocked_inst)
mock_qnet_dict = {'net-id': mock_cp_resource_model.qs_mgmt_os_net_uuid}
self.instance_service.instance_waiter = Mock()
self.instance_service.instance_waiter.wait = Mock(side_effect=CommandCancellationException)
with self.assertRaises(CommandCancellationException):
result = self.instance_service.create_instance(openstack_session=self.openstack_session,
name=test_name,
reservation=Mock(),
cp_resource_model=mock_cp_resource_model,
deploy_req_model=Mock(),
cancellation_context=mock_cancellation_context,
logger=self.mock_logger)
mock_client2.servers.delete.assert_called_once_with(mocked_inst)
def test_instance_create_success_affinity_group(self):
test_name = 'test'
CloudshellDriverHelper.get_uuid = Mock(return_value='1234')
test_uniq_name = 'test-1234'
mock_client2 = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client2)
mock_image = Mock()
mock_flavor = Mock()
mock_client2.images.find = Mock(return_value=mock_image)
mock_client2.flavors.find = Mock(return_value=mock_flavor)
mock_deploy_req_model = Mock()
mock_deploy_req_model.affinity_group_id = 'test_affinity_group_id'
mock_deploy_req_model.availability_zone = ''
mock_deploy_req_model.auto_udev = False
mock_cp_resource_model = Mock()
mock_cp_resource_model.qs_mgmt_os_net_uuid = '1234'
mock_cancellation_context = Mock()
mock_client2.servers = Mock()
mocked_inst = Mock()
mock_client2.servers.create = Mock(return_value=mocked_inst)
mock_qnet_dict = {'net-id': mock_cp_resource_model.qs_mgmt_os_net_uuid}
result = self.instance_service.create_instance(openstack_session=self.openstack_session,
name=test_name,
reservation=Mock(),
cp_resource_model=mock_cp_resource_model,
deploy_req_model=mock_deploy_req_model,
cancellation_context=mock_cancellation_context,
logger=self.mock_logger)
mock_client2.servers.create.assert_called_with(name=test_uniq_name,
image=mock_image,
flavor=mock_flavor,
nics=[mock_qnet_dict],
scheduler_hints={'group': 'test_affinity_group_id'})
self.assertEqual(result, mocked_inst)
self.instance_service.instance_waiter.wait.assert_called_with(mocked_inst,
state=self.instance_service.instance_waiter.ACTIVE,
cancellation_context=mock_cancellation_context,
logger=self.mock_logger)
def test_instance_terminate_openstack_session_none(self):
with self.assertRaises(ValueError) as context:
self.instance_service.terminate_instance(openstack_session=None,
instance_id='1234',
logger=self.mock_logger)
self.assertTrue(context)
def test_instance_terminate_success(self):
mock_client2 = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client2)
mock_instance = Mock()
test_instance_id = '1234-56'
test_floating_ip = '1.2.3.4'
self.instance_service.get_instance_from_instance_id = Mock(return_value=mock_instance)
self.instance_service.detach_and_delete_floating_ip = Mock()
self.instance_service.terminate_instance(openstack_session=self.openstack_session,
instance_id=test_instance_id,
logger=self.mock_logger)
mock_client2.servers.delete.assert_called_with(mock_instance)
def test_instance_power_off_success(self):
mock_client2 = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client2)
mock_instance = Mock()
test_instance_id = '1234-56'
self.instance_service.get_instance_from_instance_id = Mock(return_value=mock_instance)
self.instance_service.instance_power_off(openstack_session=self.openstack_session,
instance_id=test_instance_id,
logger=self.mock_logger)
self.instance_service.get_instance_from_instance_id.assert_called_with(openstack_session=self.openstack_session,
instance_id=test_instance_id,
logger=self.mock_logger,
client=mock_client2)
self.instance_service.instance_waiter.wait.assert_called_with(instance=mock_instance,
state=self.instance_service.instance_waiter.SHUTOFF,
cancellation_context=None,
logger=self.mock_logger)
self.assertEqual(True, mock_instance.stop.called)
def test_instance_power_on_success(self):
mock_client2 = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client2)
mock_instance = Mock()
test_instance_id = '1234-56'
self.instance_service.get_instance_from_instance_id = Mock(return_value=mock_instance)
self.instance_service.instance_power_on(openstack_session=self.openstack_session,
instance_id=test_instance_id,
logger=self.mock_logger)
self.instance_service.get_instance_from_instance_id.assert_called_with(openstack_session=self.openstack_session,
instance_id=test_instance_id,
logger=self.mock_logger,
client=mock_client2)
self.instance_service.instance_waiter.wait.assert_called_with(instance=mock_instance,
state=self.instance_service.instance_waiter.ACTIVE,
cancellation_context=None,
logger=self.mock_logger)
self.assertEqual(True, mock_instance.start.called)
def test_instance_power_on_no_instance(self):
"""
:return:
"""
mock_client2 = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client2)
test_instance_id = 'test-id'
self.instance_service.get_instance_from_instance_id = Mock(return_value=None)
with self.assertRaises(ValueError) as context:
self.instance_service.instance_power_on(openstack_session=self.openstack_session,
instance_id=test_instance_id,
logger=self.mock_logger)
self.instance_service.get_instance_from_instance_id.assert_called_with(
openstack_session=self.openstack_session,
instance_id=test_instance_id,
logger=self.mock_logger,
client=mock_client2)
self.assertTrue(context)
def test_instance_power_off_no_instance(self):
"""
:return:
"""
mock_client2 = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client2)
test_instance_id = 'test-id'
self.instance_service.get_instance_from_instance_id = Mock(return_value=None)
with self.assertRaises(ValueError) as context:
self.instance_service.instance_power_off(openstack_session=self.openstack_session,
instance_id=test_instance_id,
logger=self.mock_logger)
self.instance_service.get_instance_from_instance_id.assert_called_with(
openstack_session=self.openstack_session,
instance_id=test_instance_id,
logger=self.mock_logger,
client=mock_client2)
self.assertTrue(context)
def test_get_instance_from_instance_id(self):
mock_client2 = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client2)
mock_instance = Mock()
mock_client2.servers.find = Mock(return_value=mock_instance)
test_instance_id = '1234'
result = self.instance_service.get_instance_from_instance_id(openstack_session=self.openstack_session,
instance_id=test_instance_id,
logger=self.mock_logger,
client=mock_client2)
self.assertEqual(result, mock_instance)
def test_get_instance_from_instance_id_not_found_on_nova(self):
"""Check that function will return None if instance with given id will not be found on the Nova server"""
mock_client = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client)
mock_client.servers.find = Mock(side_effect=test_nova_instance_service.novaclient.exceptions.NotFound(""))
test_instance_id = '1234'
result = self.instance_service.get_instance_from_instance_id(openstack_session=self.openstack_session,
instance_id=test_instance_id,
logger=self.mock_logger,
client=mock_client)
self.assertEqual(result, None)
def test_get_instance_from_instance_id_reraise_exception(self):
"""Check that function will re-raise exception if such occurs during retrieving instance from Nova server"""
mock_client = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client)
mock_client.servers.find = Mock(side_effect=Exception())
test_instance_id = '1234'
with self.assertRaises(Exception):
self.instance_service.get_instance_from_instance_id(openstack_session=self.openstack_session,
instance_id=test_instance_id,
logger=self.mock_logger,
client=mock_client)
def test_attach_nic_to_net_success(self):
"""
:return:
"""
import jsonpickle
mock_client = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client)
mock_instance = Mock()
mock_iface_attach_result = Mock()
mock_instance.interface_attach = Mock(return_value=mock_iface_attach_result)
expected_test_mac = 'test_mac_address'
expected_port_id = 'test_port_id'
expected_ip_address = 'test_ip_address'
mock_result_dict = {'mac_addr': expected_test_mac,
'port_id': expected_port_id,
'fixed_ips': [{'ip_address': expected_ip_address}]}
mock_iface_attach_result.to_dict = Mock(return_value=mock_result_dict)
self.instance_service.get_instance_from_instance_id = Mock(return_value=mock_instance)
result = self.instance_service.attach_nic_to_net(openstack_session=self.openstack_session,
net_id='test_net_id',
instance_id='test_instance_id',
logger=self.mock_logger)
expected_result_dict = {'ip_address': expected_ip_address,
'port_id': expected_port_id,
'mac_address': expected_test_mac}
self.assertEqual(jsonpickle.loads(result), expected_result_dict)
def test_attach_nic_to_net_failure_no_instance(self):
mock_client = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client)
self.instance_service.get_instance_from_instance_id = Mock(return_value=None)
result = self.instance_service.attach_nic_to_net(openstack_session=self.openstack_session,
net_id='test_net_id',
instance_id='test_instance_id',
logger=self.mock_logger)
self.assertEqual(result, None)
def test_attach_nic_to_net_failure_exception(self):
mock_client = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client)
mock_instance = Mock()
mock_instance.interface_attach = Mock(side_effect=Exception)
self.instance_service.get_instance_from_instance_id = Mock(return_value=mock_instance)
with self.assertRaises(Exception) as context:
result = self.instance_service.attach_nic_to_net(openstack_session=self.openstack_session,
net_id='test_net_id',
instance_id='test_instance_id',
logger=self.mock_logger)
self.assertTrue(context)
def test_detach_nic_from_net_success(self):
mock_client = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client)
mock_instance = Mock()
self.instance_service.get_instance_from_instance_id = Mock(return_value=mock_instance)
mock_iface_detach_result = Mock()
mock_instance.interface_detach = Mock(return_value=mock_iface_detach_result)
result = self.instance_service.detach_nic_from_instance(openstack_session=self.openstack_session,
instance_id='test_instance_id',
port_id='test_port_id',
logger=self.mock_logger)
mock_instance.interface_detach.assert_called_with('test_port_id')
self.assertEqual(result, True)
def test_detach_nic_from_net_failure(self):
mock_client = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client)
mock_instance = Mock()
self.instance_service.get_instance_from_instance_id = Mock(return_value=mock_instance)
mock_instance.interface_detach = Mock(side_effect=Exception)
result = self.instance_service.detach_nic_from_instance(openstack_session=self.openstack_session,
instance_id='test_instance_id',
port_id='test_port_id',
logger=self.mock_logger)
self.assertEqual(result, False)
def test_attach_floating_ip(self):
mock_client = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client)
test_external_nw_id = 'ext-net-id'
test_floating_ip = '4.3.2.1'
test_net_label = 'test-net'
mock_net_obj = Mock()
mock_net_obj.to_dict = Mock(return_value={'id': test_external_nw_id, 'label': test_net_label})
mock_client.networks.list = Mock(return_value=[mock_net_obj])
mock_floating_ip_obj = Mock()
mock_floating_ip_obj.ip = test_floating_ip
mock_client.floating_ips.create = Mock(return_value=mock_floating_ip_obj)
mock_instance = Mock()
mock_instance.add_floating_ip = Mock()
result = self.instance_service.attach_floating_ip(openstack_session=self.openstack_session,
instance=mock_instance,
floating_ip=test_floating_ip,
logger=self.mock_logger)
mock_instance.add_floating_ip.assert_called_with(test_floating_ip)
self.assertEqual(result, True)
def test_detach_floating_ip(self):
mock_client = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client)
mock_floating_ip = '1.2.3.4'
mock_instance = Mock()
self.instance_service.get_instance_from_instance_id = Mock(return_value=mock_instance)
mock_instance.remove_floating_ip = Mock()
self.instance_service.detach_floating_ip(openstack_session=self.openstack_session,
instance=mock_instance,
floating_ip=mock_floating_ip,
logger=self.mock_logger)
mock_instance.remove_floating_ip.assert_called_with(mock_floating_ip)
def test_get_instance_mgmt_net_name_success(self):
mock_client = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client)
test_net_id = 'test_net_id'
test_cp_resource_model = Mock()
test_cp_resource_model.qs_mgmt_os_net_uuid = test_net_id
mock_net_obj = Mock()
mock_net_obj.to_dict = Mock(return_value={'id': test_net_id, 'label': 'test_returned_net'})
mock_client.networks = Mock()
mock_client.networks.list = Mock(return_value=[mock_net_obj])
result = self.instance_service.get_instance_mgmt_network_name(instance=Mock(),
openstack_session=self.openstack_session,
cp_resource_model=test_cp_resource_model)
self.assertEqual(result, 'test_returned_net')
def test_get_instance_mgmt_net_name_fail(self):
mock_client = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client)
test_net_id = 'test_net_id'
test_cp_resource_model = Mock()
test_cp_resource_model.qs_mgmt_os_net_uuid = test_net_id
test_net_id_1 = 'test_net_id_1'
mock_net_obj = Mock()
mock_net_obj.to_dict = Mock(return_value={'id': test_net_id_1, 'label': 'test_returned_net'})
mock_client.networks = Mock()
mock_client.networks.list = Mock(return_value=[mock_net_obj])
result = self.instance_service.get_instance_mgmt_network_name(instance=Mock(),
openstack_session=self.openstack_session,
cp_resource_model=test_cp_resource_model)
self.assertEqual(result, None)
def test_instance_create_error_state(self):
test_name = 'test'
CloudshellDriverHelper.get_uuid = Mock(return_value='1234')
test_uniq_name = 'test-1234'
mock_client2 = Mock()
test_nova_instance_service.novaclient.Client = Mock(return_value=mock_client2)
mock_image = Mock()
mock_flavor = Mock()
mock_client2.images.find = Mock(return_value=mock_image)
mock_client2.flavors.find = Mock(return_value=mock_flavor)
mock_cp_resource_model = Mock()
mock_cp_resource_model.qs_mgmt_os_net_uuid = '1234'
mock_cancellation_context = Mock()
mock_client2.servers = Mock()
mocked_inst = Mock()
mock_client2.servers.create = Mock(return_value=mocked_inst)
mock_qnet_dict = {'net-id': mock_cp_resource_model.qs_mgmt_os_net_uuid}
self.instance_service.instance_waiter = Mock()
self.instance_service.instance_waiter.wait = Mock(side_effect=InstanceErrorStateException)
with self.assertRaises(InstanceErrorStateException):
result = self.instance_service.create_instance(openstack_session=self.openstack_session,
name=test_name,
reservation=Mock(),
cp_resource_model=mock_cp_resource_model,
deploy_req_model=Mock(),
cancellation_context=mock_cancellation_context,
logger=self.mock_logger)
mock_client2.servers.delete.assert_called_once_with(mocked_inst)
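# --- Editor's note: a hedged sketch of the create_instance flow these tests
# --- exercise (create -> wait for ACTIVE -> delete on cancellation or error).
# --- The function name and signature are assumptions, and userdata/udev
# --- handling is omitted; this is not the shipped NovaInstanceService code.
def sketch_create_instance(client, waiter, name, uuid, image, flavor,
                           deploy_req_model, net_uuid,
                           cancellation_context, logger):
    # Unique server name '<name>-<uuid>'; the tests expect 'test-1234'
    uniq_name = '%s-%s' % (name, uuid)
    kwargs = {'name': uniq_name, 'image': image, 'flavor': flavor,
              'nics': [{'net-id': net_uuid}]}
    # Optional placement arguments, mirroring the two create-success tests
    if deploy_req_model.availability_zone:
        kwargs['availability_zone'] = deploy_req_model.availability_zone
    if deploy_req_model.affinity_group_id:
        kwargs['scheduler_hints'] = {'group': deploy_req_model.affinity_group_id}
    instance = client.servers.create(**kwargs)
    try:
        # Block until ACTIVE; the waiter honours the cancellation context
        waiter.wait(instance, state=waiter.ACTIVE,
                    cancellation_context=cancellation_context, logger=logger)
    except Exception:
        # On cancellation or an ERROR state, the tests assert that the
        # half-built instance is deleted before the exception propagates
        client.servers.delete(instance)
        raise
    return instance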
| QualiSystems/OpenStack-Shell | package/tests/test_cp/test_openstack/test_domain/test_services/test_nova/test_nova_instance_service.py | Python | isc | 27,295 | 0.005166 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2018: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Hartmut Goebel, h.goebel@goebel-consult.de
# Grégory Starck, g.starck@gmail.com
# Sebastien Coavoux, s.coavoux@free.fr
# Jean Gabes, naparuba@gmail.com
# Zoran Zaric, zz@zoranzaric.de
# Gerhard Lausser, gerhard.lausser@consol.de
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
import pytest
from .alignak_test import *
from alignak.macroresolver import MacroResolver
from alignak.commandcall import CommandCall
class MacroResolverTester(object):
def get_hst_svc(self):
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
hst = self._scheduler.hosts.find_by_name("test_host_0")
return (svc, hst)
def test_resolv_simple(self):
"""Test a simple macro resolution
:return:
"""
# These are macros built from a variable declare in alignak.ini file
# ; Some macros for the tests
# $alignak_test_macro=test macro
# _alignak_test_macro2=test macro 2
result = self.mr.resolve_simple_macros_in_string("$ALIGNAK_TEST_MACRO$", [], None, None, None)
assert result == "test macro"
result = self.mr.resolve_simple_macros_in_string("$ALIGNAK_TEST_MACRO2$", [], None, None, None)
assert result == "test macro 2"
# These are macros read from a pack. section of the alignak.ini configuration
result = self.mr.resolve_simple_macros_in_string("$SMTP_SERVER$", [], None, None, None)
assert result == "your_smtp_server_address"
result = self.mr.resolve_simple_macros_in_string("$MAIL_FROM$", [], None, None, None)
assert result == "alignak@monitoring"
# This is a macro built from a variable that is a string
result = self.mr.resolve_simple_macros_in_string("$ALIGNAK$", [], None, None, None)
assert result == "My Alignak"
# This is a macro built from a variable that is a list of strings
result = self.mr.resolve_simple_macros_in_string("$ALIGNAK_CONFIG$", [], None, None, None)
assert isinstance(result, string_types)
expected = "[%s]" % ','.join(self.alignak_env.cfg_files)
assert result == expected
# This is a macro built from a dynamic variable
result = self.mr.resolve_simple_macros_in_string("$MAINCONFIGFILE$", [], None, None, None)
assert result == os.path.abspath(os.path.join(self._test_dir, self.setup_file))
result = self.mr.resolve_simple_macros_in_string("$MAINCONFIGDIR$", [], None, None, None)
assert result == os.path.abspath(os.path.join(self._test_dir, './cfg'))
# This is an empty macro -> ''
result = self.mr.resolve_simple_macros_in_string("$COMMENTDATAFILE$", [], None, None, None)
assert result == ""
# This is a macro built from an Alignak variable - because the variable is prefixed with _
# The macro name is built from the uppercased variable name without the leading
# and trailing underscores: _dist -> $DIST$
result = self.mr.resolve_simple_macros_in_string("$DIST$", [], None, None, None)
assert result == "/tmp"
# Alignak variable interpolated from %(var) is available as a macro
result = self.mr.resolve_simple_macros_in_string("$DIST_ETC$", [], None, None, None)
assert result == "/tmp/etc/alignak"
# # Alignak "standard" variable is not available as a macro
# # Empty value! todo: Perhaps this should be changed?
# Sometimes the user is defined as alignak for test purposes and it remains set to this value!
# result = self.mr.resolve_simple_macros_in_string("$USER$", [], None, None, None)
# assert result == ""
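# --- Editor's note: a standalone, hedged sketch of the simple $MACRO$
# --- substitution behaviour asserted above ('$$' escapes to a literal '$');
# --- this is not alignak's MacroResolver implementation.
import re

def sketch_resolve_simple(text, macros):
    def repl(match):
        name = match.group(1)
        # An empty name means the '$$' escape sequence
        return '$' if name == '' else str(macros.get(name, ''))
    return re.sub(r'\$([A-Z0-9_]*)\$', repl, text)
# e.g. sketch_resolve_simple('$ALIGNAK_TEST_MACRO$',
#                            {'ALIGNAK_TEST_MACRO': 'test macro'}) -> 'test macro'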
def test_resolv_simple_command(self):
"""Test a simple command resolution
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
macros_command = self.mr.resolve_command(svc.check_command, data,
self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert macros_command == "plugins/test_servicecheck.pl --type=ok --failchance=5% " \
"--previous-state=OK --state-duration=0 " \
"--total-critical-on-host=0 --total-warning-on-host=0 " \
"--hostname test_host_0 --servicedesc test_ok_0"
# @pytest.mark.skip(reason="A macro remains valued where all should be reset to default!")
def test_args_macro(self):
"""
Test ARGn macros
:return:
"""
print("Initial test macros: %d - %s" % (len(self._scheduler.pushed_conf.__class__.macros),
self._scheduler.pushed_conf.__class__.macros))
print(" - : %s" % (self._scheduler.pushed_conf.__class__.properties['$USER1$']))
print(" - : %s" % (self._scheduler.pushed_conf.properties['$USER1$']))
print(" - : %s" % (getattr(self._scheduler.pushed_conf, '$USER1$', None)))
for key in self._scheduler.pushed_conf.__class__.macros:
key = self._scheduler.pushed_conf.__class__.macros[key]
if key:
value = getattr(self._scheduler.pushed_conf.properties, key, '')
print(" - %s : %s" % (key, self._scheduler.pushed_conf.properties[key]))
if value:
print("- %s = %s" % (key, value))
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
# command_with_args is defined with 5 arguments as:
# $PLUGINSDIR$/command -H $HOSTADDRESS$ -t 9 -u -c $ARG1$
# -a $ARG2$ $ARG3$ $ARG4$ and the last is $ARG5$.
# No arguments are provided - will be valued as empty strings
dummy_call = "command_with_args"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
# todo: Test problem is here!
# Whereas we should get:
assert macros_command == 'plugins/command -H 127.0.0.1 -t 9 -u -c -a and the last is .'
# We get:
# assert macros_command == '/var/lib/shinken/libexec/command -H 127.0.0.1 -t 9 -u -c -a and the last is .'
# Outside the test env, everything is ok! Some tests executed beforehand leave
# the macro without its correct value!
# Extra arguments are provided - will be ignored
dummy_call = "command_with_args!arg_1!arg_2!arg_3!arg_4!arg_5!extra argument"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert macros_command == 'plugins/command -H 127.0.0.1 -t 9 -u -c arg_1 ' \
'-a arg_2 arg_3 arg_4 and the last is arg_5.'
# All arguments are provided
dummy_call = "command_with_args!arg_1!arg_2!arg_3!arg_4!arg_5"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert macros_command == 'plugins/command -H 127.0.0.1 -t 9 -u -c arg_1 ' \
'-a arg_2 arg_3 arg_4 and the last is arg_5.'
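# --- Editor's note: a hedged sketch of $ARGn$ handling as the asserts above
# --- suggest: the call is split on '!', $ARG1$..$ARGn$ map to the pieces,
# --- missing arguments become empty strings and extras are ignored. The
# --- final whitespace collapse is an assumption drawn from the expected
# --- outputs; this is not the actual MacroResolver code.
def sketch_resolve_args(command_line, dummy_call):
    args = dummy_call.split('!')[1:]   # piece 0 is the command name
    for i in range(1, 33):             # Nagios-style macros go up to $ARG32$
        value = args[i - 1] if i <= len(args) else ''
        command_line = command_line.replace('$ARG%d$' % i, value)
    # Collapse the double spaces left by empty arguments
    return ' '.join(command_line.split())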
def test_datetime_macros(self):
""" Test date / time macros: SHORTDATETIME, LONGDATETIME, DATE, TIME, ...
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
hst.state = 'UP'
# Long and short datetime
dummy_call = "special_macro!$LONGDATETIME$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
dummy_call = "special_macro!$SHORTDATETIME$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
dummy_call = "special_macro!$DATE$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
dummy_call = "special_macro!$TIME$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
dummy_call = "special_macro!$TIMET$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
# Do not check that the output of these macros is correct
# because there is no specific macro code for those functions ;)
# Process and event start time
dummy_call = "special_macro!$PROCESSSTARTTIME$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing n/a' == macros_command
dummy_call = "special_macro!$EVENTSTARTTIME$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing n/a' == macros_command
def test_summary_macros(self):
""" Test summary macros: TOTALHOSTSUP, TOTALHOSTDOWN, ...
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
hst.state = 'UP'
# Number of hosts UP / DOWN / UNREACHABLE
dummy_call = "special_macro!$TOTALHOSTSUP$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
# All 3 hosts are UP
assert 'plugins/nothing 3' == macros_command
dummy_call = "special_macro!$TOTALHOSTPROBLEMS$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
dummy_call = "special_macro!$TOTALHOSTPROBLEMSUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Now my host is DOWN and not yet handled
hst.state = 'DOWN'
hst.is_problem = True
hst.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALHOSTSDOWN$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
dummy_call = "special_macro!$TOTALHOSTSDOWNUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Now my host is DOWN but handled
hst.problem_has_been_acknowledged = True
dummy_call = "special_macro!$TOTALHOSTSDOWNUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Now my host is UNREACHABLE and not yet handled
hst.state = 'UNREACHABLE'
hst.is_problem = True
hst.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALHOSTSUNREACHABLE$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
dummy_call = "special_macro!$TOTALHOSTSUNREACHABLEUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Now my host is UNREACHABLE but handled
hst.problem_has_been_acknowledged = True
dummy_call = "special_macro!$TOTALHOSTSUNREACHABLEUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Now my host is DOWN and not yet handled
hst.state = 'DOWN'
hst.is_problem = True
hst.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALHOSTPROBLEMS$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
dummy_call = "special_macro!$TOTALHOSTPROBLEMSUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Now my host is UP and no more a problem
hst.state = 'UP'
hst.is_problem = False
hst.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALHOSTPROBLEMS$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
dummy_call = "special_macro!$TOTALHOSTPROBLEMSUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Number of services OK / WARNING / CRITICAL / UNKNOWN
dummy_call = "special_macro!$TOTALSERVICESOK$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 2' == macros_command
# Now my service is WARNING and not handled
svc.state = 'WARNING'
svc.is_problem = True
svc.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALSERVICESWARNING$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
dummy_call = "special_macro!$TOTALSERVICESWARNINGUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Now my service problem is handled
svc.problem_has_been_acknowledged = True
dummy_call = "special_macro!$TOTALSERVICESWARNINGUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Now my service is CRITICAL and not handled
svc.state = 'CRITICAL'
svc.is_problem = True
svc.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALSERVICESCRITICAL$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
dummy_call = "special_macro!$TOTALSERVICESCRITICALUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Now my service problem is handled
svc.problem_has_been_acknowledged = True
dummy_call = "special_macro!$TOTALSERVICESCRITICALUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Now my service is UNKNOWN and not handled
svc.state = 'UNKNOWN'
svc.is_problem = True
svc.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALSERVICESUNKNOWN$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
dummy_call = "special_macro!$TOTALSERVICESUNKNOWNUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Now my service problem is handled
svc.problem_has_been_acknowledged = True
dummy_call = "special_macro!$TOTALSERVICESUNKNOWNUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Now my service is WARNING and not handled
svc.state = 'WARNING'
svc.is_problem = True
svc.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALSERVICEPROBLEMS$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
dummy_call = "special_macro!$TOTALSERVICEPROBLEMSUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Now my service is OK and no more a problem
svc.state = 'OK'
svc.is_problem = False
svc.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALSERVICEPROBLEMS$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
dummy_call = "special_macro!$TOTALSERVICEPROBLEMSUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
def test_special_macros_realm(self):
"""
Call the resolver with a special macro HOSTREALM
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
hst.state = 'UP'
dummy_call = "special_macro!$HOSTREALM$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
# Macro resolves to the default realm (All)
assert 'plugins/nothing All' == macros_command
def test_escape_macro(self):
"""
Call the resolver with an empty macro ($$)
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
hst.state = 'UP'
dummy_call = "special_macro!$$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
# Not a macro: $$ is transformed into a literal $
assert 'plugins/nothing $' == macros_command
def test_unicode_macro(self):
"""
Call the resolver with a unicode content
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
hst.state = 'UP'
hst.output = u"На берегу пустынных волн"
dummy_call = "special_macro!$HOSTOUTPUT$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
# Output is correctly reproduced
assert u'plugins/nothing На берегу пустынных волн' == macros_command
hst.state = 'UP'
hst.output = 'Père Noël'
dummy_call = "special_macro!$HOSTOUTPUT$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
# Output is correctly reproduced
assert u'plugins/nothing Père Noël' == macros_command
def test_illegal_macro_output_chars(self):
""" Check output macros are cleaned from illegal macro characters
$HOSTOUTPUT$, $HOSTPERFDATA$, $HOSTACKAUTHOR$, $HOSTACKCOMMENT$,
$SERVICEOUTPUT$, $SERVICEPERFDATA$, $SERVICEACKAUTHOR$, $SERVICEACKCOMMENT$
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
illegal_macro_output_chars = \
self._scheduler.pushed_conf.illegal_macro_output_chars
print("Illegal macros caracters:", illegal_macro_output_chars)
hst.output = 'fake output'
dummy_call = "special_macro!$HOSTOUTPUT$"
for c in illegal_macro_output_chars:
hst.output = 'fake output' + c
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
print(macros_command)
assert 'plugins/nothing fake output' == macros_command
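# --- Editor's note: a hedged sketch of the cleaning behaviour asserted
# --- above: each configured illegal character is stripped from output-type
# --- macros before substitution (not the actual alignak code).
def sketch_clean_output(output, illegal_macro_output_chars):
    for char in illegal_macro_output_chars:
        output = output.replace(char, '')
    return output
# e.g. sketch_clean_output('fake output`', '`~$&|') -> 'fake output'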
def test_env_macros(self):
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
data.append(self._arbiter.conf)
# Macros exist in the environment with a prefix which defaults to ALIGNAK_,
# but this prefix may be overridden in the configuration
# assert self.mr.env_prefix == 'ALIGNAK_'
env = self.mr.get_env_macros(data)
assert env != {}
assert 'test_host_0' == env['%sHOSTNAME' % self.mr.env_prefix]
assert 0.0 == env['%sSERVICEPERCENTCHANGE' % self.mr.env_prefix]
assert 'custvalue' == env['%s_SERVICECUSTNAME' % self.mr.env_prefix]
assert 'gnulinux' == env['%s_HOSTOSTYPE' % self.mr.env_prefix]
assert '%sUSER1' % self.mr.env_prefix not in env
def test_resource_file(self):
"""
Test macros defined in configuration files
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
# $USER1$ macro is defined as 'plugins' in the configuration file
dummy_call = "special_macro!$USER1$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing plugins' == macros_command
# $PLUGINSDIR$ macro is defined as $USER1$ in the configuration file
dummy_call = "special_macro!$PLUGINSDIR$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing plugins' == macros_command
# $INTERESTINGVARIABLE$ macro is defined as 'interesting_value' in the configuration file
dummy_call = "special_macro!$INTERESTINGVARIABLE$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing interesting_value' == macros_command
# Look for multiple = in lines, should split the first
# and keep others in the macro value
dummy_call = "special_macro!$ANOTHERVALUE$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing first=second' == macros_command
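# --- Editor's note: a hedged sketch of the resource-line parsing matching
# --- the $ANOTHERVALUE$ assertion above: only the first '=' splits the
# --- line, so a value like 'first=second' survives intact.
def sketch_parse_resource_line(line):
    name, value = line.split('=', 1)
    return name.strip(), value.strip()
# e.g. sketch_parse_resource_line('$ANOTHERVALUE$=first=second')
# -> ('$ANOTHERVALUE$', 'first=second')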
def test_ondemand_macros(self):
"""Test on-demand macros
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
hst.state = 'UP'
svc.state = 'UNKNOWN'
# Get another service
svc2 = self._scheduler.pushed_conf.services.find_srv_by_name_and_hostname(
"test_host_0", "test_another_service"
)
svc2.output = 'you should not pass'
# Request a not existing macro
dummy_call = "special_macro!$HOSTXXX:test_host_0$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing' == macros_command
# Request a specific host state
dummy_call = "special_macro!$HOSTSTATE:test_host_0$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing UP' == macros_command
# Call with an empty host name, meaning: the current host
data = [hst]
dummy_call = "special_macro!$HOSTSTATE:$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing UP' == macros_command
# Now with a service, for our implicit host state
data = [hst, svc]
dummy_call = "special_macro!$HOSTSTATE:test_host_0$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing UP' == macros_command
# Now with a service, for our implicit host state (missing host ...)
data = [hst, svc]
dummy_call = "special_macro!$HOSTSTATE:$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing UP' == macros_command
# Now call this data from our previous service - get service state
data = [hst, svc2]
dummy_call = "special_macro!$SERVICESTATE:test_host_0:test_another_service$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing OK' == macros_command
# Now call this data from our previous service - get service output
data = [hst, svc2]
dummy_call = "special_macro!$SERVICEOUTPUT:test_host_0:test_another_service$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing you should not pass' == macros_command
# Ok, now with the implicit host form
svc2.output = 'you should not pass'
data = [hst, svc2]
dummy_call = "special_macro!$SERVICEOUTPUT::test_another_service$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing you should not pass' == macros_command
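# --- Editor's note: a hedged sketch of on-demand macro parsing as the calls
# --- above suggest: '$NAME:host$' or '$NAME:host:service$', where an empty
# --- host part falls back to the host in the current data context (this is
# --- not alignak's parser).
def sketch_parse_ondemand(token):
    parts = token.strip('$').split(':')      # e.g. '$HOSTSTATE:test_host_0$'
    name = parts[0]
    host = parts[1] if len(parts) > 1 and parts[1] else None   # None -> implicit host
    service = parts[2] if len(parts) > 2 else None
    return name, host, service
# e.g. sketch_parse_ondemand('$SERVICESTATE:test_host_0:test_another_service$')
# -> ('SERVICESTATE', 'test_host_0', 'test_another_service')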
def test_host_macros(self):
"""Test host macros
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
# First group name
dummy_call = "special_macro!$HOSTGROUPNAME$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert macros_command == 'plugins/nothing allhosts'
# All group names
dummy_call = "special_macro!$HOSTGROUPNAMES$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert macros_command == 'plugins/nothing allhosts,hostgroup_01,up'
# First group alias
dummy_call = "special_macro!$HOSTGROUPALIAS$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert macros_command == 'plugins/nothing All Hosts'
# All group aliases
dummy_call = "special_macro!$HOSTGROUPALIASES$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert macros_command == 'plugins/nothing All Hosts,All Up Hosts,hostgroup_alias_01'
def test_host_count_services_macros(self):
"""Test services count for an hostmacros
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
hst.state = 'UP'
# Get another service
svc2 = self._scheduler.pushed_conf.services.find_srv_by_name_and_hostname(
"test_host_0", "test_another_service"
)
svc2.output = 'you should not pass'
# Total
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICES$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 2' == macros_command
# Services states
svc.state_id = 0
svc.state = 'OK'
svc2.state_id = 1
svc2.state = 'WARNING'
# Ok
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESOK$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Warning
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESWARNING$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Critical
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESCRITICAL$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Unknown
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESUNKNOWN$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Unreachable
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESUNREACHABLE$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Change states
svc.state_id = 2
svc.state = 'CRITICAL'
svc2.state_id = 3
svc2.state = 'UNKNOWN'
# Ok
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESOK$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Warning
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESWARNING$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Critical
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESCRITICAL$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Unknown
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESUNKNOWN$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Unreachable
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESUNREACHABLE$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
def test_contact_custom_macros(self):
"""
Test on-demand macros with custom variables for contacts
:return:
"""
contact = self._scheduler.contacts.find_by_name("test_macro_contact")
data = [contact]
# Parse custom macro to get contact custom variables based upon a fixed value
# contact has a custom variable defined as _custom1 = value
dummy_call = "special_macro!$_CONTACTCUSTOM1$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing value' == macros_command
        # Parse custom macro to get contact custom variables based upon another macro
        # contact has a custom variable defined as _custom2 = $CONTACTNAME$
dummy_call = "special_macro!$_CONTACTCUSTOM2$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing test_macro_contact' == macros_command
def test_host_custom_macros(self):
"""
Test on-demand macros with custom variables for hosts
:return:
"""
hst = self._scheduler.hosts.find_by_name("test_macro_host")
# The host has custom variables, thus we may use them in a macro
        assert hst.customs != []
assert '_CUSTOM1' in hst.customs
assert '_CUSTOM2' in hst.customs
# Force declare an integer customs variable
hst.customs['_CUSTOM3'] = 10
        print(hst.customs)
data = [hst]
# Parse custom macro to get host custom variables based upon a fixed value
# host has a custom variable defined as _custom1 = value
dummy_call = "special_macro!$_HOSTCUSTOM1$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing value' == macros_command
# Parse custom macro to get host custom variables based upon another macro
# host has a custom variable defined as _custom2 = $HOSTNAME$
dummy_call = "special_macro!$_HOSTCUSTOM2$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing test_macro_host' == macros_command
        # Parse custom macro to get host custom variables based upon a forced integer value
        # host custom variable _CUSTOM3 was set to the integer 10 above
dummy_call = "special_macro!$_HOSTCUSTOM3$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
print("Command: %s" % macros_command)
assert 'plugins/nothing 10' == macros_command
def test_service_custom_macros(self):
"""
Test on-demand macros with custom variables for services
:return:
"""
(svc, hst) = self.get_hst_svc()
# Get the second service
svc2 = self._arbiter.conf.services.find_srv_by_name_and_hostname(
"test_host_0", "test_another_service"
)
data = [hst, svc2]
# Parse custom macro to get service custom variables based upon a fixed value
# special_macro is defined as: $USER1$/nothing $ARG1$
dummy_call = "special_macro!$_SERVICECUSTOM1$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing value' == macros_command
# Parse custom macro to get service custom variables based upon another macro
dummy_call = "special_macro!$_SERVICECUSTOM2$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing test_host_0' == macros_command
def test_hostadressX_macros(self):
"""
Host addresses macros
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
# Ok sample host call
dummy_call = "special_macro!$HOSTADDRESS$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 127.0.0.1' == macros_command
class TestMacroResolverWithEnv(MacroResolverTester, AlignakTest):
"""Test without enabled environment macros"""
def setUp(self):
super(TestMacroResolverWithEnv, self).setUp()
# Do not provide environment file to use the default one
self.setup_file = 'cfg/cfg_macroresolver.cfg'
self.setup_with_file(self.setup_file, dispatching=True)
assert self.conf_is_correct
# Get an initialized macro resolver object
self.mr = MacroResolver()
self.mr.init(self._scheduler.pushed_conf)
# Default prefix
assert self.mr.env_prefix == 'ALIGNAK_'
class TestMacroResolverWithoutEnv(MacroResolverTester, AlignakTest):
"""Test without enabled environment macros"""
def setUp(self):
super(TestMacroResolverWithoutEnv, self).setUp()
        # Provide a configuration file that overrides the default environment prefix
self.setup_file = 'cfg/cfg_macroresolver_environment.cfg'
self.setup_with_file(self.setup_file, dispatching=True)
assert self.conf_is_correct
# Get an initialized macro resolver object
self.mr = MacroResolver()
self.mr.init(self._scheduler.pushed_conf)
assert self.mr.env_prefix == 'NAGIOS_'
|
Alignak-monitoring/alignak
|
tests/test_macros_resolver.py
|
Python
|
agpl-3.0
| 51,044 | 0.005924 |
# Version 4.0
import csv
import sys
count = 10
offset = 0
if len(sys.argv) >= 3:
count = int(sys.argv[1])
offset = int(sys.argv[2]) - 1
start = offset * count
start = 1 if start == 0 else start  # column 0 is the key column, always kept
end = start + count
r = csv.reader(sys.stdin)
rows = []
i = 0
for l in r:
    rows.append(l[:1] + l[start:end])
    i = i + 1
# Emit output only when there was data beyond the header line.
if i > 1:
    csv.writer(sys.stdout).writerows(rows)
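# Usage sketch (file and page numbers are hypothetical): show page 2, ten
# columns per page, keeping the first (key) column on every page:
#   cat metrics.csv | python paginatefields.py 10 2
# With count=10 and offset=1 this selects columns 10-19 alongside column 0.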
|
nachiketmistry/splunk-app-pstack
|
bin/paginatefields.py
|
Python
|
mit
| 391 | 0.015345 |
# Copyright 2009 Shikhar Bhushan
# Copyright 2014 Leonidas Poulopoulos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import logging
from threading import Thread, Lock, Event
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
from ncclient.xml_ import *
from ncclient.capabilities import Capabilities
from ncclient.transport.errors import TransportError, SessionError, SessionCloseError
from ncclient.transport.notify import Notification
logger = logging.getLogger('ncclient.transport.session')
class Session(Thread):
"Base class for use by transport protocol implementations."
def __init__(self, capabilities):
Thread.__init__(self)
self.setDaemon(True)
self._listeners = set()
self._lock = Lock()
self.setName('session')
self._q = Queue()
self._notification_q = Queue()
self._client_capabilities = capabilities
self._server_capabilities = None # yet
self._id = None # session-id
self._timeout = None
self._connected = False # to be set/cleared by subclass implementation
logger.debug('%r created: client_capabilities=%r' %
(self, self._client_capabilities))
self._device_handler = None # Should be set by child class
def _dispatch_message(self, raw):
try:
root = parse_root(raw)
except Exception as e:
device_handled_raw=self._device_handler.handle_raw_dispatch(raw)
if isinstance(device_handled_raw, str):
root = parse_root(device_handled_raw)
elif isinstance(device_handled_raw, Exception):
self._dispatch_error(device_handled_raw)
return
else:
logger.error('error parsing dispatch message: %s' % e)
return
with self._lock:
listeners = list(self._listeners)
for l in listeners:
logger.debug('dispatching message to %r: %s' % (l, raw))
l.callback(root, raw) # no try-except; fail loudly if you must!
def _dispatch_error(self, err):
with self._lock:
listeners = list(self._listeners)
for l in listeners:
logger.debug('dispatching error to %r' % l)
try: # here we can be more considerate with catching exceptions
l.errback(err)
except Exception as e:
logger.warning('error dispatching to %r: %r' % (l, e))
def _post_connect(self):
"Greeting stuff"
init_event = Event()
error = [None] # so that err_cb can bind error[0]. just how it is.
# callbacks
def ok_cb(id, capabilities):
self._id = id
self._server_capabilities = capabilities
init_event.set()
def err_cb(err):
error[0] = err
init_event.set()
self.add_listener(NotificationHandler(self._notification_q))
listener = HelloHandler(ok_cb, err_cb)
self.add_listener(listener)
self.send(HelloHandler.build(self._client_capabilities, self._device_handler))
logger.debug('starting main loop')
self.start()
# we expect server's hello message
if not init_event.wait(self._timeout):
raise SessionCloseError("Session hello timeout")
# received hello message or an error happened
self.remove_listener(listener)
if error[0]:
raise error[0]
#if ':base:1.0' not in self.server_capabilities:
# raise MissingCapabilityError(':base:1.0')
logger.info('initialized: session-id=%s | server_capabilities=%s' %
(self._id, self._server_capabilities))
def add_listener(self, listener):
"""Register a listener that will be notified of incoming messages and
errors.
:type listener: :class:`SessionListener`
"""
logger.debug('installing listener %r' % listener)
if not isinstance(listener, SessionListener):
raise SessionError("Listener must be a SessionListener type")
with self._lock:
self._listeners.add(listener)
def remove_listener(self, listener):
"""Unregister some listener; ignore if the listener was never
registered.
:type listener: :class:`SessionListener`
"""
logger.debug('discarding listener %r' % listener)
with self._lock:
self._listeners.discard(listener)
def get_listener_instance(self, cls):
"""If a listener of the specified type is registered, returns the
instance.
:type cls: :class:`SessionListener`
"""
with self._lock:
for listener in self._listeners:
if isinstance(listener, cls):
return listener
def connect(self, *args, **kwds): # subclass implements
raise NotImplementedError
def run(self): # subclass implements
raise NotImplementedError
def send(self, message):
"""Send the supplied *message* (xml string) to NETCONF server."""
if not self.connected:
raise TransportError('Not connected to NETCONF server')
logger.debug('queueing %s' % message)
self._q.put(message)
def scp(self):
raise NotImplementedError
### Properties
def take_notification(self, block, timeout):
try:
return self._notification_q.get(block, timeout)
except Empty:
return None
@property
def connected(self):
"Connection status of the session."
return self._connected
@property
def client_capabilities(self):
"Client's :class:`Capabilities`"
return self._client_capabilities
@property
def server_capabilities(self):
"Server's :class:`Capabilities`"
return self._server_capabilities
@property
def id(self):
"""A string representing the `session-id`. If the session has not been initialized it will be `None`"""
return self._id
class SessionListener(object):
"""Base class for :class:`Session` listeners, which are notified when a new
NETCONF message is received or an error occurs.
.. note::
Avoid time-intensive tasks in a callback's context.
"""
def callback(self, root, raw):
"""Called when a new XML document is received. The *root* argument allows the callback to determine whether it wants to further process the document.
Here, *root* is a tuple of *(tag, attributes)* where *tag* is the qualified name of the root element and *attributes* is a dictionary of its attributes (also qualified names).
*raw* will contain the XML document as a string.
"""
raise NotImplementedError
def errback(self, ex):
"""Called when an error occurs.
:type ex: :exc:`Exception`
"""
raise NotImplementedError
class HelloHandler(SessionListener):
def __init__(self, init_cb, error_cb):
self._init_cb = init_cb
self._error_cb = error_cb
def callback(self, root, raw):
tag, attrs = root
if (tag == qualify("hello")) or (tag == "hello"):
try:
id, capabilities = HelloHandler.parse(raw)
except Exception as e:
self._error_cb(e)
else:
self._init_cb(id, capabilities)
def errback(self, err):
self._error_cb(err)
@staticmethod
def build(capabilities, device_handler):
"Given a list of capability URI's returns <hello> message XML string"
if device_handler:
# This is used as kwargs dictionary for lxml's Element() function.
# Therefore the arg-name ("nsmap") is used as key here.
xml_namespace_kwargs = { "nsmap" : device_handler.get_xml_base_namespace_dict() }
else:
xml_namespace_kwargs = {}
hello = new_ele("hello", **xml_namespace_kwargs)
caps = sub_ele(hello, "capabilities")
def fun(uri): sub_ele(caps, "capability").text = uri
#python3 changes
if sys.version < '3':
map(fun, capabilities)
else:
list(map(fun, capabilities))
return to_xml(hello)
@staticmethod
def parse(raw):
"Returns tuple of (session-id (str), capabilities (Capabilities)"
sid, capabilities = 0, []
root = to_ele(raw)
for child in root.getchildren():
if child.tag == qualify("session-id") or child.tag == "session-id":
sid = child.text
elif child.tag == qualify("capabilities") or child.tag == "capabilities" :
for cap in child.getchildren():
if cap.tag == qualify("capability") or cap.tag == "capability":
capabilities.append(cap.text)
return sid, Capabilities(capabilities)
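# A minimal round-trip sketch (left as comments so importing this module has
# no side effects): HelloHandler.build and HelloHandler.parse are inverses
# over the wire format. sid stays 0 below because a client hello carries no
# <session-id> element.
#
#   xml = HelloHandler.build(["urn:ietf:params:netconf:base:1.0"], None)
#   sid, caps = HelloHandler.parse(xml)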
class NotificationHandler(SessionListener):
def __init__(self, notification_q):
self._notification_q = notification_q
def callback(self, root, raw):
tag, _ = root
if tag == qualify('notification', NETCONF_NOTIFICATION_NS):
self._notification_q.put(Notification(raw))
def errback(self, _):
pass
|
OpenClovis/ncclient
|
ncclient/transport/session.py
|
Python
|
apache-2.0
| 9,854 | 0.003146 |
from .__about__ import __version__
from .portworx import PortworxCheck
__all__ = ['__version__', 'PortworxCheck']
|
DataDog/integrations-extras
|
portworx/datadog_checks/portworx/__init__.py
|
Python
|
bsd-3-clause
| 115 | 0 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from zeeko._build_helpers import get_utils_extension_args, get_zmq_extension_args, _generate_cython_extensions, pxd, get_package_data
from astropy_helpers import setup_helpers
utilities = [pxd("..utils.rc"),
pxd("..utils.msg"),
pxd("..utils.pthread"),
pxd("..utils.lock"),
pxd("..utils.condition"),
pxd("..utils.clock")]
base = [ pxd("..cyloop.throttle"), pxd("..cyloop.statemachine"), pxd(".snail"), pxd(".base")]
dependencies = {
'base' : utilities + [ pxd("..cyloop.throttle") ],
'snail' : utilities + [ pxd("..cyloop.throttle"), pxd("..cyloop.statemachine") ],
'client' : utilities + base + [ pxd("..messages.receiver") ],
'server' : utilities + base + [ pxd("..messages.publisher") ],
}
def get_extensions(**kwargs):
"""Get the Cython extensions"""
extension_args = setup_helpers.DistutilsExtensionArgs()
extension_args.update(get_utils_extension_args())
extension_args.update(get_zmq_extension_args())
extension_args['include_dirs'].append('numpy')
package_name = __name__.split(".")[:-1]
extensions = [e for e in _generate_cython_extensions(extension_args, os.path.dirname(__file__), package_name)]
for extension in extensions:
name = extension.name.split(".")[-1]
if name in dependencies:
extension.depends.extend(dependencies[name])
return extensions
|
alexrudy/Zeeko
|
zeeko/handlers/setup_package.py
|
Python
|
bsd-3-clause
| 1,501 | 0.016656 |
#!/usr/bin/env python
# Encoding: utf-8
# -----------------------------------------------------------------------------
# Project : Broken Promises
# -----------------------------------------------------------------------------
# Author : Edouard Richard <edou4rd@gmail.com>
# -----------------------------------------------------------------------------
# License : GNU General Public License
# -----------------------------------------------------------------------------
# Creation : 28-Oct-2013
# Last mod : 27-Nov-2013
# -----------------------------------------------------------------------------
# This file is part of Broken Promises.
#
# Broken Promises is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Broken Promises is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Broken Promises. If not, see <http://www.gnu.org/licenses/>.
from brokenpromises.operations import CollectArticles
from bson.json_util import dumps
import optparse
import brokenpromises.channels
import sys
import reporter
reporter.REPORTER.register(reporter.StderrReporter())
debug, trace, info, warning, error, fatal = reporter.bind("script_collect_articles")
oparser = optparse.OptionParser(usage ="\n./%prog [options] year \n./%prog [options] year month\n./%prog [options] year month day")
# oparser.add_option("-C", "--nocache", action="store_true", dest="nocache",
# help = "Prevents from using the cache", default=False)
oparser.add_option("-f", "--channelslistfile", action="store", dest="channels_file",
help = "Use this that as channels list to use", default=None)
oparser.add_option("-c", "--channels", action="store", dest="channels_list",
help = "channels list comma separated", default=None)
oparser.add_option("-s", "--storage", action="store_true", dest="storage",
help = "Save the result with the default storage", default=False)
oparser.add_option("-d", "--drop", action="store_true", dest="mongodb_drop",
help = "drop the previous articles from database before", default=False)
oparser.add_option("--force", action="store_true", dest="force_collect",
help = "Force the scrap. If --storage is enable, the scrap could be escape b/c of a previous similar scrap", default=False)
oparser.add_option("-o", "--output", action="store", dest="output_file",
help = "Specify a file to write the export to. If you do not specify a file name, the program writes data to standard output (e.g. stdout)", default=None)
# Think to update the README.md file after modifying the options
options, args = oparser.parse_args()
assert len(args) > 0 and len(args) <= 3
if options.output_file:
sys.stdout = open(options.output_file, 'a')
channels = brokenpromises.channels.get_available_channels()
if options.channels_file:
with open(options.channels_file) as f:
channels = [line.replace("\n", "") for line in f.readlines()]
if options.channels_list:
channels = options.channels_list.split(",")
collector = CollectArticles(channels, *args, use_storage=options.storage, force_collect=options.force_collect)
if options.mongodb_drop:
collector.storage.get_database().drop_collection("articles")
collector.storage.get_database().drop_collection("reports")
results = collector.run()
# OUTPUT
print dumps([_.__dict__ for _ in results]).encode('utf-8')
info("%d articles collected." % (len(results)))
exit()
# EOF
|
jplusplus/broken-promises
|
Scripts/collect_articles.py
|
Python
|
gpl-3.0
| 3,847 | 0.011178 |
#!/usr/bin/env python
# -#- coding: utf-8 -#-
#
# contract/core/api.py - functions which simplify contract package feature access
#
# This file is part of OndALear collection of open source components
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Copyright (C) 2008 Amnon Janiv <amnon.janiv@ondalear.com>
#
# Initial version: 2008-02-01
# Author: Amnon Janiv <amnon.janiv@ondalear.com>
"""
.. module:: contract.core.api
:synopsis: Contract core simplified feature access module
Set of functions which simplify access to contract.core features.
.. moduleauthor:: Amnon Janiv <amnon.janiv@ondalear.com>
"""
__revision__ = '$Id: $'
__version__ = '0.0.1'
from contract.core.package import BusContractCorePackageDescriptor
import busxml.core.api
def parse_file(file_name):
"""Parse an xml file containing contract object graph
:param file_name: XML file name.
:type file_name: str.
:returns: BusinessContractWorkspace -- contract object graph container.
"""
package_desc = BusContractCorePackageDescriptor.get_instance()
root_obj = busxml.core.api.parse_file(file_name, package_desc)
return root_obj
def export_to_string(obj):
"""Export contract object graph to string
:param obj: Contract object graph container.
:type obj: BusinessContractWorkspace.
:returns: unicode -- xml string with underlying contract information
"""
package_desc = BusContractCorePackageDescriptor.get_instance()
buf = busxml.core.api.export_to_string(obj, package_desc)
return buf
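# A minimal usage sketch (the file name is hypothetical):
#
#   workspace = parse_file('contracts/sample_contract.xml')
#   xml_text = export_to_string(workspace)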
|
ajaniv/softwarebook
|
cpython/contract/core/api.py
|
Python
|
gpl-2.0
| 1,699 | 0.006474 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils.init_weights import init_weights, normalized_columns_initializer
from core.model import Model
class A3CMlpConModel(Model):
def __init__(self, args):
super(A3CMlpConModel, self).__init__(args)
# build model
# 0. feature layers
self.fc1 = nn.Linear(self.input_dims[0] * self.input_dims[1], self.hidden_dim) # NOTE: for pkg="gym"
self.rl1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden_dim, self.hidden_dim)
self.rl2 = nn.ReLU()
self.fc3 = nn.Linear(self.hidden_dim, self.hidden_dim)
self.rl3 = nn.ReLU()
self.fc4 = nn.Linear(self.hidden_dim, self.hidden_dim)
self.rl4 = nn.ReLU()
self.fc1_v = nn.Linear(self.input_dims[0] * self.input_dims[1], self.hidden_dim) # NOTE: for pkg="gym"
self.rl1_v = nn.ReLU()
self.fc2_v = nn.Linear(self.hidden_dim, self.hidden_dim)
self.rl2_v = nn.ReLU()
self.fc3_v = nn.Linear(self.hidden_dim, self.hidden_dim)
self.rl3_v = nn.ReLU()
self.fc4_v = nn.Linear(self.hidden_dim, self.hidden_dim)
self.rl4_v = nn.ReLU()
# lstm
if self.enable_lstm:
self.lstm = nn.LSTMCell(self.hidden_dim, self.hidden_dim)
self.lstm_v = nn.LSTMCell(self.hidden_dim, self.hidden_dim)
# 1. policy output
self.policy_5 = nn.Linear(self.hidden_dim, self.output_dims)
self.policy_sig = nn.Linear(self.hidden_dim, self.output_dims)
self.softplus = nn.Softplus()
# 2. value output
self.value_5 = nn.Linear(self.hidden_dim, 1)
self._reset()
def _init_weights(self):
self.apply(init_weights)
self.fc1.weight.data = normalized_columns_initializer(self.fc1.weight.data, 0.01)
self.fc1.bias.data.fill_(0)
self.fc2.weight.data = normalized_columns_initializer(self.fc2.weight.data, 0.01)
self.fc2.bias.data.fill_(0)
self.fc3.weight.data = normalized_columns_initializer(self.fc3.weight.data, 0.01)
self.fc3.bias.data.fill_(0)
self.fc4.weight.data = normalized_columns_initializer(self.fc4.weight.data, 0.01)
self.fc4.bias.data.fill_(0)
self.fc1_v.weight.data = normalized_columns_initializer(self.fc1_v.weight.data, 0.01)
self.fc1_v.bias.data.fill_(0)
self.fc2_v.weight.data = normalized_columns_initializer(self.fc2_v.weight.data, 0.01)
self.fc2_v.bias.data.fill_(0)
self.fc3_v.weight.data = normalized_columns_initializer(self.fc3_v.weight.data, 0.01)
self.fc3_v.bias.data.fill_(0)
self.fc4_v.weight.data = normalized_columns_initializer(self.fc4_v.weight.data, 0.01)
self.fc4_v.bias.data.fill_(0)
self.policy_5.weight.data = normalized_columns_initializer(self.policy_5.weight.data, 0.01)
self.policy_5.bias.data.fill_(0)
self.value_5.weight.data = normalized_columns_initializer(self.value_5.weight.data, 1.0)
self.value_5.bias.data.fill_(0)
self.lstm.bias_ih.data.fill_(0)
self.lstm.bias_hh.data.fill_(0)
self.lstm_v.bias_ih.data.fill_(0)
self.lstm_v.bias_hh.data.fill_(0)
def forward(self, x, lstm_hidden_vb=None):
p = x.view(x.size(0), self.input_dims[0] * self.input_dims[1])
p = self.rl1(self.fc1(p))
p = self.rl2(self.fc2(p))
p = self.rl3(self.fc3(p))
p = self.rl4(self.fc4(p))
p = p.view(-1, self.hidden_dim)
if self.enable_lstm:
p_, v_ = torch.split(lstm_hidden_vb[0],1)
c_p, c_v = torch.split(lstm_hidden_vb[1],1)
p, c_p = self.lstm(p, (p_, c_p))
p_out = self.policy_5(p)
sig = self.policy_sig(p)
sig = self.softplus(sig)
v = x.view(x.size(0), self.input_dims[0] * self.input_dims[1])
v = self.rl1_v(self.fc1_v(v))
v = self.rl2_v(self.fc2_v(v))
v = self.rl3_v(self.fc3_v(v))
v = self.rl4_v(self.fc4_v(v))
v = v.view(-1, self.hidden_dim)
if self.enable_lstm:
v, c_v = self.lstm_v(v, (v_, c_v))
v_out = self.value_5(v)
if self.enable_lstm:
return p_out, sig, v_out, (torch.cat((p,v),0), torch.cat((c_p, c_v),0))
else:
return p_out, sig, v_out
|
lukashermann/pytorch-rl
|
core/models/a3c_mlp_con.py
|
Python
|
mit
| 4,501 | 0.005776 |
n = int(input())
s = input()
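# Greedily rebuild the largest hexadecimal literal ("0x" followed by the
# counted digit characters in descending order) that the first n characters
# of s allow; a literal needs at least one 'x' and one '0' for its prefix.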
letterlist = ['x', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
letter = {}
for l in letterlist:
letter[l] = 0
for i in range(n):
if s[i:i+1] in letterlist:
letter[s[i:i+1]] += 1
if letter['x'] > 0 and letter['0'] > 0:
letter['0'] -= 1
del letter['x']
del letterlist[0]
res = '0x'
if any(c > 0 for c in letter.values()):
letterlist.reverse()
for l in letterlist:
res += l*letter[l]
print(res)
else:
print('No')
|
dluschan/olymp
|
lomonosov/num16.py
|
Python
|
mit
| 550 | 0.005455 |
from django.apps import AppConfig
class TorrentsConfig(AppConfig):
name = 'torrents'
|
ReanGD/web-home-manage
|
backend/torrents/apps.py
|
Python
|
apache-2.0
| 91 | 0 |
import sys
import json
def make_column_to_candidate_dict(header_row):
my_dict = {}
for colIndex, candidate in enumerate(header_row):
my_dict[colIndex] = candidate.strip()
return my_dict
def return_candidates_in_order(row, col_to_candidate_dict):
ballot = []
for i in range(0,len(row)):
ballot.append([])
for colIndex, rank in enumerate(row):
candidate = col_to_candidate_dict[colIndex]
int_rank = int(rank)
ballot[int_rank-1].append(candidate)
ballot = filter(lambda x: len(x) > 0, ballot)
return ballot
def split_line(line):
return line.split('\t')
def convert_csv(filename):
return convert_csv_to_php(filename)
def convert_csv_to_json(filename):
ballot_arrays = get_ballot_arrays(filename)
objects = []
for ballot_array in ballot_arrays:
        ballot_object = {'count': 1, 'values': ballot_array}
        objects.append(ballot_object)  # this append was missing: ballots were built but never collected
    print(json.dumps(objects))
def convert_csv_to_php(filename):
class_text = ''
with open('TestScenarioHeader.php.fragment', 'r') as class_header:
class_text += class_header.read()
ballot_arrays = get_ballot_arrays(filename)
class_text += generate_php(ballot_arrays)
with open('TestScenarioFooter.php.fragment', 'r') as class_footer:
class_text += class_footer.read().rstrip()
print class_text
def generate_php(ballot_arrays):
ballots = []
for ballot in ballot_arrays:
ballots.append(generate_one_ballot_php(ballot))
return ' return [\n' + ',\n'.join(ballots) + '\n ];\n'
def generate_one_ballot_php(ballot):
php = ' new NBallot(\n 1,\n'
candidate_lists = []
for group in ballot:
candidate_list = ' new CandidateList(\n'
candidates = []
for candidate in group:
candidates.append(' new Candidate("' + candidate + '")')
candidate_list += ',\n'.join(candidates)
candidate_list += '\n )'
candidate_lists.append(candidate_list)
php += ',\n'.join(candidate_lists)
php += '\n )'
return php
def get_ballot_arrays(filename):
ballots = []
header = True
ids = False
with open(filename, 'r') as csv:
for line in csv.readlines():
row = split_line(line)
if header:
header = False
ids = True
elif ids:
col_to_candidate_dict = make_column_to_candidate_dict(row)
ids = False
else:
ballot = return_candidates_in_order(row, col_to_candidate_dict)
##print ballot
ballots.append(ballot)
return ballots
if __name__ == '__main__':
convert_csv(sys.argv[1])
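# Usage sketch (file name hypothetical): python parse_ballot.py ballots.tsv
# Input is tab-separated: row 1 is a header, row 2 maps each column to a
# candidate name, and every later row holds one voter's rank per column.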
|
pivot-libre/tideman
|
tests/parse_ballot.py
|
Python
|
apache-2.0
| 2,798 | 0.006076 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/tatooine/shared_pillar_pristine_small_style_01.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/static/structure/tatooine/shared_pillar_pristine_small_style_01.py
|
Python
|
mit
| 469 | 0.046908 |
#!/usr/bin/env python
#coding:utf-8
from gi.repository import Gtk
class ButtonWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title='Button Demo')
self.set_border_width(10)
hbox = Gtk.Box(spacing=6)
self.add(hbox)
button = Gtk.Button('Click Me')
button.connect('clicked', self.on_click_me_clicked)
hbox.pack_start(button, True, True, 0)
button = Gtk.Button(stock=Gtk.STOCK_OPEN)
button.connect('clicked', self.on_open_clicked)
hbox.pack_start(button, True, True, 0)
button = Gtk.Button('_Close', use_underline=True)
button.connect('clicked', self.on_close_clicked)
hbox.pack_start(button, True, True, 0)
def on_click_me_clicked(self, button):
print '"click me" button was clicked'
def on_open_clicked(self, button):
print '"open" button was clicked'
def on_close_clicked(self, button):
print 'Closing application'
Gtk.main_quit()
wind = ButtonWindow()
wind.connect('delete-event', Gtk.main_quit)
wind.show_all()
Gtk.main()
|
qytz/qytz-notes
|
source/tech/PyGObject-Tutorial/examples/button_example.py
|
Python
|
mit
| 1,105 | 0.00362 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import gtk
import mock
from stoqlib.database.runtime import get_current_branch
from stoqlib.domain.transfer import TransferOrder
from stoqlib.gui.test.uitestutils import GUITest
from stoqlib.gui.wizards.stocktransferwizard import StockTransferWizard
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class TestStockTransferWizard(GUITest):
@mock.patch('stoqlib.gui.wizards.stocktransferwizard.print_report')
@mock.patch('stoqlib.gui.wizards.stocktransferwizard.yesno')
def test_create(self, yesno, print_report):
sellable = self.create_sellable(description=u"Product to transfer")
self.create_storable(sellable.product, get_current_branch(self.store),
stock=10)
wizard = StockTransferWizard(self.store)
self.assertNotSensitive(wizard, ['next_button'])
self.check_wizard(wizard, 'wizard-stock-transfer-create')
step = wizard.get_current_step()
step.destination_branch.set_active(0)
self.assertSensitive(wizard, ['next_button'])
self.click(wizard.next_button)
step = wizard.get_current_step()
# adds sellable to step
step.sellable_selected(sellable)
step._add_sellable()
self.check_wizard(wizard, 'wizard-stock-transfer-products')
module = 'stoqlib.gui.events.StockTransferWizardFinishEvent.emit'
with mock.patch(module) as emit:
with mock.patch.object(self.store, 'commit'):
self.click(wizard.next_button)
self.assertEquals(emit.call_count, 1)
args, kwargs = emit.call_args
self.assertTrue(isinstance(args[0], TransferOrder))
yesno.assert_called_once_with(
_('Would you like to print a receipt for this transfer?'),
gtk.RESPONSE_YES, 'Print receipt', "Don't print")
self.assertEquals(print_report.call_count, 1)
|
andrebellafronte/stoq
|
stoqlib/gui/test/test_stocktransferwizard.py
|
Python
|
gpl-2.0
| 2,810 | 0.004982 |
""" Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse) """
# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import BlankLine, syms, token
class FixItertoolsImports(fixer_base.BaseFix):
PATTERN = """
import_from< 'from' 'itertools' 'import' imports=any >
""" %(locals())
def transform(self, node, results):
imports = results['imports']
if imports.type == syms.import_as_name or not imports.children:
children = [imports]
else:
children = imports.children
for child in children[::2]:
if child.type == token.NAME:
member = child.value
name_node = child
else:
assert child.type == syms.import_as_name
name_node = child.children[0]
member_name = name_node.value
if member_name in (u'imap', u'izip', u'ifilter'):
child.value = None
child.remove()
elif member_name == u'ifilterfalse':
node.changed()
name_node.value = u'filterfalse'
# Make sure the import statement is still sane
children = imports.children[:] or [imports]
remove_comma = True
for child in children:
if remove_comma and child.type == token.COMMA:
child.remove()
else:
remove_comma ^= True
if children[-1].type == token.COMMA:
children[-1].remove()
# If there are no imports left, just get rid of the entire statement
if not (imports.children or getattr(imports, 'value', None)) or \
imports.parent is None:
p = node.prefix
node = BlankLine()
node.prefix = p
return node
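# Illustrative input -> output pairs for this fixer (comments only):
#   from itertools import imap, izip, count  ->  from itertools import count
#   from itertools import ifilterfalse       ->  from itertools import filterfalse
#   from itertools import imap               ->  (whole statement removed)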
|
2ndy/RaspIM
|
usr/lib/python2.6/lib2to3/fixes/fix_itertools_imports.py
|
Python
|
gpl-2.0
| 1,840 | 0.000543 |
'''
Created on Jun 29, 2016
@author: Thomas Adriaan Hellinger
'''
import pytest
from roodestem.voting_systems.voting_system import Result
class TestResult:
def test_null_result_not_tolerated(self):
with pytest.raises(TypeError):
Result()
def test_passed_multiple_winners(self):
res = Result(winner=['a', 'b', 'c'], tied=['b','c'])
assert res == Result(tied=['a', 'b', 'c'])
def test_passed_all_losers(self):
res = Result(loser=['a', 'b', 'c'])
assert res == Result(tied=['a', 'b', 'c'])
def test_passed_all_winners(self):
res = Result(winner=['a', 'b', 'c'])
assert res == Result(tied=['a', 'b', 'c'])
|
brotherjack/RoodeStem
|
tests/test_voting_systems.py
|
Python
|
mit
| 709 | 0.008463 |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.status import HTTP_204_NO_CONTENT
from users.serializers import UserSerializer
from rest_framework.permissions import AllowAny
from django.contrib.auth import login, logout
from rest_framework.authentication import BaseAuthentication, SessionAuthentication
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.authtoken.serializers import AuthTokenSerializer
from users.authentication import CustomBaseAuthentication
class AuthLoginView(APIView):
authentication_classes = (CustomBaseAuthentication, SessionAuthentication)
def post(self, request):
login(request, request.user)
return Response(status=HTTP_204_NO_CONTENT)
class AuthLogoutView(APIView):
def delete(self, request):
logout(request)
return Response(status=HTTP_204_NO_CONTENT)
class UserRegisterView(APIView):
permission_classes = (AllowAny,)
authentication_classes = () # TODO: Remove
def post(self, request):
serializer = UserSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
|
KDD-OpenSource/fexum
|
users/views.py
|
Python
|
mit
| 1,286 | 0.001555 |
"""
The consumer's code.
It takes HTML from the queue and outputs the URIs found in it.
"""
import asyncio
import json
import logging
from typing import List
from urllib.parse import urljoin
import aioredis
from bs4 import BeautifulSoup
from . import app_cli, redis_queue
_log = logging.getLogger('url_extractor')
def _scrape_urls(html: str, base_url: str) -> List[str]:
"""Gets all valid links from a site and returns them as URIs (some links may be relative.
If the URIs scraped here would go back into the system to have more URIs scraped from their
HTML, we would need to filter out all those who are not HTTP or HTTPS.
Also, assuming that many consumers and many producers would be running at the same time,
connected to one Redis instance, we would need to cache normalized versions or visited URIs
without fragments (https://tools.ietf.org/html/rfc3986#section-3.5) so we don't fall into loops.
For example two sites referencing each other.
The cached entries could have time-to-live (Redis EXPIRE command), so we could refresh our
knowledge about a site eventually.
"""
soup = BeautifulSoup(html, 'html.parser')
href = 'href'
return [urljoin(base_url, link.get(href))
for link in soup.find_all('a') if link.has_attr(href)]
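def _already_seen(redis_client, url: str, ttl_seconds: int = 3600) -> bool:
    """A minimal sketch of the visited-URI cache described in the docstring
    above, shown with the synchronous redis-py client for clarity (the queue
    code in this module stays on aioredis); the key prefix and TTL are
    illustrative assumptions, not part of the original design.
    """
    from urllib.parse import urldefrag
    normalized, _fragment = urldefrag(url)  # drop the fragment per RFC 3986 section 3.5
    # SET ... NX EX atomically claims the key only if it is new, so a falsy
    # reply means this URI was already visited within the TTL window.
    return not redis_client.set('seen:' + normalized, 1, ex=ttl_seconds, nx=True)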
async def _scrape_urls_from_queued_html(redis_pool: aioredis.RedisPool):
_log.info('Processing HTML from queue...')
while True:
try:
html_payload = await redis_queue.pop(redis_pool)
_log.info('Processing HTML from URL %s', html_payload.url)
scraped_urls = _scrape_urls(html_payload.html, html_payload.url)
_log.info('Scraped URIs from URL %s', html_payload.url)
output_json = {html_payload.url: scraped_urls}
# flush for anyone who is watching the stream
print(json.dumps(output_json), flush=True)
except redis_queue.QueueEmptyError:
# wait for work to become available
await asyncio.sleep(1) # pragma: no cover
def main():
"""Run the URL extractor (the consumer).
"""
app_cli.setup_logging()
args_parser = app_cli.get_redis_args_parser(
'Start a worker that will get URL/HTML pairs from a Redis queue and for each of those '
'pairs output (on separate lines) a JSON in format {ORIGINATING_URL: [FOUND_URLS_LIST]}')
args = args_parser.parse_args()
loop = app_cli.get_event_loop()
_log.info('Creating a pool of connections to Redis at %s:%d.',
args.redis_host, args.redis_port)
# the pool won't be closed explicitly, since the process needs to be terminated to stop anyway
redis_pool = loop.run_until_complete(
aioredis.create_pool((args.redis_host, args.redis_port)))
loop.run_until_complete(_scrape_urls_from_queued_html(redis_pool))
if __name__ == '__main__':
main()
|
butla/experiments
|
aiohttp_redis_producer_consumer/txodds_code_test/url_extractor.py
|
Python
|
mit
| 2,896 | 0.003108 |
from math import log
def sort(a_list, base):
"""Sort the input list with the specified base, using Radix sort.
This implementation assumes that the input list does not contain negative
numbers. This algorithm is inspired from the Wikipedia implmentation of
Radix sort.
"""
passes = int(log(max(a_list), base) + 1)
items = a_list[:]
for digit_index in xrange(passes):
buckets = [[] for _ in xrange(base)] # Buckets for sorted sublists.
for item in items:
digit = _get_digit(item, base, digit_index)
buckets[digit].append(item)
items = []
for sublists in buckets:
items.extend(sublists)
return items
def _get_digit(number, base, digit_index):
return (number // base ** digit_index) % base
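if __name__ == '__main__':
    # Quick usage sketch (not part of the original module): base-10 radix
    # sort of a small list of non-negative integers.
    print(sort([170, 45, 75, 90, 802, 24, 2, 66], 10))
    # -> [2, 24, 45, 66, 75, 90, 170, 802]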
|
isubuz/zahlen
|
algorithms/sorting/radix_sort.py
|
Python
|
mit
| 805 | 0 |
#!/usr/bin/env python
"""
Framework to start a simulated vehicle and connect it to MAVProxy.
Peter Barker, April 2016
based on sim_vehicle.sh by Andrew Tridgell, October 2011
"""
from __future__ import print_function
import atexit
import getpass
import optparse
import os
import os.path
import re
import signal
import subprocess
import sys
import tempfile
import time
import shlex
# List of open terminal windows for macosx
windowID = []
class CompatError(Exception):
"""A custom exception class to hold state if we encounter the parse error we are looking for"""
def __init__(self, error, opts, rargs):
Exception.__init__(self, error)
self.opts = opts
self.rargs = rargs
class CompatOptionParser(optparse.OptionParser):
"""An option parser which emulates the behaviour of the old sim_vehicle.sh; if passed -C, the first argument not understood starts a list of arguments that are passed straight to mavproxy"""
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(self, *args, **kwargs)
def error(self, error):
"""Override default error handler called by optparse.OptionParser.parse_args when a parse error occurs; raise a detailed exception which can be caught"""
if error.find("no such option") != -1:
raise CompatError(error, self.values, self.rargs)
optparse.OptionParser.error(self, error)
def parse_args(self, args=None, values=None):
"""Wrap parse_args so we can catch the exception raised upon discovering the known parameter parsing error"""
try:
opts, args = optparse.OptionParser.parse_args(self)
except CompatError as e:
if not e.opts.sim_vehicle_sh_compatible:
print(e)
print("Perhaps you want --sim_vehicle_sh_compatible (-C)?")
sys.exit(1)
if e.opts.mavproxy_args:
print("--mavproxy-args not permitted in compat mode")
sys.exit(1)
args = []
opts = e.opts
mavproxy_args = [str(e)[16:]] # this trims "no such option" off
mavproxy_args.extend(e.rargs)
opts.ensure_value("mavproxy_args", " ".join(mavproxy_args))
return opts, args
def cygwin_pidof(proc_name):
""" Thanks to kata198 for this:
https://github.com/kata198/cygwin-ps-misc/blob/master/pidof
"""
pipe = subprocess.Popen("ps -ea | grep " + proc_name, shell=True, stdout=subprocess.PIPE)
output_lines = pipe.stdout.read().replace("\r", "").split("\n")
ret = pipe.wait()
pids = []
if ret != 0:
# No results
return []
for line in output_lines:
if not line:
continue
line_split = [item for item in line.split(' ') if item]
cmd = line_split[-1].split('/')[-1]
if cmd == proc_name:
try:
pid = int(line_split[0].strip())
            except ValueError:
pid = int(line_split[1].strip())
if pid not in pids:
pids.append(pid)
return pids
def under_cygwin():
"""Return if Cygwin binary exist"""
return os.path.exists("/usr/bin/cygstart")
def under_macos():
return sys.platform == 'darwin'
def kill_tasks_cygwin(victims):
"""Shell out to ps -ea to find processes to kill"""
for victim in list(victims):
pids = cygwin_pidof(victim)
# progress("pids for (%s): %s" % (victim,",".join([ str(p) for p in pids])))
for apid in pids:
os.kill(apid, signal.SIGKILL)
def kill_tasks_macos():
for window in windowID:
cmd = "osascript -e \'tell application \"Terminal\" to close (window(get index of window id %s))\'" % window
os.system(cmd)
def kill_tasks_psutil(victims):
"""Use the psutil module to kill tasks by name. Sadly, this module is not available on Windows, but when it is we should be able to *just* use this routine"""
import psutil
for proc in psutil.process_iter():
if proc.status == psutil.STATUS_ZOMBIE:
continue
if proc.name in victims:
proc.kill()
def kill_tasks_pkill(victims):
"""Shell out to pkill(1) to kill processed by name"""
for victim in victims: # pkill takes a single pattern, so iterate
cmd = ["pkill", victim]
run_cmd_blocking("pkill", cmd, quiet=True)
class BobException(Exception):
"""Handle Bob's Exceptions"""
pass
def kill_tasks():
"""Clean up stray processes by name. This is a somewhat shotgun approach"""
progress("Killing tasks")
try:
victim_names = {
'JSBSim',
'lt-JSBSim',
'ArduPlane.elf',
'ArduCopter.elf',
'APMrover2.elf',
'AntennaTracker.elf',
'JSBSIm.exe',
'MAVProxy.exe',
'runsim.py',
'AntennaTracker.elf',
}
for frame in _options_for_frame.keys():
if "waf_target" not in _options_for_frame[frame]:
continue
exe_name = os.path.basename(_options_for_frame[frame]["waf_target"])
victim_names.add(exe_name)
if under_cygwin():
return kill_tasks_cygwin(victim_names)
if under_macos():
return kill_tasks_macos()
try:
kill_tasks_psutil(victim_names)
except ImportError:
kill_tasks_pkill(victim_names)
except Exception as e:
progress("kill_tasks failed: {}".format(str(e)))
def check_jsbsim_version():
"""Assert that the JSBSim we will run is the one we expect to run"""
jsbsim_cmd = ["JSBSim", "--version"]
progress_cmd("Get JSBSim version", jsbsim_cmd)
try:
jsbsim_version = subprocess.Popen(jsbsim_cmd, stdout=subprocess.PIPE).communicate()[0]
except OSError:
jsbsim_version = '' # this value will trigger the ".index"
# check below and produce a reasonable
# error message
try:
jsbsim_version.index(b"ArduPilot")
except ValueError:
print(r"""
=========================================================
You need the latest ArduPilot version of JSBSim installed
and in your $PATH
Please get it from git://github.com/tridge/jsbsim.git
See
http://ardupilot.org/dev/docs/setting-up-sitl-on-linux.html
for more details
=========================================================
""")
sys.exit(1)
def progress(text):
"""Display sim_vehicle progress text"""
print("SIM_VEHICLE: " + text)
def find_autotest_dir():
"""Return path to autotest directory"""
return os.path.dirname(os.path.realpath(__file__))
def find_root_dir():
"""Return path to root directory"""
return os.path.realpath(os.path.join(find_autotest_dir(), '../..'))
"""
make_target: option passed to make to create binaries. Usually sitl, and "-debug" may be appended if -D is passed to sim_vehicle.py
default_params_filename: filename of default parameters file. Taken to be relative to autotest dir.
extra_mavlink_cmds: extra commands that will be passed to MAVProxy on startup
"""
_options_for_frame = {
"calibration": {
"extra_mavlink_cmds": "module load sitl_calibration;",
},
# COPTER
"+": {
"waf_target": "bin/arducopter-quad",
"default_params_filename": "default_params/copter.parm",
},
"quad": {
"model": "+",
"waf_target": "bin/arducopter-quad",
"default_params_filename": "default_params/copter.parm",
},
"X": {
"waf_target": "bin/arducopter-quad",
# this param set FRAME doesn't actually work because mavproxy
# won't set a parameter unless it knows of it, and the param fetch happens asynchronously
"default_params_filename": "default_params/copter.parm",
"extra_mavlink_cmds": "param fetch frame; param set FRAME 1;",
},
"hexa": {
"make_target": "sitl-hexa",
"waf_target": "bin/arducopter-hexa",
"default_params_filename": "default_params/copter.parm",
},
"octa-quad": {
"make_target": "sitl-octa-quad",
"waf_target": "bin/arducopter-octa-quad",
"default_params_filename": "default_params/copter.parm",
},
"octa": {
"make_target": "sitl-octa",
"waf_target": "bin/arducopter-octa",
"default_params_filename": "default_params/copter.parm",
},
"tri": {
"make_target": "sitl-tri",
"waf_target": "bin/arducopter-tri",
"default_params_filename": "default_params/copter-tri.parm",
},
"y6": {
"make_target": "sitl-y6",
"waf_target": "bin/arducopter-y6",
"default_params_filename": "default_params/copter-y6.parm",
},
# COPTER TYPES
"IrisRos": {
"waf_target": "bin/arducopter-quad",
"default_params_filename": "default_params/copter.parm",
},
"firefly": {
"waf_target": "bin/arducopter-firefly",
"default_params_filename": "default_params/firefly.parm",
},
# HELICOPTER
"heli": {
"make_target": "sitl-heli",
"waf_target": "bin/arducopter-heli",
"default_params_filename": "default_params/copter-heli.parm",
},
"heli-dual": {
"make_target": "sitl-heli-dual",
"waf_target": "bin/arducopter-coax", # is this correct? -pb201604301447
},
"heli-compound": {
"make_target": "sitl-heli-compound",
"waf_target": "bin/arducopter-coax", # is this correct? -pb201604301447
},
"singlecopter": {
"make_target": "sitl-single",
"waf_target": "bin/arducopter-single",
"default_params_filename": "default_params/copter-single.parm",
},
"coaxcopter": {
"make_target": "sitl-coax",
"waf_target": "bin/arducopter-coax",
"default_params_filename": "default_params/copter-coax.parm",
},
# PLANE
"quadplane-tilttri": {
"make_target": "sitl-tri",
"waf_target": "bin/arduplane-tri",
"default_params_filename": "default_params/quadplane-tilttri.parm",
},
"quadplane-tri": {
"make_target": "sitl-tri",
"waf_target": "bin/arduplane-tri",
"default_params_filename": "default_params/quadplane-tri.parm",
},
"quadplane": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/quadplane.parm",
},
"plane-elevon": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane-elevons.parm",
},
"plane-vtail": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane-vtail.parm",
},
"plane": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane.parm",
},
# ROVER
"rover": {
"waf_target": "bin/ardurover",
"default_params_filename": "default_params/rover.parm",
},
"rover-skid": {
"waf_target": "bin/ardurover",
"default_params_filename": "default_params/rover-skid.parm",
},
# SIM
"gazebo-iris": {
"waf_target": "bin/arducopter-quad",
"default_params_filename": "default_params/gazebo-iris.parm",
},
"gazebo-zephyr": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/gazebo-zephyr.parm",
},
"last_letter": {
"waf_target": "bin/arduplane",
},
"CRRCSim": {
"waf_target": "bin/arduplane",
},
"jsbsim": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane-jsbsim.parm",
},
}
_default_waf_target = {
"ArduPlane": "bin/arduplane",
"ArduCopter": "bin/arducopter-quad",
"APMrover2": "bin/ardurover",
"AntennaTracker": "bin/antennatracker",
}
def default_waf_target(vehicle):
"""Returns a waf target based on vehicle type, which is often determined by which directory the user is in"""
return _default_waf_target[vehicle]
def options_for_frame(frame, vehicle, opts):
"""Return informatiom about how to sitl for frame e.g. build-type==sitl"""
ret = None
if frame in _options_for_frame:
ret = _options_for_frame[frame]
else:
for p in ["octa", "tri", "y6", "firefly", "heli", "gazebo", "last_letter", "jsbsim", "quadplane", "plane-elevon", "plane-vtail", "plane"]:
if frame.startswith(p):
ret = _options_for_frame[p]
break
if ret is None:
if frame.endswith("-heli"):
ret = _options_for_frame["heli"]
if ret is None:
ret = {}
if "model" not in ret:
ret["model"] = frame
if "sitl-port" not in ret:
ret["sitl-port"] = True
if opts.model is not None:
ret["model"] = opts.model
if (ret["model"].find("xplane") != -1 or ret["model"].find("flightaxis") != -1):
ret["sitl-port"] = False
if "make_target" not in ret:
ret["make_target"] = "sitl"
if "waf_target" not in ret:
ret["waf_target"] = default_waf_target(vehicle)
if opts.build_target is not None:
ret["make_target"] = opts.build_target
ret["waf_target"] = opts.build_target
return ret
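# For example (values taken from the tables above, opts at its defaults):
# options_for_frame("octa-quad", "ArduCopter", opts) returns make_target
# "sitl-octa-quad", waf_target "bin/arducopter-octa-quad", model "octa-quad"
# and sitl-port True.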
def do_build_waf(opts, frame_options):
"""Build sitl using waf"""
progress("WAF build")
old_dir = os.getcwd()
root_dir = find_root_dir()
os.chdir(root_dir)
waf_light = os.path.join(root_dir, "modules/waf/waf-light")
cmd_configure = [waf_light, "configure", "--board", "sitl"]
if opts.debug:
cmd_configure.append("--debug")
pieces = [ shlex.split(x) for x in opts.waf_configure_args ]
for piece in pieces:
cmd_configure.extend(piece)
run_cmd_blocking("Configure waf", cmd_configure, check=True)
if opts.clean:
run_cmd_blocking("Building clean", [waf_light, "clean"])
cmd_build = [waf_light, "build", "--target", frame_options["waf_target"]]
if opts.jobs is not None:
cmd_build += ['-j', str(opts.jobs)]
pieces = [ shlex.split(x) for x in opts.waf_build_args ]
for piece in pieces:
cmd_build.extend(piece)
_, sts = run_cmd_blocking("Building", cmd_build)
if sts != 0: # build failed
if opts.rebuild_on_failure:
progress("Build failed; cleaning and rebuilding")
run_cmd_blocking("Building clean", [waf_light, "clean"])
_, sts = run_cmd_blocking("Building", cmd_build)
if sts != 0:
progress("Build failed")
sys.exit(1)
else:
progress("Build failed")
sys.exit(1)
os.chdir(old_dir)
def do_build(vehicledir, opts, frame_options):
"""Build build target (e.g. sitl) in directory vehicledir"""
if opts.build_system == 'waf':
return do_build_waf(opts, frame_options)
old_dir = os.getcwd()
os.chdir(vehicledir)
if opts.clean:
run_cmd_blocking("Building clean", ["make", "clean"])
build_target = frame_options["make_target"]
if opts.debug:
build_target += "-debug"
build_cmd = ["make", build_target]
if opts.jobs is not None:
build_cmd += ['-j', str(opts.jobs)]
_, sts = run_cmd_blocking("Building %s" % build_target, build_cmd)
if sts != 0:
progress("Build failed; cleaning and rebuilding")
run_cmd_blocking("Cleaning", ["make", "clean"])
_, sts = run_cmd_blocking("Building %s" % build_target, build_cmd)
if sts != 0:
progress("Build failed")
sys.exit(1)
os.chdir(old_dir)
def get_user_locations_path():
'''The user locations.txt file is located by default in
$XDG_CONFIG_DIR/ardupilot/locations.txt. If $XDG_CONFIG_DIR is
not defined, we look in $HOME/.config/ardupilot/locations.txt. If
$HOME is not defined, we look in ./.config/ardpupilot/locations.txt.'''
config_dir = os.environ.get(
'XDG_CONFIG_DIR',
os.path.join(os.environ.get('HOME', '.'), '.config'))
user_locations_path = os.path.join(
config_dir, 'ardupilot', 'locations.txt')
return user_locations_path
def find_location_by_name(autotest, locname):
"""Search locations.txt for locname, return GPS coords"""
locations_userpath = os.environ.get('ARDUPILOT_LOCATIONS',
get_user_locations_path())
locations_filepath = os.path.join(autotest, "locations.txt")
    comment_regex = re.compile(r"\s*#.*")
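    # Each non-comment line is NAME=location, where location is typically
    # "lat,lon,alt,heading", e.g. CMAC=-35.363261,149.165230,584,353
    # (illustrative entry; the parser itself only splits on '=').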
for path in [locations_userpath, locations_filepath]:
if not os.path.isfile(path):
continue
with open(path, 'r') as fd:
for line in fd:
line = re.sub(comment_regex, "", line)
line = line.rstrip("\n")
if len(line) == 0:
continue
(name, loc) = line.split("=")
if name == locname:
return loc
print("Failed to find location (%s)" % cmd_opts.location)
sys.exit(1)
def progress_cmd(what, cmd):
"""Print cmd in a way a user could cut-and-paste to get the same effect"""
progress(what)
shell_text = "%s" % (" ".join(['"%s"' % x for x in cmd]))
progress(shell_text)
def run_cmd_blocking(what, cmd, quiet=False, check=False, **kw):
if not quiet:
progress_cmd(what, cmd)
p = subprocess.Popen(cmd, **kw)
ret = os.waitpid(p.pid, 0)
_, sts = ret
if check and sts != 0:
progress("(%s) exited with code %d" % (what,sts,))
sys.exit(1)
return ret
def run_in_terminal_window(autotest, name, cmd):
"""Execute the run_in_terminal_window.sh command for cmd"""
global windowID
runme = [os.path.join(autotest, "run_in_terminal_window.sh"), name]
runme.extend(cmd)
progress_cmd("Run " + name, runme)
if under_macos():
# on MacOS record the window IDs so we can close them later
out = subprocess.Popen(runme, stdout=subprocess.PIPE).communicate()[0]
import re
p = re.compile('tab 1 of window id (.*)')
windowID.append(p.findall(out)[0])
else:
p = subprocess.Popen(runme)
tracker_uarta = None # blemish
def start_antenna_tracker(autotest, opts):
"""Compile and run the AntennaTracker, add tracker to mavproxy"""
global tracker_uarta
progress("Preparing antenna tracker")
tracker_home = find_location_by_name(find_autotest_dir(), opts.tracker_location)
vehicledir = os.path.join(autotest, "../../" + "AntennaTracker")
tracker_frame_options = {
"waf_target": _default_waf_target["AntennaTracker"],
}
do_build(vehicledir, opts, tracker_frame_options)
tracker_instance = 1
os.chdir(vehicledir)
tracker_uarta = "tcp:127.0.0.1:" + str(5760 + 10 * tracker_instance)
exe = os.path.join(vehicledir, "AntennaTracker.elf")
run_in_terminal_window(autotest, "AntennaTracker", ["nice", exe, "-I" + str(tracker_instance), "--model=tracker", "--home=" + tracker_home])
def start_vehicle(binary, autotest, opts, stuff, loc):
"""Run the ArduPilot binary"""
cmd_name = opts.vehicle
cmd = []
if opts.valgrind:
cmd_name += " (valgrind)"
cmd.append("valgrind")
if opts.gdb:
cmd_name += " (gdb)"
cmd.append("gdb")
gdb_commands_file = tempfile.NamedTemporaryFile(delete=False)
atexit.register(os.unlink, gdb_commands_file.name)
for breakpoint in opts.breakpoint:
gdb_commands_file.write("b %s\n" % (breakpoint,))
gdb_commands_file.write("r\n")
gdb_commands_file.close()
cmd.extend(["-x", gdb_commands_file.name])
cmd.append("--args")
if opts.strace:
cmd_name += " (strace)"
cmd.append("strace")
strace_options = ['-o', binary + '.strace', '-s', '8000', '-ttt']
cmd.extend(strace_options)
cmd.append(binary)
cmd.append("-S")
cmd.append("-I" + str(opts.instance))
cmd.extend(["--home", loc])
if opts.wipe_eeprom:
cmd.append("-w")
cmd.extend(["--model", stuff["model"]])
cmd.extend(["--speedup", str(opts.speedup)])
if opts.sitl_instance_args:
cmd.extend(opts.sitl_instance_args.split(" ")) # this could be a lot better..
if opts.mavlink_gimbal:
cmd.append("--gimbal")
if "default_params_filename" in stuff:
path = os.path.join(autotest, stuff["default_params_filename"])
progress("Using defaults from (%s)" % (path,))
cmd.extend(["--defaults", path])
run_in_terminal_window(autotest, cmd_name, cmd)
def start_mavproxy(opts, stuff):
"""Run mavproxy"""
# FIXME: would be nice to e.g. "mavproxy.mavproxy(....).run" rather than shelling out
extra_cmd = ""
cmd = []
if under_cygwin():
cmd.append("/usr/bin/cygstart")
cmd.append("-w")
cmd.append("/cygdrive/c/Program Files (x86)/MAVProxy/mavproxy.exe")
else:
cmd.append("mavproxy.py")
if opts.hil:
cmd.extend(["--load-module", "HIL"])
else:
cmd.extend(["--master", mavlink_port])
if stuff["sitl-port"]:
cmd.extend(["--sitl", simout_port])
# If running inside of a vagrant guest, then we probably want to forward our mavlink out to the containing host OS
ports = [p + 10 * cmd_opts.instance for p in [14550,14551]]
for port in ports:
if os.path.isfile("/ardupilot.vagrant"):
cmd.extend(["--out", "10.0.2.2:" + str(port)])
else:
cmd.extend(["--out", "127.0.0.1:" + str(port)])
if opts.tracker:
cmd.extend(["--load-module", "tracker"])
global tracker_uarta
# tracker_uarta is set when we start the tracker...
extra_cmd += "module load map; tracker set port %s; tracker start; tracker arm;" % (tracker_uarta,)
if opts.mavlink_gimbal:
cmd.extend(["--load-module", "gimbal"])
if "extra_mavlink_cmds" in stuff:
extra_cmd += " " + stuff["extra_mavlink_cmds"]
if opts.mavproxy_args:
cmd.extend(opts.mavproxy_args.split(" ")) # this could be a lot better..
# compatibility pass-through parameters (for those that don't want
# to use -C :-)
for out in opts.out:
cmd.extend(['--out', out])
if opts.map:
cmd.append('--map')
if opts.console:
cmd.append('--console')
if opts.aircraft is not None:
cmd.extend(['--aircraft', opts.aircraft])
if len(extra_cmd):
cmd.extend(['--cmd', extra_cmd])
local_mp_modules_dir = os.path.abspath(
os.path.join(__file__, '..', '..', 'mavproxy_modules'))
env = dict(os.environ)
env['PYTHONPATH'] = local_mp_modules_dir + os.pathsep + env.get('PYTHONPATH', '')
run_cmd_blocking("Run MavProxy", cmd, env=env)
progress("MAVProxy exitted")
# define and run parser
parser = CompatOptionParser("sim_vehicle.py",
epilog="eeprom.bin in the starting directory contains the parameters for your " \
"simulated vehicle. Always start from the same directory. It is "\
"recommended that you start in the main vehicle directory for the vehicle" \
"you are simulating, for example, start in the ArduPlane directory to " \
"simulate ArduPlane")
parser.add_option("-v", "--vehicle", type='string', default=None, help="vehicle type (ArduPlane, ArduCopter or APMrover2)")
parser.add_option("-f", "--frame", type='string', default=None, help="""set aircraft frame type
for copters can choose +, X, quad or octa
for planes can choose elevon or vtail""")
parser.add_option("-C", "--sim_vehicle_sh_compatible", action='store_true', default=False, help="be compatible with the way sim_vehicle.sh works; make this the first option")
parser.add_option("-H", "--hil", action='store_true', default=False, help="start HIL")
group_build = optparse.OptionGroup(parser, "Build options")
group_build.add_option("-N", "--no-rebuild", action='store_true', default=False, help="don't rebuild before starting ardupilot")
group_build.add_option("-D", "--debug", action='store_true', default=False, help="build with debugging")
group_build.add_option("-c", "--clean", action='store_true', default=False, help="do a make clean before building")
group_build.add_option("-j", "--jobs", default=None, type='int', help="number of processors to use during build (default for waf : number of processor, for make : 1)")
group_build.add_option("-b", "--build-target", default=None, type='string', help="override SITL build target")
group_build.add_option("-s", "--build-system", default="waf", type='choice', choices=["make", "waf"], help="build system to use")
group_build.add_option("", "--rebuild-on-failure", dest="rebuild_on_failure", action='store_true', default=False, help="if build fails, do not clean and rebuild")
group_build.add_option("", "--waf-configure-arg", action="append", dest="waf_configure_args", type="string", default=[], help="extra arguments to pass to waf in its configure step")
group_build.add_option("", "--waf-build-arg", action="append", dest="waf_build_args", type="string", default=[], help="extra arguments to pass to waf in its build step")
parser.add_option_group(group_build)
group_sim = optparse.OptionGroup(parser, "Simulation options")
group_sim.add_option("-I", "--instance", default=0, type='int', help="instance of simulator")
group_sim.add_option("-V", "--valgrind", action='store_true', default=False, help="enable valgrind for memory access checking (very slow!)")
group_sim.add_option("-T", "--tracker", action='store_true', default=False, help="start an antenna tracker instance")
group_sim.add_option("-A", "--sitl-instance-args", type='string', default=None, help="pass arguments to SITL instance")
# group_sim.add_option("-R", "--reverse-throttle", action='store_true', default=False, help="reverse throttle in plane")
group_sim.add_option("-G", "--gdb", action='store_true', default=False, help="use gdb for debugging ardupilot")
group_sim.add_option("-g", "--gdb-stopped", action='store_true', default=False, help="use gdb for debugging ardupilot (no auto-start)")
group_sim.add_option("-d", "--delay-start", default=0, type='float', help="delays the start of mavproxy by the number of seconds")
group_sim.add_option("-B", "--breakpoint", type='string', action="append", default=[], help="add a breakpoint at given location in debugger")
group_sim.add_option("-M", "--mavlink-gimbal", action='store_true', default=False, help="enable MAVLink gimbal")
group_sim.add_option("-L", "--location", type='string', default='CMAC', help="select start location from Tools/autotest/locations.txt")
group_sim.add_option("-l", "--custom-location", type='string', default=None, help="set custom start location")
group_sim.add_option("-S", "--speedup", default=1, type='int', help="set simulation speedup (1 for wall clock time)")
group_sim.add_option("-t", "--tracker-location", default='CMAC_PILOTSBOX', type='string', help="set antenna tracker start location")
group_sim.add_option("-w", "--wipe-eeprom", action='store_true', default=False, help="wipe EEPROM and reload parameters")
group_sim.add_option("-m", "--mavproxy-args", default=None, type='string', help="additional arguments to pass to mavproxy.py")
group_sim.add_option("", "--strace", action='store_true', default=False, help="strace the ArduPilot binary")
group_sim.add_option("", "--model", type='string', default=None, help="Override simulation model to use")
parser.add_option_group(group_sim)
# special-cased parameters for mavproxy, because some people's fingers
# have long memories, and they don't want to use -C :-)
group = optparse.OptionGroup(parser, "Compatibility MAVProxy options (consider using --mavproxy-args instead)")
group.add_option("", "--out", default=[], type='string', action="append", help="create an additional mavlink output")
group.add_option("", "--map", default=False, action='store_true', help="load map module on startup")
group.add_option("", "--console", default=False, action='store_true', help="load console module on startup")
group.add_option("", "--aircraft", default=None, help="store state and logs in named directory")
parser.add_option_group(group)
cmd_opts, cmd_args = parser.parse_args()
# clean up processes at exit:
atexit.register(kill_tasks)
progress("Start")
if cmd_opts.sim_vehicle_sh_compatible and cmd_opts.jobs is None:
cmd_opts.jobs = 1
# validate parameters
if cmd_opts.hil:
if cmd_opts.valgrind:
print("May not use valgrind with hil")
sys.exit(1)
if cmd_opts.gdb or cmd_opts.gdb_stopped:
print("May not use gdb with hil")
sys.exit(1)
if cmd_opts.strace:
print("May not use strace with hil")
sys.exit(1)
if cmd_opts.valgrind and (cmd_opts.gdb or cmd_opts.gdb_stopped):
print("May not use valgrind with gdb")
sys.exit(1)
if cmd_opts.strace and (cmd_opts.gdb or cmd_opts.gdb_stopped):
print("May not use strace with gdb")
sys.exit(1)
if cmd_opts.strace and cmd_opts.valgrind:
print("valgrind and strace almost certainly not a good idea")
# magically determine vehicle type (if required):
if cmd_opts.vehicle is None:
cwd = os.getcwd()
cmd_opts.vehicle = os.path.basename(cwd)
# determine a frame type if not specified:
default_frame_for_vehicle = {
"APMrover2": "rover",
"ArduPlane": "jsbsim",
"ArduCopter": "quad",
"AntennaTracker": "tracker",
}
if cmd_opts.vehicle not in default_frame_for_vehicle:
# try in parent directories, useful for having config in subdirectories
cwd = os.getcwd()
while cwd:
bname = os.path.basename(cwd)
if not bname:
break
if bname in default_frame_for_vehicle:
cmd_opts.vehicle = bname
break
cwd = os.path.dirname(cwd)
# try to validate vehicle
if cmd_opts.vehicle not in default_frame_for_vehicle:
progress("** Is (%s) really your vehicle type? Try -v VEHICLETYPE if not, or be in the e.g. ArduCopter subdirectory" % (cmd_opts.vehicle,))
# determine frame options (e.g. build type might be "sitl")
if cmd_opts.frame is None:
cmd_opts.frame = default_frame_for_vehicle[cmd_opts.vehicle]
# setup ports for this instance
mavlink_port = "tcp:127.0.0.1:" + str(5760 + 10 * cmd_opts.instance)
simout_port = "127.0.0.1:" + str(5501 + 10 * cmd_opts.instance)
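# Worked example of the formulas above: instance 0 listens on
# tcp:127.0.0.1:5760 with SITL output on 127.0.0.1:5501; instance 1 uses
# 5770 and 5511, and so on in steps of 10.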
frame_infos = options_for_frame(cmd_opts.frame, cmd_opts.vehicle, cmd_opts)
if frame_infos["model"] == "jsbsim":
check_jsbsim_version()
vehicle_dir = os.path.realpath(os.path.join(find_root_dir(), cmd_opts.vehicle))
if not os.path.exists(vehicle_dir):
print("vehicle directory (%s) does not exist" % (vehicle_dir,))
sys.exit(1)
if not cmd_opts.hil:
if cmd_opts.instance == 0:
kill_tasks()
if cmd_opts.tracker:
start_antenna_tracker(find_autotest_dir(), cmd_opts)
if cmd_opts.custom_location:
location = cmd_opts.custom_location
progress("Starting up at %s" % (location,))
else:
location = find_location_by_name(find_autotest_dir(), cmd_opts.location)
progress("Starting up at %s (%s)" % (location, cmd_opts.location))
if cmd_opts.hil:
# (unlikely)
run_in_terminal_window(find_autotest_dir(), "JSBSim", [os.path.join(find_autotest_dir(), "jsb_sim/runsim.py"), "--home", location, "--speedup=" + str(cmd_opts.speedup)])
else:
if not cmd_opts.no_rebuild: # i.e. we should rebuild
do_build(vehicle_dir, cmd_opts, frame_infos)
if cmd_opts.build_system == "waf":
if cmd_opts.debug:
binary_basedir = "build/sitl-debug"
else:
binary_basedir = "build/sitl"
vehicle_binary = os.path.join(find_root_dir(), binary_basedir, frame_infos["waf_target"])
else:
vehicle_binary = os.path.join(vehicle_dir, cmd_opts.vehicle + ".elf")
if not os.path.exists(vehicle_binary):
print("Vehicle binary (%s) does not exist" % (vehicle_binary,))
sys.exit(1)
start_vehicle(vehicle_binary, find_autotest_dir(), cmd_opts, frame_infos, location)
if cmd_opts.delay_start:
progress("Sleeping for %f seconds" % (cmd_opts.delay_start,))
time.sleep(float(cmd_opts.delay_start))
start_mavproxy(cmd_opts, frame_infos)
sys.exit(0)
|
bnsgeyer/Copter3_4
|
Tools/autotest/sim_vehicle.py
|
Python
|
gpl-3.0
| 32,299 | 0.002848 |
import urllib2
from lxml import etree
####################################################################
# API
####################################################################
class Scrape_Quora:
regexpNS = "http://exslt.org/regular-expressions"
PROFILE_BASE = 'https://www.quora.com/profile/'
@staticmethod
def _profile_tree(user_name):
# fetch the profile page and parse it; shared by the getters below
response = urllib2.urlopen(Scrape_Quora.PROFILE_BASE + user_name)
htmlparser = etree.HTMLParser()
return etree.parse(response, htmlparser)
@staticmethod
def _xpath_first(user_name, expression):
# evaluate an EXSLT-regex XPath against the profile page; first match wins
tree = Scrape_Quora._profile_tree(user_name)
return tree.xpath(expression, namespaces={'re': Scrape_Quora.regexpNS})[0]
@staticmethod
def _get_count(user_name, label):
# the profile sidebar lists counters as <a>label<span>count</span></a>
return Scrape_Quora._xpath_first(user_name, '//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/li/a[text()="%s"]/span/text()' % label)
@staticmethod
def get_name(user_name):
return Scrape_Quora._xpath_first(user_name, '//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/div/h1/span/text()')
@staticmethod
def get_url(user_name):
# geturl() reflects any redirect, so fetch without parsing
response = urllib2.urlopen(Scrape_Quora.PROFILE_BASE + user_name)
return response.geturl()
@staticmethod
def get_profile_picture_link(user_name):
return Scrape_Quora._xpath_first(user_name, '//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/div/img/@data-src')
@staticmethod
def get_no_of_questions(user_name):
return Scrape_Quora._get_count(user_name, 'Questions')
@staticmethod
def get_no_of_answers(user_name):
return Scrape_Quora._get_count(user_name, 'Answers')
@staticmethod
def get_no_of_followers(user_name):
# Quora's markup includes a trailing space in this label
return Scrape_Quora._get_count(user_name, 'Followers ')
@staticmethod
def get_no_of_following(user_name):
return Scrape_Quora._get_count(user_name, 'Following ')
@staticmethod
def get_no_of_edits(user_name):
return Scrape_Quora._get_count(user_name, 'Edits')
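# Illustrative usage (requires network access, and assumes Quora's markup
# still matches the XPath expressions above; 'Some-User' is a placeholder):
#   print Scrape_Quora.get_name('Some-User')
#   print Scrape_Quora.get_no_of_answers('Some-User')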
|
hansika/pyquora
|
scrape_quora/pyquora.py
|
Python
|
apache-2.0
| 3,445 | 0.004354 |
from .group_analysis import create_fsl_flame_wf, get_operation
__all__ = ['create_fsl_flame_wf', 'get_operation']
|
FCP-INDI/C-PAC
|
CPAC/group_analysis/__init__.py
|
Python
|
bsd-3-clause
| 158 | 0.006329 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'OperatingSystem', fields ['item_id', 'document']
db.delete_unique('wiki_operatingsystem', ['item_id', 'document_id'])
# Removing unique constraint on 'FirefoxVersion', fields ['item_id', 'document']
db.delete_unique('wiki_firefoxversion', ['item_id', 'document_id'])
# Deleting model 'FirefoxVersion'
db.delete_table('wiki_firefoxversion')
# Deleting model 'OperatingSystem'
db.delete_table('wiki_operatingsystem')
# Deleting field 'Revision.significance'
db.delete_column('wiki_revision', 'significance')
def backwards(self, orm):
# Adding model 'FirefoxVersion'
db.create_table('wiki_firefoxversion', (
('item_id', self.gf('django.db.models.fields.IntegerField')()),
('document', self.gf('django.db.models.fields.related.ForeignKey')(related_name='firefox_version_set', to=orm['wiki.Document'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('wiki', ['FirefoxVersion'])
# Adding unique constraint on 'FirefoxVersion', fields ['item_id', 'document']
db.create_unique('wiki_firefoxversion', ['item_id', 'document_id'])
# Adding model 'OperatingSystem'
db.create_table('wiki_operatingsystem', (
('item_id', self.gf('django.db.models.fields.IntegerField')()),
('document', self.gf('django.db.models.fields.related.ForeignKey')(related_name='operating_system_set', to=orm['wiki.Document'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('wiki', ['OperatingSystem'])
# Adding unique constraint on 'OperatingSystem', fields ['item_id', 'document']
db.create_unique('wiki_operatingsystem', ['item_id', 'document_id'])
# Adding field 'Revision.significance'
db.add_column('wiki_revision', 'significance',
self.gf('django.db.models.fields.IntegerField')(null=True),
keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teamwork.team': {
'Meta': {'object_name': 'Team'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'tidings.watch': {
'Meta': {'object_name': 'Watch'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'attachments.attachment': {
'Meta': {'object_name': 'Attachment'},
'current_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'current_rev'", 'null': 'True', 'to': "orm['attachments.AttachmentRevision']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mindtouch_attachment_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'attachments.attachmentrevision': {
'Meta': {'object_name': 'AttachmentRevision'},
'attachment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['attachments.Attachment']"}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_attachment_revisions'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '500'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_mindtouch_migration': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'mindtouch_old_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'})
},
'wiki.document': {
'Meta': {'unique_together': "(('parent', 'locale'), ('slug', 'locale'))", 'object_name': 'Document'},
'body_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'current_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'current_for+'", 'null': 'True', 'to': "orm['wiki.Revision']"}),
'defer_rendering': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'files': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['attachments.Attachment']", 'through': "orm['wiki.DocumentAttachment']", 'symmetrical': 'False'}),
'html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_localizable': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_redirect': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_template': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'json': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'last_rendered_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'locale': ('kuma.core.fields.LocaleField', [], {'default': "'en-US'", 'max_length': '7', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'translations'", 'null': 'True', 'to': "orm['wiki.Document']"}),
'parent_topic': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['wiki.Document']"}),
'quick_links_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'related_documents': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['wiki.Document']", 'through': "orm['wiki.RelatedDocument']", 'symmetrical': 'False'}),
'render_expires': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'render_max_age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'render_scheduled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'render_started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'rendered_errors': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rendered_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teamwork.Team']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'zone_subnav_local_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'wiki.documentattachment': {
'Meta': {'object_name': 'DocumentAttachment'},
'attached_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Document']"}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['attachments.Attachment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {})
},
'wiki.documentdeletionlog': {
'Meta': {'object_name': 'DocumentDeletionLog'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('kuma.core.fields.LocaleField', [], {'default': "'en-US'", 'max_length': '7', 'db_index': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'wiki.documenttag': {
'Meta': {'object_name': 'DocumentTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'wiki.documentzone': {
'Meta': {'object_name': 'DocumentZone'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'zones'", 'unique': 'True', 'to': "orm['wiki.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'styles': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url_root': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'wiki.editortoolbar': {
'Meta': {'object_name': 'EditorToolbar'},
'code': ('django.db.models.fields.TextField', [], {'max_length': '2000'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_toolbars'", 'to': "orm['auth.User']"}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'wiki.helpfulvote': {
'Meta': {'object_name': 'HelpfulVote'},
'anonymous_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'null': 'True', 'to': "orm['auth.User']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'to': "orm['wiki.Document']"}),
'helpful': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'wiki.localizationtag': {
'Meta': {'object_name': 'LocalizationTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'wiki.localizationtaggedrevision': {
'Meta': {'object_name': 'LocalizationTaggedRevision'},
'content_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Revision']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.LocalizationTag']"})
},
'wiki.relateddocument': {
'Meta': {'ordering': "['-in_common']", 'object_name': 'RelatedDocument'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_from'", 'to': "orm['wiki.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_common': ('django.db.models.fields.IntegerField', [], {}),
'related': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_to'", 'to': "orm['wiki.Document']"})
},
'wiki.reviewtag': {
'Meta': {'object_name': 'ReviewTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'wiki.reviewtaggedrevision': {
'Meta': {'object_name': 'ReviewTaggedRevision'},
'content_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Revision']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ReviewTag']"})
},
'wiki.revision': {
'Meta': {'object_name': 'Revision'},
'based_on': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Revision']", 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_revisions'", 'to': "orm['auth.User']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['wiki.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_mindtouch_migration': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'render_max_age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'reviewer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reviewed_revisions'", 'null': 'True', 'to': "orm['auth.User']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'toc_depth': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'wiki.taggeddocument': {
'Meta': {'object_name': 'TaggedDocument'},
'content_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.DocumentTag']"})
}
}
complete_apps = ['wiki']
|
mastizada/kuma
|
kuma/wiki/migrations/0041_auto__del_firefoxversion__del_unique_firefoxversion_item_id_document__.py
|
Python
|
mpl-2.0
| 21,931 | 0.007387 |
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception \
as storops_ex
from cinder.tests.unit.volume.drivers.emc.vnx import fake_storops as storops
from cinder.tests.unit.volume.drivers.emc.vnx import res_mock
from cinder.tests.unit.volume.drivers.emc.vnx import utils
from cinder.volume.drivers.emc.vnx import client as vnx_client
from cinder.volume.drivers.emc.vnx import common as vnx_common
class TestCondition(test.TestCase):
@res_mock.patch_client
def test_is_lun_io_ready_false(self, client, mocked):
r = vnx_client.Condition.is_lun_io_ready(mocked['lun'])
self.assertFalse(r)
@res_mock.patch_client
def test_is_lun_io_ready_true(self, client, mocked):
r = vnx_client.Condition.is_lun_io_ready(mocked['lun'])
self.assertTrue(r)
@res_mock.patch_client
def test_is_lun_io_ready_exception(self, client, mocked):
self.assertRaises(exception.VolumeBackendAPIException,
vnx_client.Condition.is_lun_io_ready,
mocked['lun'])
class TestClient(test.TestCase):
def setUp(self):
super(TestClient, self).setUp()
self.origin_timeout = vnx_common.DEFAULT_TIMEOUT
vnx_common.DEFAULT_TIMEOUT = 0
def tearDown(self):
super(TestClient, self).tearDown()
vnx_common.DEFAULT_TIMEOUT = self.origin_timeout
@res_mock.patch_client
def test_create_lun(self, client, mocked):
client.create_lun(pool='pool1', name='test', size=1, provision=None,
tier=None, cg_id=None, ignore_thresholds=False)
client.vnx.get_pool.assert_called_once_with(name='pool1')
pool = client.vnx.get_pool(name='pool1')
pool.create_lun.assert_called_with(lun_name='test',
size_gb=1,
provision=None,
tier=None,
ignore_thresholds=False)
@res_mock.patch_client
def test_create_lun_error(self, client, mocked):
self.assertRaises(storops_ex.VNXCreateLunError,
client.create_lun,
pool='pool1',
name='test',
size=1,
provision=None,
tier=None,
cg_id=None,
ignore_thresholds=False)
client.vnx.get_pool.assert_called_once_with(name='pool1')
@res_mock.patch_client
def test_create_lun_already_existed(self, client, mocked):
client.create_lun(pool='pool1', name='lun3', size=1, provision=None,
tier=None, cg_id=None, ignore_thresholds=False)
client.vnx.get_lun.assert_called_once_with(name='lun3')
@res_mock.patch_client
def test_create_lun_in_cg(self, client, mocked):
client.create_lun(
pool='pool1', name='test', size=1, provision=None,
tier=None, cg_id='cg1', ignore_thresholds=False)
@res_mock.patch_client
def test_create_lun_compression(self, client, mocked):
client.create_lun(pool='pool1', name='lun2', size=1,
provision=storops.VNXProvisionEnum.COMPRESSED,
tier=None, cg_id=None,
ignore_thresholds=False)
@res_mock.patch_client
def test_migrate_lun(self, client, mocked):
client.migrate_lun(src_id=1,
dst_id=2)
lun = client.vnx.get_lun()
lun.migrate.assert_called_with(2, storops.VNXMigrationRate.HIGH)
@unittest.skip("Skip until bug #1578986 is fixed")
@utils.patch_sleep
@res_mock.patch_client
def test_migrate_lun_with_retry(self, client, mocked, mock_sleep):
lun = client.vnx.get_lun()
self.assertRaises(storops_ex.VNXTargetNotReadyError,
client.migrate_lun,
src_id=4,
dst_id=5)
lun.migrate.assert_called_with(5, storops.VNXMigrationRate.HIGH)
mock_sleep.assert_called_with(15)
@res_mock.patch_client
def test_session_finished_faulted(self, client, mocked):
lun = client.vnx.get_lun()
r = client.session_finished(lun)
self.assertTrue(r)
@res_mock.patch_client
def test_session_finished_migrating(self, client, mocked):
lun = client.vnx.get_lun()
r = client.session_finished(lun)
self.assertFalse(r)
@res_mock.patch_client
def test_session_finished_not_existed(self, client, mocked):
lun = client.vnx.get_lun()
r = client.session_finished(lun)
self.assertTrue(r)
@res_mock.patch_client
def test_migrate_lun_error(self, client, mocked):
lun = client.vnx.get_lun()
self.assertRaises(storops_ex.VNXMigrationError,
client.migrate_lun,
src_id=4,
dst_id=5)
lun.migrate.assert_called_with(5, storops.VNXMigrationRate.HIGH)
@res_mock.patch_client
def test_verify_migration(self, client, mocked):
r = client.verify_migration(1, 2, 'test_wwn')
self.assertTrue(r)
@res_mock.patch_client
def test_verify_migration_false(self, client, mocked):
r = client.verify_migration(1, 2, 'fake_wwn')
self.assertFalse(r)
@res_mock.patch_client
def test_cleanup_migration(self, client, mocked):
client.cleanup_migration(1, 2)
@res_mock.patch_client
def test_get_lun_by_name(self, client, mocked):
lun = client.get_lun(name='lun_name_test_get_lun_by_name')
self.assertEqual(888, lun.lun_id)
@res_mock.patch_client
def test_delete_lun(self, client, mocked):
client.delete_lun(mocked['lun'].name)
@res_mock.patch_client
def test_delete_smp(self, client, mocked):
client.delete_lun(mocked['lun'].name)
@res_mock.patch_client
def test_delete_lun_not_exist(self, client, mocked):
client.delete_lun(mocked['lun'].name)
@res_mock.patch_client
def test_delete_lun_exception(self, client, mocked):
self.assertRaisesRegexp(storops_ex.VNXDeleteLunError,
'General lun delete error.',
client.delete_lun, mocked['lun'].name)
@res_mock.patch_client
def test_enable_compression(self, client, mocked):
lun_obj = mocked['lun']
client.enable_compression(lun_obj)
lun_obj.enable_compression.assert_called_with(ignore_thresholds=True)
@res_mock.patch_client
def test_enable_compression_on_compressed_lun(self, client, mocked):
lun_obj = mocked['lun']
client.enable_compression(lun_obj)
@res_mock.patch_client
def test_get_vnx_enabler_status(self, client, mocked):
re = client.get_vnx_enabler_status()
self.assertTrue(re.dedup_enabled)
self.assertFalse(re.compression_enabled)
self.assertTrue(re.thin_enabled)
self.assertFalse(re.fast_enabled)
self.assertTrue(re.snap_enabled)
@res_mock.patch_client
def test_lun_has_snapshot_true(self, client, mocked):
re = client.lun_has_snapshot(mocked['lun'])
self.assertTrue(re)
@res_mock.patch_client
def test_lun_has_snapshot_false(self, client, mocked):
re = client.lun_has_snapshot(mocked['lun'])
self.assertFalse(re)
@res_mock.patch_client
def test_create_cg(self, client, mocked):
cg = client.create_consistency_group('cg_name')
self.assertIsNotNone(cg)
@res_mock.patch_client
def test_create_cg_already_existed(self, client, mocked):
cg = client.create_consistency_group('cg_name_already_existed')
self.assertIsNotNone(cg)
@res_mock.patch_client
def test_delete_cg(self, client, mocked):
client.delete_consistency_group('deleted_name')
@res_mock.patch_client
def test_delete_cg_not_existed(self, client, mocked):
client.delete_consistency_group('not_existed')
@res_mock.patch_client
def test_expand_lun(self, client, _ignore):
client.expand_lun('lun', 10, poll=True)
@res_mock.patch_client
def test_expand_lun_not_poll(self, client, _ignore):
client.expand_lun('lun', 10, poll=False)
@res_mock.patch_client
def test_expand_lun_already_expanded(self, client, _ignore):
client.expand_lun('lun', 10)
@unittest.skip("Skip until bug #1578986 is fixed")
@utils.patch_sleep
@res_mock.patch_client
def test_expand_lun_not_ops_ready(self, client, _ignore, sleep_mock):
self.assertRaises(storops_ex.VNXLunPreparingError,
client.expand_lun, 'lun', 10)
lun = client.vnx.get_lun()
lun.expand.assert_called_with(10, ignore_thresholds=True)
# the client retries the expand, so by now it should have run twice
self.assertEqual(2, lun.expand.call_count)
@res_mock.patch_client
def test_create_snapshot(self, client, _ignore):
client.create_snapshot('lun_test_create_snapshot',
'snap_test_create_snapshot')
lun = client.vnx.get_lun()
lun.create_snap.assert_called_once_with('snap_test_create_snapshot',
allow_rw=True,
auto_delete=False)
@res_mock.patch_client
def test_create_snapshot_snap_name_exist_error(self, client, _ignore):
client.create_snapshot('lun_name', 'snapshot_name')
@res_mock.patch_client
def test_delete_snapshot(self, client, _ignore):
client.delete_snapshot('snapshot_name')
@res_mock.patch_client
def test_delete_snapshot_delete_attached_error(self, client, _ignore):
self.assertRaises(storops_ex.VNXDeleteAttachedSnapError,
client.delete_snapshot, 'snapshot_name')
@res_mock.patch_client
def test_copy_snapshot(self, client, mocked):
client.copy_snapshot('old_name', 'new_name')
@res_mock.patch_client
def test_create_mount_point(self, client, mocked):
client.create_mount_point('lun_name', 'smp_name')
@res_mock.patch_client
def test_attach_mount_point(self, client, mocked):
client.attach_snapshot('smp_name', 'snap_name')
@res_mock.patch_client
def test_detach_mount_point(self, client, mocked):
client.detach_snapshot('smp_name')
@res_mock.patch_client
def test_modify_snapshot(self, client, mocked):
client.modify_snapshot('snap_name', True, True)
@res_mock.patch_client
def test_create_cg_snapshot(self, client, mocked):
snap = client.create_cg_snapshot('cg_snap_name', 'cg_name')
self.assertIsNotNone(snap)
@res_mock.patch_client
def test_create_cg_snapshot_already_existed(self, client, mocked):
snap = client.create_cg_snapshot('cg_snap_name', 'cg_name')
self.assertIsNotNone(snap)
@res_mock.patch_client
def test_delete_cg_snapshot(self, client, mocked):
client.delete_cg_snapshot(cg_snap_name='test_snap')
@res_mock.patch_client
def test_create_sg(self, client, mocked):
client.create_storage_group('sg_name')
@res_mock.patch_client
def test_create_sg_name_in_use(self, client, mocked):
self.assertRaisesRegexp(storops_ex.VNXStorageGroupNameInUseError,
'Storage group sg_name already exists. '
'Message: ',
client.create_storage_group, 'sg_name')
@res_mock.patch_client
def test_get_storage_group(self, client, mocked):
sg = client.get_storage_group('sg_name')
self.assertEqual('sg_name', sg.name)
@res_mock.patch_client
def test_register_initiator(self, client, mocked):
host = vnx_common.Host('host_name', ['host_initiator'], 'host_ip')
client.register_initiator(mocked['sg'], host,
{'host_initiator': 'port_1'})
@res_mock.patch_client
def test_register_initiator_exception(self, client, mocked):
host = vnx_common.Host('host_name', ['host_initiator'], 'host_ip')
client.register_initiator(mocked['sg'], host,
{'host_initiator': 'port_1'})
@res_mock.patch_client
def test_ping_node(self, client, mocked):
self.assertTrue(client.ping_node(mocked['iscsi_port'], 'ip'))
@res_mock.patch_client
def test_ping_node_fail(self, client, mocked):
self.assertFalse(client.ping_node(mocked['iscsi_port'], 'ip'))
@res_mock.patch_client
def test_add_lun_to_sg(self, client, mocked):
lun = 'not_care'
self.assertEqual(1, client.add_lun_to_sg(mocked['sg'], lun, 3))
@res_mock.patch_client
def test_add_lun_to_sg_alu_already_attached(self, client, mocked):
lun = 'not_care'
self.assertEqual(1, client.add_lun_to_sg(mocked['sg'], lun, 3))
@res_mock.patch_client
def test_add_lun_to_sg_alu_in_use(self, client, mocked):
self.assertRaisesRegexp(storops_ex.VNXNoHluAvailableError,
'No HLU available.',
client.add_lun_to_sg,
mocked['sg'],
mocked['lun'],
3)
@res_mock.patch_client
def test_update_consistencygroup_no_lun_in_cg(self, client, mocked):
lun_1 = mocked['lun_1']
lun_2 = mocked['lun_2']
def _get_lun(lun_id):
return list(filter(
lambda x: x.lun_id == lun_id, (lun_1, lun_2)))[0]
client.get_lun = _get_lun
cg = mocked['cg']
client.update_consistencygroup(cg, [lun_1.lun_id, lun_2.lun_id], [])
cg.replace_member.assert_called_once_with(lun_1, lun_2)
@res_mock.patch_client
def test_update_consistencygroup_lun_in_cg(self, client, mocked):
lun_1 = mocked['lun_1']
lun_2 = mocked['lun_2']
def _get_lun(lun_id):
return list(filter(
lambda x: x.lun_id == lun_id, (lun_1, lun_2)))[0]
client.get_lun = _get_lun
cg = mocked['cg']
client.update_consistencygroup(cg, [lun_2.lun_id], [lun_1.lun_id])
cg.replace_member.assert_called_once_with(lun_2)
@res_mock.patch_client
def test_update_consistencygroup_remove_all(self, client, mocked):
lun_1 = mocked['lun_1']
def _get_lun(lun_id):
return list(filter(lambda x: x.lun_id == lun_id, (lun_1,)))[0]
client.get_lun = _get_lun
cg = mocked['cg']
client.update_consistencygroup(cg, [], [lun_1.lun_id])
cg.delete_member.assert_called_once_with(lun_1)
@res_mock.patch_client
def test_get_available_ip(self, client, mocked):
ip = client.get_available_ip()
self.assertEqual('192.168.1.5', ip)
@res_mock.patch_client
def test_create_mirror(self, client, mocked):
mv = client.create_mirror('test_mirror_name', 11)
self.assertIsNotNone(mv)
@res_mock.patch_client
def test_create_mirror_already_created(self, client, mocked):
mv = client.create_mirror('error_mirror', 12)
self.assertIsNotNone(mv)
@res_mock.patch_client
def test_delete_mirror(self, client, mocked):
client.delete_mirror('mirror_name')
@res_mock.patch_client
def test_delete_mirror_already_deleted(self, client, mocked):
client.delete_mirror('mirror_name_deleted')
@res_mock.patch_client
def test_add_image(self, client, mocked):
client.add_image('mirror_namex', '192.168.1.11', 31)
@res_mock.patch_client
def test_remove_image(self, client, mocked):
client.remove_image('mirror_remove')
@res_mock.patch_client
def test_fracture_image(self, client, mocked):
client.fracture_image('mirror_fracture')
@res_mock.patch_client
def test_sync_image(self, client, mocked):
client.sync_image('mirror_sync')
@res_mock.patch_client
def test_promote_image(self, client, mocked):
client.promote_image('mirror_promote')
@res_mock.mock_driver_input
@res_mock.patch_client
def test_get_lun_id(self, client, mocked, cinder_input):
lun_id = client.get_lun_id(cinder_input['volume'])
self.assertEqual(1, lun_id)
@res_mock.mock_driver_input
@res_mock.patch_client
def test_get_lun_id_without_provider_location(self, client, mocked,
cinder_input):
lun_id = client.get_lun_id(cinder_input['volume'])
self.assertIsInstance(lun_id, int)
self.assertEqual(mocked['lun'].lun_id, lun_id)
|
cloudbase/cinder
|
cinder/tests/unit/volume/drivers/emc/vnx/test_client.py
|
Python
|
apache-2.0
| 17,512 | 0 |
#!/usr/bin/env python
import sys
import src.json_importing as I
import src.data_training as T
import src.data_cross_validation as V
import src.extract_feature_multilabel as EML
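# Illustrative invocation (the argument is a Trello board JSON export;
# the path is a placeholder):
#   python main.py boards/my_board.json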
if __name__ == '__main__':
print('Hello, I am Trellearn')
jsonFileName = sys.argv[1]
cards = I.parseJSON(jsonFileName)
X, Y, cv, mlb = EML.extract(cards)
V.validateML(X, Y)
sys.exit(0)
|
AntoineToubhans/trellearn
|
main.py
|
Python
|
mit
| 389 | 0.002571 |
/usr/share/pyshared/gwibber/lib/gtk/widgets.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/pymodules/python2.7/gwibber/lib/gtk/widgets.py
|
Python
|
gpl-3.0
| 46 | 0.021739 |
# CodeIgniter
# http://codeigniter.com
#
# An open source application development framework for PHP
#
# This content is released under the MIT License (MIT)
#
# Copyright (c) 2014 - 2015, British Columbia Institute of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Copyright (c) 2008 - 2014, EllisLab, Inc. (http://ellislab.com/)
# Copyright (c) 2014 - 2015, British Columbia Institute of Technology (http://bcit.ca/)
#
# http://opensource.org/licenses/MIT MIT License
import re
import copy
from pygments.lexer import DelegatingLexer
from pygments.lexers.web import PhpLexer, HtmlLexer
__all__ = [ 'CodeIgniterLexer' ]
class CodeIgniterLexer(DelegatingLexer):
"""
Handles HTML, PHP, JavaScript, and CSS is highlighted
PHP is highlighted with the "startline" option
"""
name = 'CodeIgniter'
aliases = [ 'ci', 'codeigniter' ]
filenames = [ '*.html', '*.css', '*.php', '*.xml', '*.static' ]
mimetypes = [ 'text/html', 'application/xhtml+xml' ]
def __init__(self, **options):
super(CodeIgniterLexer, self).__init__(HtmlLexer,
PhpLexer,
startinline=True,
**options)
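# Illustrative usage (assumes Pygments is installed; the snippet is made up):
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   print(highlight('<?php echo $title; ?>', CodeIgniterLexer(), HtmlFormatter()))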
|
ajose1024/Code_Igniter_Extended
|
user_guide_src/cilexer/cilexer/cilexer.py
|
Python
|
mit
| 2,222 | 0.00495 |
import urllib.request, urllib.parse, urllib.error
from oauth2 import Request as OAuthRequest, SignatureMethod_HMAC_SHA1
try:
import json as simplejson
except ImportError:
try:
import simplejson
except ImportError:
from django.utils import simplejson
from social_auth.backends import ConsumerBasedOAuth, OAuthBackend, BaseOAuth2
from social_auth.utils import dsa_urlopen
class RdioBaseBackend(OAuthBackend):
def get_user_id(self, details, response):
return response['key']
def get_user_details(self, response):
return {
'username': response['username'],
'first_name': response['firstName'],
'last_name': response['lastName'],
'fullname': response['displayName'],
}
class RdioOAuth1Backend(RdioBaseBackend):
"""Rdio OAuth authentication backend"""
name = 'rdio-oauth1'
EXTRA_DATA = [
('key', 'rdio_id'),
('icon', 'rdio_icon_url'),
('url', 'rdio_profile_url'),
('username', 'rdio_username'),
('streamRegion', 'rdio_stream_region'),
]
@classmethod
def tokens(cls, instance):
token = super(RdioOAuth1Backend, cls).tokens(instance)
if token and 'access_token' in token:
token = dict(tok.split('=')
for tok in token['access_token'].split('&'))
return token
class RdioOAuth2Backend(RdioBaseBackend):
name = 'rdio-oauth2'
EXTRA_DATA = [
('key', 'rdio_id'),
('icon', 'rdio_icon_url'),
('url', 'rdio_profile_url'),
('username', 'rdio_username'),
('streamRegion', 'rdio_stream_region'),
('refresh_token', 'refresh_token', True),
('token_type', 'token_type', True),
]
class RdioOAuth1(ConsumerBasedOAuth):
AUTH_BACKEND = RdioOAuth1Backend
REQUEST_TOKEN_URL = 'http://api.rdio.com/oauth/request_token'
AUTHORIZATION_URL = 'https://www.rdio.com/oauth/authorize'
ACCESS_TOKEN_URL = 'http://api.rdio.com/oauth/access_token'
RDIO_API_BASE = 'http://api.rdio.com/1/'
SETTINGS_KEY_NAME = 'RDIO_OAUTH1_KEY'
SETTINGS_SECRET_NAME = 'RDIO_OAUTH1_SECRET'
def user_data(self, access_token, *args, **kwargs):
"""Return user data provided"""
params = {
'method': 'currentUser',
'extras': 'username,displayName,streamRegion',
}
request = self.oauth_post_request(access_token, self.RDIO_API_BASE,
params=params)
response = dsa_urlopen(request.url, request.to_postdata())
json = response.read().decode('utf-8')  # read() returns bytes under Python 3
try:
return simplejson.loads(json)['result']
except ValueError:
return None
def oauth_post_request(self, token, url, params):
"""Generate OAuth request, setups callback url"""
if 'oauth_verifier' in self.data:
params['oauth_verifier'] = self.data['oauth_verifier']
request = OAuthRequest.from_consumer_and_token(self.consumer,
token=token,
http_url=url,
parameters=params,
http_method='POST')
request.sign_request(SignatureMethod_HMAC_SHA1(), self.consumer, token)
return request
class RdioOAuth2(BaseOAuth2):
AUTH_BACKEND = RdioOAuth2Backend
AUTHORIZATION_URL = 'https://www.rdio.com/oauth2/authorize'
ACCESS_TOKEN_URL = 'https://www.rdio.com/oauth2/token'
RDIO_API_BASE = 'https://www.rdio.com/api/1/'
SETTINGS_KEY_NAME = 'RDIO_OAUTH2_KEY'
SETTINGS_SECRET_NAME = 'RDIO_OAUTH2_SECRET'
SCOPE_VAR_NAME = 'RDIO2_PERMISSIONS'
EXTRA_PARAMS_VAR_NAME = 'RDIO2_EXTRA_PARAMS'
def user_data(self, access_token, *args, **kwargs):
params = {
'method': 'currentUser',
'extras': 'username,displayName,streamRegion',
'access_token': access_token,
}
data = urllib.parse.urlencode(params).encode('utf-8')  # POST body must be bytes
response = dsa_urlopen(self.RDIO_API_BASE, data)
try:
return simplejson.load(response)['result']
except ValueError:
return None
# Backend definition
BACKENDS = {
'rdio-oauth1': RdioOAuth1,
'rdio-oauth2': RdioOAuth2
}
|
limdauto/django-social-auth
|
social_auth/backends/contrib/rdio.py
|
Python
|
bsd-3-clause
| 4,358 | 0.000918 |
from LSP.plugin.core.typing import Any, Callable
from types import MethodType
import weakref
__all__ = ['weak_method']
# An implementation of weak method borrowed from sublime_lib [1]
#
# We need it to be able to weak reference bound methods as `weakref.WeakMethod` is not available in
# 3.3 runtime.
#
# The reason this is necessary is explained in the documentation of `weakref.WeakMethod`:
# > A custom ref subclass which simulates a weak reference to a bound method (i.e., a method defined
# > on a class and looked up on an instance). Since a bound method is ephemeral, a standard weak
# > reference cannot keep hold of it.
#
# [1] https://github.com/SublimeText/sublime_lib/blob/master/st3/sublime_lib/_util/weak_method.py
def weak_method(method: Callable) -> Callable:
assert isinstance(method, MethodType)
self_ref = weakref.ref(method.__self__)
function_ref = weakref.ref(method.__func__)
def wrapped(*args: Any, **kwargs: Any) -> Any:
self = self_ref()
function = function_ref()
if self is None or function is None:
print('[lsp_utils] Error: weak_method not called due to a deleted reference', [self, function])
return
return function(self, *args, **kwargs)
return wrapped
|
dmilith/SublimeText3-dmilith
|
Packages/lsp_utils/st3/lsp_utils/_util/weak_method.py
|
Python
|
mit
| 1,266 | 0.004739 |
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought util package component>
#------------------------------------------------------------------------------
""" A placeholder for math functionality that is not implemented in SciPy.
"""
import warnings
warnings.warn("Module is deprecated.", DeprecationWarning)
import numpy
def is_monotonic(array):
""" Does the array increase monotonically?
>>> is_monotonic(array((1, 2, 3, 4)))
True
>>> is_monotonic(array((1, 2, 3, 0, 5)))
False
This may not be the desired response but:
>>> is_monotonic(array((1)))
False
"""
try:
min_increment = numpy.amin(array[1:] - array[:-1])
if min_increment >= 0:
return True
except Exception:
return False
return False
def brange(min_value, max_value, increment):
""" Returns an inclusive version of arange().
The usual arange() gives:
>>> arange(1, 4, 1)
array([1, 2, 3])
However brange() returns:
>>> brange(1, 4, 1)
array([ 1., 2., 3., 4.])
"""
return numpy.arange(min_value, max_value + increment / 2.0, increment)
def norm(mean, std):
""" Returns a single random value from a normal distribution. """
return numpy.random.normal(mean, std)
def discrete_std (counts, bin_centers):
""" Returns a standard deviation from binned data. """
mean = numpy.sum(counts * bin_centers)/numpy.sum(counts)
return numpy.sqrt((numpy.sum((counts-mean)**2))/len(counts))
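# Worked example (note the code subtracts the weighted mean of the bin
# centers from the *counts*, so treat the result with care):
#   counts = numpy.array([1, 2, 1]); centers = numpy.array([0.0, 1.0, 2.0])
#   mean = (1*0 + 2*1 + 1*2) / 4 = 1.0
#   discrete_std(counts, centers) = sqrt(((1-1)**2 + (2-1)**2 + (1-1)**2) / 3) ~= 0.577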
|
enthought/etsproxy
|
enthought/util/math.py
|
Python
|
bsd-3-clause
| 1,984 | 0.003528 |
#!/usr/bin/env python
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Converts a recipe given in a .cfg file into a full bash shell script
which would be similar to what CIVET would end up running.
"""
from __future__ import unicode_literals, absolute_import
import argparse, sys, os
import re
from RecipeReader import RecipeReader
def read_script(filename):
top_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
script_file = os.path.join(top_dir, filename)
with open(script_file, "r") as f:
out = f.read()
return out
def step_functions(recipe):
step_cmds = ''
for step in recipe["steps"]:
step_cmds += "function step_%s\n{\n" % step["position"]
for key, value in step["environment"].items():
step_cmds += write_env(key, value, " local")
step_cmds += ' local step_name="%s"\n' % step["name"]
step_cmds += ' local step_position="%s"\n' % step["position"]
script = read_script(step["script"])
for l in script.split('\n'):
if l.strip():
step_cmds += ' %s\n' % l
else:
step_cmds += "\n"
step_cmds += "}\nexport -f step_%s\n\n" % step["position"]
step_cmds += "function step_exit()\n"
step_cmds += '{\n'
step_cmds += ' if bash -c $1; then\n'
step_cmds += ' printf "\\n$1 passed\\n\\n"\n'
step_cmds += ' elif [ "$2" == "True" ]; then\n'
step_cmds += ' printf "\\n$1 failed. Aborting\\n\\n"\n'
step_cmds += ' exit 1\n'
step_cmds += ' else\n'
step_cmds += ' printf "\\n$1 failed but continuing\\n\\n"\n'
step_cmds += ' fi\n'
step_cmds += '}\n\n'
# now write out all the functions
for step in recipe["steps"]:
step_cmds += "step_exit step_%s %s\n" % (step["position"], step["abort_on_failure"])
return step_cmds
def write_env(key, value, prefix="export"):
return '%s %s="%s"\n' % (prefix, key, re.sub("^BUILD_ROOT", "$BUILD_ROOT", value))
def recipe_to_bash(recipe,
base_repo,
base_branch,
base_sha,
head_repo,
head_branch,
head_sha,
pr,
push,
manual,
build_root,
moose_jobs,
args):
script = "#!/bin/bash\n"
script += '# Generated by: %s %s\n' % (__file__, ' '.join(args))
script += '# Script for job %s\n' % recipe["filename"]
script += '# It is a good idea to redirect stdin, ie "./script.sh < /dev/null"\n'
script += '# Be sure to have the proper modules loaded as well.\n'
script += '\n\n'
script += 'module list\n'
script += 'export BUILD_ROOT="%s"\n' % build_root
script += 'export MOOSE_JOBS="%s"\n' % moose_jobs
script += '\n\n'
script += 'export CIVET_RECIPE_NAME="%s"\n' % recipe["name"]
script += 'export CIVET_BASE_REPO="%s"\n' % base_repo
script += 'export CIVET_BASE_SSH_URL="%s"\n' % base_repo
script += 'export CIVET_BASE_REF="%s"\n' % base_branch
script += 'export CIVET_BASE_SHA="%s"\n' % base_sha
script += 'export CIVET_HEAD_REPO="%s"\n' % head_repo
script += 'export CIVET_HEAD_REF="%s"\n' % head_branch
script += 'export CIVET_HEAD_SHA="%s"\n' % head_sha
script += 'export CIVET_HEAD_SSH_URL="%s"\n' % head_repo
script += 'export CIVET_JOB_ID="1"\n'
cause_str = ""
if pr:
cause_str = "Pull Request"
elif push:
cause_str = "Push"
elif manual:
cause_str = "Manual"
script += 'export CIVET_EVENT_CAUSE="%s"\n' % cause_str
script += '\n\n'
for source in recipe["global_sources"]:
s = read_script(source)
script += "# %s\n%s\n" % (source, s)
script += "\n\n"
for key, value in recipe["global_env"].items():
script += write_env(key, value)
script += "\n\n"
script += step_functions(recipe)
return script
def convert_recipe(args):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--recipe", dest="recipe", help="The recipe file to convert.", required=True)
parser.add_argument("--output", dest="output", help="Where to write the script to")
parser.add_argument("--build-root", dest="build_root", default="/tmp/", help="Where to set BUILD_ROOT")
parser.add_argument("--num-jobs", dest="num_jobs", default="4", help="What to set MOOSE_JOBS to")
parser.add_argument("--head", nargs=3, dest="head", help="Head repo to work on. Format is: repo branch sha", required=True)
parser.add_argument("--base", nargs=3, dest="base", help="Base repo to work on. Format is: repo branch sha", required=True)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--pr", action="store_true")
group.add_argument("--push", action="store_true")
group.add_argument("--manual", action="store_true")
parsed = parser.parse_args(args)
dirname = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.dirname(dirname)
# RecipeReader takes a relative path from the base repo directory
real_path = os.path.realpath(parsed.recipe)
rel_path = os.path.relpath(real_path, parent_dir)
try:
reader = RecipeReader(parent_dir, rel_path)
recipe = reader.read()
except Exception as e:
print("Recipe '%s' is not valid: %s" % (real_path, e))
return 1
try:
script = recipe_to_bash(recipe,
base_repo=parsed.base[0],
base_branch=parsed.base[1],
base_sha=parsed.base[2],
head_repo=parsed.head[0],
head_branch=parsed.head[1],
head_sha=parsed.head[2],
pr=parsed.pr,
push=parsed.push,
manual=parsed.manual,
build_root=parsed.build_root,
moose_jobs=parsed.num_jobs,
args=args,
)
if parsed.output:
with open(parsed.output, "w") as f:
f.write(script)
else:
print(script)
except Exception as e:
print("Failed to convert recipe: %s" % e)
return 1
if __name__ == "__main__":
    sys.exit(convert_recipe(sys.argv[1:]))
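# Example invocation (hypothetical repos/SHAs), writing the generated script
# to job.sh:
#
#   ./recipe_to_bash.py --recipe recipes/tests.cfg \
#       --base git@github.com:idaholab/moose devel 1111111 \
#       --head git@github.com:contributor/moose feature 2222222 \
#       --pr --output job.sh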
|
idaholab/civet
|
ci/recipe/recipe_to_bash.py
|
Python
|
apache-2.0
| 6,708 | 0.00641 |
"""
Test multiword commands ('platform' in this case).
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
class MultiwordCommandsTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@no_debug_info_test
def test_ambiguous_subcommand(self):
self.expect("platform s", error=True,
substrs=["ambiguous command 'platform s'. Possible completions:",
"\tselect\n",
"\tshell\n",
"\tsettings\n"])
@no_debug_info_test
def test_empty_subcommand(self):
self.expect("platform \"\"", error=True, substrs=["Need to specify a non-empty subcommand."])
@no_debug_info_test
def test_help(self):
# <multiword> help brings up help.
self.expect("platform help",
substrs=["Commands to manage and create platforms.",
"Syntax: platform [",
"The following subcommands are supported:",
"connect",
"Select the current platform"])
|
llvm-mirror/lldb
|
packages/Python/lldbsuite/test/functionalities/multiword-commands/TestMultiWordCommands.py
|
Python
|
apache-2.0
| 1,161 | 0.002584 |
#!/usr/bin/env python3
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# A Happy command line utility that tests Weave Ping among Weave nodes.
#
# The command is executed by instantiating and running WeavePing class.
#
from __future__ import absolute_import
from __future__ import print_function
import getopt
import sys
import set_test_path
from happy.Utils import *
import WeavePing
if __name__ == "__main__":
options = WeavePing.option()
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:s:c:tuwqp:i:a:e:n:CE:T:",
["help", "origin=", "server=", "count=", "tcp", "udp", "wrmp", "interval=", "quiet",
"tap=", "case", "case_cert_path=", "case_key_path="])
except getopt.GetoptError as err:
print(WeavePing.WeavePing.__doc__)
print(hred(str(err)))
        sys.exit(hred("%s: Failed to parse arguments." % (__file__)))
for o, a in opts:
if o in ("-h", "--help"):
print(WeavePing.WeavePing.__doc__)
sys.exit(0)
elif o in ("-q", "--quiet"):
options["quiet"] = True
elif o in ("-t", "--tcp"):
options["tcp"] = True
elif o in ("-u", "--udp"):
options["udp"] = True
elif o in ("-w", "--wrmp"):
options["wrmp"] = True
elif o in ("-o", "--origin"):
options["client"] = a
elif o in ("-s", "--server"):
options["server"] = a
elif o in ("-c", "--count"):
options["count"] = a
elif o in ("-i", "--interval"):
options["interval"] = a
elif o in ("-p", "--tap"):
options["tap"] = a
elif o in ("-C", "--case"):
options["case"] = True
elif o in ("-E", "--case_cert_path"):
options["case_cert_path"] = a
elif o in ("-T", "--case_key_path"):
options["case_key_path"] = a
else:
assert False, "unhandled option"
if len(args) == 1:
options["origin"] = args[0]
if len(args) == 2:
options["client"] = args[0]
options["server"] = args[1]
cmd = WeavePing.WeavePing(options)
cmd.start()
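    # Example invocation (hypothetical node names): ping node02 from node01
    # over UDP, five echoes at one-second intervals, with quiet output:
    #
    #   weave-ping.py --udp --count 5 --interval 1 --quiet node01 node02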
|
openweave/openweave-core
|
src/test-apps/happy/bin/weave-ping.py
|
Python
|
apache-2.0
| 2,876 | 0.001043 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
debug = True
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
REDIS_PASSWORD = '820AEC1BFC5D2C71E06CBF947A3A6191'
GUAVA_API_URL = 'http://localhost:5000'
|
Awesomeomics/webserver
|
guava/settings.py
|
Python
|
mit
| 210 | 0.009524 |
from sklearn.tree import DecisionTreeClassifier
# weak classifier
# decision tree (max depth = 2) using scikit-learn
class WeakClassifier:
# initialize
def __init__(self):
self.clf = DecisionTreeClassifier(max_depth = 2)
# train on dataset (X, y) with distribution weight w
def fit(self, X, y, w):
self.clf.fit(X, y, sample_weight = w)
# predict
def predict(self, X):
return self.clf.predict(X)
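# Minimal usage sketch with a uniform AdaBoost-style weight distribution
# (toy data, illustrative only):
#
#   import numpy as np
#   X = np.array([[0.0], [1.0], [2.0], [3.0]])
#   y = np.array([0, 0, 1, 1])
#   w = np.full(len(y), 1.0 / len(y))
#   clf = WeakClassifier()
#   clf.fit(X, y, w)
#   clf.predict(X)    # e.g. array([0, 0, 1, 1])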
|
huangshenno1/algo
|
ml/iris/ada_MO/weakclassifier.py
|
Python
|
mit
| 425 | 0.030588 |
import time
from prometheus_client import Counter, Histogram
from prometheus_client import start_http_server
from flask import request
FLASK_REQUEST_LATENCY = Histogram('flask_request_latency_seconds', 'Flask Request Latency',
['method', 'endpoint'])
FLASK_REQUEST_COUNT = Counter('flask_request_count', 'Flask Request Count',
['method', 'endpoint', 'http_status'])
def before_request():
request.start_time = time.time()
def after_request(response):
request_latency = time.time() - request.start_time
FLASK_REQUEST_LATENCY.labels(request.method, request.path).observe(request_latency)
FLASK_REQUEST_COUNT.labels(request.method, request.path, response.status_code).inc()
return response
def monitor(app, port=8000, addr=''):
app.before_request(before_request)
app.after_request(after_request)
start_http_server(port, addr)
if __name__ == '__main__':
from flask import Flask
app = Flask(__name__)
monitor(app, port=8000)
@app.route('/')
def index():
return "Hello"
# Run the application!
app.run()
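    # The metrics endpoint is served on its own port, separate from the app:
    #   curl http://localhost:8000/metrics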
|
sbarratt/flask-prometheus
|
flask_prometheus/__init__.py
|
Python
|
bsd-3-clause
| 1,126 | 0.008881 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRrcov(RPackage):
"""rrcov: Scalable Robust Estimators with High Breakdown Point"""
homepage = "https://cloud.r-project.org/package=rrcov"
url = "https://cloud.r-project.org/src/contrib/rrcov_1.4-7.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/rrcov"
version('1.4-7', sha256='cbd08ccce8b583a2f88946a3267c8fc494ee2b44ba749b9296a6e3d818f6f293')
depends_on('r@2.10:', type=('build', 'run'))
depends_on('r-robustbase@0.92.1:', type=('build', 'run'))
depends_on('r-mvtnorm', type=('build', 'run'))
depends_on('r-lattice', type=('build', 'run'))
depends_on('r-cluster', type=('build', 'run'))
depends_on('r-pcapp', type=('build', 'run'))
|
iulian787/spack
|
var/spack/repos/builtin/packages/r-rrcov/package.py
|
Python
|
lgpl-2.1
| 932 | 0.002146 |
"""
Model for testing arithmetic expressions.
"""
from __future__ import unicode_literals

from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Number(models.Model):
integer = models.BigIntegerField(db_column='the_integer')
float = models.FloatField(null=True, db_column='the_float')
def __str__(self):
return '%i, %.3f' % (self.integer, self.float)
class Experiment(models.Model):
name = models.CharField(max_length=24)
assigned = models.DateField()
completed = models.DateField()
start = models.DateTimeField()
end = models.DateTimeField()
class Meta:
ordering = ('name',)
def duration(self):
return self.end - self.start
|
ericholscher/django
|
tests/expressions_regress/models.py
|
Python
|
bsd-3-clause
| 766 | 0.001305 |
import socket
import os
RUN_IN_TOPOLOGY = False
TOPOLOGY_FROM_RESOURCE_SERVER = False
HOSTNAME_1 = HOSTNAME_2 = HOSTNAME_3 = socket.gethostname()
USE_SSL = False
ICAT_HOSTNAME = socket.gethostname()
PREEXISTING_ADMIN_PASSWORD = 'rods'
# TODO: allow for arbitrary number of remote zones
class FEDERATION(object):
LOCAL_IRODS_VERSION = (4, 2, 0)
REMOTE_IRODS_VERSION = (4, 2, 0)
RODSUSER_NAME_PASSWORD_LIST = [('zonehopper', '53CR37')]
RODSADMIN_NAME_PASSWORD_LIST = []
IRODS_DIR = '/var/lib/irods/iRODS'
LOCAL_ZONE = 'dev'
REMOTE_ZONE = 'buntest'
REMOTE_HOST = 'buntest'
REMOTE_RESOURCE = 'demoResc'
REMOTE_VAULT = '/var/lib/irods/iRODS/Vault'
TEST_FILE_SIZE = 4*1024*1024
LARGE_FILE_SIZE = 64*1024*1024
TEST_FILE_COUNT = 300
MAX_THREADS = 16
|
janiheikkinen/irods
|
tests/pydevtest/configuration.py
|
Python
|
bsd-3-clause
| 802 | 0 |
# 20170226 Add more additional info
import acm
import ael
import HTI_Util
import HTI_FeedTrade_EDD_Util
import fnmatch
import datetime
import os
import sys
import csv
import re
import sqlite3
import math
import glob
import win32com.client
import traceback
import requests  # needed by csv_to_arr when the source is an http(s) URL
ael_variables = [['asofdate', 'Date', 'string', [str(ael.date_today()), 'Today'], 'Today', 1, 0, 'Report Date', None, 1], \
['acquirers', 'Acquirer(s)', 'string', HTI_Util.getAllAcquirers(), 'HTISEC - EDD,HTIFS - EDD', 1, 1, 'Acquirer(s)', None, 1], \
['counterparties', 'Counterparty(s)', 'string', HTI_Util.getAllParties(), None, 0, 1, 'Counterparty(s)', None, 1], \
['portfolio', 'Portfolio', 'string', HTI_Util.getAllPortfolios(), 'EDD Deltaone', 1, 1, 'Portfolio', None, 1], \
['currclspricemkt', 'Current Closing Price Market', 'string', None, 'Bloomberg_5PM', 1, 0, 'Current Closing Price Market', None, 1],
['histclspricemkt', 'Historical Closing Price Market', 'string', None, 'Bloomberg_5PM_Cls', 1, 0, 'Historical Closing Price Market', None, 1],
['pb_trd_file', 'PB Trade File', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\FA_Trade_Import\\pb_to_fa_YYYYMMDD.csv', 1, 0, 'PB Trade File', None, 1],
['loan_xls_template', 'Loan Template', 'string', None, 'S:\\Prime Brokerage (PB)\\Tools\\Stock Loan Collateral\\template\\ExcelUpload - Cash Entry.xlsm', 1, 0, 'Loan Template', None, 1],
['loan_xls_output', 'Loan Output', 'string', None, 'S:\\Prime Brokerage (PB)\\Tools\\Stock Loan Collateral\\ExcelUpload - Cash Entry YYYYMMDD.xlsm', 1, 0, 'Loan Output', None, 1],
['ss_bb_output', 'SS/BB Output', 'string', None, 'S:\\Prime Brokerage (PB)\\Tools\\Stock Loan Collateral\\ss_bb_trd_YYYYMMDD.xlsx', 1, 0, 'SS/BB Output', None, 1],
['base_ccy', 'Base Ccy', 'string', None, 'HKD', 1, 0, 'Base Ccy', None, 1]]
def question_marks(st):
question_marks = '?'
for i in range(0, len(st.split(','))-1):
question_marks = question_marks + ",?"
return question_marks
def db_cur(source = ":memory:"):
# sqlite3.register_adapter(decimal.Decimal, adapt_decimal)
# sqlite3.register_converter("DECTEXT", convert_decimal)
conn = sqlite3.connect(source, detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
return conn, cur
def create_tbl(cur, tbl_name, header, arr = None, index_arr = None):
cur.execute("""select count(*) FROM sqlite_master WHERE type='table' AND name = '%s' """ % (tbl_name))
tbl_exists = cur.fetchone()
if tbl_exists[0] == 0:
cur.execute("CREATE TABLE " + tbl_name + " (" + header.replace("id,", "id PRIMARY KEY,") + " );")
if index_arr is not None:
for index in index_arr:
cur.execute("CREATE INDEX " + tbl_name + "_" + index + " ON " + tbl_name + " (" + index + ");")
if arr is not None:
cur.executemany("INSERT INTO " + tbl_name + " VALUES ("+question_marks(header)+")", arr)
return
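# Illustrative use of the helpers above (hypothetical table and data):
#
#   conn, cur = db_cur()
#   create_tbl(cur, "px", "id,price", [("700 HK", 350.0)], index_arr=["price"])
#   cur.execute("select price from px where id = ?", ("700 HK",))
#   print cur.fetchone()["price"]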
def getTRSUnderlying(acm_ins):
acm_und_ins = None
bbticker = ""
for acm_leg in acm_ins.Legs():
if acm_leg.PayLeg() == False:
acm_und_ins = acm_leg.FloatRateReference()
break
return acm_und_ins
def getUndInstrumentBBTicker(acm_ins):
bbticker = ''
acm_und_ins = getTRSUnderlying(acm_ins)
if acm_und_ins != None:
for aliase in acm_und_ins.Aliases():
if aliase.Type().Name() == 'BB_TICKER':
bbticker = aliase.Alias().strip()
break
return bbticker
def getGroupTradeRef(external_ref):
groupTradeRef = None
strSql = """
select trdnbr, t.time
from trade t, instrument i, party a, party c, portfolio pf, leg l, instrument u
where t.insaddr = i.insaddr
and i.instype = 'TotalReturnSwap'
and t.status not in ('Void', 'Simulated')
and t.acquirer_ptynbr = a.ptynbr
and t.counterparty_ptynbr = c.ptynbr
and t.prfnbr = pf.prfnbr
and add_info(t, 'External Reference') = '%s'
and i.insaddr = l.insaddr
and l.float_rate = u.insaddr
order by t.time, trdnbr
""" % (external_ref)
print strSql
res = ael.asql(strSql)
columns, buf = res
for table in buf:
for row in table:
groupTradeRef = row[0]
break
return groupTradeRef
def getFirstTRS(external_ref, und_insaddr):
strSql = """select i.insid
from trade t, instrument i, leg l
where i.insaddr = t.insaddr
and i.instype = 'TotalReturnSwap'
and t.status not in ('Void', 'Simulated')
and add_info(t, 'External Reference') = '%s'
and i.insaddr = l.insaddr and l.payleg = 'No' and l.type = 'Total Return'
and add_info(t, 'Trd Pos Closed') ~= 'Yes'
and l.float_rate = %s
and t.trdnbr = t.trx_trdnbr""" % (external_ref, str(und_insaddr))
#print strSql
rs = ael.asql(strSql)
columns, buf = rs
insid = ''
for table in buf:
for row in table:
insid = str(row[0]).strip()
break
if insid == '':
return None
acm_ins = acm.FInstrument[insid]
return acm_ins
def getTotalTradeQuantity(external_ref, und_insaddr, asofdate):
acm_ins = getFirstTRS(external_ref, und_insaddr)
if acm_ins == None:
return None
#print "instrument='%s' and status <> 'Void' and status <> 'Simulated'" % acm_ins.Name()
#acm_trds = acm.FTrade.Select("instrument='%s' and status <> 'Void' and status <> 'Simulated' and tradeTime <= '%s'" % (acm_ins.Name(), asofdate.add_days(1)))
acm_trds = acm.FTrade.Select("instrument='%s' and status <> 'Void' and status <> 'Simulated' and tradeTime < '%s'" % (acm_ins.Name(), asofdate.add_days(1)))
acm_trd = None
if acm_trds != None:
for acm_trd in acm_trds:
if acm_trd.TrxTrade() != None:
if acm_trd.Oid() == acm_trd.TrxTrade().Oid():
break
else:
return None
total_quantity = 0.0
if acm_trd.TrxTrade() == None:
if acm_trd.Status() not in ('Void', 'Simulated'):
total_quantity = total_quantity + acm_trd.Quantity()
return abs(total_quantity)
else:
return None
elif acm_trd.Oid() == acm_trd.TrxTrade().Oid():
if acm_trd.Status() not in ('Void', 'Simulated'):
total_quantity = total_quantity + acm_trd.Quantity()
# find all other trade
#acm_trs_trds = acm.FTrade.Select("trxTrade=%s and tradeTime <= '%s'" % (acm_trd.Oid(), asofdate.add_days(1)))
acm_trs_trds = acm.FTrade.Select("trxTrade=%s and tradeTime < '%s'" % (acm_trd.Oid(), asofdate.add_days(1)))
for acm_trs_trd in acm_trs_trds:
# add this to handle tradeTime lag 8 hours from gmt
ael_trd_date = ael.date(str(acm_trs_trd.TradeTime())[0:10])
if ael_trd_date >= asofdate.add_days(1):
continue
if acm_trs_trd.Oid() != acm_trs_trd.TrxTrade().Oid() and \
acm_trs_trd.Status() not in ('Void', 'Simulated') and \
acm_trs_trd.Instrument().InsType() == 'TotalReturnSwap':
total_quantity = total_quantity + acm_trs_trd.Quantity()
#print total_quantity
'''
if total_quantity == 0.0:
return None
else:
return abs(total_quantity)
'''
return -total_quantity
else:
return -total_quantity
def getUnderlyingPrice(dt, ael_und_ins, currclspricemkt, histclspricemkt):
try:
if dt == ael.date_today():
cls_price = ael_und_ins.used_price(dt, ael_und_ins.curr.insid, 'Last', 0, currclspricemkt)
else:
cls_price = ael_und_ins.used_price(dt, ael_und_ins.curr.insid, 'Close', 0, histclspricemkt)
except:
#cls_price = ael_und_ins.used_price(dt, ael_und_ins.curr.insid, 'Last', 0, currclspricemkt)
cls_price = 0.0
return cls_price
def csv_to_arr(csv_file, start=0, has_header=True, delim=',', encoding='utf-8'):
arr = []
reader = []
if "http" in csv_file:
response = requests.get(csv_file)
text = response.content.decode(encoding)
else:
text = open(csv_file, 'rU')
reader = csv.reader(text, delimiter=delim)
arr = list(reader)
arr = list(zip(*arr))
arr = [x for x in arr if any(x)]
arr = list(zip(*arr))
header = ""
if has_header:
header = ','.join(arr[start])
arr = arr[start+1:]
return re.sub(r"[\*\.#/\$%\"\(\)& \_-]", "", header), arr
else:
return arr[start:]
return
def getFx(dt, fm_ccy, to_ccy, currclspricemkt, histclspricemkt):
if fm_ccy == 'CNY':
fm_ccy = 'CNH'
if to_ccy == 'CNY':
to_ccy = 'CNH'
ins_fm_ccy = ael.Instrument[fm_ccy]
ins_to_ccy = ael.Instrument[to_ccy]
ins_usd = ael.Instrument['USD']
try:
if dt == ael.date_today():
#fx_rate = ins_fm_ccy.used_price(dt, ins_to_ccy.insid, 'Last', 0, currclspricemkt)
fm_usd_rate = ins_fm_ccy.used_price(ael.date_today(), ins_usd.insid, 'Last', 0, currclspricemkt)
to_usd_rate = ins_usd.used_price(ael.date_today(), ins_to_ccy.insid, 'Last', 0, currclspricemkt)
fx_rate = fm_usd_rate * to_usd_rate
else:
#fx_rate = ins_fm_ccy.used_price(dt, ins_to_ccy.insid, 'Close', 0, histclspricemkt)
fm_usd_rate = ins_fm_ccy.used_price(dt, ins_usd.insid, 'Close', 0, histclspricemkt)
to_usd_rate = ins_usd.used_price(dt, ins_to_ccy.insid, 'Close', 0, histclspricemkt)
fx_rate = fm_usd_rate * to_usd_rate
except:
#fm_usd_rate = ins_fm_ccy.used_price(ael.date_today(), ins_usd.insid, 'Last', 0, currclspricemkt)
#to_usd_rate = ins_usd.used_price(ael.date_today(), ins_to_ccy.insid, 'Last', 0, currclspricemkt)
#fx_rate = fm_usd_rate * to_usd_rate
fx_rate = 0.0
#fx_rate = ins_fm_ccy.used_price(ael.date_today(), ins_to_ccy.insid, 'Last', 0, currclspricemkt)
return fx_rate
def mtm_valuation(dict):
header = "cpty,bbg,qty,mkt_price,today_mv"
asofdate = dict['asofdate']
if asofdate == 'Today':
asofdate = ael.date_today()
asofdate = ael.date(asofdate)
# Portfolios
portfolios = dict['portfolio']
portfolioList2 = []
pf_list = ''
portfolioList2.extend(portfolios)
for port in portfolioList2:
prfid = port
pfarr = []
pPf = ael.Portfolio[prfid]
HTI_FeedTrade_EDD_Util.getChildPortfolio(pPf, pfarr)
if len(pfarr) > 0:
for pf in pfarr:
if len(pf_list) != 0:
pf_list = pf_list + ','
pf_list = pf_list + "'" + pf + "'"
else:
if len(pf_list) != 0:
pf_list = pf_list + ','
pf_list = pf_list + "'" + prfid + "'"
# Acquirers
acq_array_list = dict['acquirers']
acq_list = ''
for acq in acq_array_list:
if acq_list == '':
acq_list = "'" + acq + "'"
else:
acq_list = acq_list + ",'" + acq + "'"
# Counterparties
pty_array_list = dict['counterparties']
pty_list = ''
for pty in pty_array_list:
if pty_list == '':
pty_list = "'" + pty + "'"
else:
pty_list = pty_list + ",'" + pty + "'"
currclspricemkt = dict['currclspricemkt']
histclspricemkt = dict['histclspricemkt']
base_ccy = dict['base_ccy']
product_strategy = 'SP_Portfolio Swap' #default no grouping
strSql = """select t.trdnbr, add_info(t, 'External Reference') 'external_ref', l.float_rate, c.ptyid
into externalRef
from instrument i, trade t, party a, portfolio pf, leg l, party c
where i.insaddr = t.insaddr
and t.status not in ('Void', 'Simulated')
and i.instype = 'TotalReturnSwap'
and t.acquirer_ptynbr = a.ptynbr
and a.ptyid in (@accquirer_list)
and pf.prfid in (@portfolio_list)
and t.time < '%s'
and i.insaddr = l.insaddr and l.payleg = 'No'
and t.counterparty_ptynbr = c.ptynbr
and add_info(t, 'Trd Pos Closed') ~= 'Yes'
@counterparty_list_sql
select distinct external_ref, float_rate, ptyid
from externalRef
where external_ref ~= ''""" % (asofdate.add_days(1))
strSql = strSql.replace('@portfolio_list', pf_list)
strSql = strSql.replace('@accquirer_list', acq_list)
if pty_list != '':
counterparty_list_sql = 'and c.ptyid in (@counterparty_list)'
counterparty_list_sql = counterparty_list_sql.replace("@counterparty_list", pty_list)
strSql = strSql.replace("@counterparty_list_sql", counterparty_list_sql)
else:
strSql = strSql.replace("@counterparty_list_sql", ' ')
print (strSql)
rs = ael.asql(strSql)
columns, buf = rs
arr = []
for table in buf:
for row in table:
rptRow = []
external_ref = str(row[0])
und_insaddr = row[1]
cpty_id = row[2]
acm_ins = getFirstTRS(external_ref, und_insaddr)
#print 'acm_ins', acm_ins.Name()
if acm_ins != None:
underlying_bbg = getUndInstrumentBBTicker(acm_ins)
ins_ccy = acm_ins.Currency().Name()
if ins_ccy == 'CNY':
ins_ccy = 'CNH'
qty = getTotalTradeQuantity(external_ref, und_insaddr, asofdate)
if round(qty, 2) == 0.0:
# suppress all have been closed out
continue
#print 'qty', qty
acm_und_ins = getTRSUnderlying(acm_ins)
today_underlying_price = getUnderlyingPrice(asofdate, ael.Instrument[acm_und_ins.Name()], currclspricemkt, histclspricemkt)
today_fx = getFx(asofdate, ins_ccy, base_ccy, currclspricemkt, histclspricemkt)
original_mv = today_underlying_price * qty * today_fx
rptRow = [cpty_id, underlying_bbg, int(qty), float(today_underlying_price), float(original_mv) ]
# print (rptRow)
arr.append(rptRow)
return header, arr
def client_cash(dict):
header = 'TradeDate,ClientCode,ClientName,TradeReference,CashType,Amount,ExternalReference'
asofdate = dict['asofdate']
if asofdate == 'Today':
asofdate = ael.date_today()
asofdate = ael.date(asofdate)
# Portfolios
portfolios = dict['portfolio']
portfolioList2 = []
pf_list = ''
portfolioList2.extend(portfolios)
for port in portfolioList2:
prfid = port
pfarr = []
pPf = ael.Portfolio[prfid]
HTI_FeedTrade_EDD_Util.getChildPortfolio(pPf, pfarr)
if len(pfarr) > 0:
for pf in pfarr:
if len(pf_list) != 0:
pf_list = pf_list + ','
pf_list = pf_list + "'" + pf + "'"
else:
if len(pf_list) != 0:
pf_list = pf_list + ','
pf_list = pf_list + "'" + prfid + "'"
# Acquirers
acq_array_list = dict['acquirers']
acq_list = ''
for acq in acq_array_list:
if acq_list == '':
acq_list = "'" + acq + "'"
else:
acq_list = acq_list + ",'" + acq + "'"
# Counterparties
pty_array_list = dict['counterparties']
pty_list = ''
for pty in pty_array_list:
if pty_list == '':
pty_list = "'" + pty + "'"
else:
pty_list = pty_list + ",'" + pty + "'"
base_ccy = dict['base_ccy']
currclspricemkt = dict['currclspricemkt']
histclspricemkt = dict['histclspricemkt']
strSql = """select cpty.ptyid, cpty.fullname, t.trdnbr, p.type, c.insid, p.amount, add_info(t, 'External Reference') 'ext_ref'
from trade t, instrument i, payment p, party cpty, party a, portfolio pf, instrument c
where t.insaddr = i.insaddr
and i.instype = 'Curr'
and t.trdnbr = p.trdnbr
and t.counterparty_ptynbr = cpty.ptynbr
and t.acquirer_ptynbr = a.ptynbr
and p.curr = c.insaddr
and a.ptyid in (@accquirer_list)
@counterparty_list_sql
and t.prfnbr = pf.prfnbr
and pf.prfid in (@portfolio_list)
@start_date
and t.time < '@dt'
and t.status not in ('Void', 'Simulated')
"""
strSql = strSql.replace('@dt', asofdate.add_days(1).to_string('%Y-%m-%d'))
strSql = strSql.replace('@portfolio_list', pf_list)
strSql = strSql.replace('@accquirer_list', acq_list)
strSql = strSql.replace("@start_date", ' ')
if pty_list != '':
counterparty_list_sql = 'and cpty.ptyid in (@counterparty_list)'
counterparty_list_sql = counterparty_list_sql.replace("@counterparty_list", pty_list)
strSql = strSql.replace("@counterparty_list_sql", counterparty_list_sql)
else:
strSql = strSql.replace("@counterparty_list_sql", ' ')
print (strSql)
rs = ael.asql(strSql)
columns, buf = rs
rptContent = []
for table in buf:
for row in table:
client_code = row[0]
client_name = row[1]
trade_ref = row[2]
cash_type = row[3]
currency = row[4]
amt = row[5]
ext_ref = row[6]
acm_trd = acm.FTrade[trade_ref]
if acm_trd != None:
trade_date = acm_trd.TradeTime()[0:10]
today_fx = getFx(asofdate, currency, base_ccy, currclspricemkt, histclspricemkt)
rptRow = [str(trade_date), client_code, client_name, str(trade_ref), cash_type, float(amt*today_fx), ext_ref]
print (rptRow)
rptContent.append(rptRow)
return header, rptContent
def get_value_day(asofdate, pay_cal, spot_day):
value_day = asofdate
cal = acm.FCalendar.Select("name='%s'" % (pay_cal))[0]
for i in range(0, spot_day):
is_holiday = True
while is_holiday:
value_day = value_day.add_days(1)
if not cal.IsNonBankingDay(cal, cal, value_day):
is_holiday = False
return value_day
def calc_stock_loan(cur, coll_arr, cpty, ext_ref, coll_ratio, ia, total_mv, asofdate, base_ccy, currclspricemkt, histclspricemkt):
trade_ref = getGroupTradeRef(ext_ref)
if total_mv is None and ia is not None and ia != 0:
value_day = get_value_day(asofdate, "Hong Kong", 2)
loan_amount = ia
trade_source = "EMSX"
ccy = base_ccy
comment = "IA full return"
coll_arr.append(["EDD Deltaone", "HKD", asofdate.to_string('%Y-%m-%d'), value_day.to_string('%Y-%m-%d'), "HTIFS - EDD", value_day.to_string('%Y-%m-%d'),
"FO Confirmed", cpty, cpty, "", "EDMO2", "Cash Entry", comment,
("Expense" if loan_amount > 0 else "Income"), "", "IM-EDD(Short Pos)", -loan_amount, ccy, "", "", "", "", "Group Trade Ref", trade_ref, "",
"", "External Reference", ext_ref, "", "", "Product_Strategy", "SP_Portfolio Swap", "", "", "Trade Source", trade_source ])
else:
cur.execute("""
select Security, Currency, PayCal1, SpotDays, Sum(case when BS = 'BUY' then Quantity else -Quantity end) as qty, TradeSource
from trd
where Counterparty = ? and (ShortSell = 'Y' or BuyBack = 'Y' )
group by Counterparty, Security, Currency, PayCal1, SpotDays
having qty <> 0
""", (cpty,))
for row in cur.fetchall():
stock_code = str(row["Security"])
ccy = str(row["Currency"])
pay_cal = str(row["PayCal1"])
spot_day = int(row["SpotDays"])
qty = float(row["qty"])
trade_source = str(row["TradeSource"])
value_day = get_value_day(asofdate, pay_cal, spot_day)
today_underlying_price = getUnderlyingPrice(asofdate, ael.Instrument[stock_code], currclspricemkt, histclspricemkt)
ratio = (1 if coll_ratio < 1.2 and coll_ratio > 0 and qty > 0 else 1.2)
loan_amount = qty*today_underlying_price*ratio
comment = "IA " + "{0:.0f}%".format(ratio*100) + (" return " if loan_amount > 0 else " for ") + stock_code
coll_arr.append(["EDD Deltaone", "HKD", asofdate.to_string('%Y-%m-%d'), value_day.to_string('%Y-%m-%d'), "HTIFS - EDD", value_day.to_string('%Y-%m-%d'),
"FO Confirmed", cpty, cpty, "", "EDMO2", "Cash Entry", comment,
("Expense" if loan_amount > 0 else "Income"), "", "IM-EDD(Short Pos)", -loan_amount, ccy, "", "", "", "", "Group Trade Ref", trade_ref, "",
"", "External Reference", ext_ref, "", "", "Product_Strategy", "SP_Portfolio Swap", "", "", "Trade Source", trade_source ])
return
def arr_to_xlsx(xlsx_file, header, arr, sheet="", start_row=1, output_filename=""):
xl = win32com.client.Dispatch('Excel.Application')
wb = xl.Workbooks.Open(xlsx_file)
ws = wb.Worksheets(1) if sheet == "" else wb.Worksheets(sheet)
if header != "":
for i, cell in enumerate(header.split(',')):
ws.Cells(start_row,i+1).Value = cell
for i, row in enumerate(arr):
for j, cell in enumerate(row):
if str(cell) != "" and str(cell)[0] == '=':
ws.Cells(i+start_row+1,j+1).Formula = cell
else:
ws.Cells(i+start_row+1,j+1).Value = cell
ws.Columns.AutoFit()
xl.DisplayAlerts = False
wb.SaveAs(xlsx_file if output_filename == "" else output_filename)
xl.DisplayAlerts = True
wb.Close(True)
return
def ael_main(dict):
conn, cur = db_cur()
asofdate = dict['asofdate']
if asofdate == 'Today':
asofdate = ael.date_today()
asofdate = ael.date(asofdate)
currclspricemkt = dict['currclspricemkt']
histclspricemkt = dict['histclspricemkt']
csh_header, csh_arr = client_cash(dict)
mtm_header, mtm_arr = mtm_valuation(dict)
trd_header, trd_arr = csv_to_arr(dict["pb_trd_file"].replace("YYYYMMDD", asofdate.to_string('%Y%m%d')))
create_tbl(cur, "csh", csh_header, csh_arr)
create_tbl(cur, "mtm", mtm_header, mtm_arr)
create_tbl(cur, "trd", trd_header, trd_arr)
cur.execute("""
select trd1.Counterparty, trd1.ExternalReference, -ia/total_mv as CollRatio, ia, total_mv
from
(select distinct Counterparty, ExternalReference
from trd
group by Counterparty, ExternalReference
) trd1
left join
(select ClientCode, ExternalReference, sum(Amount) as ia
from csh
where csh.CashType = 'IM-EDD(Short Pos)'
group by ClientCode, ExternalReference) tmp2
on trd1.Counterparty = tmp2.ClientCode
left join
(select cpty, sum(today_mv) as total_mv
from mtm
where qty < 0
group by cpty) tmp1
on tmp1.cpty = tmp2.ClientCode
""")
coll_arr = []
for row in cur.fetchall():
cpty = row["Counterparty"]
ext_ref = row["ExternalReference"]
coll_ratio = row["CollRatio"]
ia = row["ia"]
total_mv = row["total_mv"]
print (cpty, ia, total_mv)
calc_stock_loan(cur, coll_arr, cpty, ext_ref, coll_ratio, ia, total_mv, asofdate, dict["base_ccy"], currclspricemkt, histclspricemkt)
arr_to_xlsx(dict["loan_xls_template"], "", coll_arr, "Cash Entry", 3, dict["loan_xls_output"].replace("YYYYMMDD", asofdate.to_string('%Y%m%d')))
print ("Finished")
return
|
frederick623/pb
|
fa_collateral_upload/HTI_Loan_Collateral_Automation.py
|
Python
|
apache-2.0
| 25,027 | 0.010349 |
from gbdxtools import Interface
gbdx = None
def go():
print(gbdx.task_registry.list())
print(gbdx.task_registry.get_definition('HelloGBDX'))
if __name__ == "__main__":
gbdx = Interface()
go()
|
DigitalGlobe/gbdxtools
|
examples/basic_workflow_client.py
|
Python
|
mit
| 211 | 0.009479 |
"""This module provides the main functionality of cfbackup
"""
from __future__ import print_function
import sys
import argparse
import json
import CloudFlare
# https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records
class CF_DNS_Records(object):
"""
commands for zones manipulation
"""
def __init__(self, ctx):
self._ctx = ctx
def run(self):
"""
run - entry point for DNS records manipulations
"""
cmd = self._ctx.command
if cmd == "show":
self.show()
else:
sys.exit("Command " + cmd + " not implemened for zones")
def show(self):
"""Show CF zones"""
# print("Show DSN records")
try:
records = self._all_records()
except CloudFlare.exceptions.CloudFlareAPIError as e:
exit('/zones %d %s - api call failed' % (e, e))
if not self._ctx.pretty:
print(json.dumps(records, indent=4))
return
records_by_type = {}
types = {}
for rec in records:
if not records_by_type.get(rec["type"]):
types[rec["type"]] = 0
records_by_type[rec["type"]] = []
types[rec["type"]] += 1
records_by_type[rec["type"]].append(rec)
for t in sorted(list(types)):
for rec in records_by_type[t]:
# print(json.dumps(rec, indent=4))
print("Type: {}".format(rec["type"]))
print("Name: {}".format(rec["name"]))
print("Content: {}".format(rec["content"]))
print("TTL: {}{}".format(
rec["ttl"],
" (auto)" if str(rec["ttl"]) == "1" else "",
))
print("Proxied: {}".format(rec["proxied"]))
print("Auto: {}".format(rec["meta"]["auto_added"]))
print("")
print("")
print("-------------------")
print("Records stat:")
print("-------------------")
print("{0: <11} {1: >4}".format("<type>", "<count>"))
for t in sorted(list(types)):
print("{0: <11} {1: >4}".format(t, types[t]))
print("-------------------")
print("{0: <11} {1: >4}".format("Total:", len(records)))
def _all_records(self):
cf = CloudFlare.CloudFlare()
zones = cf.zones.get(params={'name': self._ctx.zone_name, 'per_page': 1})
if len(zones) == 0:
exit('No zones found')
zone_id = zones[0]['id']
cf_raw = CloudFlare.CloudFlare(raw=True)
page = 1
records = []
while True:
raw_results = cf_raw.zones.dns_records.get(
zone_id,
params={'per_page':100, 'page':page},
)
total_pages = raw_results['result_info']['total_pages']
result = raw_results['result']
for rec in result:
records.append(rec)
if page == total_pages:
break
page += 1
return records
# https://api.cloudflare.com/#zone-list-zones
class CF_Zones(object):
"""
commands for zones manipulation
"""
def __init__(self, ctx):
self._ctx = ctx
def run(self):
"""
run - entry point for zones manipulations
"""
cmd = self._ctx.command
if cmd == "show":
self.show()
else:
sys.exit("Command " + cmd + " not implemened for zones")
def show(self):
"""Show CF zones"""
# print("Show cf zones")
try:
zones = self._all_zones()
except CloudFlare.exceptions.CloudFlareAPIError as e:
exit('/zones %d %s - api call failed' % (e, e))
if not self._ctx.pretty:
print(json.dumps(zones, indent=4))
return
for z in zones:
print("Zone: {0: <16} NS: {1}".format(
z["name"],
z["name_servers"][0],
))
for ns in z["name_servers"][1:]:
print(" {0: <16} {1}".format("", ns))
def _all_zones(self):
cf = CloudFlare.CloudFlare(raw=True)
if self._ctx.zone_name:
raw_results = cf.zones.get(params={
'name': self._ctx.zone_name,
'per_page': 1,
'page': 1,
})
return raw_results['result']
page = 1
domains = []
while True:
raw_results = cf.zones.get(params={'per_page':5, 'page':page})
total_pages = raw_results['result_info']['total_pages']
zones = raw_results['result']
for z in zones:
domains.append(z)
if page == total_pages:
break
page += 1
return domains
COMMANDS = [
"show",
# "restore"
]
OBJECT_ENTRYPOINT = {
"zones": CF_Zones,
"dns": CF_DNS_Records,
}
def main():
"""Main entry"""
parser = argparse.ArgumentParser(
prog="cfbackup",
description='Simple Cloudflare backup tool.',
)
parser.add_argument(
"command",
choices=[x for x in COMMANDS],
help="command",
)
subparsers = parser.add_subparsers(
help='Object of command',
dest="object"
)
parser_zones = subparsers.add_parser("zones")
parser_zones.add_argument(
"--pretty",
action='store_true',
help="show user friendly output",
)
parser_zones.add_argument(
"-z", "--zone-name",
help="optional zone name",
)
parser_dns = subparsers.add_parser("dns")
parser_dns.add_argument(
"-z", "--zone-name",
required=True,
help="required zone name",
)
parser_dns.add_argument(
"--pretty",
action='store_true',
help="show user friendly output",
)
args = parser.parse_args()
OBJECT_ENTRYPOINT[args.object](args).run()
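# Example CLI usage, assuming the package installs a `cfbackup` console script
# and python-cloudflare credentials are configured (e.g. ~/.cloudflare/cloudflare.cfg):
#
#   cfbackup zones show --pretty
#   cfbackup dns show -z example.com > example.com-dns.json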
|
nordicdyno/cfbackup
|
cfbackup/core.py
|
Python
|
bsd-2-clause
| 6,025 | 0.001494 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# Documents
#
"""
Documents
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import inspect
from . import pyarduino
this_file_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
def get_plugin_path():
this_folder_path = os.path.dirname(this_file_path)
plugin_path = os.path.dirname(this_folder_path)
return plugin_path
def get_packages_path():
plugin_path = get_plugin_path()
packages_path = os.path.dirname(plugin_path)
return packages_path
def get_stino_user_path():
packages_path = get_packages_path()
user_path = os.path.join(packages_path, 'User')
stino_user_path = os.path.join(user_path, 'Stino')
return stino_user_path
def get_preset_path():
plugin_path = get_plugin_path()
preset_path = os.path.join(plugin_path, 'preset')
return preset_path
def get_user_preset_path():
stino_user_path = get_stino_user_path()
preset_path = os.path.join(stino_user_path, 'preset')
return preset_path
def get_user_menu_path():
    stino_user_path = get_stino_user_path()
    menu_path = os.path.join(stino_user_path, 'menu')
    return menu_path
def get_settings():
settings = pyarduino.base.settings.get_arduino_settings()
return settings
def get_arduino_info():
arduino_info = pyarduino.arduino_info.get_arduino_info()
return arduino_info
def get_i18n():
i18n = pyarduino.base.i18n.I18N()
return i18n
|
kidmillions/Stino
|
stino/st_base.py
|
Python
|
mit
| 1,584 | 0.000631 |
'''MobileNetV2 in PyTorch.
See the paper "Inverted Residuals and Linear Bottlenecks:
Mobile Networks for Classification, Detection and Segmentation" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
'''expand + depthwise + pointwise'''
def __init__(self, in_planes, out_planes, expansion, stride):
super(Block, self).__init__()
self.stride = stride
planes = expansion * in_planes
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, groups=planes, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes)
self.shortcut = nn.Sequential()
if stride == 1 and in_planes != out_planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_planes),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out = out + self.shortcut(x) if self.stride==1 else out
return out
class MobileNetV2(nn.Module):
# (expansion, out_planes, num_blocks, stride)
cfg = [(1, 16, 1, 1),
(6, 24, 2, 1), # NOTE: change stride 2 -> 1 for CIFAR10
(6, 32, 3, 2),
(6, 64, 4, 2),
(6, 96, 3, 1),
(6, 160, 3, 2),
(6, 320, 1, 1)]
def __init__(self, num_classes=10):
super(MobileNetV2, self).__init__()
# NOTE: change conv1 stride 2 -> 1 for CIFAR10
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.layers = self._make_layers(in_planes=32)
self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(1280)
self.linear = nn.Linear(1280, num_classes)
def _make_layers(self, in_planes):
layers = []
for expansion, out_planes, num_blocks, stride in self.cfg:
strides = [stride] + [1]*(num_blocks-1)
for stride in strides:
layers.append(Block(in_planes, out_planes, expansion, stride))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layers(out)
out = F.relu(self.bn2(self.conv2(out)))
# NOTE: change pooling kernel_size 7 -> 4 for CIFAR10
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def test():
net = MobileNetV2()
x = torch.randn(2,3,32,32)
y = net(x)
print(y.size())
# test()
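# test() above should print torch.Size([2, 10]) for the (2, 3, 32, 32) input:
# one 10-class logit vector per image in the batch of two.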
|
kuangliu/pytorch-cifar
|
models/mobilenetv2.py
|
Python
|
mit
| 3,092 | 0.003558 |
#!/usr/bin/env python3
############################################################################
# Copyright 2017 RIFT.IO Inc #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import argparse
import logging
import os
import stat
import subprocess
import sys
import time
import yaml
class ConfigurationError(Exception):
pass
def configure_waf_haproxy_cp(logger, run_dir, mgmt_ip, haproxy_cp_ip):
sh_file = "{}/waf_set_haproxy_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "echo \"<VirtualHost *:80>\r"
send " AddDefaultCharset UTF-8\r"
send " ProxyPreserveHost On\r"
send " ProxyRequests off\r"
send " ProxyVia Off\r"
send " ProxyPass / http://{haproxy_cp_ip}:5000/\r"
send " ProxyPassReverse / http://{haproxy_cp_ip}:5000/\r"
send " </VirtualHost>\" > /etc/httpd/conf.d/waf_proxy.conf\r"
expect "]# "
send "echo \"<IfModule mod_security2.c>\r"
send " IncludeOptional modsecurity.d/owasp-modsecurity-crs/modsecurity_crs_10_setup.conf\r"
send " IncludeOptional modsecurity.d/owasp-modsecurity-crs/base_rules/*.conf\r\r"
send " SecRuleEngine On\r"
send " SecRequestBodyAccess On\r"
send " SecResponseBodyAccess On\r"
send " SecDebugLog /var/log/httpd/modsec-debug.log\r"
send " SecDebugLogLevel 3\r"
send "</IfModule>\" > /etc/httpd/conf.d/mod_security.conf\r"
expect "]# "
send "systemctl stop httpd\r"
expect "]# "
send "systemctl start httpd\r"
expect "]# "
'''.format(mgmt_ip=mgmt_ip, haproxy_cp_ip=haproxy_cp_ip))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy add waf config failed: {}".format(rc))
def configure_haproxy_add_waf(logger, run_dir, haproxy_mgmt_ip, waf_cp_ip, waf_server_name):
sh_file = "{}/haproxy_add_waf_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "grep \"server {waf_server_name} {waf_cp_ip}\" /etc/haproxy/haproxy.cfg && echo \"Already configured\" && exit 0\r"
expect {{
"]$ " {{ exit }}
"]# "
}}
send "sed -i \'s/\\(.*WAF list.*\\)/\\1\\n server {waf_server_name} {waf_cp_ip}:80 check/g\' /etc/haproxy/haproxy.cfg\r"
expect "]# "
send "systemctl reload haproxy\r"
expect "]# "
'''.format(mgmt_ip=haproxy_mgmt_ip, waf_cp_ip=waf_cp_ip, waf_server_name=waf_server_name))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy add waf config failed: {}".format(rc))
def configure_haproxy_remove_waf(logger, run_dir, haproxy_mgmt_ip, waf_server_name):
sh_file = "{}/haproxy_remove_httpd_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "sed -i \'/server {waf_server_name}/d\' /etc/haproxy/haproxy.cfg\r"
expect "]# "
send "systemctl reload haproxy\r"
expect "]# "
'''.format(mgmt_ip=haproxy_mgmt_ip, waf_server_name=waf_server_name))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy remove waf config failed: {}".format(rc))
def main(argv=sys.argv[1:]):
try:
parser = argparse.ArgumentParser()
parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("--quiet", "-q", dest="verbose", action="store_false")
args = parser.parse_args()
run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
if not os.path.exists(run_dir):
os.makedirs(run_dir)
log_file = "{}/rift_waf_config-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logging.basicConfig(filename=log_file, level=logging.DEBUG)
logger = logging.getLogger()
ch = logging.StreamHandler()
if args.verbose:
ch.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
except Exception as e:
print("Got exception:{}".format(e))
raise
try:
dry_run = args.dry_run
yaml_str = args.yaml_cfg_file.read()
logger.debug("Input YAML file: %s", yaml_str)
yaml_cfg = yaml.load(yaml_str)
logger.debug("Input YAML cfg: %s", yaml_cfg)
# Check if this is post scale out trigger
def find_cp_ip(vnfr_list, vnfd_name, cp_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
for cp in vnfr['connection_points']:
logger.debug("Connection point: %s", format(cp))
if cp_name in cp['name']:
return cp['ip_address']
raise ValueError("Could not find vnfd %s connection point %s", vnfd_name, cp_name)
def find_mgmt_ip(vnfr_list, vnfd_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
return vnfr['rw_mgmt_ip']
raise ValueError("Could not find vnfd %s mgmt ip", vnfd_name)
def find_vnfr(vnfr_list, vnfd_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
return vnfr
raise ValueError("Could not find vnfd %s", vnfd_name)
haproxy_cp_ip = find_cp_ip(yaml_cfg['vnfrs_others'], "haproxy_vnfd", "cp0")
haproxy_mgmt_ip = find_mgmt_ip(yaml_cfg['vnfrs_others'], "haproxy_vnfd")
waf_cp_ip = find_cp_ip(yaml_cfg['vnfrs_in_group'], "waf_vnfd", "cp0")
waf_mgmt_ip = find_mgmt_ip(yaml_cfg['vnfrs_in_group'], "waf_vnfd")
waf_vnfr = find_vnfr(yaml_cfg['vnfrs_in_group'], "waf_vnfd")
# HAProxy wants to use a name without .'s
waf_server_name = waf_vnfr["name"].replace(".", "__")
if yaml_cfg['trigger'] == 'post_scale_out':
logger.debug("Sleeping for 60 seconds to give VNFD mgmt VM a chance to boot up")
time.sleep(60)
configure_haproxy_add_waf(logger, run_dir, haproxy_mgmt_ip, waf_cp_ip, waf_server_name)
configure_waf_haproxy_cp(logger, run_dir, waf_mgmt_ip, haproxy_cp_ip)
elif yaml_cfg['trigger'] == 'pre_scale_in':
configure_haproxy_remove_waf(logger, run_dir, haproxy_mgmt_ip, waf_server_name)
else:
raise ValueError("Unexpected trigger {}".format(yaml_cfg['trigger']))
except Exception as e:
logger.exception(e)
raise
if __name__ == "__main__":
main()
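# Illustrative input YAML (hypothetical names/addresses) covering the fields
# read above:
#
#   trigger: post_scale_out
#   vnfrs_others:
#     - name: haproxy_vnfd.1
#       rw_mgmt_ip: 10.0.0.5
#       connection_points:
#         - {name: cp0, ip_address: 11.0.0.5}
#   vnfrs_in_group:
#     - name: waf_vnfd.1
#       rw_mgmt_ip: 10.0.0.6
#       connection_points:
#         - {name: cp0, ip_address: 11.0.0.6}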
|
RIFTIO/rift.ware-descriptor-packages
|
4.3/src/nsd/haproxy_waf_http_ns/scripts/waf_config.py
|
Python
|
apache-2.0
| 9,663 | 0.003415 |
# Mostly from http://peterdowns.com/posts/first-time-with-pypi.html
from distutils.core import setup
setup(
name = 'pmdp',
packages = ['pmdp'],
version = '0.3',
description = 'A poor man\'s data pipeline',
author = 'Dan Goldin',
author_email = 'dangoldin@gmail.com',
url = 'https://github.com/dangoldin/poor-mans-data-pipeline',
download_url = 'https://github.com/dangoldin/poor-mans-data-pipeline/tarball/0.3',
keywords = ['data', 'data-pipeline'],
classifiers = [],
)
|
dangoldin/poor-mans-data-pipeline
|
setup.py
|
Python
|
mit
| 491 | 0.04277 |
# -*- coding: utf-8 -*-
import os
import glob
import sys
import re
# Display debug information, print to statistics file
verbose = 0
obstruents = {'b':'B', 'd':'D', 'g':'G'}
nasals = ['m', 'n', 'N']
# Vocales
vowels = ['a', 'e', 'i', 'o', 'u']
# Semivocales
semivowels = ['%', '#', '@', '$', '&', '!', '*', '+', '-', '3']
# Voiced consonants
voiced = ['b', 'B', 'd', 'D', 'g', 'G', 'm', 'n', 'N', '|', 'J', 'r', 'R']
# Track the number of utterances
numUtterances = 0
# Track the number of words
numWords = 0
#wordsPerUtterance = []
phonemesPerWord = []
def interVocalicRules(sent):
newSent = sent
# Create all the dipthongs that occur between words
newSent = newSent.replace('a i', '- ')
newSent = newSent.replace('a u', '+ ')
# Do I indicate vowel lengthening?
# newSent = newSent.replace('a a', 'aa ')
newSent = newSent.replace('e i', '* ')
# newSent = newSent.replace('e e', 'ee ')
newSent = newSent.replace('i a', '% ')
newSent = newSent.replace('i e', '# ')
newSent = newSent.replace('i o', '@ ')
# newSent = newSent.replace('i i', 'ii ')
newSent = newSent.replace('o i', '3 ')
# newSent = newSent.replace('o o', 'oo ')
# This is not a dipthong replacement but it still needs to happen:
# lo ultimo = [lultimo]
newSent = newSent.replace('o u', 'u ')
newSent = newSent.replace('u a', '& ')
newSent = newSent.replace('u e', '$ ')
newSent = newSent.replace('u i', '! ')
# newSent = newSent.replace('u u', 'uu ')
# Avoid creating onsets that are illegal
newSent = newSent.replace(' nt','n t')
newSent = newSent.replace(' nR','n R')
newSent = newSent.replace(' zl','z l')
newSent = newSent.replace(' zR','z R')
newSent = newSent.replace(' ts','t s')
newSent = newSent.replace(' tl','t l')
newSent = newSent.replace(' tR','t R')
newSent = newSent.replace(' nd','n d')
newSent = newSent.replace(' ks','k s')
newSent = newSent.replace(' kl','k l')
# Turn b/d/g's into B/D/G's where appropriate
strList = list(newSent)
i = 0
prev = None
for symbol in strList:
if symbol in obstruents:
if not prev or prev in nasals:
i += 1
continue
else:
strList[i] = obstruents[symbol]
if symbol in voiced:
if prev == 's':
strList[i-1] = 'z'
prev = symbol
i += 1
newSent = "".join(strList)
return newSent
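# Example (hypothetical transcription): a stop between vowels across a word
# boundary weakens to its approximant symbol, e.g.:
#
#   interVocalicRules('la bota')  ->  'la Bota'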
def sententialRules(sentence):
# Apply rules between words, like when a [b] occurs between vowels, turn it into a [B]
# Vowels together.. a aser = aser
# Apply rule for two vowels being together.. si aqui = s(ia dipthong)ki...
# Split the sentence into chunks based on pauses.
# This distinction exists because:
chunks = sentence.split('[/]')
# This has to be done here because I allow [/] to be remain up until this point
# for the purpose of knowing where boundaries occur, but we don't want to count [/]
newChunkList = []
for chunk in chunks:
#wordsPerUtterance.append(len(chunk.split()))
globals()["numWords"] += len(chunk.split())
newChunk = interVocalicRules(chunk)
if verbose == 1:
print newChunk
newChunkList.append(newChunk)
return newChunkList
def main():
dictFile = "Spanish/dicts/dict_converted.txt"
chaDir = "Spanish/cha_files/"
file = open(dictFile, 'r')
lines = file.readlines()
file.close()
# Word bank is a dictionary - lookup by its key retrieves its IPA translation
word = {}
# Split by whitespace since that's how it's set up
for line in lines:
x = line.split()
word[x[0].lower()] = x[1]
keyErrors = open("Spanish/dicts/keyErrors.txt", "w")
outFile = open("Spanish/Spanish-phon.txt", 'w')
outFileOrig = open("Spanish/Spanish-ortho.txt", 'w')
for fileName in sorted(glob.glob(os.path.join(chaDir, '*.cha'))):
# Skip file if it's not below 20 months
if fileName.startswith(tuple([chaDir + str(28),chaDir + str(36)])):
continue
if verbose == 1:
print fileName
file = open(fileName, 'r')
lines = file.readlines()
file.close()
#file = open(fileName.replace('.cha', '_ipa.txt'), 'w')
for line in lines:
# Only look at child-directed speech(from INV or PAR)
if line.startswith('*INV') or line.startswith('*PAR') or line.startswith('*TEA') or line.startswith('*FAT'):
if verbose == 1:
print 'Original line: ' + line
# Split on pauses to separate utterances and count them
#numUtterances += len(line.split('[/]'))
# Split the sentence into individual words
words = line.split()
# Build the IPA-translated sentence
ipaSentence = ""
# Look up individual words
for x in words[1:]:
# Ignore punctuation
if x == '.' or x == '?' or x == '!':
continue
outFileOrig.write(x + ' ')
# Need to make some character substitions to make dictionary search work
x = re.sub('é','}',x)
x = re.sub('á','{',x)
x = re.sub('í','<',x)
x = re.sub('ó','>',x)
x = re.sub('ú','}',x)
x = re.sub('ñ','|',x)
x = re.sub('ü','=',x)
x = re.sub(':','',x)
x = re.sub('<.+>','',x)
try:
ipaSentence += word[x.lower()]
ipaSentence += " "
except KeyError:
keyErrors.write("KeyError with: " + x.lower() + "\n")
continue
outFileOrig.write('\n')
newChunks = sententialRules(ipaSentence)
ipaSentence = ""
for chunk in newChunks:
ipaSentence += chunk
ipaSentence += " "
newChunks = ipaSentence.split()
ipaSentence = ""
for chunk in newChunks:
ipaSentence += chunk
ipaSentence += " "
# Remove trailing whitespace
ipaSentence = ipaSentence.rstrip()
# Calculate phonemes per word
ipaWords = ipaSentence.split()
phonemesInWord = 0
for ipaWord in ipaWords:
phonemesInWord += len(ipaWord)
# Number of original words is the length of the "words" variable beyond the first
# part that indicates the speaker(i.e. *INV:)
globals()["phonemesPerWord"].append(float(float(phonemesInWord) / float(len(words[1:]))))
if verbose == 1:
print ipaSentence
if len(ipaSentence) > 0:
outFile.write(ipaSentence + '\n')
globals()["numUtterances"] += 1
#file.write(ipaSentence + '\n')
#file.close()
outFile.close()
keyErrors.close()
if verbose == 1:
statisticsFile = open("statistics.txt", 'w')
statisticsFile.write("Number of utterances: " + str(globals()["numUtterances"]) + "\n")
statisticsFile.write("Number of words by tokens: " + str(globals()["numWords"]) + "\n")
statisticsFile.write("Number of words by type: " + str(len(word)) + "\n")
averageWordsPerUtterance = float(float(globals()["numWords"]) / float(numUtterances))
statisticsFile.write("Words per utterance on average: " + str(averageWordsPerUtterance) + "\n")
averagePhonemesPerWord = float(float(sum(globals()["phonemesPerWord"])) / float(len(globals()["phonemesPerWord"])))
statisticsFile.write("Phonemes per word on average: " + str(averagePhonemesPerWord))
statisticsFile.close()
if __name__ == "__main__":
main()
|
lawphill/PhillipsPearl_Corpora
|
Spanish/scripts/dict_convert.py
|
Python
|
mit
| 8,349 | 0.010189 |
#!/usr/bin/env python
#
# Copyright (c) 2013 In-Q-Tel, Inc/Lab41, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on 19 October 2013
@author: Lab41
Helper functions for creating visualizations
"""
import array
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
def visualize_scatter(counts, codes, data, codebook, num_clusters, xlabel="", ylabel="", title=""):
"""
    Generates a 2-d scatter plot of two-feature data, colored by cluster,
    together with the cluster centroids.
    :param counts: dictionary mapping each cluster index to the number of
        observations assigned to that cluster
    :param codes: list of cluster codes for each observation row in the order
        returned by the original query
    :param data: list of observations returned from query in their original order
    :param codebook: the coordinates of the centroids
    :param num_clusters: number of specified clusters up to 8
    :param xlabel: a label for the x axis (Default: "")
    :param ylabel: a label for the y axis (Default: "")
    :param title: a title for the plot (Default: "")
"""
if num_clusters > 8:
print "Visualize scatter only supports up to 8 clusters"
return
num_features = 2
list_arrays = list()
    # one running insert-index per cluster
    list_arr_idx = array.array("I", [0] * num_clusters)
for idx in range(num_clusters):
list_arrays.append(np.zeros((counts[idx], num_features)))
for i, j in zip(codes, data):
list_arrays[i][list_arr_idx[i]][0] = j[0]
list_arrays[i][list_arr_idx[i]][1] = j[1]
list_arr_idx[i] += 1
#plot the clusters first as relatively larger circles
plt.scatter(codebook[:,0], codebook[:,1], color='orange', s=260)
colors = ['red', 'blue', 'green', 'purple', 'cyan', 'black', 'brown', 'grey']
for idx in range(num_clusters):
plt.scatter(list_arrays[idx][:,0], list_arrays[idx][:,1], c=colors[idx])
plt.title(title)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
#plt.show()
plt.savefig('/home/docker/foo.png')
plt.close()
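# --- Usage sketch (illustrative; not part of the original module) ---
# Shows how counts, codes and the codebook fit together; assumes SciPy's
# k-means provides the centroids and per-observation labels. Note the plot
# is written to the hard-coded /home/docker/foo.png path above.
if __name__ == "__main__":
    from scipy.cluster.vq import kmeans2
    demo_data = np.random.rand(200, 2)
    demo_codebook, demo_codes = kmeans2(demo_data, 3)
    demo_counts = {i: int((demo_codes == i).sum()) for i in range(3)}
    visualize_scatter(demo_counts, demo_codes, demo_data, demo_codebook, 3,
                      xlabel="x", ylabel="y", title="k-means demo")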
|
kfoss/try41
|
dockerfiles/redwood/visual.py
|
Python
|
apache-2.0
| 2,481 | 0.008061 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains an Outlook Registry parser."""
from plaso.lib import event
from plaso.parsers.winreg_plugins import interface
__author__ = 'David Nides (david.nides@gmail.com)'
class OutlookSearchMRUPlugin(interface.KeyPlugin):
"""Windows Registry plugin parsing Outlook Search MRU keys."""
NAME = 'winreg_outlook_mru'
DESCRIPTION = 'PST Paths'
REG_KEYS = [
u'\\Software\\Microsoft\\Office\\15.0\\Outlook\\Search',
u'\\Software\\Microsoft\\Office\\14.0\\Outlook\\Search']
# TODO: The catalog for Office 2013 (15.0) contains binary values not
# dword values. Check if Office 2007 and 2010 have the same. Re-enable the
# plug-ins once confirmed and OutlookSearchMRUPlugin has been extended to
# handle the binary data or create a OutlookSearchCatalogMRUPlugin.
# Registry keys for:
# MS Outlook 2007 Search Catalog:
# '\\Software\\Microsoft\\Office\\12.0\\Outlook\\Catalog'
# MS Outlook 2010 Search Catalog:
# '\\Software\\Microsoft\\Office\\14.0\\Outlook\\Search\\Catalog'
# MS Outlook 2013 Search Catalog:
# '\\Software\\Microsoft\\Office\\15.0\\Outlook\\Search\\Catalog'
REG_TYPE = 'NTUSER'
def GetEntries(self, key, **unused_kwargs):
"""Collect the values under Outlook and return event for each one."""
value_index = 0
for value in key.GetValues():
# Ignore the default value.
if not value.name:
continue
# Ignore any value that is empty or that does not contain an integer.
if not value.data or not value.DataIsInteger():
continue
# TODO: change this 32-bit integer into something meaningful, for now
# the value name is the most interesting part.
text_dict = {}
text_dict[value.name] = '0x{0:08x}'.format(value.data)
if value_index == 0:
timestamp = key.last_written_timestamp
else:
timestamp = 0
yield event.WinRegistryEvent(
key.path, text_dict, timestamp=timestamp,
source_append=': {0:s}'.format(self.DESCRIPTION))
value_index += 1
|
iwm911/plaso
|
plaso/parsers/winreg_plugins/outlook.py
|
Python
|
apache-2.0
| 2,761 | 0.010503 |
# -*- coding: utf-8 -*-
import warnings
from django import forms
from django.contrib.admin.sites import site
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core.urlresolvers import reverse
from django.db import models
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from filer.models import Folder
from filer.utils.compatibility import truncate_words
from filer.utils.model_label import get_model_label
class AdminFolderWidget(ForeignKeyRawIdWidget):
choices = None
input_type = 'hidden'
is_hidden = False
def render(self, name, value, attrs=None):
        obj = self.obj_for_value(value)
        if attrs is None:
            attrs = {}
        css_id = attrs.get('id')
        css_id_folder = "%s_folder" % css_id
        css_id_description_txt = "%s_description_txt" % css_id
related_url = None
if value:
try:
folder = Folder.objects.get(pk=value)
related_url = folder.get_admin_directory_listing_url_path()
except Exception:
pass
if not related_url:
related_url = reverse('admin:filer-directory_listing-last')
params = self.url_parameters()
params['select_folder'] = 1
if params:
url = '?' + '&'.join(['%s=%s' % (k, v) for k, v in list(params.items())])
else:
url = ''
if 'class' not in attrs:
# The JavaScript looks for this hook.
attrs['class'] = 'vForeignKeyRawIdAdminField'
super_attrs = attrs.copy()
hidden_input = super(ForeignKeyRawIdWidget, self).render(name, value, super_attrs)
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
context = {
'hidden_input': hidden_input,
'lookup_url': '%s%s' % (related_url, url),
'lookup_name': name,
'span_id': css_id_description_txt,
'object': obj,
'clear_id': '%s_clear' % css_id,
'descid': css_id_description_txt,
'noimg': 'filer/icons/nofile_32x32.png',
'foldid': css_id_folder,
'id': css_id,
}
html = render_to_string('admin/filer/widgets/admin_folder.html', context)
return mark_safe(html)
def label_for_value(self, value):
obj = self.obj_for_value(value)
return ' <strong>%s</strong>' % truncate_words(obj, 14)
def obj_for_value(self, value):
try:
key = self.rel.get_related_field().name
obj = self.rel.to._default_manager.get(**{key: value})
        except Exception:
obj = None
return obj
class Media(object):
js = (static('filer/js/addons/popup_handling.js'), )
class AdminFolderFormField(forms.ModelChoiceField):
widget = AdminFolderWidget
def __init__(self, rel, queryset, to_field_name, *args, **kwargs):
self.rel = rel
self.queryset = queryset
self.limit_choices_to = kwargs.pop('limit_choices_to', None)
self.to_field_name = to_field_name
self.max_value = None
self.min_value = None
kwargs.pop('widget', None)
forms.Field.__init__(self, widget=self.widget(rel, site), *args, **kwargs)
def widget_attrs(self, widget):
widget.required = self.required
return {}
class FilerFolderField(models.ForeignKey):
default_form_class = AdminFolderFormField
default_model_class = Folder
def __init__(self, **kwargs):
# We hard-code the `to` argument for ForeignKey.__init__
dfl = get_model_label(self.default_model_class)
if "to" in kwargs.keys(): # pragma: no cover
old_to = get_model_label(kwargs.pop("to"))
if old_to != dfl:
msg = "%s can only be a ForeignKey to %s; %s passed" % (
self.__class__.__name__, dfl, old_to
)
warnings.warn(msg, SyntaxWarning)
kwargs['to'] = dfl
super(FilerFolderField, self).__init__(**kwargs)
def formfield(self, **kwargs):
# This is a fairly standard way to set up some defaults
# while letting the caller override them.
defaults = {
'form_class': self.default_form_class,
'rel': self.rel,
}
defaults.update(kwargs)
return super(FilerFolderField, self).formfield(**defaults)
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect ourselves, since we inherit.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.related.ForeignKey"
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
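# --- Usage sketch (illustrative; not part of the original module) ---
# A FilerFolderField is declared like any ForeignKey, except that the target
# model is hard-coded to filer's Folder. The model and field names below are
# hypothetical.
#
#     from django.db import models
#     from filer.fields.folder import FilerFolderField
#
#     class Report(models.Model):
#         output_folder = FilerFolderField(related_name='reports')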
|
DylannCordel/django-filer
|
filer/fields/folder.py
|
Python
|
bsd-3-clause
| 4,982 | 0.001004 |
"""
Copyright 2016 Andrea McIntosh
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views import generic
from .models import Question, Choice
class IndexView(generic.ListView):
template_name = "polls/index.html"
context_object_name = 'latest_question_list'
def get_queryset(self):
"""Return the last five published questions."""
return Question.objects.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
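# --- Companion URLconf sketch (illustrative; this belongs in polls/urls.py,
# not in this file). The pattern names match the reverse() calls above, and
# the 'polls' namespace comes from the project URLconf's include().
#
#     from django.conf.urls import url
#     from . import views
#
#     urlpatterns = [
#         url(r'^$', views.IndexView.as_view(), name='index'),
#         url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
#         url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),
#         url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),
#     ]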
|
akmcinto/TodoApp
|
ToDoApp/polls/views.py
|
Python
|
apache-2.0
| 1,788 | 0.003356 |
import os
import sys
root_path = os.path.abspath("../../../")
if root_path not in sys.path:
sys.path.append(root_path)
import numpy as np
import tensorflow as tf
from _Dist.NeuralNetworks.Base import Generator4d
from _Dist.NeuralNetworks.h_RNN.RNN import Basic3d
from _Dist.NeuralNetworks.NNUtil import Activations
class Basic4d(Basic3d):
def _calculate(self, x, y=None, weights=None, tensor=None, n_elem=1e7, is_training=False):
return super(Basic4d, self)._calculate(x, y, weights, tensor, n_elem / 10, is_training)
class CNN(Basic4d):
def __init__(self, *args, **kwargs):
self.height, self.width = kwargs.pop("height", None), kwargs.pop("width", None)
super(CNN, self).__init__(*args, **kwargs)
self._name_appendix = "CNN"
self._generator_base = Generator4d
self.conv_activations = None
self.n_filters = self.filter_sizes = self.poolings = None
def init_model_param_settings(self):
super(CNN, self).init_model_param_settings()
self.conv_activations = self.model_param_settings.get("conv_activations", "relu")
def init_model_structure_settings(self):
super(CNN, self).init_model_structure_settings()
self.n_filters = self.model_structure_settings.get("n_filters", [32, 32])
self.filter_sizes = self.model_structure_settings.get("filter_sizes", [(3, 3), (3, 3)])
self.poolings = self.model_structure_settings.get("poolings", [None, "max_pool"])
if not len(self.filter_sizes) == len(self.poolings) == len(self.n_filters):
raise ValueError("Length of filter_sizes, n_filters & pooling should be the same")
if isinstance(self.conv_activations, str):
self.conv_activations = [self.conv_activations] * len(self.filter_sizes)
def init_from_data(self, x, y, x_test, y_test, sample_weights, names):
if self.height is None or self.width is None:
assert len(x.shape) == 4, "height and width are not provided, hence len(x.shape) should be 4"
self.height, self.width = x.shape[1:3]
if len(x.shape) == 2:
x = x.reshape(len(x), self.height, self.width, -1)
else:
assert self.height == x.shape[1], "height is set to be {}, but {} found".format(self.height, x.shape[1])
            assert self.width == x.shape[2], "width is set to be {}, but {} found".format(self.width, x.shape[2])
if x_test is not None and len(x_test.shape) == 2:
x_test = x_test.reshape(len(x_test), self.height, self.width, -1)
super(CNN, self).init_from_data(x, y, x_test, y_test, sample_weights, names)
def _define_input_and_placeholder(self):
self._is_training = tf.placeholder(tf.bool, name="is_training")
self._tfx = tf.placeholder(tf.float32, [None, self.height, self.width, self.n_dim], name="X")
self._tfy = tf.placeholder(tf.float32, [None, self.n_class], name="Y")
def _build_model(self, net=None):
self._model_built = True
if net is None:
net = self._tfx
for i, (filter_size, n_filter, pooling) in enumerate(zip(
self.filter_sizes, self.n_filters, self.poolings
)):
net = tf.layers.conv2d(net, n_filter, filter_size, padding="same")
net = tf.layers.batch_normalization(net, training=self._is_training)
activation = self.conv_activations[i]
if activation is not None:
net = getattr(Activations, activation)(net, activation)
net = tf.layers.dropout(net, training=self._is_training)
if pooling is not None:
net = tf.layers.max_pooling2d(net, 2, 2, name="pool")
fc_shape = np.prod([net.shape[i].value for i in range(1, 4)])
net = tf.reshape(net, [-1, fc_shape])
super(CNN, self)._build_model(net)
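# --- Configuration sketch (illustrative; not part of the original module).
# The conv stack above is driven by three parallel lists of equal length;
# the remaining constructor arguments come from the Base classes, which are
# not shown here.
#
#     model_structure_settings = {
#         "n_filters": [32, 64],             # filters per conv layer
#         "filter_sizes": [(3, 3), (3, 3)],  # kernel size per conv layer
#         "poolings": [None, "max_pool"],    # pooling (or None) after each layer
#     }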
|
carefree0910/MachineLearning
|
_Dist/NeuralNetworks/i_CNN/CNN.py
|
Python
|
mit
| 3,860 | 0.00544 |
# Initialize App Engine and import the default settings (DB backend, etc.).
# If you want to use a different backend you have to remove all occurrences
# of "djangoappengine" from this file.
from djangoappengine.settings_base import *
from private_settings import SECRET_KEY
import os
# Activate django-dbindexer for the default database
DATABASES['native'] = DATABASES['default']
DATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native'}
AUTOLOAD_SITECONF = 'indexes'
INSTALLED_APPS = (
# 'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.markup',
'djangotoolbox',
'autoload',
'dbindexer',
"simpleblog.content",
# djangoappengine should come last, so it can override a few manage.py commands
'djangoappengine',
)
MIDDLEWARE_CLASSES = [
# This loads the index definitions, so it has to come first
'autoload.middleware.AutoloadMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
]
if not DEBUG:
# Put the stats middleware after autoload
MIDDLEWARE_CLASSES.insert(
1, 'google.appengine.ext.appstats.recording.AppStatsDjangoMiddleware')
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
)
# This test runner captures stdout and associates tracebacks with their
# corresponding output. Helps a lot with print-debugging.
TEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)
ROOT_URLCONF = 'simpleblog.urls'
if DEBUG:
STATIC_URL = "/devstatic/"
else:
STATIC_URL = "/static/"
STATICFILES_DIRS = ("staticfiles", )
STATIC_ROOT = "static_collected"
PRE_DEPLOY_COMMANDS = ("collectstatic", )
LOGIN_URL = "/login"
LOGIN_REDIRECT_URL = "/"
|
ukch/gae_simple_blog
|
settings.py
|
Python
|
bsd-3-clause
| 2,108 | 0.000949 |
import utils
import re
import subprocess
#regexes
duration_regex = re.compile('Duration:\s*(?P<time>\d{2}:\d{2}:\d{2}.\d{2})')
stream_regex = re.compile('Stream #(?P<stream_id>\d+:\d+)(\((?P<language>\w+)\))?: (?P<type>\w+): (?P<format>[\w\d]+)')
crop_regex = re.compile('crop=(?P<width>\d+):(?P<height>\d+):(?P<x>\d+):(?P<y>\d+)')
# detect crop settings: sample the first 100 seconds with ffmpeg's cropdetect
# filter and return the most frequently suggested crop
def detect_crop(src):
proc = subprocess.Popen(['ffmpeg', '-i', src, '-t', str(100), '-filter:v', 'cropdetect', '-f', 'null', '-'], stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
crops = crop_regex.findall(stderr)
return max(set(crops), key=crops.count)
# detect duration
def detect_duration(src):
proc = subprocess.Popen(['ffmpeg', '-i', src], stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
match = duration_regex.search(stderr)
duration_str = match.group('time')
duration_secs = utils.timestring_to_seconds(duration_str)
return (duration_str, duration_secs)
# detects stream IDs
def detect_streams(src):
proc = subprocess.Popen(['ffmpeg', '-i', src], stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
streams = []
for m in stream_regex.finditer(stderr):
streams.append({
'id': m.group('stream_id'),
'lang': m.group('language'),
'type': m.group('type'),
'fmt': m.group('format')
})
return streams
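# --- Usage sketch (illustrative; not part of the original module) ---
# Each helper shells out to ffmpeg and scrapes its stderr output.
if __name__ == '__main__':
    import sys
    src = sys.argv[1]  # path to any media file ffmpeg can read
    print(detect_duration(src))  # e.g. ('01:02:03.04', 3723.04)
    print(detect_streams(src))   # one dict per stream: id/lang/type/fmt
    print(detect_crop(src))      # most frequent cropdetect suggestion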
|
choffmeister/transcode
|
lib/ffmpeg.py
|
Python
|
mit
| 1,356 | 0.030236 |
# -*- coding: utf-8-unix; mode: python -*-
"""Module to assist to make HTML.
This module provides class that assist to make HTML.
Author: 2011 IMAI Toshiyuki
Copyright (c) 2011 IMAI Toshiyuki
This software is released under the MIT License.
http://opensource.org/licenses/mit-license.php
Class:
HTML -- Assist to make HTML.
"""
__author__ = 'IMAI Toshiyuki'
__version__ = '1.0'
import os
import Cookie
import cgi
class HTML:
"""Assist to make HTML.
Attributes:
encode -- encoding
lang -- lang attribute of html element
sitetitle -- site title
pagetitle -- default page title
titledelimiter -- delimiter of site title and page title
cssfiles -- list object that contains path strings to css files
jsfiles -- list object that contains path string to JavaScript files
jstext -- text of JavaScript code
cookie -- http cookie
nocache -- if it is True then do not make user agents create cache
Methodes:
set_encode(encode) -- Set attribute encode.
set_lang(lang) -- Set attribute lang.
set_site_title(sitetitle) -- Set attribute sitetitle.
set_page_title(pagetitle) -- Set attribute pagetitle.
set_titledelimiter(titledelimiter) -- Set attribute titledelimiter.
set_cookie(cookie) -- Set attribute cookie.
set_nocache(nocache) -- Set attribute nocache.
print_resp_header() -- Print HTTP Response Header.
print_html_header() -- Print xhtml DTD, html start tag, head element
and body start tag.
print_html_close() -- Print end tags of body element and html element.
printe(value) -- Encode value and print it.
h1(content, [attrs]) -- Create h1 element.
h2(content, [attrs]) -- Create h2 element.
h3(content, [attrs]) -- Create h3 element.
h4(content, [attrs]) -- Create h4 element.
h5(content, [attrs]) -- Create h5 element.
h6(content, [attrs]) -- Create h6 element.
p(content, [attrs]) -- Create p element.
start_p([attrs]) -- Create start tag of p element.
end_p() -- Create end tag of p element.
div(content, [attrs]) -- Create div element.
start_div([attrs]) -- Create start tag of div element.
end_div() -- Create end tag of div element.
blockquote(content, [cite], [attrs]) -- Create blockquote element.
start_blockquote([cite], [attrs]) -- Create start tag of blockquote
element.
end_blockquote() -- Create end tag of blockquote element.
pre(content, [attrs]) -- Create pre element.
start_pre([attrs]) -- Create start tag of pre element.
end_pre() -- Create end tag of pre element.
address(content, [attrs]) -- Create address element.
Del(content, [attrs]) -- Create del element.
ins(content, [attrs]) -- Create ins element.
a(content, [attrs]) -- Create a element.
em(content, [attrs]) -- Create em element.
strong(content, [attrs]) -- Create strong element.
abbr(content, [attrs]) -- Create abbr element.
acronym(content, [attrs]) -- Create acronym element.
bdo(content, [attrs]) -- Create bdo element.
cite(content, [attrs]) -- Create cite element.
code(content, [attrs]) -- Create code element.
dfn(content, [attrs]) -- Create dfn element.
kbd(content, [attrs]) -- Create kbd element.
q(content, [attrs]) -- Create q element.
samp(content, [attrs]) -- Create samp element.
span(content, [attrs]) -- Create span element.
sub(content, [attrs]) -- Create sub element.
sup(content, [attrs]) -- Create sup element.
var(content, [attrs]) -- Create var element.
ruby(content, title, [attrs]) -- Create ruby element.
ol(content, [attrs]) -- Create ol element.
start_ol([attrs]) -- Create start tag of ol element.
end_ol() -- Create end tag of ol element.
ul(content, [attrs]) -- Create ul element.
start_ul([attrs]) -- Create start tag of ul element.
end_ul() -- Create end tag of ul element.
li(content, [attrs]) -- Create li element.
dl(content, [attrs]) -- Create dl element.
start_dl([attrs]) -- Create start tag of dl element.
end_dl() -- Create end tag of p element.
dt(content, [attrs]) -- Create dt element.
dd(content, [attrs]) -- Create dd element.
br([attrs]) -- Create br element.
hr([attrs]) -- Create hr element.
start_form([method], [action], [enctype], [attrs]) -- Create start tag
of form element.
start_multipart_form([method], [action], [enctype], [attrs]) -- Create
start tag of form element for multipart.
end_form() -- Create end tag of form element.
textfield([name], [value], [size], [maxlength], [attrs]) -- Create
input element as form item text field.
textarea([name], [value], [rows], [columns], [attrs]) -- Create textarea
element.
password_field([name], [value], [size], [maxlength], [attrs]) -- Create
input element as form item password field.
filefield([name], [value], [size], [maxlength], [attrs]) -- Create input
element as form item file field.
popup_menu([name], [values], [default], [labels], [attributes], [attrs])
-- Create select element as form item popup menu.
scrolling_list([name], [values], [default], [size], [multiple],
[labels], [attributes], [attrs]) -- Create select element
as form item scrolling list.
select_list([name], [values], [default], [labels], [attributes], [size],
[multiple], [attrs]) -- Create select element.
checkbox_group([name], [values], [default], [delimiter], [labels],
[attributes], [attrs]) -- Create input elements as form
item check box group.
checkbox([name], [checked], [value], [label], [attrs]) -- Create input
element as form item check box group.
radio_group([name], [values], [default], [delimiter], [labels],
[attributes], [attrs]) -- Create input elements as form item
radio button group.
button_group([type], [name], [values], [default], [delimiter], [labels],
[attributes], [attrs]) -- Create input elements.
submit([name], [value], [attrs]) -- Create input element as form item
submit button.
reset([name], [value], [attrs]) -- Create input element as form item
reset button.
button([name], [value], [attrs]) -- Create input element as form item
button.
hidden([name], [value], [attrs]) -- Create input element as form item
hidden.
input(type, [attrs]) -- Create input element.
    Usage:
        import htmldocument
        ht = htmldocument.HTML(
            encode='utf-8',
            lang='ja',
            sitetitle='Site Name',
            cssfiles=['./css/main.css'],
            jsfiles=['./js/main.js'])
        ht.print_resp_header()
        ht.print_html_header()
        ht.printe(ht.h1('Header Level 1'))
        ht.printe(ht.p('Text body.'))
        ht.print_html_close()
"""
def __init__(self, encode='utf-8', lang='en', sitetitle=u'Untitled Site',
pagetitle=u'Untitled', titledelimiter=u' :: ',
cssfiles=None, jsfiles=None, jstext=None, cookie=None,
nocache=False):
"""Constructor of class HTML.
Keyword arguments:
encode -- encoding (default 'utf-8')
lang -- lang attribute of html element (default 'en')
sitetitle -- site title (default 'Untitled Site')
pagetitle -- default page title (default 'Untitled')
titledelimiter -- delimiter of site title and page title
(default ' :: ')
cssfiles -- list object that contains path strings to css files
(default None)
jsfiles -- list object that contains path string to JavaScript files
(default None)
jstext -- text of JavaScript code (default None)
cookie -- http cookie (default None)
nocache -- if it is True then do not make user agents create cache
(default False)
"""
self.encode = encode
self.lang = lang
self.sitetitle = sitetitle
self.pagetitle = pagetitle
self.titledelimiter = titledelimiter
self.cssfiles = cssfiles
self.jsfiles = jsfiles
self.jstext = jstext
self.cookie = cookie
self.nocache = nocache
# setters
def set_encode(self, encode):
"""Set attribute encode."""
self.encode = encode
def set_lang(self, lang):
"""Set attribute lang."""
self.lang = lang
def set_site_title(self, sitetitle):
"""Set attribute sitetitle."""
self.sitetitle = sitetitle
def set_page_title(self, pagetitle):
"""Set attribute pagetitle."""
self.pagetitle = pagetitle
def set_titledelimiter(self, titledelimiter):
"""Set attribute titledelimiter."""
self.titledelimiter = titledelimiter
def set_cookie(self, cookie):
"""Set attribute cookie."""
self.cookie = cookie
def set_nocache(self, nocache):
"""Set attribute nocache."""
self.nocache = nocache
# printers
def print_resp_header(self):
"""Print HTTP Response Header."""
if self.encode == u'' or not isinstance(self.encode, basestring):
self.printe(u'Content-Type: text/html')
else:
self.printe(u'Content-Type: text/html; charset={0}'.format(
self.encode))
if isinstance(self.cookie, Cookie.SimpleCookie):
self.printe(self.cookie.output())
if self.nocache:
self.printe('Pragma: no-cache')
self.printe('Cache-Control: no-cache')
self.printe('Expires: Thu, 01 Dec 1994 16:00:00 GMT')
self.printe(u'')
def print_html_header(self):
"""Print xhtml DTD, html start tag, head element and body start tag."""
dtd = u'{0}{1}'.format(
u'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"\n',
u'\t"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">')
self.printe(dtd)
self.printe(u'<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="{0}" lang="{0}">'.format(self.lang))
self.printe(u'<head>')
self.printe(u'<meta http-equiv="Content-Style-Type" content="text/css" />')
self.printe(u'<meta http-equiv="Content-Script-Type" content="text/javascript" />')
        self.printe(u'<title>{0}{1}{2}</title>'.format(
            cgi.escape(self.pagetitle),
            cgi.escape(self.titledelimiter),
            cgi.escape(self.sitetitle)))
if isinstance(self.cssfiles, list):
for cssfile in self.cssfiles:
self.printe(u'<link rel="stylesheet" type="text/css" href="{0}" />'.format(cssfile))
elif isinstance(self.cssfiles, basestring):
self.printe(u'<link rel="stylesheet" type="text/css" href="{0}" />'.format(self.cssfiles))
if isinstance(self.jsfiles, list):
for jsfile in self.jsfiles:
self.printe(u'<script type="text/javascript" src="{0}"></script>'.format(jsfile))
elif isinstance(self.jsfiles, basestring):
self.printe(u'<script type="text/javascript" src="{0}"></script>'.format(self.jsfiles))
if isinstance(self.jstext, basestring):
self.printe(u'<script type="text/javascript">{0}</script>'.format(
self.jstext))
self.printe(u'</head>')
self.printe(u'<body>')
def print_html_close(self):
"""Print end tags of body element and html element."""
self.printe(u'</body>\n</html>')
def printe(self, value):
"""Encode value and print it.
Keyword argument:
value -- some text
"""
if isinstance(value, unicode):
print(value.encode(self.encode))
else:
print(value)
# elements
# block
def h1(self, content, attrs=None):
"""Create h1 element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'h1', content, attrs)
def h2(self, content, attrs=None):
"""Create h2 element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'h2', content, attrs)
def h3(self, content, attrs=None):
"""Create h3 element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'h3', content, attrs)
def h4(self, content, attrs=None):
"""Create h4 element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'h4', content, attrs)
def h5(self, content, attrs=None):
"""Create h5 element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'h5', content, attrs)
def h6(self, content, attrs=None):
"""Create h6 element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'h6', content, attrs)
def p(self, content, attrs=None):
"""Create p element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'p', content, attrs)
def start_p(self, attrs=None):
"""Create start tag of p element.
Keyword arguments:
attrs -- dict object that contains attributes (default None)
"""
return self._create_start_tag(u'p', attrs)
def end_p(self):
"""Create end tag of p element."""
return self._create_end_tag(u'p')
def div(self, content, attrs=None):
"""Create div element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'div', content, attrs)
def start_div(self, attrs=None):
"""Create start tag of div element.
Keyword arguments:
attrs -- dict object that contains attributes (default None)
"""
return self._create_start_tag(u'div', attrs)
def end_div(self):
"""Create end tag of div element."""
return self._create_end_tag(u'div')
def blockquote(self, content, cite=None, attrs=None):
"""Create blockquote element.
Keyword arguments:
content -- some text
cite -- cite attribute (default None)
attrs -- dict object that contains attributes (default None)
"""
if attrs is None or not isinstance(attrs, dict):
attrs = dict()
if cite is not None:
attrs['cite'] = cite
return self._create_element(u'blockquote', content, attrs)
def start_blockquote(self, cite=None, attrs=None):
"""Create start tag of blockquote element.
Keyword arguments:
cite -- cite attribute (default None)
attrs -- dict object that contains attributes (default None)
"""
if attrs is None or not isinstance(attrs, dict):
attrs = dict()
if cite is not None:
attrs['cite'] = cite
return self._create_start_tag(u'blockquote', attrs)
def end_blockquote(self):
"""Create end tag of blockquote element."""
        return self._create_end_tag(u'blockquote')
def pre(self, content, attrs=None):
"""Create pre element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'pre', content, attrs)
def start_pre(self, attrs=None):
"""Create start tag of pre element.
Keyword arguments:
attrs -- dict object that contains attributes (default None)
"""
return self._create_start_tag(u'pre', attrs)
def end_pre(self):
"""Create end tag of pre element."""
return self._create_end_tag(u'pre')
def address(self, content, attrs=None):
"""Create address element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'address', content, attrs)
def fieldset(self):
pass
def Del(self, content, attrs=None):
"""Create del element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'del', content, attrs)
def ins(self, content, attrs=None):
"""Create ins element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'ins', content, attrs)
# inline
def a(self, content, attrs=None):
"""Create a element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'a', content, attrs)
def em(self, content, attrs=None):
"""Create em element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'em', content, attrs)
def strong(self, content, attrs=None):
"""Create strong element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'strong', content, attrs)
def abbr(self, content, attrs=None):
"""Create abbr element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'abbr', content, attrs)
def acronym(self, content, attrs=None):
"""Create acronym element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'acronym', content, attrs)
def bdo(self, content, attrs=None):
"""Create bdo element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'bdo', content, attrs)
def cite(self, content, attrs=None):
"""Create cite element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'cite', content, attrs)
def code(self, content, attrs=None):
"""Create code element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'code', content, attrs)
def dfn(self, content, attrs=None):
"""Create dfn element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'dfn', content, attrs)
def kbd(self, content, attrs=None):
"""Create kbd element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'kbd', content, attrs)
def q(self, content, attrs=None):
"""Create q element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'q', content, attrs)
def samp(self, content, attrs=None):
"""Create samp element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'samp', content, attrs)
def span(self, content, attrs=None):
"""Create span element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'span', content, attrs)
def sub(self, content, attrs=None):
"""Create sub element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'sub', content, attrs)
def sup(self, content, attrs=None):
"""Create sup element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'sup', content, attrs)
def var(self, content, attrs=None):
"""Create var element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'var', content, attrs)
def ruby(self, content, title, attrs=None):
"""Create ruby element.
Keyword arguments:
content -- some text
title -- ruby title text
attrs -- dict object that contains attributes (default None)
"""
        return u'<ruby><rp>(</rp><rb>{0}</rb><rt>{1}</rt><rp>)</rp></ruby>'.format(content, title)
# list
def ol(self, content, attrs=None):
"""Create ol element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'ol', content, attrs)
def start_ol(self, attrs=None):
"""Create start tag of ol element.
Keyword arguments:
attrs -- dict object that contains attributes (default None)
"""
return self._create_start_tag(u'ol', attrs)
def end_ol(self):
"""Create end tag of ol element."""
return self._create_end_tag(u'ol')
def ul(self, content, attrs=None):
"""Create ul element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'ul', content, attrs)
def start_ul(self, attrs=None):
"""Create start tag of ul element.
Keyword arguments:
attrs -- dict object that contains attributes (default None)
"""
return self._create_start_tag(u'ul', attrs)
def end_ul(self):
"""Create end tag of ul element."""
return self._create_end_tag(u'ul')
def li(self, content, attrs=None):
"""Create li element.
Keyword arguments:
content -- some text or list contains some texts
attrs -- dict object that contains attributes (default None)
"""
if isinstance(content, list) or isinstance(content, tuple):
result = list()
for li in content:
result.append(self._create_element(u'li', li, attrs))
return u''.join(result)
else:
return self._create_element(u'li', content, attrs)
def dl(self, content, attrs=None):
"""Create dl element.
Keyword arguments:
content -- some text or dict contains some texts
attrs -- dict object that contains attributes (default None)
"""
if isinstance(content, dict):
result = list()
result.append(self.start_dl(attrs))
for di in content.keys():
result.append(self.dt(di))
result.append(self.dd(content[di]))
result.append(self.end_dl())
return u''.join(result)
else:
return self._create_element(u'dl', content, attrs)
def start_dl(self, attrs=None):
"""Create start tag of dl element.
Keyword arguments:
attrs -- dict object that contains attributes (default None)
"""
return self._create_start_tag(u'dl', attrs)
def end_dl(self):
"""Create end tag of p element."""
return self._create_end_tag(u'dl')
def dt(self, content, attrs=None):
"""Create dt element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'dt', content, attrs)
def dd(self, content, attrs=None):
"""Create dd element.
Keyword arguments:
content -- some text
attrs -- dict object that contains attributes (default None)
"""
return self._create_element(u'dd', content, attrs)
# empty
def br(self, attrs=None):
"""Create br element.
Keyword arguments:
attrs -- dict object that contains attributes (default None)
"""
return self._create_empty_element(u'br', attrs)
def hr(self, attrs=None):
"""Create hr element.
Keyword arguments:
attrs -- dict object that contains attributes (default None)
"""
return self._create_empty_element(u'hr', attrs)
# form elements
def start_form(self, method=None, action=None, enctype=None, attrs=None):
"""Create start tag of form element.
Keyword arguments:
method -- method attribute (default None)
action -- action attribute (default None)
enctype -- enctype attirbute (default None)
attrs -- dict object that contains attributes (default None)
"""
if attrs is None or not isinstance(attrs, dict):
attrs = {}
if method is None:
attrs['method'] = u'POST'
else:
attrs['method'] = method
if action is None:
attrs['action'] = os.environ.get('SCRIPT_NAME', '')
else:
attrs['action'] = action
if enctype is not None:
attrs['enctype'] = enctype
return self._create_start_tag(u'form', attrs)
def start_multipart_form(self, method=None, action=None,
enctype=u'multipart/form-data', attrs=None):
"""Create start tag of form element for multipart.
Keyword arguments:
method -- method attribute (default None)
action -- action attribute (default None)
enctype -- enctype attirbute (default 'multipart/form-data')
attrs -- dict object that contains attributes (default None)
"""
if attrs is None or not isinstance(attrs, dict):
attrs = {}
if method is None:
attrs['method'] = u'POST'
else:
attrs['method'] = method
if action is None:
attrs['action'] = os.environ.get('SCRIPT_NAME', '')
else:
attrs['action'] = action
if enctype is not None:
attrs['enctype'] = enctype
return self._create_start_tag(u'form', attrs)
def end_form(self):
"""Create end tag of form element."""
return self._create_end_tag(u'form')
def textfield(self, name=None, value=None, size=None, maxlength=None,
attrs=None):
"""Create input element as form item text field.
Keyword arguments:
name -- name attribute (default None)
value -- value attribute (default None)
size -- size attribute (default None)
maxlength -- maxlength attribute (default None)
attrs -- dict object that contains attributes (default None)
"""
if attrs is None or not isinstance(attrs, dict):
attrs = {}
attrs['name'] = name
attrs['value'] = value
attrs['size'] = size
attrs['maxlength'] = maxlength
return self.input(u'text', attrs)
def textarea(self, name=None, value=None, rows=None, columns=None,
attrs=None):
"""Create textarea element.
Keyword arguments:
name -- name attribute (default None)
value -- value attribute (default None)
rows -- rows attribute (default None)
columns -- cols attribute (default None)
attrs -- dict object that contains attributes (default None)
"""
if attrs is None or not isinstance(attrs, dict):
attrs = {}
attrs['name'] = name
attrs['rows'] = rows
attrs['cols'] = columns
if value is None:
value = u''
return self._create_element(u'textarea', value, attrs)
def password_field(self, name=None, value=None, size=None,
maxlength=None, attrs=None):
"""Create input element as form item password field.
Keyword arguments:
name -- name attribute (default None)
value -- value attribute (default None)
size -- size attribute (default None)
maxlength -- maxlength attribute (default None)
attrs -- dict object that contains attributes (default None)
"""
if attrs is None or not isinstance(attrs, dict):
attrs = {}
attrs['name'] = name
attrs['value'] = value
attrs['size'] = size
attrs['maxlength'] = maxlength
return self.input(u'password', attrs)
def filefield(self, name=None, value=None, size=None, maxlength=None,
attrs=None):
"""Create input element as form item file field.
Keyword arguments:
name -- name attribute (default None)
value -- value attribute (default None)
size -- size attribute (default None)
maxlength -- maxlength attribute (default None)
attrs -- dict object that contains attributes (default None)
"""
if attrs is None or not isinstance(attrs, dict):
attrs = {}
attrs['name'] = name
attrs['value'] = value
attrs['size'] = size
attrs['maxlength'] = maxlength
return self.input(u'file', attrs)
def popup_menu(self, name=None, values=None, default=None,
labels=None, attributes=None, attrs=None):
"""Create select element as form item popup menu.
Keyword arguments:
name -- name attribute (default None)
values -- list object that contains values (default None)
default -- default value (default None)
labels -- dict object that contains label text (default None)
attributes -- dict object that contains attributes for each item
(default None)
attrs -- dict object that contains attributes (default None)
"""
return self.select_list(name=name, values=values, default=default,
labels=labels, attributes=attributes,
attrs=attrs)
def scrolling_list(self, name=None, values=None, default=None,
size=4, multiple=False,
labels=None, attributes=None, attrs=None):
"""Create select element as form item scrolling list.
Keyword arguments:
name -- name attribute (default None)
value -- list object that contains values (default None)
default -- default value (default None)
size -- size attribute (default 4)
multiple -- multiple attribute (default False)
labels -- dict object that contains label text (default None)
attributes -- dict object that contains attributes for each item
(default None)
attrs -- dict object that contains attributes (default None)
"""
return self.select_list(name=name, values=values, default=default,
labels=labels, attributes=attributes,
size=size, multiple=multiple, attrs=attrs)
def select_list(self, name=None, values=None, default=None,
labels=None, attributes=None, size=None, multiple=False,
attrs=None):
"""Create select element.
Keyword arguments:
name -- name attribute (default None)
values -- list object that contains values (default None)
default -- default value (default None)
labels -- dict object that contains label text (default None)
attributes -- dict object that contains attributes for each item
(default None)
size -- size attribute (default None)
multiple -- multiple attribute (default None)
attrs -- dict object that contains attributes (default None)
"""
        if not isinstance(values, (list, tuple)):
            raise TypeError(u'need list or tuple, got %r' % type(values))
if labels is not None and not isinstance(labels, dict):
raise TypeError(u'need dict, got %r' % type(labels))
if attributes is not None and not isinstance(attributes, dict):
raise TypeError(u'need dict, got %r' % type(attributes))
if attrs is None or not isinstance(attrs, dict):
attrs = {}
attrs['name'] = name
if isinstance(size, int):
attrs['size'] = size
if multiple:
attrs['multiple'] = u'multiple'
result = self._create_start_tag(u'select', attrs)
for li in values:
attrs = {}
if attributes is not None:
if attributes.has_key(li):
if isinstance(attributes[li], dict):
attrs = attributes[li]
attrs['value'] = li
if isinstance(default, list) or isinstance(default, tuple):
for item in default:
if item == li:
attrs['selected'] = u'selected'
else:
if default == li:
attrs['selected'] = u'selected'
content = li
if labels is not None:
if labels.has_key(li):
if labels[li] is not None:
content = labels[li]
result += self._create_element(u'option', content, attrs)
result += self._create_end_tag(u'select')
return result
def checkbox_group(self, name=None, values=None, default=None,
delimiter=None,labels=None, attributes=None,
attrs=None):
"""Create input elements as form item check box group.
Keyword arguments:
name -- name attribute (default None)
values -- list object that contains values (default None)
default -- default value (default None)
delimiter -- delimiter for input elements. if it is None,
return list object contains input elements
(default None)
labels -- list object that contains label text (default None)
attributes -- dict object that contains attributes for each item
(default None)
attrs -- dict object that contains attributes (default None)
"""
return self.button_group(type=u'checkbox', name=name,values=values,
default=default, delimiter=delimiter,
labels=labels, attributes=attributes,
attrs=attrs)
def checkbox(self, name=None, checked=False, value=None, label=u'',
attrs=None):
"""Create input element as form item check box group.
Keyword arguments:
name -- name attribute (default None)
checked -- checked attribute (default False)
value -- value attribute (default None)
label -- label text (default '')
attrs -- dict object that contains attributes (default None)
"""
if attrs is None or not isinstance(attrs, dict):
attrs = {}
attrs['name'] = name
        if checked:
            attrs['checked'] = u'checked'
attrs['value'] = value
result = []
result.append(self.input(u'checkbox', attrs))
if len(label) == 0:
label = value
        if isinstance(label, int):
            label = str(label)
if label is not None:
result.append(label)
return u' '.join(result)
def radio_group(self, name=None, values=None, default=None,
delimiter=None,labels=None, attributes=None, attrs=None):
"""Create input elements as form item radio button group.
Keyword arguments:
name -- name attribute (default None)
values -- list object that contains values (default None)
default -- default value (default None)
delimiter -- delimiter for input elements. if it is None,
return list object contains input elements
(default None)
labels -- list object that contains label text (default None)
attributes -- dict object that contains attributes for each item
(default None)
attrs -- dict object that contains attributes (default None)
"""
if isinstance(default, list) or isinstance(default, tuple):
default = default[0]
return self.button_group(type=u'radio', name=name,values=values,
default=default, delimiter=delimiter,
labels=labels, attributes=attributes,
attrs=attrs)
def button_group(self, type=u'radio', name=None, values=None,
default=None, delimiter=None,labels=None,
attributes=None, attrs=None):
"""Create input elements.
Keyword arguments:
type -- type attribute (default 'radio')
name -- name attribute (default None)
values -- list object that contains values (default None)
default -- default value (default None)
delimiter -- delimiter for button elements. if it is None,
return list object contains button elements
(default None)
labels -- list object that contains label text (default None)
attributes -- dict object that contains attributes for each item
(default None)
attrs -- dict object that contains attributes (default None)
"""
        if not isinstance(values, (list, tuple)):
            raise TypeError(u'need list or tuple, got %r' % type(values))
if labels is not None and not isinstance(labels, dict):
raise TypeError(u'need dict, got %r' % type(labels))
if attributes is not None and not isinstance(attributes, dict):
raise TypeError(u'need dict, got %r' % type(attributes))
result = []
for li in values:
iattrs = {}
            if attrs is not None and isinstance(attrs, dict):
iattrs.update(attrs)
if attributes is not None:
if attributes.has_key(li):
if isinstance(attributes[li], dict):
iattrs.update(attributes[li])
iattrs['name'] = name
iattrs['value'] = li
if isinstance(default, list):
for item in default:
if item == li:
iattrs['checked'] = u'checked'
else:
if default == li:
iattrs['checked'] = u'checked'
content = li
if labels is not None:
if labels.has_key(li):
if labels[li] is not None:
content = labels[li]
if isinstance(content, int):
content = str(content)
result.append(u'{0} {1}'.format(self.input(type, iattrs), content))
if delimiter is not None and isinstance(delimiter, basestring):
result = delimiter.join(result)
return result
def submit(self, name=None, value=None, attrs=None):
"""Create input element as form item submit button.
Keyword arguments:
name -- name attribute (default None)
value -- value attribute (default None)
attrs -- dict object that contains attributes (default None)
"""
if attrs is None or not isinstance(attrs, dict):
attrs = {}
attrs['name'] = name
attrs['value'] = value
return self.input(u'submit', attrs)
def reset(self, name=None, value=None, attrs=None):
"""Create input element as form item reset button.
Keyword arguments:
name -- name attribute (default None)
value -- value attribute (default None)
attrs -- dict object that contains attributes (default None)
"""
if attrs is None or not isinstance(attrs, dict):
attrs = {}
attrs['name'] = name
attrs['value'] = value
return self.input(u'reset', attrs)
def button(self, name=None, value=None, attrs=None):
"""Create input element as form item button.
Keyword arguments:
name -- name attribute (default None)
value -- value attribute (default None)
attrs -- dict object that contains attributes (default None)
"""
if attrs is None or not isinstance(attrs, dict):
attrs = {}
attrs['name'] = name
attrs['value'] = value
return self.input(u'button', attrs)
def hidden(self, name=None, value=None, attrs=None):
"""Create input element as form item hidden.
Keyword arguments:
name -- name attribute (default None)
value -- value attribute (default None)
attrs -- dict object that contains attributes (default None)
"""
if attrs is None or not isinstance(attrs, dict):
attrs = {}
attrs['name'] = name
attrs['value'] = value
return self.input(u'hidden', attrs)
def input(self, type, attrs=None):
"""Create input element.
Keyword arguments:
type -- type attribute
attrs -- dict object that contains attributes (default None)
"""
if attrs is None or not isinstance(attrs, dict):
attrs = {}
attrs['type'] = type
return self._create_empty_element('input', attrs)
def formitem(self, value, attrs=None):
return self._create_element(u'input', value, attrs)
# internal methods
def _create_start_tag(self, elemname, attrs=None):
return u'<{0}{1}>'.format(elemname, self._create_attr_string(attrs))
def _create_end_tag(self, elemname):
return u'</{0}>'.format(elemname)
def _create_attr_string(self, attrs):
attrstr = u''
if isinstance(attrs, dict):
for attrname in (attrs.keys()):
if attrs[attrname] is not None:
attrvalue = attrs[attrname]
if isinstance(attrvalue, int):
attrvalue = str(attrvalue)
attrstr = u'{0} {1}="{2}"'.format(
attrstr,
cgi.escape(attrname, True),
cgi.escape(attrvalue, True))
return attrstr
def _create_element(self, elemname, content, attrs=None):
starttag = self._create_start_tag(elemname, attrs)
endtag = self._create_end_tag(elemname)
if isinstance(content, int):
content = str(content)
if isinstance(content, basestring):
return u'{0}{1}{2}'.format(starttag, content, endtag)
else:
raise TypeError(u'need string or int, got %r' % type(content))
def _create_empty_element(self, elemname, attrs=None):
return u'<{0}{1} />'.format(elemname, self._create_attr_string(attrs))
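# --- Form-building sketch (illustrative; not part of the original module) ---
# Exercises the form helpers above; the field names and values are arbitrary.
if __name__ == '__main__':
    ht = HTML(encode='utf-8', lang='en', sitetitle=u'Demo Site',
              pagetitle=u'Form Demo')
    ht.print_resp_header()
    ht.print_html_header()
    ht.printe(ht.start_form(action='/post'))
    ht.printe(ht.textfield(name='title', size=40))
    ht.printe(ht.radio_group(name='color', values=[u'red', u'blue'],
                             default=u'red', delimiter=u' '))
    ht.printe(ht.submit(name='ok', value=u'Send'))
    ht.printe(ht.end_form())
    ht.print_html_close()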
|
imait/HtmlDocument
|
for_python2.x/htmldocument.py
|
Python
|
mit
| 46,242 | 0.00093 |
# -*- coding: utf-8 -*-
import logging
from speaklater import make_lazy_string
from quokka.modules.accounts.models import User
logger = logging.getLogger()
def lazy_str_setting(key, default=None):
from flask import current_app
return make_lazy_string(
lambda: current_app.config.get(key, default)
)
def get_current_user():
from flask.ext.security import current_user
try:
if not current_user.is_authenticated():
return None
except RuntimeError:
# Flask-Testing will fail
pass
try:
return User.objects.get(id=current_user.id)
except Exception as e:
logger.warning("No user found: %s" % e.message)
return None
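# --- Usage sketch (illustrative; the setting name is hypothetical) ---
# lazy_str_setting defers the config lookup until the value is rendered, so
# it can be declared at import time, outside of an application context:
#
#     SITE_NAME = lazy_str_setting('SITE_NAME', default='Quokka')
#     # str(SITE_NAME) later resolves current_app.config['SITE_NAME']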
|
maurobaraldi/quokka
|
quokka/utils/__init__.py
|
Python
|
mit
| 714 | 0 |
from .req import Req
class Records(Req):
def __init__(self, url, email, secret):
super().__init__(url=url, email=email, secret=secret)
def get(self, zone_id, layer='default'):
return self.do_get("/zones/{}/{}/records".format(zone_id, layer))
def create(self, zone, layer, name, ttl, rtype, data, priority=0):
url = "/zones/{}/{}/records".format(zone, layer)
        payload = {
            'layer': layer,
            'name': name,
            'ttl': ttl,
            'record_type': rtype,
            'value': data,  # `data` is the record value (e.g. an IP address)
            'priority': priority
        }
        return self.do_post(url, data=payload)
def delete(self, zone, layer, record_id):
url = "/zones/{}/{}/records/{}".format(zone, layer, record_id)
return self.do_delete(url)
def update(self, zone, layer, record_id, **params):
url = "/zones/{}/{}/records/{}".format(zone, layer, record_id)
return self.do_put(url, data=params)
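# --- Usage sketch (illustrative; endpoint, credentials and zone are
# placeholders) ---
#
#     recs = Records(url='https://api.example.com', email='user@example.com',
#                    secret='s3cret')
#     recs.create(zone='example.com', layer='default', name='www',
#                 ttl=3600, rtype='A', data='203.0.113.10')
#     recs.get('example.com')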
|
cloudnsru/PyCloudNS
|
PyCloudNS/records.py
|
Python
|
mit
| 967 | 0 |
# crop.py
# Derek Groenendyk
# 2/15/2017
# reads input data from Excel workbook
from collections import OrderedDict
import logging
import numpy as np
import os
import sys
from cons2.cu import CONSUMPTIVE_USE
# from utils import excel
logger = logging.getLogger('crop')
logger.setLevel(logging.DEBUG)
class CROP(object):
"""docstring for CROP"""
def __init__(self, shrtname, longname, crop_type, mmnum, directory, sp):
self.sname = shrtname
self.lname = longname
self.crop_type = crop_type
self.directory = directory
if self.crop_type == 'ANNUAL':
self.mmnum = mmnum
if sp.et_method == 'fao':
self.stages = {}
self.kc = {}
# self.read_cropdev()
self.read_stages()
self.read_kc()
elif sp.et_method == 'scs':
self.get_nckc()
self.get_ckc()
# methods = {
# 'ANNUAL': ANNUAL,
# 'PERENNIAL': PERENNIAL
# }
# self.cu = methods[crop_type](sp, self)
self.cu = CONSUMPTIVE_USE(sp, self)
def read_cropdev(self):
try:
infile = open(os.path.join(self.directory,'data','crop_dev_coef.csv'),'r')
        except (IOError, TypeError):
            logger.critical('crop_dev_coef.csv file not found.')
raise
lines = infile.readlines()
infile.close()
# sline = lines[1].split(',')
# cname = sline[0].replace(' ','')
# temp_cname = cname
stage_flag = False
kc_flag = False
switch = False
i = 1
# while i < len(lines):
while i < len(lines):
sline = lines[i].split(',')
cname = sline[0].replace(' ','')
# print(cname,self.sname)
if cname != '':
if cname == self.sname:
# print(i)
if not switch:
stage = sline[1].lower()
self.stages[stage] = np.array([float(item) for item in sline[2:6]])
# print(1.0-np.sum(self.stages[stage]))
stage_flag = True
else:
num = int(sline[1].replace(' ',''))
self.kc[num] = np.array([float(item) for item in sline[2:5]])
kc_flag = True
else:
if switch:
break
i += 1
switch = True
i += 1
        if not stage_flag or not kc_flag:
            msg = 'Crop, ' + self.sname + ', not found in crop_dev_coef.csv.'  # include site??
            logger.critical(msg)
            raise ValueError(msg)
def read_stages(self):
        try:
            infile = open(os.path.join(self.directory,'data','fao_crop_stages.csv'),'r')
        except (IOError, TypeError):
            logger.critical('fao_crop_stages.csv file not found.')
            raise
lines = infile.readlines()
infile.close()
flag = False
i = 1
while i < len(lines):
sline = lines[i].split(',')
cname = sline[0].replace(' ','')
if cname != '':
if cname == self.sname:
stage = sline[1].lower()
self.stages[stage] = np.array([float(item) for item in sline[2:6]])
flag = True
else:
if flag:
break
flag = False
i += 1
        if not flag:
            msg = 'Crop, ' + self.sname + ', not found in fao_crop_stages.csv.'  # include site??
            logger.critical(msg)
            raise ValueError(msg)
def read_kc(self):
        try:
            infile = open(os.path.join(self.directory,'data','fao_crop_coef.csv'),'r')
        except (IOError, TypeError):
            logger.critical('fao_crop_coef.csv file not found.')
            raise
lines = infile.readlines()
infile.close()
flag = False
i = 1
while i < len(lines):
sline = lines[i].split(',')
cname = sline[0].replace(' ','')
if cname != '':
if cname == self.sname:
num = int(sline[1].replace(' ',''))
self.kc[num] = np.array([float(item) for item in sline[2:5]])
flag = True
else:
if flag:
break
flag = False
i += 1
        if not flag:
            msg = 'Crop, ' + self.sname + ', not found in fao_crop_coef.csv.'  # include site??
            logger.critical(msg)
            raise ValueError(msg)
def get_nckc(self):
"""
Reads in crop coefficients.
Parameters
----------
name: string
Name of the crop
Returns
-------
nckc: list
List of crop coefficients
"""
try:
infile = open(os.path.join(self.directory,'data','scs_crop_stages.csv'),'r')
        except (IOError, TypeError):
logger.critical('scs_crop_stages.csv file not found.')
raise
lines = infile.readlines()
infile.close()
nckca = [float(item) for item in lines[0].split(',')[1:]]
nckcp = [float(item) for item in lines[1].split(',')[1:]]
if self.crop_type == 'PERENNIAL':
            self.nckc = nckcp
else:
self.nckc = nckca
def get_ckc(self):
"""
Reads in crop coefficients.
Parameters
----------
name: string
Name of the crop
Returns
-------
ckc: list
List of crop coefficients
"""
        try:
            infile = open(os.path.join(self.directory,'data','scs_crop_coef.csv'),'r')
        except (IOError, TypeError):
            logger.critical('scs_crop_coef.csv file not found.')
            raise
else:
lines = infile.readlines()
infile.close()
if self.crop_type == 'PERENNIAL':
end = 26
else:
end = 22
for line in lines:
sline = line.split(',')
sline[-1] = sline[-1][:-1]
# print(sline[0],self.sname)
if sline[0] == self.sname:
vals = [float(item) for item in sline[1:end]]
self.ckc = vals
break
|
MoonRaker/cons2-python
|
cons2/crop.py
|
Python
|
gpl-3.0
| 6,619 | 0.009216 |
"""Auto-generated file, do not edit by hand. BS metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_BS = PhoneMetadata(id='BS', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='9\\d\\d', possible_length=(3,)),
toll_free=PhoneNumberDesc(national_number_pattern='9(?:1[19]|88)', example_number='911', possible_length=(3,)),
emergency=PhoneNumberDesc(national_number_pattern='91[19]', example_number='911', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='9(?:1[19]|88)', example_number='911', possible_length=(3,)),
short_data=True)
|
daviddrysdale/python-phonenumbers
|
python/phonenumbers/shortdata/region_BS.py
|
Python
|
apache-2.0
| 677 | 0.008863 |
"""
Functions that ignore NaN.
Functions
---------
- `nanmin` -- minimum non-NaN value
- `nanmax` -- maximum non-NaN value
- `nanargmin` -- index of minimum non-NaN value
- `nanargmax` -- index of maximum non-NaN value
- `nansum` -- sum of non-NaN values
- `nanprod` -- product of non-NaN values
- `nanmean` -- mean of non-NaN values
- `nanvar` -- variance of non-NaN values
- `nanstd` -- standard deviation of non-NaN values
- `nanmedian` -- median of non-NaN values
- `nanpercentile` -- qth percentile of non-NaN values
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.lib.function_base import _ureduce as _ureduce
__all__ = [
'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',
]
def _replace_nan(a, val):
"""
If `a` is of inexact type, make a copy of `a`, replace NaNs with
the `val` value, and return the copy together with a boolean mask
marking the locations where NaNs were present. If `a` is not of
inexact type, do nothing and return `a` together with a mask of None.
Note that scalars will end up as array scalars, which is important
for using the result as the value of the out argument in some
operations.
Parameters
----------
a : array-like
Input array.
val : float
NaN values are set to val before doing the operation.
Returns
-------
y : ndarray
If `a` is of inexact type, return a copy of `a` with the NaNs
replaced by the fill value, otherwise return `a`.
mask: {bool, None}
If `a` is of inexact type, return a boolean mask marking locations of
NaNs, otherwise return None.
"""
is_new = not isinstance(a, np.ndarray)
if is_new:
a = np.array(a)
if not issubclass(a.dtype.type, np.inexact):
return a, None
if not is_new:
# need copy
a = np.array(a, subok=True)
mask = np.isnan(a)
np.copyto(a, val, where=mask)
return a, mask
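# Illustrative sketch (standalone demo, not part of the module):
#   a, mask = _replace_nan(np.array([1., np.nan, 3.]), 0)
#   # a    -> array([1., 0., 3.])   (a copy; the input array is untouched)
#   # mask -> array([False, True, False])
#   b, m = _replace_nan(np.array([1, 2, 3]), 0)
#   # integer input: returned unchanged, m is None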
def _copyto(a, val, mask):
"""
Replace values in `a` with NaN where `mask` is True. This differs from
copyto in that it will deal with the case where `a` is a numpy scalar.
Parameters
----------
a : ndarray or numpy scalar
Array or numpy scalar some of whose values are to be replaced
by val.
val : numpy scalar
Value used a replacement.
mask : ndarray, scalar
Boolean array. Where True the corresponding element of `a` is
replaced by `val`. Broadcasts.
Returns
-------
res : ndarray, scalar
Array with elements replaced or scalar `val`.
"""
if isinstance(a, np.ndarray):
np.copyto(a, val, where=mask, casting='unsafe')
else:
a = a.dtype.type(val)
return a
def _divide_by_count(a, b, out=None):
"""
Compute a/b ignoring invalid results. If `a` is an array the division
is done in place. If `a` is a scalar, then its type is preserved in the
    output. If out is None, then a is used instead so that the
division is in place. Note that this is only called with `a` an inexact
type.
Parameters
----------
a : {ndarray, numpy scalar}
Numerator. Expected to be of inexact type but not checked.
b : {ndarray, numpy scalar}
Denominator.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
Returns
-------
ret : {ndarray, numpy scalar}
The return value is a/b. If `a` was an ndarray the division is done
in place. If `a` is a numpy scalar, the division preserves its type.
"""
with np.errstate(invalid='ignore'):
if isinstance(a, np.ndarray):
if out is None:
return np.divide(a, b, out=a, casting='unsafe')
else:
return np.divide(a, b, out=out, casting='unsafe')
else:
if out is None:
return a.dtype.type(a / b)
else:
# This is questionable, but currently a numpy scalar can
# be output to a zero dimensional array.
return np.divide(a, b, out=out, casting='unsafe')
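# Illustrative sketch (standalone demo): with tot = np.array([3., 0.]) and
# cnt = np.array([2, 0]), _divide_by_count(tot, cnt) divides tot in place
# and yields array([1.5, nan]) -- the zero count produces NaN rather than
# raising, because invalid-operation errors are suppressed.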
def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return minimum of an array or minimum along an axis, ignoring any NaNs.
When all-NaN slices are encountered a ``RuntimeWarning`` is raised and
    NaN is returned for that slice.
Parameters
----------
a : array_like
Array containing numbers whose minimum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the minimum is computed. The default is to compute
the minimum of the flattened array.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `min` method
        of sub-classes of `ndarray`. If the sub-class's method
        does not implement `keepdims`, any exceptions will be raised.
.. versionadded:: 1.8.0
Returns
-------
nanmin : ndarray
An array with the same shape as `a`, with the specified axis
removed. If `a` is a 0-d array, or if axis is None, an ndarray
scalar is returned. The same dtype as `a` is returned.
See Also
--------
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
amin :
The minimum value of an array along a given axis, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
isnan :
Shows which elements are Not a Number (NaN).
isfinite:
Shows which elements are neither NaN nor infinity.
amax, fmax, maximum
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative
infinity is treated as a very small (i.e. negative) number.
    If the input has an integer type the function is equivalent to np.min.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmin(a)
1.0
>>> np.nanmin(a, axis=0)
array([ 1., 2.])
>>> np.nanmin(a, axis=1)
array([ 1., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmin([1, 2, np.nan, np.inf])
1.0
>>> np.nanmin([1, 2, np.nan, np.NINF])
-inf
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if not isinstance(a, np.ndarray) or type(a) is np.ndarray:
# Fast, but not safe for subclasses of ndarray
res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN axis encountered", RuntimeWarning)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, +np.inf)
res = np.amin(a, axis=axis, out=out, **kwargs)
if mask is None:
return res
# Check for all-NaN axis
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning)
return res
def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the maximum of an array or maximum along an axis, ignoring any
NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is
raised and NaN is returned for that slice.
Parameters
----------
a : array_like
Array containing numbers whose maximum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the maximum is computed. The default is to compute
the maximum of the flattened array.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `max` method
        of sub-classes of `ndarray`. If the sub-class's method
        does not implement `keepdims`, any exceptions will be raised.
.. versionadded:: 1.8.0
Returns
-------
nanmax : ndarray
An array with the same shape as `a`, with the specified axis removed.
If `a` is a 0-d array, or if axis is None, an ndarray scalar is
returned. The same dtype as `a` is returned.
See Also
--------
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
amax :
The maximum value of an array along a given axis, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
isnan :
Shows which elements are Not a Number (NaN).
isfinite:
Shows which elements are neither NaN nor infinity.
amin, fmin, minimum
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative
infinity is treated as a very small (i.e. negative) number.
    If the input has an integer type the function is equivalent to np.max.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmax(a)
3.0
>>> np.nanmax(a, axis=0)
array([ 3., 2.])
>>> np.nanmax(a, axis=1)
array([ 2., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmax([1, 2, np.nan, np.NINF])
2.0
>>> np.nanmax([1, 2, np.nan, np.inf])
inf
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if not isinstance(a, np.ndarray) or type(a) is np.ndarray:
# Fast, but not safe for subclasses of ndarray
res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN slice encountered", RuntimeWarning)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, -np.inf)
res = np.amax(a, axis=axis, out=out, **kwargs)
if mask is None:
return res
# Check for all-NaN axis
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning)
return res
def nanargmin(a, axis=None):
"""
Return the indices of the minimum values in the specified axis ignoring
NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results
cannot be trusted if a slice contains only NaNs and Infs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmin, nanargmax
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmin(a)
0
>>> np.nanargmin(a)
2
>>> np.nanargmin(a, axis=0)
array([1, 1])
>>> np.nanargmin(a, axis=1)
array([1, 0])
"""
a, mask = _replace_nan(a, np.inf)
res = np.argmin(a, axis=axis)
if mask is not None:
mask = np.all(mask, axis=axis)
if np.any(mask):
raise ValueError("All-NaN slice encountered")
return res
def nanargmax(a, axis=None):
"""
Return the indices of the maximum values in the specified axis ignoring
NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the
results cannot be trusted if a slice contains only NaNs and -Infs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmax, nanargmin
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmax(a)
0
>>> np.nanargmax(a)
1
>>> np.nanargmax(a, axis=0)
array([1, 0])
>>> np.nanargmax(a, axis=1)
array([1, 1])
"""
a, mask = _replace_nan(a, -np.inf)
res = np.argmax(a, axis=axis)
if mask is not None:
mask = np.all(mask, axis=axis)
if np.any(mask):
raise ValueError("All-NaN slice encountered")
return res
def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the sum of array elements over a given axis treating Not a
Numbers (NaNs) as zero.
    In Numpy versions <= 1.8 NaN is returned for slices that are all-NaN or
empty. In later versions zero is returned.
Parameters
----------
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the sum is computed. The default is to compute the
sum of the flattened array.
dtype : data-type, optional
The type of the returned array and of the accumulator in which the
elements are summed. By default, the dtype of `a` is used. An
exception is when `a` has an integer type with less precision than
the platform (u)intp. In that case, the default will be either
(u)int32 or (u)int64 depending on whether the platform is 32 or 64
bits. For inexact inputs, dtype must be inexact.
.. versionadded:: 1.8.0
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``. If provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details. The casting of NaN to integer can yield
unexpected results.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `mean` or `sum` methods
        of sub-classes of `ndarray`. If the sub-class's method
        does not implement `keepdims`, any exceptions will be raised.
.. versionadded:: 1.8.0
Returns
-------
y : ndarray or numpy scalar
See Also
--------
numpy.sum : Sum across array propagating NaNs.
isnan : Show which elements are NaN.
isfinite: Show which elements are not NaN or +/-inf.
Notes
-----
If both positive and negative infinity are present, the sum will be Not
A Number (NaN).
Numpy integer arithmetic is modular. If the size of a sum exceeds the
size of an integer accumulator, its value will wrap around and the
result will be incorrect. Specifying ``dtype=double`` can alleviate
that problem.
Examples
--------
>>> np.nansum(1)
1
>>> np.nansum([1])
1
>>> np.nansum([1, np.nan])
1.0
>>> a = np.array([[1, 1], [1, np.nan]])
>>> np.nansum(a)
3.0
>>> np.nansum(a, axis=0)
array([ 2., 1.])
>>> np.nansum([1, np.nan, np.inf])
inf
>>> np.nansum([1, np.nan, np.NINF])
-inf
>>> np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
nan
"""
a, mask = _replace_nan(a, 0)
return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
    Return the product of array elements over a given axis treating Not a
    Numbers (NaNs) as ones.
One is returned for slices that are all-NaN or empty.
.. versionadded:: 1.10.0
Parameters
----------
a : array_like
        Array containing numbers whose product is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the product is computed. The default is to compute
the product of the flattened array.
dtype : data-type, optional
The type of the returned array and of the accumulator in which the
elements are summed. By default, the dtype of `a` is used. An
exception is when `a` has an integer type with less precision than
the platform (u)intp. In that case, the default will be either
(u)int32 or (u)int64 depending on whether the platform is 32 or 64
bits. For inexact inputs, dtype must be inexact.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``. If provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details. The casting of NaN to integer can yield
unexpected results.
keepdims : bool, optional
If True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will
broadcast correctly against the original `arr`.
Returns
-------
y : ndarray or numpy scalar
See Also
--------
numpy.prod : Product across array propagating NaNs.
isnan : Show which elements are NaN.
Notes
-----
Numpy integer arithmetic is modular. If the size of a product exceeds
the size of an integer accumulator, its value will wrap around and the
result will be incorrect. Specifying ``dtype=double`` can alleviate
that problem.
Examples
--------
>>> np.nanprod(1)
1
>>> np.nanprod([1])
1
>>> np.nanprod([1, np.nan])
1.0
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanprod(a)
6.0
>>> np.nanprod(a, axis=0)
array([ 3., 2.])
"""
a, mask = _replace_nan(a, 1)
return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Compute the arithmetic mean along the specified axis, ignoring NaNs.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the means are computed. The default is to compute
the mean of the flattened array.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for inexact inputs, it is the same as the input
dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `mean` or `sum` methods
        of sub-classes of `ndarray`. If the sub-class's method
        does not implement `keepdims`, any exceptions will be raised.
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned. Nan is
returned for slices that contain only NaNs.
See Also
--------
average : Weighted average
mean : Arithmetic mean taken while not ignoring NaNs
var, nanvar
Notes
-----
The arithmetic mean is the sum of the non-NaN elements along the axis
divided by the number of non-NaN elements.
Note that for floating-point input, the mean is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32`. Specifying a
higher-precision accumulator using the `dtype` keyword can alleviate
this issue.
Examples
--------
>>> a = np.array([[1, np.nan], [3, 4]])
>>> np.nanmean(a)
2.6666666666666665
>>> np.nanmean(a, axis=0)
array([ 2., 4.])
>>> np.nanmean(a, axis=1)
array([ 1., 3.5])
"""
arr, mask = _replace_nan(a, 0)
if mask is None:
return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
if dtype is not None:
dtype = np.dtype(dtype)
if dtype is not None and not issubclass(dtype.type, np.inexact):
raise TypeError("If a is inexact, then dtype must be inexact")
if out is not None and not issubclass(out.dtype.type, np.inexact):
raise TypeError("If a is inexact, then out must be inexact")
# The warning context speeds things up.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims)
tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
avg = _divide_by_count(tot, cnt, out=out)
isbad = (cnt == 0)
if isbad.any():
warnings.warn("Mean of empty slice", RuntimeWarning)
# NaN is the only possible bad value, so no further
# action is needed to handle bad results.
return avg
def _nanmedian1d(arr1d, overwrite_input=False):
"""
Private function for rank 1 arrays. Compute the median ignoring NaNs.
See nanmedian for parameter usage
"""
c = np.isnan(arr1d)
s = np.where(c)[0]
if s.size == arr1d.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning)
return np.nan
elif s.size == 0:
return np.median(arr1d, overwrite_input=overwrite_input)
else:
if overwrite_input:
x = arr1d
else:
x = arr1d.copy()
# select non-nans at end of array
enonan = arr1d[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
x[s[:enonan.size]] = enonan
# slice nans away
return np.median(x[:-s.size], overwrite_input=True)
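# Illustrative walk-through of the compaction above (demo values, not from
# the numpy test suite): for arr1d = [1., nan, 3., nan, 5.], s = [1, 3].
# The tail of length s.size is [nan, 5.]; its non-NaN values ([5.])
# overwrite the leading NaN slots, and the median is then taken over the
# first arr1d.size - s.size = 3 elements, [1., 5., 3.] -> 3.0.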
def _nanmedian(a, axis=None, out=None, overwrite_input=False):
"""
Private function that doesn't support extended axis or keepdims.
These methods are extended to this function using _ureduce
See nanmedian for parameter usage
"""
if axis is None or a.ndim == 1:
part = a.ravel()
if out is None:
return _nanmedian1d(part, overwrite_input)
else:
out[...] = _nanmedian1d(part, overwrite_input)
return out
else:
# for small medians use sort + indexing which is still faster than
# apply_along_axis
if a.shape[axis] < 400:
return _nanmedian_small(a, axis, out, overwrite_input)
result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input)
if out is not None:
out[...] = result
return result
def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
"""
sort + indexing median, faster for small medians along multiple
dimensions due to the high overhead of apply_along_axis
see nanmedian for parameter usage
"""
a = np.ma.masked_array(a, np.isnan(a))
m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
for i in range(np.count_nonzero(m.mask.ravel())):
warnings.warn("All-NaN slice encountered", RuntimeWarning)
if out is not None:
out[...] = m.filled(np.nan)
return out
return m.filled(np.nan)
def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue):
"""
Compute the median along the specified axis, while ignoring NaNs.
Returns the median of the array elements.
.. versionadded:: 1.9.0
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis or axes along which the medians are computed. The default
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
`median`. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. If `overwrite_input` is ``True`` and `a` is not already an
`ndarray`, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If this is anything but the default value it will be passed
through (in the special case of an empty array) to the
`mean` function of the underlying array. If the array is
a sub-class and `mean` does not have the kwarg `keepdims` this
will raise a RuntimeError.
Returns
-------
median : ndarray
A new array holding the result. If the input contains integers
or floats smaller than ``float64``, then the output data-type is
``np.float64``. Otherwise, the data-type of the output is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean, median, percentile
Notes
-----
Given a vector ``V`` of length ``N``, the median of ``V`` is the
middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two
middle values of ``V_sorted`` when ``N`` is even.
Examples
--------
>>> a = np.array([[10.0, 7, 4], [3, 2, 1]])
>>> a[0, 1] = np.nan
>>> a
array([[ 10., nan, 4.],
[ 3., 2., 1.]])
>>> np.median(a)
nan
>>> np.nanmedian(a)
3.0
>>> np.nanmedian(a, axis=0)
array([ 6.5, 2., 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> b = a.copy()
>>> np.nanmedian(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.nanmedian(b, axis=None, overwrite_input=True)
3.0
>>> assert not np.all(a==b)
"""
a = np.asanyarray(a)
# apply_along_axis in _nanmedian doesn't handle empty arrays well,
    # so deal with them upfront
if a.size == 0:
return np.nanmean(a, axis, out=out, keepdims=keepdims)
r, k = _ureduce(a, func=_nanmedian, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims and keepdims is not np._NoValue:
return r.reshape(k)
else:
return r
def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=np._NoValue):
"""
Compute the qth percentile of the data along the specified axis,
while ignoring nan values.
Returns the qth percentile(s) of the array elements.
.. versionadded:: 1.9.0
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute, which must be between 0 and 100
inclusive.
axis : {int, sequence of int, None}, optional
Axis or axes along which the percentiles are computed. The
default is to compute the percentile(s) along a flattened
version of the array. A sequence of axes is supported since
version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
`percentile`. This will save memory when you do not need to
preserve the contents of the input array. In this case you
should not make any assumptions about the contents of the input
`a` after this function completes -- treat it as undefined.
Default is False. If `a` is not already an array, this parameter
will have no effect as `a` will be converted to an array
internally regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the
result will broadcast correctly against the original array `a`.
If this is anything but the default value it will be passed
through (in the special case of an empty array) to the
`mean` function of the underlying array. If the array is
a sub-class and `mean` does not have the kwarg `keepdims` this
will raise a RuntimeError.
Returns
-------
percentile : scalar or ndarray
If `q` is a single percentile and `axis=None`, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the percentiles. The other axes are
the axes that remain after the reduction of `a`. If the input
contains integers or floats smaller than ``float64``, the output
data-type is ``float64``. Otherwise, the output data-type is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
nanmean, nanmedian, percentile, median, mean
Notes
-----
Given a vector ``V`` of length ``N``, the ``q``-th percentile of
    ``V`` is the value ``q/100`` of the way from the minimum to the
    maximum in a sorted copy of ``V``. The values and distances of
the two nearest neighbors as well as the `interpolation` parameter
will determine the percentile if the normalized ranking does not
match the location of ``q`` exactly. This function is the same as
the median if ``q=50``, the same as the minimum if ``q=0`` and the
same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
>>> a[0][1] = np.nan
>>> a
array([[ 10., nan, 4.],
[ 3., 2., 1.]])
>>> np.percentile(a, 50)
nan
>>> np.nanpercentile(a, 50)
3.5
>>> np.nanpercentile(a, 50, axis=0)
array([ 6.5, 2., 2.5])
>>> np.nanpercentile(a, 50, axis=1, keepdims=True)
array([[ 7.],
[ 2.]])
>>> m = np.nanpercentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.nanpercentile(a, 50, axis=0, out=out)
array([ 6.5, 2., 2.5])
>>> m
array([ 6.5, 2. , 2.5])
>>> b = a.copy()
>>> np.nanpercentile(b, 50, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
"""
a = np.asanyarray(a)
q = np.asanyarray(q)
# apply_along_axis in _nanpercentile doesn't handle empty arrays well,
    # so deal with them upfront
if a.size == 0:
return np.nanmean(a, axis, out=out, keepdims=keepdims)
r, k = _ureduce(a, func=_nanpercentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims and keepdims is not np._NoValue:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear'):
"""
Private function that doesn't support extended axis or keepdims.
These methods are extended to this function using _ureduce
See nanpercentile for parameter usage
"""
if axis is None:
part = a.ravel()
result = _nanpercentile1d(part, q, overwrite_input, interpolation)
else:
result = np.apply_along_axis(_nanpercentile1d, axis, a, q,
overwrite_input, interpolation)
# apply_along_axis fills in collapsed axis with results.
# Move that axis to the beginning to match percentile's
# convention.
if q.ndim != 0:
result = np.rollaxis(result, axis)
if out is not None:
out[...] = result
return result
def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation='linear'):
"""
Private function for rank 1 arrays. Compute percentile ignoring
NaNs.
See nanpercentile for parameter usage
"""
c = np.isnan(arr1d)
s = np.where(c)[0]
if s.size == arr1d.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning)
if q.ndim == 0:
return np.nan
else:
return np.nan * np.ones((len(q),))
elif s.size == 0:
return np.percentile(arr1d, q, overwrite_input=overwrite_input,
interpolation=interpolation)
else:
if overwrite_input:
x = arr1d
else:
x = arr1d.copy()
# select non-nans at end of array
enonan = arr1d[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
x[s[:enonan.size]] = enonan
# slice nans away
return np.percentile(x[:-s.size], q, overwrite_input=True,
interpolation=interpolation)
def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the variance along the specified axis, while ignoring NaNs.
Returns the variance of the array elements, a measure of the spread of
a distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
For all-NaN slices or slices with zero degrees of freedom, NaN is
returned and a `RuntimeWarning` is raised.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the variance is computed. The default is to compute
the variance of the flattened array.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
        the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of non-NaN
elements. By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
Returns
-------
variance : ndarray, see dtype parameter above
If `out` is None, return a new array containing the variance,
otherwise return a reference to the output array. If ddof is >= the
number of non-NaN elements in a slice or the slice contains only
NaNs, then the result for that slice is NaN.
See Also
--------
std : Standard deviation
mean : Average
var : Variance while not ignoring NaNs
nanstd, nanmean
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(abs(x - x.mean())**2)``.
The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of a hypothetical infinite
population. ``ddof=0`` provides a maximum likelihood estimate of the
variance for normally distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32` (see example
below). Specifying a higher-accuracy accumulator using the ``dtype``
keyword can alleviate this issue.
For this function to work on sub-classes of ndarray, they must define
`sum` with the kwarg `keepdims`
Examples
--------
>>> a = np.array([[1, np.nan], [3, 4]])
>>> np.var(a)
1.5555555555555554
>>> np.nanvar(a, axis=0)
array([ 1., 0.])
>>> np.nanvar(a, axis=1)
array([ 0., 0.25])
"""
arr, mask = _replace_nan(a, 0)
if mask is None:
return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
if dtype is not None:
dtype = np.dtype(dtype)
if dtype is not None and not issubclass(dtype.type, np.inexact):
raise TypeError("If a is inexact, then dtype must be inexact")
if out is not None and not issubclass(out.dtype.type, np.inexact):
raise TypeError("If a is inexact, then out must be inexact")
with warnings.catch_warnings():
warnings.simplefilter('ignore')
# Compute mean
if type(arr) is np.matrix:
_keepdims = np._NoValue
else:
_keepdims = True
# we need to special case matrix for reverse compatibility
# in order for this to work, these sums need to be called with
# keepdims=True, however matrix now raises an error in this case, but
# the reason that it drops the keepdims kwarg is to force keepdims=True
# so this used to work by serendipity.
cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims)
avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims)
avg = _divide_by_count(avg, cnt)
# Compute squared deviation from mean.
np.subtract(arr, avg, out=arr, casting='unsafe')
arr = _copyto(arr, 0, mask)
if issubclass(arr.dtype.type, np.complexfloating):
sqr = np.multiply(arr, arr.conj(), out=arr).real
else:
sqr = np.multiply(arr, arr, out=arr)
# Compute variance.
var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
if var.ndim < cnt.ndim:
# Subclasses of ndarray may ignore keepdims, so check here.
cnt = cnt.squeeze(axis)
dof = cnt - ddof
var = _divide_by_count(var, dof)
isbad = (dof <= 0)
if np.any(isbad):
warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning)
# NaN, inf, or negative numbers are all possible bad
# values, so explicitly replace them with NaN.
var = _copyto(var, np.nan, isbad)
return var
def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the standard deviation along the specified axis, while
ignoring NaNs.
Returns the standard deviation, a measure of the spread of a
distribution, of the non-NaN array elements. The standard deviation is
computed for the flattened array by default, otherwise over the
specified axis.
For all-NaN slices or slices with zero degrees of freedom, NaN is
returned and a `RuntimeWarning` is raised.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Calculate the standard deviation of the non-NaN values.
axis : int, optional
Axis along which the standard deviation is computed. The default is
to compute the standard deviation of the flattened array.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it
is the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the
calculated values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of non-NaN
elements. By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If this value is anything but the default it is passed through
as-is to the relevant functions of the sub-classes. If these
functions do not have a `keepdims` kwarg, a RuntimeError will
be raised.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard
deviation, otherwise return a reference to the output array. If
ddof is >= the number of non-NaN elements in a slice or the slice
contains only NaNs, then the result for that slice is NaN.
See Also
--------
var, mean, std
nanvar, nanmean
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``.
The average squared deviation is normally calculated as
``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is
specified, the divisor ``N - ddof`` is used instead. In standard
statistical practice, ``ddof=1`` provides an unbiased estimator of the
variance of the infinite population. ``ddof=0`` provides a maximum
likelihood estimate of the variance for normally distributed variables.
The standard deviation computed in this function is the square root of
the estimated variance, so even with ``ddof=1``, it will not be an
unbiased estimate of the standard deviation per se.
Note that, for complex numbers, `std` takes the absolute value before
squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example
below). Specifying a higher-accuracy accumulator using the `dtype`
keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1, np.nan], [3, 4]])
>>> np.nanstd(a)
1.247219128924647
>>> np.nanstd(a, axis=0)
array([ 1., 0.])
>>> np.nanstd(a, axis=1)
array([ 0., 0.5])
"""
var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
if isinstance(var, np.ndarray):
std = np.sqrt(var, out=var)
else:
std = var.dtype.type(np.sqrt(var))
return std
|
chiffa/numpy
|
numpy/lib/nanfunctions.py
|
Python
|
bsd-3-clause
| 46,492 | 0.000022 |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
from google.appengine.ext import db as models
import appengine_django.models as aed_models
from oauth import oauth
from django.conf import settings
from django.db import models as django_models
from common import profile
from common import properties
from common import util
import settings
PRIVACY_PRIVATE = 1
PRIVACY_CONTACTS = 2
PRIVACY_PUBLIC = 3
ACTOR_ALLOWED_EXTRA = ('contact_count',
'follower_count',
'icon',
'description',
'member_count',
'admin_count',
'given_name',
                       'family_name',
                       'homepage',
)
ACTOR_LIMITED_EXTRA = ('icon',
'description',
'given_name',
'family_name'
)
# Internal Utility Functions
def _get_actor_type_from_nick(nick):
if nick[0] == "#":
return "channel"
return "user"
def _get_actor_urlnick_from_nick(nick):
parts = nick.split('@')
nick = parts[0]
if nick[0] == "#":
nick = nick[1:]
return nick
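# Illustrative behavior of the two helpers above (demo nicks):
#   _get_actor_type_from_nick('#cooking@example.com')     -> 'channel'
#   _get_actor_type_from_nick('popular@example.com')      -> 'user'
#   _get_actor_urlnick_from_nick('#cooking@example.com')  -> 'cooking'
#   _get_actor_urlnick_from_nick('popular@example.com')   -> 'popular'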
def _to_api(v):
if hasattr(v, 'to_api'):
v = v.to_api()
elif isinstance(v, type([])):
v = [_to_api(x) for x in v]
elif isinstance(v, type({})):
v = dict([(key, _to_api(value)) for (key, value) in v.iteritems()])
elif isinstance(v, datetime.datetime):
v = str(v)
return v
# Base Models, Internal Only
class ApiMixinModel(aed_models.BaseModel):
def to_api(self):
o = {}
for prop in self.properties().keys():
value = getattr(self, prop)
o[prop] = _to_api(value)
return o
class CachingModel(ApiMixinModel):
"""A simple caching layer for model objects: caches any item read with
get_by_key_name and removes from the cache on put() and delete()
You must call reset_cache() in the beginning of any HTTP request or test.
The design idea is that this should give a consistent view of the data within
the processing a single request.
"""
# TODO(mikie): appengine has non-Model put() and delete() that act on a bunch
# of items at once. To be correct this should hook those as well.
# TODO(mikie): should hook to the django sync_db signal so that the cache is
# reset when database is (to support fixtures in tests correctly).
# TODO(mikie): should cache items read through methods other than
# get_by_key_name()
_cache = { }
_cache_enabled = False
_get_count = 0
def __init__(self, parent=None, key_name=None, _app=None, **kw):
if not key_name and 'key' not in kw:
key_name = self.key_from(**kw)
super(CachingModel, self).__init__(
parent, key_name=key_name, _app=_app, **kw)
if not key_name:
key_name = self.key_from(**kw)
self._cache_keyname__ = (key_name, parent)
@classmethod
def key_from(cls, **kw):
if hasattr(cls, 'key_template'):
try:
return cls.key_template % kw
except KeyError:
logging.warn('Automatic key_name generation failed: %s <- %s',
cls.key_template,
kw)
return None
def _remove_from_cache(self):
clsname = self.__class__.__name__
if CachingModel._cache_enabled:
if CachingModel._cache.has_key(clsname):
if CachingModel._cache[clsname].has_key(self._cache_keyname__):
CachingModel._cache[clsname].pop(self._cache_keyname__)
@profile.log_write
def put(self):
self._remove_from_cache()
ret = super(CachingModel, self).put()
self._cache_keyname__ = (self.key().name(), self.parent_key())
self._remove_from_cache()
return ret
def save(self):
return self.put()
@profile.log_write
def delete(self):
self._remove_from_cache()
return super(CachingModel, self).delete()
@classmethod
@profile.log_call('threadlocal_cached_read')
def get_by_key_name(cls, key_names, parent=None):
if not key_names:
return
# Only caches when called with a single key
if CachingModel._cache_enabled and (
isinstance(key_names, str) or isinstance(key_names, unicode)):
clsname = cls.__name__
if not CachingModel._cache.has_key(clsname):
CachingModel._cache[clsname] = { }
elif CachingModel._cache[clsname].has_key((key_names, parent)):
profile.store_call(cls, 'get_by_key_name', 'threadlocal_cache_hit')
return CachingModel._cache[clsname][(key_names, parent)]
profile.store_call(cls, 'get_by_key_name', 'threadlocal_cache_miss')
ret = super(CachingModel, cls).get_by_key_name(key_names, parent)
CachingModel._get_count += 1
CachingModel._cache[clsname][(key_names, parent)] = ret
if ret:
ret._cache_keyname__ = (key_names, parent)
return ret
else:
CachingModel._get_count += len(key_names)
return super(CachingModel, cls).get_by_key_name(key_names, parent)
@classmethod
def db_get_count(cls):
return CachingModel._get_count
@classmethod
def reset_cache(cls):
CachingModel._cache = { }
@classmethod
def enable_cache(cls, enabled = True):
CachingModel._cache_enabled = enabled
if not enabled:
CachingModel._cache = { }
@classmethod
def reset_get_count(cls):
CachingModel._get_count = 0
@classmethod
@profile.log_read
def gql(cls, *args, **kw):
return super(CachingModel, cls).gql(*args, **kw)
@classmethod
@profile.log_read
def Query(cls):
# TODO(termie): I don't like that this module is called "models" here,
# I'd prefer to be accessing it by "db"
return models.Query(cls)
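# Hedged usage sketch (request-handling code with illustrative key names;
# Actor, defined below, is one of the CachingModel subclasses):
#   CachingModel.reset_cache()        # start of each request or test
#   CachingModel.reset_get_count()
#   CachingModel.enable_cache()
#   a1 = Actor.get_by_key_name('actor/popular@example.com')  # datastore read
#   a2 = Actor.get_by_key_name('actor/popular@example.com')  # cache hit
#   assert a1 is a2 and CachingModel.db_get_count() == 1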
class DeletedMarkerModel(CachingModel):
deleted_at = properties.DateTimeProperty()
def mark_as_deleted(self):
self.deleted_at = datetime.datetime.utcnow()
self.put()
def is_deleted(self):
return self.deleted_at
# Public Models
class AbuseReport(CachingModel):
entry = models.StringProperty() # ref - entry
actor = models.StringProperty() # ref - actor for entry
reports = models.StringListProperty() # the actors who have reported this
count = models.IntegerProperty() # the count of the number of reports so far
key_template = '%(entry)s'
class Activation(CachingModel):
actor = models.StringProperty()
content = models.StringProperty()
code = models.StringProperty()
type = models.StringProperty()
key_template = 'activation/%(actor)s/%(type)s/%(content)s'
def actor_url(nick, actor_type, path='', request=None, mobile=False):
""" returns a url, with optional path appended
NOTE: if appending a path, it should start with '/'
"""
prefix = ""
mobile = mobile or (request and request.mobile)
if mobile:
prefix = "m."
if (settings.WILDCARD_USER_SUBDOMAINS_ENABLED
and actor_type == 'user'
and not mobile):
return 'http://%s.%s%s' % (nick, settings.HOSTED_DOMAIN, path)
elif mobile and settings.SUBDOMAINS_ENABLED:
return 'http://%s%s/%s/%s%s' % (prefix,
settings.HOSTED_DOMAIN,
actor_type,
nick,
path)
else:
return 'http://%s/%s/%s%s' % (settings.DOMAIN,
actor_type,
nick,
path)
class Actor(DeletedMarkerModel):
"""
extra:
channel_count - int; number of channels
contact_count - int; number of contacts
follower_count - int; number of followers
icon - string; avatar path
bg_image - string; image for background (takes precedence over bg_color)
bg_color - string; color for background
bg_repeat - whether to repeat bg_image
description [channel] - string; Channel description
external_url [channel] - string; External url related to channel
member_count [channel] - int; number of members
admin_count [channel] - int; number of admins
email_notify [user] - boolean; does the user want email notifications?
given_name [user] - string; First name
family_name [user] - string; Last Name
comments_hide [user] - boolean; Whether comments should be hidden on
overview
"""
nick = models.StringProperty()
# the appengine datastore is case-sensitive whereas human brains are not,
# Paul is not different from paul to regular people so we need a way to
# prevent duplicate names from cropping up, this adds an additional indexed
# property to support that
normalized_nick = models.StringProperty()
password = models.StringProperty()
privacy = models.IntegerProperty()
type = models.StringProperty()
extra = properties.DictProperty()
# avatar_updated_at is used by DJabberd to get a list of changed avatar. We
# set the default to a date before the launch so that initial avatars have an
# updated_at that is less than any real changes.
avatar_updated_at = properties.DateTimeProperty(
default=datetime.datetime(2009, 01, 01))
key_template = 'actor/%(nick)s'
def url(self, path="", request=None, mobile=False):
""" returns a url, with optional path appended
NOTE: if appending a path, it should start with '/'
"""
return actor_url(_get_actor_urlnick_from_nick(self.nick),
self.type,
path=path,
request=request,
mobile=mobile)
def shortnick(self):
return _get_actor_urlnick_from_nick(self.nick)
  def display_nick(self):
    return self.nick.split("@")[0]
def to_api(self):
rv = super(Actor, self).to_api()
del rv['password']
del rv['normalized_nick']
extra = {}
for k, v in rv['extra'].iteritems():
if k in ACTOR_ALLOWED_EXTRA:
extra[k] = v
rv['extra'] = extra
return rv
def to_api_limited(self):
rv = self.to_api()
extra = {}
for k, v in rv['extra'].iteritems():
if k in ACTOR_LIMITED_EXTRA:
extra[k] = v
rv['extra'] = extra
return rv
def is_channel(self):
return self.type == 'channel'
def is_public(self):
return self.privacy == PRIVACY_PUBLIC
def is_restricted(self):
return self.privacy == PRIVACY_CONTACTS
def __repr__(self):
# Get all properties, but not directly as property objects, because
# constructor requires values to be passed in.
d = dict([(k, self.__getattribute__(k)) for k in self.properties().keys()])
return "%s(**%s)" % (self.__class__.__name__, repr(d))
class Image(CachingModel):
actor = models.StringProperty() # whose image is this?
content = models.BlobProperty() # the image itself
size = models.StringProperty() # see api.avatar_upload
# TODO(termie): key_template plans don't really work very well here
# because we haven't been storing the path :/
class InboxEntry(CachingModel):
"""This is the inbox index for an entry.
the index allows us to quickly pull the overview for a user. There may be
items in the results that are later filtered out - deleted items or items
whose privacy has changed.
"""
inbox = models.StringListProperty() # ref - who this is the inbox for
stream = models.StringProperty() # ref - the stream this belongs to
stream_type = models.StringProperty() # separate because we may filter on it
entry = models.StringProperty() # ref - the entry if this is a comment
created_at = properties.DateTimeProperty()
uuid = models.StringProperty()
shard = models.StringProperty() # an identifier for this portion of
# inboxes
key_template = 'inboxentry/%(stream)s/%(uuid)s/%(shard)s'
def stream_entry_keyname(self):
"""Returns the key name of the corresponding StreamEntry"""
return "%s/%s" % (self.stream, self.uuid)
class Invite(CachingModel):
code = models.StringProperty() # the code for the invite
email = models.StringProperty() # the email this invite went to
to_actor = models.StringProperty() # ref - the actor this invite was sent to
from_actor = models.StringProperty() # ref - who sent this invite
for_actor = models.StringProperty() # ref - invited to what, probs a channel
status = models.StringProperty(default="active") # enum - active, blocked
key_template = 'invite/%(code)s'
class KeyValue(CachingModel):
actor = models.StringProperty()
keyname = models.StringProperty()
value = models.TextProperty()
key_template = 'keyvalue/%(actor)s/%(keyname)s'
class OAuthAccessToken(CachingModel):
key_ = models.StringProperty() # the token key
secret = models.StringProperty() # the token secret
consumer = models.StringProperty() # the consumer this key is assigned to
actor = models.StringProperty() # the actor this key authenticates for
created_at = properties.DateTimeProperty(auto_now_add=True)
# when this was created
perms = models.StringProperty() # read / write / delete
key_template = 'oauth/accesstoken/%(key_)s'
def to_string(self):
token = oauth.OAuthToken(self.key_, self.secret)
return token.to_string()
class OAuthConsumer(CachingModel):
key_ = models.StringProperty() # the consumer key
secret = models.StringProperty() # the consumer secret
actor = models.StringProperty() # the actor who owns this
status = models.StringProperty() # active / pending / inactive
type = models.StringProperty() # web / desktop / mobile
commercial = models.IntegerProperty() # is this a commercial key?
app_name = models.StringProperty() # the name of the app this is for,
# to be displayed to the user
created_at = properties.DateTimeProperty(auto_now_add=True)
key_template = 'oauth/consumer/%(key_)s'
def url(self):
return '/api/keys/%s' % self.key_
class OAuthNonce(CachingModel):
nonce = models.StringProperty() # the nonce
consumer = models.StringProperty() # the consumer this nonce is for
token = models.StringProperty() # the token this nonce is for
created_at = properties.DateTimeProperty(auto_now_add=True)
# when this was created
class OAuthRequestToken(CachingModel):
key_ = models.StringProperty() # the token key
secret = models.StringProperty() # the token secret
consumer = models.StringProperty() # the consumer this key is assigned to
actor = models.StringProperty() # the actor this key authenticates for
authorized = models.IntegerProperty() # has the actor authorized this token?
created_at = properties.DateTimeProperty(auto_now_add=True)
# when this was created
perms = models.StringProperty() # read / write / delete
key_template = 'oauth/requesttoken/%(key_)s'
def to_string(self):
token = oauth.OAuthToken(self.key_, self.secret)
return token.to_string()
class Presence(CachingModel):
"""This represents all the presence data for an actor at a moment in time.
extra:
status - string; message (like an "away message")
location - string; TODO(tyler): Consider gps / cell / structured data
availability - string; TODO(tyler): Define structure
"""
actor = models.StringProperty() # The actor whose presence this is
updated_at = properties.DateTimeProperty(auto_now_add=True)
# The moment we got the update
uuid = models.StringProperty()
extra = properties.DictProperty() # All the rich presence
# TODO(termie): can't do key_template here yet because we include
# current and history keys :/
class Task(CachingModel):
actor = models.StringProperty() # ref - the owner of this queue item
action = models.StringProperty() # api call we are iterating through
action_id = models.StringProperty() # unique identifier for this queue item
args = models.StringListProperty() # *args
  kw = properties.DictProperty()              # **kw
expire = properties.DateTimeProperty()
# when our lock will expire
progress = models.StringProperty() # a string representing the offset to
# which we've progressed so far
created_at = properties.DateTimeProperty(auto_now_add=True)
key_template = 'task/%(actor)s/%(action)s/%(action_id)s'
class Relation(CachingModel):
owner = models.StringProperty() # ref - actor nick
relation = models.StringProperty() # what type of relationship this is
target = models.StringProperty() # ref - actor nick
key_template = 'relation/%(relation)s/%(owner)s/%(target)s'
class Stream(DeletedMarkerModel):
"""
extra: see api.stream_create()
"""
owner = models.StringProperty() # ref
title = models.StringProperty()
type = models.StringProperty()
slug = models.StringProperty()
read = models.IntegerProperty() # TODO: document this
write = models.IntegerProperty()
extra = properties.DictProperty()
key_template = 'stream/%(owner)s/%(slug)s'
def is_public(self):
return self.read == PRIVACY_PUBLIC
def is_restricted(self):
return self.read == PRIVACY_CONTACTS
def keyname(self):
"""Returns the key name"""
return self.key().name()
class StreamEntry(DeletedMarkerModel):
"""
extra :
title -
location -
icon -
content -
entry_stream -
entry_stream_type -
entry_title -
entry_uuid -
comment_count -
"""
stream = models.StringProperty() # ref - the stream this belongs to
owner = models.StringProperty() # ref - the actor who owns the stream
actor = models.StringProperty() # ref - the actor who wrote this
entry = models.StringProperty() # ref - the parent of this,
# should it be a comment
uuid = models.StringProperty()
created_at = properties.DateTimeProperty(auto_now_add=True)
extra = properties.DictProperty()
key_template = '%(stream)s/%(uuid)s'
def url(self, with_anchor=True, request=None, mobile=False):
if self.entry:
# TODO bad?
slug = self.entry.split("/")[-1]
anchor = "#c-%s" % self.uuid
else:
# TODO(termie): add slug property
slug = self.uuid
anchor = ""
path = "/%s/%s" % ('presence', slug)
if with_anchor:
path = "%s%s" % (path, anchor)
return actor_url(_get_actor_urlnick_from_nick(self.owner),
_get_actor_type_from_nick(self.owner),
path=path,
request=request,
mobile=mobile)
def keyname(self):
"""Returns the key name"""
return self.key().name()
def title(self):
""" build a title for this entry, for a presence entry it will just be
the title, but for a comment it will look like:
Comment from [commenter nick] on [entry title] by [nick]
Comment from [commenter nick] on [entry title] by [nick] to #[channel name]
"""
if not self.is_comment():
return self.extra.get('title')
template = "Comment from %(actor)s on %(entry_title)s by %(entry_actor)s"
actor = _get_actor_urlnick_from_nick(self.actor)
entry_title = self.extra.get('entry_title')
entry_actor = _get_actor_urlnick_from_nick(self.extra.get('entry_actor'))
entry_owner_nick = util.get_user_from_topic(self.entry)
entry_type = _get_actor_type_from_nick(entry_owner_nick)
v = {'actor': actor,
'entry_title': entry_title,
'entry_actor': entry_actor,
}
if entry_type == 'channel':
template += ' to #%(channel)s'
channel = _get_actor_urlnick_from_nick(entry_owner_nick)
v['channel'] = channel
return template % v
def is_comment(self):
    return (self.entry is not None)
def is_channel(self):
return self.owner.startswith('#')
def entry_actor(self):
if self.entry:
return util.get_user_from_topic(self.entry)
return None
class Subscription(CachingModel):
"""this represents a topic, usually a stream, that a subscriber
(usually an inbox) would like to receive updates to
"""
topic = models.StringProperty() # ref - the stream being subscribed to
subscriber = models.StringProperty() # ref - the subscriber (actor)
target = models.StringProperty() # where to dump this
state = models.StringProperty() # The status of remote subs, see XEP-0060
# sect 4.2. The 'pending' state is ignored if
# the target of the subscription is used.
# The design is for performance: on public
# entries
# the state is ignored and listing the
# subscriptions is a single query; for
# contacts-only entries the state is used but
# it is also kept up-to-date regarding buddy
# relationships, so a single query for
# state='subscribed' can again be used.
extra = properties.DictProperty() # holds a bunch of stuff
created_at = properties.DateTimeProperty(auto_now_add=True)
# for ordering someday
key_template = '%(topic)s/%(target)s'
def is_subscribed(self):
# LEGACY COMPAT: the 'or' here is for legacy compat
    return (self.state == 'subscribed' or self.state is None)
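# Illustrative filtering (hypothetical list of Subscription instances):
#
#   subscribed = [s for s in subs if s.is_subscribed()]
#
# For public entries the state is ignored, so listing subscriptions stays a
# single query; contacts-only entries query on state='subscribed' instead,
# as described in the comments above.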
#class ActorMobile(models.Model):
# nick = models.TextField()
# mobile = models.TextField()
# country_code = models.TextField()
# confirmed = models.BooleanField()
#class ActorEmail(models.Model):
# nick = models.TextField()
# email = models.EmailField()
|
chheplo/jaikuengine
|
common/models.py
|
Python
|
apache-2.0
| 22,503 | 0.016531 |
##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import IECore
import Gaffer
import GafferUI
## A dialogue which allows a user to edit the parameters of an
# IECore.Op instance and then execute it.
class OpDialogue( GafferUI.Dialogue ) :
def __init__( self, opInstance, title=None, sizeMode=GafferUI.Window.SizeMode.Manual, **kw ) :
if title is None :
title = IECore.CamelCase.toSpaced( opInstance.typeName() )
GafferUI.Dialogue.__init__( self, title, sizeMode=sizeMode, **kw )
self.__node = Gaffer.ParameterisedHolderNode()
self.__node.setParameterised( opInstance )
frame = GafferUI.Frame()
frame.setChild( GafferUI.NodeUI.create( self.__node ) )
self._setWidget( frame )
self.__cancelButton = self._addButton( "Cancel" )
self.__cancelButtonConnection = self.__cancelButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ) )
executeLabel = "OK"
with IECore.IgnoredExceptions( KeyError ) :
executeLabel = opInstance.userData()["UI"]["buttonLabel"].value
self.__executeButton = self._addButton( executeLabel )
self.__executeButtonConnection = self.__executeButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ) )
self.__opExecutedSignal = Gaffer.Signal1()
## A signal called when the user has pressed the execute button
# and the Op has been successfully executed. This is passed the
# result of the execution.
def opExecutedSignal( self ) :
return self.__opExecutedSignal
## Causes the dialogue to enter a modal state, returning the result
# of executing the Op, or None if the user cancelled the operation. Any
	# validation or execution errors will be reported to the user and the
	# dialogue will remain open for them to cancel or try again.
def waitForResult( self, **kw ) :
# block our button connection so we don't end up executing twice
with Gaffer.BlockedConnection( self.__executeButtonConnection ) :
while 1 :
button = self.waitForButton( **kw )
if button is self.__executeButton :
result = self.__execute()
if result is not None :
return result
else :
return None
def __execute( self ) :
try :
self.__node.setParameterisedValues()
result = self.__node.getParameterised()[0]()
self.opExecutedSignal()( result )
## \todo Support Op userData for specifying closing of Dialogue?
self.close()
return result
except :
GafferUI.ErrorDialogue.displayException( parentWindow=self )
return None
def __buttonClicked( self, button ) :
if button is self.__executeButton :
self.__execute()
else :
self.close()
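# Minimal usage sketch (assumes `op` is an IECore.Op instance and a GafferUI
# event loop is running):
#
#	dialogue = GafferUI.OpDialogue( op )
#	result = dialogue.waitForResult()
#	if result is not None :
#		print result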
|
DoubleNegativeVisualEffects/gaffer
|
python/GafferUI/OpDialogue.py
|
Python
|
bsd-3-clause
| 4,512 | 0.038121 |
# Authors: John Dennis <jdennis@redhat.com>
#
# Copyright (C) 2011 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
# Module exports
__all__ = ['log_mgr', 'root_logger', 'standard_logging_setup',
'IPA_ROOT_LOGGER_NAME', 'ISO8601_UTC_DATETIME_FMT',
'LOGGING_FORMAT_STDERR', 'LOGGING_FORMAT_STDOUT', 'LOGGING_FORMAT_FILE']
#-------------------------------------------------------------------------------
import sys
import re
import copy
from log_manager import LogManager, parse_log_level
#-------------------------------------------------------------------------------
# Our root logger, all loggers will be descendents of this.
IPA_ROOT_LOGGER_NAME = 'ipa'
# Format string for time.strftime() to produce a ISO 8601 date time
# formatted string in the UTC time zone.
ISO8601_UTC_DATETIME_FMT = '%Y-%m-%dT%H:%M:%SZ'
# Logging format string for use with logging stderr handlers
LOGGING_FORMAT_STDERR = 'ipa: %(levelname)s: %(message)s'
# Logging format string for use with logging stdout handlers
LOGGING_FORMAT_STDOUT = '[%(asctime)s %(name)s] <%(levelname)s>: %(message)s'
# Logging format string for use with logging file handlers
LOGGING_FORMAT_FILE = '\t'.join([
'%(asctime)s',
'%(process)d',
'%(threadName)s',
'%(name)s',
'%(levelname)s',
'%(message)s',
])
# Used by standard_logging_setup() for console message
LOGGING_FORMAT_STANDARD_CONSOLE = '%(name)-12s: %(levelname)-8s %(message)s'
# Used by standard_logging_setup() for file message
LOGGING_FORMAT_STANDARD_FILE = '%(asctime)s %(levelname)s %(message)s'
#-------------------------------------------------------------------------------
class IPALogManager(LogManager):
'''
    Subclass the LogManager to enforce some IPA specific logging
conventions.
* Default to timestamps in UTC.
* Default to ISO 8601 timestamp format.
* Default the message format.
'''
log_logger_level_config_re = re.compile(r'^log_logger_level_(debug|info|warn|warning|error|critical|\d+)$')
def __init__(self, configure_state=None):
'''
:parameters:
configure_state
Used by clients of the log manager to track the
configuration state, may be any object.
'''
super(IPALogManager, self).__init__(IPA_ROOT_LOGGER_NAME, configure_state)
def configure_from_env(self, env, configure_state=None):
'''
        Read the logger configuration from the Env config. The
following items may be configured:
Logger Levels
        *log_logger_level_XXX = comma separated list of regexps*
Logger levels can be explicitly specified for specific loggers as
        opposed to a global logging level. Specific loggers are indicated
by a list of regular expressions bound to a level. If a logger's
name matches the regexp then it is assigned that level. The keys
in the Env config must begin with "log_logger_level\_" and then be
followed by a symbolic or numeric log level, for example::
log_logger_level_debug = ipapython\.dn\..*
log_logger_level_35 = ipalib\.plugins\.dogtag
        The first line says any logger belonging to the ipapython.dn module
        will have its level configured to debug.
        The second line says the ipalib.plugins.dogtag logger will be
        configured to level 35.
Note: logger names are a dot ('.') separated list forming a path
in the logger tree. The dot character is also a regular
expression metacharacter (matches any character) therefore you
will usually need to escape the dot in the logger names by
        preceding it with a backslash.
The return value of this function is a dict with the following
format:
logger_regexps
List of (regexp, level) tuples
:parameters:
env
Env object configuration values are read from.
configure_state
If other than None update the log manger's configure_state
variable to this object. Clients of the log manager can
use configure_state to track the state of the log manager.
'''
logger_regexps = []
config = {'logger_regexps' : logger_regexps,
}
for attr in ('debug', 'verbose'):
value = getattr(env, attr, None)
if value is not None:
config[attr] = value
for attr in list(env):
# Get logger level configuration
match = IPALogManager.log_logger_level_config_re.search(attr)
if match:
value = match.group(1)
level = parse_log_level(value)
value = getattr(env, attr)
regexps = re.split('\s*,\s*', value)
# Add the regexp, it maps to the configured level
for regexp in regexps:
logger_regexps.append((regexp, level))
continue
self.configure(config, configure_state)
return config
def create_log_handlers(self, configs, logger=None, configure_state=None):
'Enforce some IPA specific configurations'
configs = copy.copy(configs)
for cfg in configs:
if not 'time_zone_converter' in cfg:
cfg['time_zone_converter'] = 'utc'
if not 'datefmt' in cfg:
cfg['datefmt'] = ISO8601_UTC_DATETIME_FMT
if not 'format' in cfg:
cfg['format'] = LOGGING_FORMAT_STDOUT
return super(IPALogManager, self).create_log_handlers(configs, logger, configure_state)
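# Example handler config consumed by create_log_handlers() (illustrative);
# keys left unspecified pick up the UTC / ISO 8601 / message-format defaults
# enforced above:
#
#   log_mgr.create_log_handlers([dict(name='console',
#                                     stream=sys.stderr,
#                                     level='info')])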
#-------------------------------------------------------------------------------
def standard_logging_setup(filename=None, verbose=False, debug=False,
filemode='w', console_format=LOGGING_FORMAT_STANDARD_CONSOLE):
handlers = []
# File output is always logged at debug level
if filename is not None:
file_handler = dict(name='file',
filename=filename,
filemode=filemode,
permission=0o600,
level='debug',
format=LOGGING_FORMAT_STANDARD_FILE)
handlers.append(file_handler)
if log_mgr.handlers.has_key('console'):
log_mgr.remove_handler('console')
level = 'error'
if verbose:
level = 'info'
if debug:
level = 'debug'
console_handler = dict(name='console',
stream=sys.stderr,
level=level,
format=console_format)
handlers.append(console_handler)
    # default_level must be debug because we want the file handler to
# always log at the debug level.
log_mgr.configure(dict(default_level='debug',
handlers=handlers),
configure_state='standard')
return log_mgr.root_logger
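# Usage sketch (hypothetical log path): debug-level logging to a file plus
# an info-level console handler:
#
#   log = standard_logging_setup(filename='/tmp/ipa.log', verbose=True)
#   log.info('logging configured')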
#-------------------------------------------------------------------------------
# Single shared instance of log manager
#
# By default always starts with stderr console handler at error level
# so messages generated before logging is fully configured have some
# place to go and won't get lost.
log_mgr = IPALogManager()
log_mgr.configure(dict(default_level='error',
handlers=[dict(name='console',
stream=sys.stderr)]),
configure_state='default')
root_logger = log_mgr.root_logger
|
hroncok/freeipa
|
ipapython/ipa_log_manager.py
|
Python
|
gpl-3.0
| 8,302 | 0.004095 |
import ConfigParser
import StringIO
import os
import unittest
import UserDict
from test import test_support
class SortedDict(UserDict.UserDict):
def items(self):
result = self.data.items()
result.sort()
return result
def keys(self):
result = self.data.keys()
result.sort()
return result
def values(self):
# XXX never used?
result = self.items()
return [i[1] for i in result]
def iteritems(self): return iter(self.items())
def iterkeys(self): return iter(self.keys())
__iter__ = iterkeys
def itervalues(self): return iter(self.values())
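# SortedDict is handed to RawConfigParser as ``dict_type`` in SortedTestCase
# below, so sections and options are written back in sorted order.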
class TestCaseBase(unittest.TestCase):
allow_no_value = False
def newconfig(self, defaults=None):
if defaults is None:
self.cf = self.config_class(allow_no_value=self.allow_no_value)
else:
self.cf = self.config_class(defaults,
allow_no_value=self.allow_no_value)
return self.cf
def fromstring(self, string, defaults=None):
cf = self.newconfig(defaults)
sio = StringIO.StringIO(string)
cf.readfp(sio)
return cf
def test_basic(self):
config_string = (
"[Foo Bar]\n"
"foo=bar\n"
"[Spacey Bar]\n"
"foo = bar\n"
"[Commented Bar]\n"
"foo: bar ; comment\n"
"[Long Line]\n"
"foo: this line is much, much longer than my editor\n"
" likes it.\n"
"[Section\\with$weird%characters[\t]\n"
"[Internationalized Stuff]\n"
"foo[bg]: Bulgarian\n"
"foo=Default\n"
"foo[en]=English\n"
"foo[de]=Deutsch\n"
"[Spaces]\n"
"key with spaces : value\n"
"another with spaces = splat!\n"
)
if self.allow_no_value:
config_string += (
"[NoValue]\n"
"option-without-value\n"
)
cf = self.fromstring(config_string)
L = cf.sections()
L.sort()
E = [r'Commented Bar',
r'Foo Bar',
r'Internationalized Stuff',
r'Long Line',
r'Section\with$weird%characters[' '\t',
r'Spaces',
r'Spacey Bar',
]
if self.allow_no_value:
E.append(r'NoValue')
E.sort()
eq = self.assertEqual
eq(L, E)
# The use of spaces in the section names serves as a
# regression test for SourceForge bug #583248:
# http://www.python.org/sf/583248
eq(cf.get('Foo Bar', 'foo'), 'bar')
eq(cf.get('Spacey Bar', 'foo'), 'bar')
eq(cf.get('Commented Bar', 'foo'), 'bar')
eq(cf.get('Spaces', 'key with spaces'), 'value')
eq(cf.get('Spaces', 'another with spaces'), 'splat!')
if self.allow_no_value:
eq(cf.get('NoValue', 'option-without-value'), None)
self.assertNotIn('__name__', cf.options("Foo Bar"),
'__name__ "option" should not be exposed by the API!')
# Make sure the right things happen for remove_option();
# added to include check for SourceForge bug #123324:
self.assertTrue(cf.remove_option('Foo Bar', 'foo'),
"remove_option() failed to report existence of option")
self.assertFalse(cf.has_option('Foo Bar', 'foo'),
"remove_option() failed to remove option")
self.assertFalse(cf.remove_option('Foo Bar', 'foo'),
"remove_option() failed to report non-existence of option"
" that was removed")
self.assertRaises(ConfigParser.NoSectionError,
cf.remove_option, 'No Such Section', 'foo')
eq(cf.get('Long Line', 'foo'),
'this line is much, much longer than my editor\nlikes it.')
def test_case_sensitivity(self):
cf = self.newconfig()
cf.add_section("A")
cf.add_section("a")
L = cf.sections()
L.sort()
eq = self.assertEqual
eq(L, ["A", "a"])
cf.set("a", "B", "value")
eq(cf.options("a"), ["b"])
eq(cf.get("a", "b"), "value",
"could not locate option, expecting case-insensitive option names")
self.assertTrue(cf.has_option("a", "b"))
cf.set("A", "A-B", "A-B value")
for opt in ("a-b", "A-b", "a-B", "A-B"):
self.assertTrue(
cf.has_option("A", opt),
"has_option() returned false for option which should exist")
eq(cf.options("A"), ["a-b"])
eq(cf.options("a"), ["b"])
cf.remove_option("a", "B")
eq(cf.options("a"), [])
# SF bug #432369:
cf = self.fromstring(
"[MySection]\nOption: first line\n\tsecond line\n")
eq(cf.options("MySection"), ["option"])
eq(cf.get("MySection", "Option"), "first line\nsecond line")
# SF bug #561822:
cf = self.fromstring("[section]\nnekey=nevalue\n",
defaults={"key":"value"})
self.assertTrue(cf.has_option("section", "Key"))
def test_default_case_sensitivity(self):
cf = self.newconfig({"foo": "Bar"})
self.assertEqual(
cf.get("DEFAULT", "Foo"), "Bar",
"could not locate option, expecting case-insensitive option names")
cf = self.newconfig({"Foo": "Bar"})
self.assertEqual(
cf.get("DEFAULT", "Foo"), "Bar",
"could not locate option, expecting case-insensitive defaults")
def test_parse_errors(self):
self.newconfig()
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n extra-spaces: splat\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n extra-spaces= splat\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n:value-without-option-name\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n=value-without-option-name\n")
self.parse_error(ConfigParser.MissingSectionHeaderError,
"No Section!\n")
def parse_error(self, exc, src):
sio = StringIO.StringIO(src)
self.assertRaises(exc, self.cf.readfp, sio)
def test_query_errors(self):
cf = self.newconfig()
self.assertEqual(cf.sections(), [],
"new ConfigParser should have no defined sections")
self.assertFalse(cf.has_section("Foo"),
"new ConfigParser should have no acknowledged "
"sections")
self.assertRaises(ConfigParser.NoSectionError,
cf.options, "Foo")
self.assertRaises(ConfigParser.NoSectionError,
cf.set, "foo", "bar", "value")
self.get_error(ConfigParser.NoSectionError, "foo", "bar")
cf.add_section("foo")
self.get_error(ConfigParser.NoOptionError, "foo", "bar")
def get_error(self, exc, section, option):
try:
self.cf.get(section, option)
except exc, e:
return e
else:
self.fail("expected exception type %s.%s"
% (exc.__module__, exc.__name__))
def test_boolean(self):
cf = self.fromstring(
"[BOOLTEST]\n"
"T1=1\n"
"T2=TRUE\n"
"T3=True\n"
"T4=oN\n"
"T5=yes\n"
"F1=0\n"
"F2=FALSE\n"
"F3=False\n"
"F4=oFF\n"
"F5=nO\n"
"E1=2\n"
"E2=foo\n"
"E3=-1\n"
"E4=0.1\n"
"E5=FALSE AND MORE"
)
for x in range(1, 5):
self.assertTrue(cf.getboolean('BOOLTEST', 't%d' % x))
self.assertFalse(cf.getboolean('BOOLTEST', 'f%d' % x))
self.assertRaises(ValueError,
cf.getboolean, 'BOOLTEST', 'e%d' % x)
def test_weird_errors(self):
cf = self.newconfig()
cf.add_section("Foo")
self.assertRaises(ConfigParser.DuplicateSectionError,
cf.add_section, "Foo")
def test_write(self):
config_string = (
"[Long Line]\n"
"foo: this line is much, much longer than my editor\n"
" likes it.\n"
"[DEFAULT]\n"
"foo: another very\n"
" long line\n"
)
if self.allow_no_value:
config_string += (
"[Valueless]\n"
"option-without-value\n"
)
cf = self.fromstring(config_string)
output = StringIO.StringIO()
cf.write(output)
expect_string = (
"[DEFAULT]\n"
"foo = another very\n"
"\tlong line\n"
"\n"
"[Long Line]\n"
"foo = this line is much, much longer than my editor\n"
"\tlikes it.\n"
"\n"
)
if self.allow_no_value:
expect_string += (
"[Valueless]\n"
"option-without-value\n"
"\n"
)
self.assertEqual(output.getvalue(), expect_string)
def test_set_string_types(self):
cf = self.fromstring("[sect]\n"
"option1=foo\n")
# Check that we don't get an exception when setting values in
# an existing section using strings:
class mystr(str):
pass
cf.set("sect", "option1", "splat")
cf.set("sect", "option1", mystr("splat"))
cf.set("sect", "option2", "splat")
cf.set("sect", "option2", mystr("splat"))
try:
unicode
except NameError:
pass
else:
cf.set("sect", "option1", unicode("splat"))
cf.set("sect", "option2", unicode("splat"))
def test_read_returns_file_list(self):
file1 = test_support.findfile("cfgparser.1")
# check when we pass a mix of readable and non-readable files:
cf = self.newconfig()
parsed_files = cf.read([file1, "nonexistent-file"])
self.assertEqual(parsed_files, [file1])
self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
# check when we pass only a filename:
cf = self.newconfig()
parsed_files = cf.read(file1)
self.assertEqual(parsed_files, [file1])
self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
# check when we pass only missing files:
cf = self.newconfig()
parsed_files = cf.read(["nonexistent-file"])
self.assertEqual(parsed_files, [])
# check when we pass no files:
cf = self.newconfig()
parsed_files = cf.read([])
self.assertEqual(parsed_files, [])
# shared by subclasses
def get_interpolation_config(self):
return self.fromstring(
"[Foo]\n"
"bar=something %(with1)s interpolation (1 step)\n"
"bar9=something %(with9)s lots of interpolation (9 steps)\n"
"bar10=something %(with10)s lots of interpolation (10 steps)\n"
"bar11=something %(with11)s lots of interpolation (11 steps)\n"
"with11=%(with10)s\n"
"with10=%(with9)s\n"
"with9=%(with8)s\n"
"with8=%(With7)s\n"
"with7=%(WITH6)s\n"
"with6=%(with5)s\n"
"With5=%(with4)s\n"
"WITH4=%(with3)s\n"
"with3=%(with2)s\n"
"with2=%(with1)s\n"
"with1=with\n"
"\n"
"[Mutual Recursion]\n"
"foo=%(bar)s\n"
"bar=%(foo)s\n"
"\n"
"[Interpolation Error]\n"
"name=%(reference)s\n",
# no definition for 'reference'
defaults={"getname": "%(__name__)s"})
def check_items_config(self, expected):
cf = self.fromstring(
"[section]\n"
"name = value\n"
"key: |%(name)s| \n"
"getdefault: |%(default)s|\n"
"getname: |%(__name__)s|",
defaults={"default": "<default>"})
L = list(cf.items("section"))
L.sort()
self.assertEqual(L, expected)
class ConfigParserTestCase(TestCaseBase):
config_class = ConfigParser.ConfigParser
allow_no_value = True
def test_interpolation(self):
rawval = {
ConfigParser.ConfigParser: ("something %(with11)s "
"lots of interpolation (11 steps)"),
ConfigParser.SafeConfigParser: "%(with1)s",
}
cf = self.get_interpolation_config()
eq = self.assertEqual
eq(cf.get("Foo", "getname"), "Foo")
eq(cf.get("Foo", "bar"), "something with interpolation (1 step)")
eq(cf.get("Foo", "bar9"),
"something with lots of interpolation (9 steps)")
eq(cf.get("Foo", "bar10"),
"something with lots of interpolation (10 steps)")
self.get_error(ConfigParser.InterpolationDepthError, "Foo", "bar11")
def test_interpolation_missing_value(self):
self.get_interpolation_config()
e = self.get_error(ConfigParser.InterpolationError,
"Interpolation Error", "name")
self.assertEqual(e.reference, "reference")
self.assertEqual(e.section, "Interpolation Error")
self.assertEqual(e.option, "name")
def test_items(self):
self.check_items_config([('default', '<default>'),
('getdefault', '|<default>|'),
('getname', '|section|'),
('key', '|value|'),
('name', 'value')])
def test_set_nonstring_types(self):
cf = self.newconfig()
cf.add_section('non-string')
cf.set('non-string', 'int', 1)
cf.set('non-string', 'list', [0, 1, 1, 2, 3, 5, 8, 13, '%('])
cf.set('non-string', 'dict', {'pi': 3.14159, '%(': 1,
'%(list)': '%(list)'})
cf.set('non-string', 'string_with_interpolation', '%(list)s')
cf.set('non-string', 'no-value')
self.assertEqual(cf.get('non-string', 'int', raw=True), 1)
self.assertRaises(TypeError, cf.get, 'non-string', 'int')
self.assertEqual(cf.get('non-string', 'list', raw=True),
[0, 1, 1, 2, 3, 5, 8, 13, '%('])
self.assertRaises(TypeError, cf.get, 'non-string', 'list')
self.assertEqual(cf.get('non-string', 'dict', raw=True),
{'pi': 3.14159, '%(': 1, '%(list)': '%(list)'})
self.assertRaises(TypeError, cf.get, 'non-string', 'dict')
self.assertEqual(cf.get('non-string', 'string_with_interpolation',
raw=True), '%(list)s')
self.assertRaises(ValueError, cf.get, 'non-string',
'string_with_interpolation', raw=False)
self.assertEqual(cf.get('non-string', 'no-value'), None)
class MultilineValuesTestCase(TestCaseBase):
config_class = ConfigParser.ConfigParser
wonderful_spam = ("I'm having spam spam spam spam "
"spam spam spam beaked beans spam "
"spam spam and spam!").replace(' ', '\t\n')
def setUp(self):
cf = self.newconfig()
for i in range(100):
s = 'section{}'.format(i)
cf.add_section(s)
for j in range(10):
cf.set(s, 'lovely_spam{}'.format(j), self.wonderful_spam)
with open(test_support.TESTFN, 'w') as f:
cf.write(f)
def tearDown(self):
os.unlink(test_support.TESTFN)
def test_dominating_multiline_values(self):
# we're reading from file because this is where the code changed
# during performance updates in Python 3.2
cf_from_file = self.newconfig()
with open(test_support.TESTFN) as f:
cf_from_file.readfp(f)
self.assertEqual(cf_from_file.get('section8', 'lovely_spam4'),
self.wonderful_spam.replace('\t\n', '\n'))
class RawConfigParserTestCase(TestCaseBase):
config_class = ConfigParser.RawConfigParser
def test_interpolation(self):
cf = self.get_interpolation_config()
eq = self.assertEqual
eq(cf.get("Foo", "getname"), "%(__name__)s")
eq(cf.get("Foo", "bar"),
"something %(with1)s interpolation (1 step)")
eq(cf.get("Foo", "bar9"),
"something %(with9)s lots of interpolation (9 steps)")
eq(cf.get("Foo", "bar10"),
"something %(with10)s lots of interpolation (10 steps)")
eq(cf.get("Foo", "bar11"),
"something %(with11)s lots of interpolation (11 steps)")
def test_items(self):
self.check_items_config([('default', '<default>'),
('getdefault', '|%(default)s|'),
('getname', '|%(__name__)s|'),
('key', '|%(name)s|'),
('name', 'value')])
def test_set_nonstring_types(self):
cf = self.newconfig()
cf.add_section('non-string')
cf.set('non-string', 'int', 1)
cf.set('non-string', 'list', [0, 1, 1, 2, 3, 5, 8, 13])
cf.set('non-string', 'dict', {'pi': 3.14159})
self.assertEqual(cf.get('non-string', 'int'), 1)
self.assertEqual(cf.get('non-string', 'list'),
[0, 1, 1, 2, 3, 5, 8, 13])
self.assertEqual(cf.get('non-string', 'dict'), {'pi': 3.14159})
class SafeConfigParserTestCase(ConfigParserTestCase):
config_class = ConfigParser.SafeConfigParser
def test_safe_interpolation(self):
# See http://www.python.org/sf/511737
cf = self.fromstring("[section]\n"
"option1=xxx\n"
"option2=%(option1)s/xxx\n"
"ok=%(option1)s/%%s\n"
"not_ok=%(option2)s/%%s")
self.assertEqual(cf.get("section", "ok"), "xxx/%s")
self.assertEqual(cf.get("section", "not_ok"), "xxx/xxx/%s")
def test_set_malformatted_interpolation(self):
cf = self.fromstring("[sect]\n"
"option1=foo\n")
self.assertEqual(cf.get('sect', "option1"), "foo")
self.assertRaises(ValueError, cf.set, "sect", "option1", "%foo")
self.assertRaises(ValueError, cf.set, "sect", "option1", "foo%")
self.assertRaises(ValueError, cf.set, "sect", "option1", "f%oo")
self.assertEqual(cf.get('sect', "option1"), "foo")
# bug #5741: double percents are *not* malformed
cf.set("sect", "option2", "foo%%bar")
self.assertEqual(cf.get("sect", "option2"), "foo%bar")
def test_set_nonstring_types(self):
cf = self.fromstring("[sect]\n"
"option1=foo\n")
# Check that we get a TypeError when setting non-string values
# in an existing section:
self.assertRaises(TypeError, cf.set, "sect", "option1", 1)
self.assertRaises(TypeError, cf.set, "sect", "option1", 1.0)
self.assertRaises(TypeError, cf.set, "sect", "option1", object())
self.assertRaises(TypeError, cf.set, "sect", "option2", 1)
self.assertRaises(TypeError, cf.set, "sect", "option2", 1.0)
self.assertRaises(TypeError, cf.set, "sect", "option2", object())
def test_add_section_default_1(self):
cf = self.newconfig()
self.assertRaises(ValueError, cf.add_section, "default")
def test_add_section_default_2(self):
cf = self.newconfig()
self.assertRaises(ValueError, cf.add_section, "DEFAULT")
class SafeConfigParserTestCaseNoValue(SafeConfigParserTestCase):
allow_no_value = True
class TestChainMap(unittest.TestCase):
def test_issue_12717(self):
d1 = dict(red=1, green=2)
d2 = dict(green=3, blue=4)
dcomb = d2.copy()
dcomb.update(d1)
cm = ConfigParser._Chainmap(d1, d2)
self.assertIsInstance(cm.keys(), list)
self.assertEqual(set(cm.keys()), set(dcomb.keys())) # keys()
self.assertEqual(set(cm.values()), set(dcomb.values())) # values()
self.assertEqual(set(cm.items()), set(dcomb.items())) # items()
self.assertEqual(set(cm), set(dcomb)) # __iter__ ()
self.assertEqual(cm, dcomb) # __eq__()
self.assertEqual([cm[k] for k in dcomb], dcomb.values()) # __getitem__()
klist = 'red green blue black brown'.split()
self.assertEqual([cm.get(k, 10) for k in klist],
[dcomb.get(k, 10) for k in klist]) # get()
self.assertEqual([k in cm for k in klist],
[k in dcomb for k in klist]) # __contains__()
with test_support.check_py3k_warnings():
self.assertEqual([cm.has_key(k) for k in klist],
[dcomb.has_key(k) for k in klist]) # has_key()
class Issue7005TestCase(unittest.TestCase):
"""Test output when None is set() as a value and allow_no_value == False.
http://bugs.python.org/issue7005
"""
expected_output = "[section]\noption = None\n\n"
def prepare(self, config_class):
# This is the default, but that's the point.
cp = config_class(allow_no_value=False)
cp.add_section("section")
cp.set("section", "option", None)
sio = StringIO.StringIO()
cp.write(sio)
return sio.getvalue()
def test_none_as_value_stringified(self):
output = self.prepare(ConfigParser.ConfigParser)
self.assertEqual(output, self.expected_output)
def test_none_as_value_stringified_raw(self):
output = self.prepare(ConfigParser.RawConfigParser)
self.assertEqual(output, self.expected_output)
class SortedTestCase(RawConfigParserTestCase):
def newconfig(self, defaults=None):
self.cf = self.config_class(defaults=defaults, dict_type=SortedDict)
return self.cf
def test_sorted(self):
self.fromstring("[b]\n"
"o4=1\n"
"o3=2\n"
"o2=3\n"
"o1=4\n"
"[a]\n"
"k=v\n")
output = StringIO.StringIO()
self.cf.write(output)
self.assertEqual(output.getvalue(),
"[a]\n"
"k = v\n\n"
"[b]\n"
"o1 = 4\n"
"o2 = 3\n"
"o3 = 2\n"
"o4 = 1\n\n")
class ExceptionPicklingTestCase(unittest.TestCase):
"""Tests for issue #13760: ConfigParser exceptions are not picklable."""
def test_error(self):
import pickle
e1 = ConfigParser.Error('value')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(repr(e1), repr(e2))
def test_nosectionerror(self):
import pickle
e1 = ConfigParser.NoSectionError('section')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(repr(e1), repr(e2))
def test_nooptionerror(self):
import pickle
e1 = ConfigParser.NoOptionError('option', 'section')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.option, e2.option)
self.assertEqual(repr(e1), repr(e2))
def test_duplicatesectionerror(self):
import pickle
e1 = ConfigParser.DuplicateSectionError('section')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(repr(e1), repr(e2))
def test_interpolationerror(self):
import pickle
e1 = ConfigParser.InterpolationError('option', 'section', 'msg')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.option, e2.option)
self.assertEqual(repr(e1), repr(e2))
def test_interpolationmissingoptionerror(self):
import pickle
e1 = ConfigParser.InterpolationMissingOptionError('option', 'section',
'rawval', 'reference')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.option, e2.option)
self.assertEqual(e1.reference, e2.reference)
self.assertEqual(repr(e1), repr(e2))
def test_interpolationsyntaxerror(self):
import pickle
e1 = ConfigParser.InterpolationSyntaxError('option', 'section', 'msg')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.option, e2.option)
self.assertEqual(repr(e1), repr(e2))
def test_interpolationdeptherror(self):
import pickle
e1 = ConfigParser.InterpolationDepthError('option', 'section',
'rawval')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.option, e2.option)
self.assertEqual(repr(e1), repr(e2))
def test_parsingerror(self):
import pickle
e1 = ConfigParser.ParsingError('source')
e1.append(1, 'line1')
e1.append(2, 'line2')
e1.append(3, 'line3')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.filename, e2.filename)
self.assertEqual(e1.errors, e2.errors)
self.assertEqual(repr(e1), repr(e2))
def test_missingsectionheadererror(self):
import pickle
e1 = ConfigParser.MissingSectionHeaderError('filename', 123, 'line')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.line, e2.line)
self.assertEqual(e1.filename, e2.filename)
self.assertEqual(e1.lineno, e2.lineno)
self.assertEqual(repr(e1), repr(e2))
def test_main():
test_support.run_unittest(
ConfigParserTestCase,
MultilineValuesTestCase,
RawConfigParserTestCase,
SafeConfigParserTestCase,
SafeConfigParserTestCaseNoValue,
SortedTestCase,
Issue7005TestCase,
TestChainMap,
ExceptionPicklingTestCase,
)
if __name__ == "__main__":
test_main()
|
alanjw/GreenOpenERP-Win-X86
|
python/Lib/test/test_cfgparser.py
|
Python
|
agpl-3.0
| 28,482 | 0.000632 |
"""This example shows how to create a scatter plot using the `shell` package.
"""
# Major library imports
from numpy import linspace, random, pi
# Enthought library imports
from chaco.shell import plot, hold, title, show
# Create some data
x = linspace(-2*pi, 2*pi, 100)
y1 = random.random(100)
y2 = random.random(100)
# Create some scatter plots
plot(x, y1, "b.")
hold(True)
plot(x, y2, "g+", marker_size=2)
# Add some titles
title("simple scatter plots")
# This command is only necessary if running from command line
show()
|
burnpanck/chaco
|
examples/demo/shell/scatter.py
|
Python
|
bsd-3-clause
| 533 | 0 |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 05 17:10:34 2014
@author: Ning
"""
from util import *
from util.log import _logger
from feat.terms.term_categorize import term_category
import codecs
def parse(sentence):
for term in sentence.split():
yield term_category(term)
def tokenize():
rows = tsv.reader(conv.redirect("data|train.dat"))
with codecs.open("train.tokenized.dat",'w',encoding='utf-8') as fl:
for row in rows:
fl.write("%s\t%s\n" % (' '.join(list(parse(row[0]))) , row[1]) )
rows = tsv.reader(conv.redirect("data|test.dat"))
with codecs.open("test.tokenized.dat",'w',encoding='utf-8') as fl:
for row in rows:
fl.write("%s\t%s\n" % (' '.join(list(parse(row[0]))) , row[1]) )
if __name__ == "__main__":
tokenize()
|
luanjunyi/cortana
|
feat/bow/tokenize.py
|
Python
|
mit
| 834 | 0.020384 |
#!/usr/bin/env python
from __future__ import print_function, division, absolute_import
import re
import os
from copy import deepcopy
import numpy as np
import toytree
import toyplot
#######################################################
# Exception Classes
#######################################################
class ToytreeError(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class TreeError(Exception):
"A problem occurred during a TreeNode operation"
def __init__(self, value=''):
self.value = value
def __str__(self):
return repr(self.value)
# TREE FORMATS
NW_FORMAT = {
# flexible with support
# Format 0 = (A:0.35,(B:0.72,(D:0.60,G:0.12)1.00:0.64)1.00:0.56);
0: [
('name', str, True),
('dist', float, True),
('support', float, True),
('dist', float, True),
],
# flexible with internal node names
# Format 1 = (A:0.35,(B:0.72,(D:0.60,G:0.12)E:0.64)C:0.56);
1: [
('name', str, True),
('dist', float, True),
('name', str, True),
('dist', float, True),
],
# strict with support values
# Format 2 = (A:0.35,(B:0.72,(D:0.60,G:0.12)1.00:0.64)1.00:0.56);
2: [
('name', str, False),
('dist', float, False),
('support', str, False),
('dist', float, False),
],
# strict with internal node names
# Format 3 = (A:0.35,(B:0.72,(D:0.60,G:0.12)E:0.64)C:0.56);
3: [
('name', str, False),
('dist', float, False),
('name', str, False),
('dist', float, False),
],
# strict with internal node names
# Format 4 = (A:0.35,(B:0.72,(D:0.60,G:0.12)));
4: [
('name', str, False),
('dist', float, False),
(None, None, False),
(None, None, False),
],
# Format 5 = (A:0.35,(B:0.72,(D:0.60,G:0.12):0.64):0.56);
5: [
('name', str, False),
('dist', float, False),
(None, None, False),
('dist', float, False),
],
# Format 6 = (A:0.35,(B:0.72,(D:0.60,G:0.12)E)C);
6: [
('name', str, False),
(None, None, False),
(None, None, False),
('dist', float, False),
],
# Format 7 = (A,(B,(D,G)E)C);
7: [
('name', str, False),
('dist', float, False),
('name', str, False),
(None, None, False),
],
# Format 8 = (A,(B,(D,G)));
8: [
('name', str, False),
(None, None, False),
('name', str, False),
(None, None, False),
],
# Format 9 = (,(,(,)));
9: [
('name', str, False),
(None, None, False),
(None, None, False),
(None, None, False),
],
# Format 10 = ((a[&Z=1,Y=2]:1.0[&X=3], b[&Z=1,Y=2]:3.0[&X=2]):1.0[&L=1,W=0], ...
# NHX Like mrbayes NEXUS common
10: [
('name', str, True),
('dist', str, True),
('name', str, True),
('dist', str, True),
]
}
def parse_network(net, disconnect=True, root=None):
"""
Parse network to extract the major topology.
    Hybrid nodes are labeled with .name="H{int}"; by default
    (disconnect=True) they are removed from the tree and reported, together
    with their gamma values, in the returned admix dict.
    root: list of tip names used to root the tree. If None the tree is
    rooted on the first non-hybrid tip.
"""
# if net is a file then read the first line
if os.path.exists(net):
with open(net, 'r') as infile:
net = infile.readline()
# trim off loglik and anything after it (TODO: keep loglik)
if ";" in net:
net = net.split(";")[0] + ';'
# sub :xxx:: to be ::: b/c I don't care about admix edge bls
net = re.sub(r":\d.\w*::", ":::", net)
# change H nodes to proper format
while ",#" in net:
pre, post = net.split(",#", 1)
npre, npost = post.split(")", 1)
newpre = npre.split(":")[0] + "-" + npre.split(":")[-1]
net = pre + ")#" + newpre + npost
net = net.replace(":::", "-")
# parse cleaned newick and set empty gamma on all nodes
net = toytree.tree(net, tree_format=1)
# store admix data
admix = {}
# root on tips if provided by user -- otherwise pick a non-H root
if not root:
# if not rooted choose any non-H root
if not net.is_rooted():
net = net.root(
[i for i in net.get_tip_labels() if not i.startswith("#H")][0]
)
else:
net = net.root(root)
# Traverse tree to find hybrid nodes. If a hybrid node is labeled as a
# distinct branch in the tree then it is dropped from the tree and
for node in net.treenode.traverse("postorder"):
# find hybrid nodes as internal nchild=1, or external with H in name
if (len(node.children) == 1) or node.name.startswith("#H"):
# assign name and gamma to hybrid nodes
aname, aprop = node.name.split("-")
aname = aname.lstrip("#")
node.name = aname
# assign hybrid to closest nodes up and down from edge
# node.children[0].hybrid = int(aname[1:])
# node.gamma = round(float(aprop), 3)
# node.up.hybrid = int(aname[1:])
# if root is a hybrid edge (ugh)
if node.up is None:
small, big = sorted(node.children, key=lambda x: len(x))
root = toytree.TreeNode.TreeNode(name='root')
node.children = [small]
small.up = node
node.up = root
big.up = root
root.children = [node, big]
net.treenode = root
# disconnect node by connecting children to parent
if disconnect:
# if tip is a hybrid
if not node.children:
# get sister node
sister = [i for i in node.up.children if i != node][0]
# connect sister to gparent
sister.up = node.up.up
node.up.up.children.remove(node.up)
node.up.up.children.append(sister)
# if hybrid is internal
else:
node.up.children.remove(node)
for child in node.children:
child.up = node.up
node.up.children.append(child)
# store admix data by descendants but remove hybrid tips
desc = node.get_leaf_names()
if aname in desc:
desc = [i for i in node.up.get_leaf_names() if i != aname]
desc = [i for i in desc if not i.startswith("#H")]
# put this node into admix
if aname not in admix:
admix[aname] = (desc, aprop)
        # matching edge already in admix; now arrange into correct order by minor
else:
# this is the minor edge
if aprop < admix[aname][1]:
admix[aname] = (
admix[aname][0],
desc,
0.5,
{},
str(round(float(aprop), 3)),
)
# this is the major edge
else:
admix[aname] = (
desc,
admix[aname][0],
0.5,
{},
str(round(float(admix[aname][1]), 3)),
)
    # update coords; needed if node disconnection is turned back on.
net._coords.update()
net = net.ladderize()
return net, admix
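# Usage sketch (hypothetical SNaQ-style network string or file path):
#
#   tre, admix = parse_network("net.newick")
#   tre.draw()
#   # admix maps each hybrid name to a tuple of
#   # (major_clade_tips, minor_clade_tips, 0.5, {}, gamma_string)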
class Annotator(object):
"""
Add annotations as a new mark on top of an existing toytree mark.
"""
def __init__(self, tree, axes, mark):
self.tree = tree
self.axes = axes
self.mark = mark
def draw_clade_box(
self,
names=None,
regex=None,
wildcard=None,
yspace=None,
xspace=None,
**kwargs):
"""
Draw a rectangle around a clade on a toytree.
Parameters:
-----------
names, regex, wildcard:
Choose one of these three methods to select one or more tipnames.
The clade composing all descendants of their common ancestor will
be highlighted.
yspace (float or None):
The extent to which boxes extend above and below the root and tip
nodes. If None then this is automatically generated.
xspace (float or None):
The extent to which the clade box extends to the sides
(out of the clade towards other tips.) If None default uses 0.5.
kwargs:
Additional styling options are supported: color, opacity, etc.
Returns:
------------
Toyplot.mark.Range
"""
# get the common ancestor
nidx = self.tree.get_mrca_idx_from_tip_labels(
names=names, regex=regex, wildcard=wildcard)
# get tips descended from mrca
tips = self.tree.idx_dict[nidx].get_leaves()
tidxs = [i.idx for i in tips]
# extent to which box bounds extend outside of the exact clade size.
if not yspace:
yspace = self.tree.treenode.height / 15.
if not xspace:
xspace = 0.45
# left and right positions
if self.mark.layout == 'r':
xmin = self.mark.ntable[nidx, 0] - yspace
xmax = max(self.mark.ntable[tidxs, 0]) + yspace
ymin = min(self.mark.ntable[tidxs, 1]) - xspace
ymax = max(self.mark.ntable[tidxs, 1]) + xspace
if self.mark.layout == 'l':
xmin = self.mark.ntable[nidx, 0] + yspace
xmax = max(self.mark.ntable[tidxs, 0]) - yspace
ymin = max(self.mark.ntable[tidxs, 1]) + xspace
ymax = min(self.mark.ntable[tidxs, 1]) - xspace
elif self.mark.layout == 'd':
ymax = self.mark.ntable[nidx, 1] + yspace
ymin = min(self.mark.ntable[tidxs, 1]) - yspace
xmin = min(self.mark.ntable[tidxs, 0]) - xspace
xmax = max(self.mark.ntable[tidxs, 0]) + xspace
elif self.mark.layout == 'u':
ymin = self.mark.ntable[nidx, 1] - yspace
ymax = min(self.mark.ntable[tidxs, 1]) + yspace
xmin = min(self.mark.ntable[tidxs, 0]) - xspace
xmax = max(self.mark.ntable[tidxs, 0]) + xspace
# draw the rectangle
newmark = self.axes.rectangle(xmin, xmax, ymin, ymax, **kwargs)
# put tree at the top of the scenegraph
self.axes._scenegraph.remove_edge(self.axes, 'render', self.mark)
self.axes._scenegraph.add_edge(self.axes, 'render', self.mark)
return newmark
# def draw_tip_box(
# self,
# names=None,
# regex=None,
# wildcard=None,
# yspace=None,
# xspace=None,
# **kwargs):
# """
# Draw a rectangle around the tips of a clade on a toytree.
# Parameters:
# -----------
# names, regex, wildcard:
# Choose one of these three methods to select one or more tipnames.
# The clade composing all descendants of their common ancestor will
# be highlighted.
# yspace (float or None):
# The extent to which boxes extend above and below the root and tip
# nodes. If None then this is automatically generated.
# xspace (float or None):
# The extent to which the clade box extends to the sides
# (out of the clade towards other tips.) If None default uses 0.5.
# kwargs:
# Additional styling options are supported: color, opacity, etc.
# Returns:
# ------------
# Toyplot.mark.Range
# """
# # get the common ancestor
# nidx = self.tree.get_mrca_idx_from_tip_labels(
# names=names, regex=regex, wildcard=wildcard)
# # get tips descended from mrca
# tips = self.tree.idx_dict[nidx].get_leaves()
# tidxs = [i.idx for i in tips]
# # get nudge size from dists in the tree or user supplied
# if not yspace:
# yspace = self.tree.get_node_values("dist", 1, 1).mean() / 4.
# if not xspace:
# xspace = 0.5
# # distance in PIXELS to the tip labels
# tipx = toyplot.units.convert(
# mark.tip_labels_style["-toyplot-anchor-shift"], 'px')
# # left and right positions
# if self.mark.layout == 'r':
# # get unit conversion
# tipstart = tipx / (axes.project('x', 1) - axes.project('x', 0))
# xmin = self.mark.ntable[nidx, 0] - yspace
# xmax = max(self.mark.ntable[tidxs, 0]) + yspace
# ymin = min(self.mark.ntable[tidxs, 1]) - xspace
# ymax = max(self.mark.ntable[tidxs, 1]) + xspace
# if self.mark.layout == 'l':
# xmin = self.mark.ntable[nidx, 0] + yspace
# xmax = max(self.mark.ntable[tidxs, 0]) - yspace
# ymin = max(self.mark.ntable[tidxs, 1]) + xspace
# ymax = min(self.mark.ntable[tidxs, 1]) - xspace
# elif self.mark.layout == 'd':
# ymax = self.mark.ntable[nidx, 1] + yspace
# ymin = min(self.mark.ntable[tidxs, 1]) - yspace
# xmin = min(self.mark.ntable[tidxs, 0]) - xspace
# xmax = max(self.mark.ntable[tidxs, 0]) + xspace
# elif self.mark.layout == 'u':
# ymin = self.mark.ntable[nidx, 1] - yspace
# ymax = min(self.mark.ntable[tidxs, 1]) + yspace
# xmin = min(self.mark.ntable[tidxs, 0]) - xspace
# xmax = max(self.mark.ntable[tidxs, 0]) + xspace
# # draw the rectangle
# mark = self.axes.rectangle(xmin, xmax, ymin, ymax, **kwargs)
# return mark
# def generate_rectangle(self, firstname=None, lastname=None, axes=None, color="green", opacity=.25):
# """
# Returns an updated axes with a generated rectangle based on input labels provided
# """
# index_of_first = self.get_mrca_idx_from_tip_labels(names=firstname)
# index_of_last = self.get_mrca_idx_from_tip_labels(names=lastname)
# x_vals = (x[0] for x in self.get_node_coordinates())
# axes.rectangle(
# min(self.get_tip_coordinates()[index_of_first][0], self.get_tip_coordinates()[index_of_last][0]),
# max(x_vals),
# self.get_tip_coordinates()[index_of_first][1],
# self.get_tip_coordinates()[index_of_last][1],
# opacity=opacity,
# color=color,
# )
# return axes
# class TreeInference:
# - get distance matrix (from an input data set... phy, nex)
# - ----- create a class to store DNA matrix (pandas colored)
# - NJ tree infer
# ------ uses distance matrix
# - UPGMA tree infer
# ------ uses distance matrix
#class TreeMoves:
# def move_spr(self):
# """
# Sub-tree pruning and Regrafting.
# Select one edge randomly from the tree and split on that edge to create
# two subtrees. Attach one of the subtrees (e.g., the smaller one)
# randomly to the larger tree to create a new node.
# ... does SPR break edges connected to root when tree is real rooted?
# """
# pass
# # On rooted trees we can work with nodes easier than edges. Start by
# # selected a node at random that is not root.
# # nodes = [i for i in self.ttree.tree.traverse() if not i.is_root()]
# # rnode = nodes[random.randint(0, len(nodes) - 1)]
# # # get all edges on the tree, skip last one which is non-real root edge
# # edges = self.ttree.tree.get_edges()[:-1]
# # # select a random edge
# # redge = edges[random.randint(0, len(edges))]
# # # break into subtrees
# # tre1 = self.tree.prune(self.tree.get_common_ancestor(redge[0]).idx)
# # tre2 = self.tree.prune(self.tree.get_common_ancestor(redge[1]).idx)
# def move_tbr(self):
# pass
# def move_nni(self):
# pass
# def non_parametric_rate_smoothing(self):
# """
# Non-parametric rate smooting.
# A method for estimating divergence times when evolutionary rates are
# variable across lineages by minimizing ancestor-descendant local rate
# changes. According to Sanderson this method is motivated by the
# likelihood that evolutionary rates are autocorrelated in time.
# returns Toytree
# """
# # p is a fixed exponent
# p = 2
# W = []
# for node in self.ttree.traverse():
# if not node.is_leaf():
# children = node.children
# ks = []
# for child in children:
# dist = abs(node.dist - child.dist)
# ks.append(dist ** p)
# W.append(sum(ks))
# # root rate is mean of all descendant rates --
# # n is the number of edges (rates) (nnodes - 1 for root)
# r_root = np.mean(W)
# rootw = []
# for child in self.ttree.tree.children:
# rootw.append((r_rroot - child.dist) ** p)
# w_root = sum(rootw)
# W.append(w_root)
# k = []
# for
# k = sum( np.exp(abs(ri - rj), p) )
# W = sum(k)
# def penalized_likelihood(...):
# pass
#
# def wfunc(ttree, p):
# ws = []
# for node in ttree.tree.traverse():
# if not node.is_leaf():
# w = sum([(node.dist - child.dist) ** p for child in node.children])
# ws.append(w)
# return sum(ws)
#######################################################
# Other
#######################################################
def bpp2newick(bppnewick):
"converts bpp newick format to normal newick. ugh."
regex1 = re.compile(r" #[-+]?[0-9]*\.?[0-9]*[:]")
regex2 = re.compile(r" #[-+]?[0-9]*\.?[0-9]*[;]")
regex3 = re.compile(r": ")
new = regex1.sub(":", bppnewick)
new = regex2.sub(";", new)
new = regex3.sub(":", new)
return new.strip()
# TODO: would be useful for (eg., root) to have option to return not mrca,
# and fuzzy match just tips, or nodes, etc...
def normalize_values(vals, nbins=10, minsize=2, maxsize=12):
"""
Distributes values into bins spaced at reasonable sizes for plotting.
Example, this can be used automatically scale Ne values to plot as
edge widths.
"""
# make copy of original
ovals = deepcopy(vals)
# if 6X min value is higher than max then add this
# as a fake value to scale more nicely
vals = list(vals)
if min(vals) * 6 > max(vals):
vals.append(min(vals) * 6)
# sorted vals list
svals = sorted(vals)
# put vals into bins
bins = np.histogram(vals, bins=nbins)[0]
# convert binned vals to widths in 2-12
newvals = {}
sizes = np.linspace(minsize, maxsize, nbins)
for idx, inbin in enumerate(bins):
for num in range(inbin):
newvals[svals.pop(0)] = sizes[idx]
return np.array([newvals[i] for i in ovals])
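# Illustrative call: bin three Ne values into plottable edge widths
# (exact widths depend on the histogram binning above):
#
#   widths = normalize_values([1e4, 5e4, 2e5])
#   # -> an array of three widths spaced within [2, 12]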
# def fuzzy_match_tipnames(ttree, names, wildcard, regex, mono=True, retnode=True):
def fuzzy_match_tipnames(ttree, names, wildcard, regex, mrca=True, mono=True):
"""
    Used in multiple internal functions (e.g., .root() and .drop_tips())
to select an internal mrca node, or multiple tipnames, using fuzzy matching
so that every name does not need to be written out by hand.
name: verbose list
wildcard: matching unique string
regex: regex expression
mrca: return mrca node of selected tipnames.
mono: raise error if selected tipnames are not monophyletic
"""
# require arguments
if not any([names, wildcard, regex]):
raise ToytreeError(
"must enter an outgroup, wildcard selector, or regex pattern")
# get list of **nodes** from {list, wildcard, or regex}
tips = []
if names:
if isinstance(names, (str, int)):
names = [names]
notfound = [i for i in names if i not in ttree.get_tip_labels()]
if any(notfound):
raise ToytreeError(
"Sample {} is not in the tree".format(notfound))
tips = [i for i in ttree.treenode.get_leaves() if i.name in names]
# use regex to match tipnames
elif regex:
tips = [
i for i in ttree.treenode.get_leaves() if re.match(regex, i.name)
]
if not any(tips):
raise ToytreeError("No Samples matched the regular expression")
# use wildcard substring matching
elif wildcard:
tips = [i for i in ttree.treenode.get_leaves() if wildcard in i.name]
if not any(tips):
raise ToytreeError("No Samples matched the wildcard")
# build list of **tipnames** from matched nodes
if not tips:
raise ToytreeError("no matching tipnames")
tipnames = [i.name for i in tips]
# if a single tipname matched no need to check for monophyly
if len(tips) == 1:
if mrca:
return tips[0]
else:
return tipnames
# if multiple nodes matched, check if they're monophyletic
mbool, mtype, mnames = (
ttree.treenode.check_monophyly(
tipnames, "name", ignore_missing=True)
)
# get mrca node
node = ttree.treenode.get_common_ancestor(tips)
# raise an error if required to be monophyletic but not
if mono:
if not mbool:
raise ToytreeError(
"Taxon list cannot be paraphyletic")
# return tips or nodes
if not mrca:
return tipnames
else:
return node
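# Illustrative usage: fuzzy_match_tipnames(tre, None, "ame", None) returns the
# mrca node of all tips whose names contain the substring "ame"; with
# mrca=False it returns the list of matched tip names instead.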
|
eaton-lab/toytree
|
toytree/utils.py
|
Python
|
bsd-3-clause
| 22,118 | 0.003979 |
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'XMLRPC Operation Invoice',
'version': '0.1',
'category': 'ETL',
'description': '''
XMLRPC Import invoice
''',
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'xmlrpc_base',
'account',
],
'init_xml': [],
'demo': [],
'data': [
'security/xml_groups.xml',
#'operation_view.xml',
'invoice_view.xml',
'data/operation.xml',
],
'active': False,
'installable': True,
'auto_install': False,
}
|
Micronaet/micronaet-xmlrpc
|
xmlrpc_operation_invoice/__openerp__.py
|
Python
|
agpl-3.0
| 1,553 | 0.001288 |
import re
from django.contrib.sites.models import Site
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from django.utils.feedgenerator import Atom1Feed
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from .models import LoggedAction
lock_re = re.compile(r'^(?:Unl|L)ocked\s*constituency (.*) \((\d+)\)$')
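# e.g. (illustrative): lock_re.match('Locked constituency Oxford East (65622)')
# yields groups ('Oxford East', '65622')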
class RecentChangesFeed(Feed):
site_name = Site.objects.get_current().name
title = _("{site_name} recent changes").format(site_name=site_name)
description = _("Changes to {site_name} candidates").format(site_name=site_name)
link = "/feeds/changes.xml"
feed_type = Atom1Feed
def items(self):
return LoggedAction.objects.order_by('-updated')[:50]
def item_title(self, item):
m = lock_re.search(item.source)
if m:
return u"{0} - {1}".format(
m.group(1),
item.action_type
)
else:
return u"{0} - {1}".format(
item.person_id,
item.action_type
)
def item_description(self, item):
updated = _(u"Updated at {0}").format(str(item.updated))
description = u"{0}\n\n{1}\n".format(item.source, updated)
return description
def item_link(self, item):
# As a hack for the moment, constituencies are just mentioned
# in the source message:
m = lock_re.search(item.source)
if m:
return reverse('constituency', kwargs={
'post_id': m.group(2),
'ignored_slug': slugify(m.group(1))
})
else:
if item.person_id:
return reverse('person-view', args=[item.person_id])
else:
return '/'
|
datamade/yournextmp-popit
|
candidates/feeds.py
|
Python
|
agpl-3.0
| 1,820 | 0.001099 |
import json
import random
import time
import urllib
import re
from scrapy.utils.misc import load_object
from scrapy.http import Request
from scrapy.conf import settings
import redis
from crawler.schedulers.redis.dupefilter import RFPDupeFilter
from crawler.schedulers.redis.queue import RedisPriorityQueue
try:
import cPickle as pickle
except ImportError:
import pickle
class DistributedScheduler(object):
'''
Scrapy request scheduler that utilizes Priority Queues
to moderate scrape requests within a distributed scrapy
cluster
'''
redis_conn = None # the redis connection
queue = None # the queue to use for crawling
spider = None # the spider using this scheduler
queue_class = None # the class to use for the queue
dupefilter = None # the redis dupefilter
item_retries = 0 # the number of extra tries to get an item
def __init__(self, server, persist, timeout, retries):
'''
Initialize the scheduler
'''
self.redis_conn = server
self.persist = persist
self.rfp_timeout = timeout
        self.item_retries = retries
def setup(self):
'''
Used to initialize things when using mock
spider.name is not set yet
'''
self.queue = RedisPriorityQueue(self.redis_conn,
self.spider.name + ":queue")
@classmethod
def from_settings(cls, settings):
server = redis.Redis(host=settings.get('REDIS_HOST'),
port=settings.get('REDIS_PORT'))
persist = settings.get('SCHEDULER_PERSIST', True)
timeout = settings.get('DUPEFILTER_TIMEOUT', 600)
retries = settings.get('SCHEDULER_ITEM_RETRIES', 3)
return cls(server, persist, timeout, retries)
@classmethod
def from_crawler(cls, crawler):
return cls.from_settings(crawler.settings)
def open(self, spider):
self.spider = spider
self.setup()
self.dupefilter = RFPDupeFilter(self.redis_conn,
self.spider.name + ':dupefilter', self.rfp_timeout)
def close(self, reason):
if not self.persist:
self.dupefilter.clear()
self.queue.clear()
def is_blacklisted(self, appid, crawlid):
'''
Checks the redis blacklist for crawls that should not be propagated
either from expiring or stopped
@return: True if the appid crawlid combo is blacklisted
'''
key_check = '{appid}||{crawlid}'.format(appid=appid,
crawlid=crawlid)
redis_key = self.spider.name + ":blacklist"
return self.redis_conn.sismember(redis_key, key_check)
def enqueue_request(self, request):
'''
Pushes a request from the spider back into the queue
'''
if not request.dont_filter and self.dupefilter.request_seen(request):
return
req_dict = self.request_to_dict(request)
if not self.is_blacklisted(req_dict['meta']['appid'],
req_dict['meta']['crawlid']):
key = "{sid}:queue".format(sid=req_dict['meta']['spiderid'])
curr_time = time.time()
# insert if crawl never expires (0) or time < expires
if req_dict['meta']['expires'] == 0 or \
curr_time < req_dict['meta']['expires']:
self.queue.push(req_dict, req_dict['meta']['priority'])
def request_to_dict(self, request):
'''
Convert Request object to a dict.
modified from scrapy.utils.reqser
'''
req_dict = {
# urls should be safe (safe_string_url)
'url': request.url.decode('ascii'),
'method': request.method,
'headers': dict(request.headers),
'body': request.body,
'cookies': request.cookies,
'meta': request.meta,
'_encoding': request._encoding,
'priority': request.priority,
'dont_filter': request.dont_filter,
}
return req_dict
def find_item(self):
'''
Finds an item from the queue
'''
count = 0
while count <= self.item_retries:
item = self.queue.pop()
if item:
# very basic limiter
time.sleep(1)
return item
# we want the spiders to get slightly out of sync
# with each other for better performance
time.sleep(random.random())
count = count + 1
return None
def next_request(self):
'''
Logic to handle getting a new url request
'''
t = time.time()
item = self.find_item()
if item:
try:
req = Request(item['url'])
except ValueError:
# need absolute url
# need better url validation here
req = Request('http://' + item['url'])
if 'meta' in item:
item = item['meta']
# defaults
if "attrs" not in item:
item["attrs"] = {}
if "allowed_domains" not in item:
item["allowed_domains"] = ()
if "allow_regex" not in item:
item["allow_regex"] = ()
if "deny_regex" not in item:
item["deny_regex"] = ()
if "deny_extensions" not in item:
item["deny_extensions"] = None
if 'curdepth' not in item:
item['curdepth'] = 0
if "maxdepth" not in item:
item["maxdepth"] = 0
if "priority" not in item:
item['priority'] = 0
if "retry_times" not in item:
item['retry_times'] = 0
if "expires" not in item:
item['expires'] = 0
for key in ('attrs', 'allowed_domains', 'curdepth', 'maxdepth',
'appid', 'crawlid', 'spiderid', 'priority', 'retry_times',
'expires', 'allow_regex', 'deny_regex', 'deny_extensions'):
req.meta[key] = item[key]
return req
return None
def has_pending_requests(self):
'''
We never want to say we have pending requests
If this returns True scrapy sometimes hangs.
'''
return False
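# Illustrative wiring (the dotted path is an assumption based on this repo's
# layout): point Scrapy's SCHEDULER setting at this class and supply the
# settings read in from_settings() above, e.g.:
#   SCHEDULER = 'crawler.schedulers.redis.scheduler.DistributedScheduler'
#   REDIS_HOST = 'localhost'
#   REDIS_PORT = 6379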
|
openslack/openslack-crawler
|
crawler/schedulers/kafka/scheduler.py
|
Python
|
apache-2.0
| 6,484 | 0.000617 |
from sklearntools.kfold import ThresholdHybridCV
import numpy as np
from six.moves import reduce
from operator import __add__
from numpy.testing.utils import assert_array_equal
from nose.tools import assert_equal
def test_hybrid_cv():
X = np.random.normal(size=(100,10))
y = np.random.normal(size=100)
cv = ThresholdHybridCV(n_folds=10, upper=1.)
folds = list(cv._iter_test_masks(X, y))
assert_array_equal(reduce(__add__, folds), np.ones(100, dtype=int))
assert_equal(len(folds), cv.get_n_splits(X, y))
if __name__ == '__main__':
import sys
import nose
    # This code will run the test in this file.
module_name = sys.modules[__name__].__file__
result = nose.run(argv=[sys.argv[0],
module_name,
'-s', '-v'])
|
jcrudy/sklearntools
|
sklearntools/test/test_kfold.py
|
Python
|
bsd-3-clause
| 809 | 0.003708 |
"""
sentry.utils.email
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
import os
import subprocess
import tempfile
import time
from email.utils import parseaddr
from functools import partial
from operator import attrgetter
from random import randrange
from django.conf import settings
from django.core.mail import get_connection as _get_connection
from django.core.mail import send_mail as _send_mail
from django.core.mail import EmailMultiAlternatives
from django.core.mail.backends.base import BaseEmailBackend
from django.core.signing import BadSignature, Signer
from django.utils.crypto import constant_time_compare
from django.utils.encoding import force_bytes, force_str, force_text
from toronado import from_string as inline_css
from sentry import options
from sentry.logging import LoggingFormat
from sentry.models import (
Activity, Event, Group, GroupEmailThread, Project, User, UserOption
)
from sentry.utils import metrics
from sentry.utils.safe import safe_execute
from sentry.utils.strings import is_valid_dot_atom
from sentry.web.helpers import render_to_string
# The maximum amount of recipients to display in human format.
MAX_RECIPIENTS = 5
logger = logging.getLogger('sentry.mail')
class _CaseInsensitiveSigner(Signer):
"""
Generate a signature that is comprised of only lowercase letters.
WARNING: Do not use this for anything that needs to be cryptographically
secure! This is losing entropy and has a much higher chance of collision
due to dropping to lowercase letters. For our purposes, this lack of entropy
is ok and doesn't pose a risk.
NOTE: This is needed strictly for signatures used in email addresses. Some
clients, coughAirmailcough, treat email addresses as being case-insensitive,
and sends the value as all lowercase.
"""
def signature(self, value):
sig = super(_CaseInsensitiveSigner, self).signature(value)
return sig.lower()
def unsign(self, signed_value):
# This unsign is identical to subclass except for the lowercasing
# See: https://github.com/django/django/blob/1.6.11/django/core/signing.py#L165-L172
signed_value = force_str(signed_value)
if self.sep not in signed_value:
raise BadSignature('No "%s" found in value' % self.sep)
value, sig = signed_value.rsplit(self.sep, 1)
if constant_time_compare(sig.lower(), self.signature(value)):
return force_text(value)
raise BadSignature('Signature "%s" does not match' % sig)
signer = _CaseInsensitiveSigner()
def email_to_group_id(address):
"""
Email address should be in the form of:
{group_id}+{signature}@example.com
"""
address = address.split('@', 1)[0]
signed_data = address.replace('+', ':')
return int(force_bytes(signer.unsign(signed_data)))
def group_id_to_email(group_id):
signed_data = signer.sign(str(group_id))
return '@'.join((
signed_data.replace(':', '+'),
options.get('mail.reply-hostname') or get_from_email_domain(),
))
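# Illustrative round trip: group_id_to_email(42) produces an address shaped
# like '42+<signature>@<reply-hostname>', and email_to_group_id() on that
# address returns 42.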
def domain_from_email(email):
email = parseaddr(email)[1]
try:
return email.split('@', 1)[1]
except IndexError:
# The email address is likely malformed or something
return email
# Slightly modified version of Django's
# `django.core.mail.message:make_msgid` because we need
# to override the domain. If we ever upgrade to
# django 1.8, we can/should replace this.
def make_msgid(domain):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
<20020201195627.33539.96671@nightshade.la.mastaler.com>
    The required ``domain`` argument provides the portion of the message id
    after the '@'; unlike the stdlib/Django version, it does not default to
    the locally defined hostname.
"""
timeval = time.time()
utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
pid = os.getpid()
randint = randrange(100000)
msgid = '<%s.%s.%s@%s>' % (utcdate, pid, randint, domain)
return msgid
# cache the domain_from_email calculation
# This is just a tuple of (email, email-domain)
_from_email_domain_cache = (None, None)
def get_from_email_domain():
global _from_email_domain_cache
from_ = options.get('mail.from')
if not _from_email_domain_cache[0] == from_:
_from_email_domain_cache = (from_, domain_from_email(from_))
return _from_email_domain_cache[1]
def get_email_addresses(user_ids, project=None):
pending = set(user_ids)
results = {}
if project:
queryset = UserOption.objects.filter(
project=project,
user__in=pending,
key='mail:email',
)
for option in (o for o in queryset if o.value):
results[option.user_id] = option.value
pending.discard(option.user_id)
if pending:
queryset = UserOption.objects.filter(
user__in=pending,
key='alert_email',
)
for option in (o for o in queryset if o.value):
results[option.user_id] = option.value
pending.discard(option.user_id)
if pending:
queryset = User.objects.filter(pk__in=pending, is_active=True)
for (user_id, email) in queryset.values_list('id', 'email'):
if email:
results[user_id] = email
pending.discard(user_id)
if pending:
logger.warning('Could not resolve email addresses for user IDs in %r, discarding...', pending)
return results
class ListResolver(object):
"""
Manages the generation of RFC 2919 compliant list-id strings from varying
objects types.
"""
class UnregisteredTypeError(Exception):
"""
Error raised when attempting to build a list-id from an unregisted object type.
"""
def __init__(self, namespace, type_handlers):
assert is_valid_dot_atom(namespace)
# The list-id-namespace that will be used when generating the list-id
# string. This should be a domain name under the control of the
# generator (see RFC 2919.)
self.__namespace = namespace
# A mapping of classes to functions that accept an instance of that
# class, returning a tuple of values that will be used to generate the
# list label. Returned values must be valid RFC 2822 dot-atom-text
# values.
self.__type_handlers = type_handlers
def __call__(self, instance):
"""
Build a list-id string from an instance.
Raises ``UnregisteredTypeError`` if there is no registered handler for
the instance type. Raises ``AssertionError`` if a valid list-id string
cannot be generated from the values returned by the type handler.
"""
try:
handler = self.__type_handlers[type(instance)]
except KeyError:
raise self.UnregisteredTypeError(
'Cannot generate mailing list identifier for {!r}'.format(instance)
)
label = '.'.join(map(str, handler(instance)))
assert is_valid_dot_atom(label)
return '{}.{}'.format(label, self.__namespace)
default_list_type_handlers = {
Activity: attrgetter('project.slug', 'project.organization.slug'),
Project: attrgetter('slug', 'organization.slug'),
Group: attrgetter('project.slug', 'organization.slug'),
Event: attrgetter('project.slug', 'organization.slug'),
}
make_listid_from_instance = ListResolver(
options.get('mail.list-namespace'),
default_list_type_handlers,
)
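# Illustrative: for a Project instance this resolves to
# '<project-slug>.<organization-slug>.<mail.list-namespace>', per the
# attrgetter handlers registered above.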
class MessageBuilder(object):
def __init__(self, subject, context=None, template=None, html_template=None,
body=None, html_body=None, headers=None, reference=None,
reply_reference=None, from_email=None, type=None):
assert not (body and template)
assert not (html_body and html_template)
assert context or not (template or html_template)
if headers is None:
headers = {}
self.subject = subject
self.context = context or {}
self.template = template
self.html_template = html_template
self._txt_body = body
self._html_body = html_body
self.headers = headers
self.reference = reference # The object that generated this message
self.reply_reference = reply_reference # The object this message is replying about
self.from_email = from_email or options.get('mail.from')
self._send_to = set()
self.type = type if type else 'generic'
if reference is not None and 'List-Id' not in headers:
try:
headers['List-Id'] = make_listid_from_instance(reference)
except ListResolver.UnregisteredTypeError as error:
logger.debug(str(error))
except AssertionError as error:
logger.warning(str(error))
def __render_html_body(self):
html_body = None
if self.html_template:
html_body = render_to_string(self.html_template, self.context)
else:
html_body = self._html_body
if html_body is not None:
return inline_css(html_body)
def __render_text_body(self):
if self.template:
return render_to_string(self.template, self.context)
return self._txt_body
def add_users(self, user_ids, project=None):
self._send_to.update(
get_email_addresses(user_ids, project).values()
)
def build(self, to, reply_to=None, cc=None, bcc=None):
if self.headers is None:
headers = {}
else:
headers = self.headers.copy()
if options.get('mail.enable-replies') and 'X-Sentry-Reply-To' in headers:
reply_to = headers['X-Sentry-Reply-To']
else:
reply_to = set(reply_to or ())
            reply_to.discard(to)  # `to` may not be present; discard avoids a KeyError
reply_to = ', '.join(reply_to)
if reply_to:
headers.setdefault('Reply-To', reply_to)
# Every message sent needs a unique message id
message_id = make_msgid(get_from_email_domain())
headers.setdefault('Message-Id', message_id)
subject = self.subject
if self.reply_reference is not None:
reference = self.reply_reference
subject = 'Re: %s' % subject
else:
reference = self.reference
if isinstance(reference, Group):
thread, created = GroupEmailThread.objects.get_or_create(
email=to,
group=reference,
defaults={
'project': reference.project,
'msgid': message_id,
},
)
if not created:
headers.setdefault('In-Reply-To', thread.msgid)
headers.setdefault('References', thread.msgid)
msg = EmailMultiAlternatives(
subject=subject,
body=self.__render_text_body(),
from_email=self.from_email,
to=(to,),
cc=cc or (),
bcc=bcc or (),
headers=headers,
)
html_body = self.__render_html_body()
if html_body:
msg.attach_alternative(html_body, 'text/html')
return msg
def get_built_messages(self, to=None, bcc=None):
send_to = set(to or ())
send_to.update(self._send_to)
results = [self.build(to=email, reply_to=send_to, bcc=bcc) for email in send_to if email]
if not results:
logger.debug('Did not build any messages, no users to send to.')
return results
def format_to(self, to):
if not to:
return ''
if len(to) > MAX_RECIPIENTS:
to = to[:MAX_RECIPIENTS] + ['and {} more.'.format(len(to[MAX_RECIPIENTS:]))]
return ', '.join(to)
def send(self, to=None, bcc=None, fail_silently=False):
return send_messages(
self.get_built_messages(to, bcc=bcc),
fail_silently=fail_silently,
)
def send_async(self, to=None, bcc=None):
from sentry.tasks.email import send_email
fmt = options.get('system.logging-format')
messages = self.get_built_messages(to, bcc=bcc)
extra = {
'message_type': self.type
}
log_mail_queued = partial(logger.info, 'mail.queued', extra=extra)
for message in messages:
safe_execute(
send_email.delay,
message=message,
_with_transaction=False,
)
extra['message_id'] = message.extra_headers['Message-Id']
if fmt == LoggingFormat.HUMAN:
                extra['message_to'] = self.format_to(message.to)
log_mail_queued()
elif fmt == LoggingFormat.MACHINE:
for recipient in message.to:
extra['message_to'] = recipient
log_mail_queued()
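# Minimal usage sketch for MessageBuilder (template path and recipient are
# hypothetical, based only on the signatures above):
#   msg = MessageBuilder(
#       subject='Test alert',
#       template='sentry/emails/example.txt',
#       context={'foo': 'bar'},
#   )
#   msg.send(to=['user@example.com'])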
def send_messages(messages, fail_silently=False):
connection = get_connection(fail_silently=fail_silently)
sent = connection.send_messages(messages)
metrics.incr('email.sent', len(messages))
for message in messages:
extra = {'message_id': message.extra_headers['Message-Id']}
logger.info('mail.sent', extra=extra)
return sent
def get_mail_backend():
backend = options.get('mail.backend')
try:
return settings.SENTRY_EMAIL_BACKEND_ALIASES[backend]
except KeyError:
return backend
def get_connection(fail_silently=False):
"""
Gets an SMTP connection using our OptionsStore
"""
return _get_connection(
backend=get_mail_backend(),
host=options.get('mail.host'),
port=options.get('mail.port'),
username=options.get('mail.username'),
password=options.get('mail.password'),
use_tls=options.get('mail.use-tls'),
fail_silently=fail_silently,
)
def send_mail(subject, message, from_email, recipient_list, fail_silently=False):
"""
Wrapper that forces sending mail through our connection.
"""
return _send_mail(
subject, message, from_email, recipient_list,
connection=get_connection(fail_silently=fail_silently),
)
def is_smtp_enabled(backend=None):
"""
Check if the current backend is SMTP based.
"""
if backend is None:
backend = get_mail_backend()
return backend not in settings.SENTRY_SMTP_DISABLED_BACKENDS
class PreviewBackend(BaseEmailBackend):
"""
Email backend that can be used in local development to open messages in the
local mail client as they are sent.
Probably only works on OS X.
"""
def send_messages(self, email_messages):
for message in email_messages:
content = str(message.message())
preview = tempfile.NamedTemporaryFile(
delete=False,
prefix='sentry-email-preview-',
suffix='.eml',
)
try:
preview.write(content)
preview.flush()
finally:
preview.close()
subprocess.check_call(('open', preview.name))
return len(email_messages)
|
mitsuhiko/sentry
|
src/sentry/utils/email.py
|
Python
|
bsd-3-clause
| 15,463 | 0.000776 |
#!/usr/bin/env python
"""plot_softmax_results.py: Plot results of mnist softmax tests."""
from helper_scripts.mnist_read_log import plot_results
import matplotlib.pyplot as plt
# Produce cross entropy and accuracy plots for softmax models.
# Requires the training data for each of the models.
files = [r"""../mnist_softmax_models\softmax_alpha=0.1_keepprob=0.9\log\validation"""
]
scalar_names = ['accuracy_1', 'cross_entropy_1']
ylabels = ['Validation Accuracy', 'Cross Entropy (Validation Set)']
legend = [r'$\alpha=0.1, keep\_prob=0.9$']
plot_results(files, scalar_names, ylabels, legend, 'Softmax Models')
plt.show()
|
hutcho66/imagerecognition
|
softmax_scripts/plot_softmax_results.py
|
Python
|
mit
| 636 | 0.003145 |
# -*- coding: utf-8 -*-
#
# mfp documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mfp'
copyright = u'2014, Simcha Levental'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'visualizerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'visualizer.tex',
u'mfp Documentation',
u'Simcha Levental', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'visualizer', u'mfp Documentation',
[u'Simcha Levental'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'visualizer', u'mfp Documentation',
u'Simcha Levental', 'mfp',
'Mapping and data visualization toolkit.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
levental/visualizer
|
docs/conf.py
|
Python
|
bsd-3-clause
| 7,793 | 0.001155 |
#!/usr/bin/env python2.5
from optparse import OptionParser
from rosettautil.rosetta import rosettaScore
usage = "%prog [options] --term=scoreterm silent files"
parser=OptionParser(usage)
parser.add_option("--term",dest="term",help="score term to use")
(options,args) = parser.parse_args()
if len(args) < 1:
parser.error("you must specify at least one silent file")
#score_gen = scores.score_generator(options.term)
best_models = {} # key is a structure ID, value is a pair in form (tag,score)
for silent_file in args:
#file = silent_file
scores=rosettaScore.SilentScoreTable()
scores.add_file(silent_file)
score_gen = scores.score_generator(options.term)
for tag,score in score_gen:
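        # tags end in a trailing "_<index>"; dropping that suffix yields the model id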
split_tag = tag.split("_")
model_id = "_".join(split_tag[0:len(split_tag)-1])
#file = scores.get_file_from_tag(tag)
try:
(current_file,current_best_tag,current_best_score) = best_models[model_id]
except KeyError:
best_models[model_id] = (silent_file,tag,score)
continue
if score < current_best_score:
#print "changed"
best_models[model_id] = (silent_file,tag,score)
#print best_models
#print silent_file
#print file,score , current_best_score
print "file","tag",options.term
for tag in best_models:
print best_models[tag][0],best_models[tag][1],best_models[tag][2]
|
decarboxy/py_protein_utils
|
scripts/best_models.py
|
Python
|
mit
| 1,426 | 0.018934 |
####
#### Give a report on the "sanity" of the users and groups YAML
#### metadata files.
####
#### Example usage to analyze the usual suspects:
#### python3 sanity-check-users-and-groups.py --help
#### Get report of current problems:
#### python3 ./scripts/sanity-check-users-and-groups.py --users metadata/users.yaml --groups metadata/groups.yaml
#### Attempt to repair file (note that we go through json2yaml as libyaml output does not seem compatible with kwalify):
#### python3 ./scripts/sanity-check-users-and-groups.py --users metadata/users.yaml --groups metadata/groups.yaml --repair --output /tmp/output.json && json2yaml --depth 10 /tmp/output.json > /tmp/users.yaml
#### Check new yaml:
#### kwalify -E -f metadata/users.schema.yaml /tmp/users.yaml
#### Run report on new yaml.
#### reset && python3 ./scripts/sanity-check-users-and-groups.py --users /tmp/users.yaml --groups metadata/groups.yaml
import sys
import argparse
import logging
import yaml
import json
## Logger basic setup.
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger('sanity')
LOGGER.setLevel(logging.WARNING)
## Make sure we exit in a way that will get Jenkins's attention.
DIED_SCREAMING_P = False
def die_screaming(string):
""" Die and take our toys home. """
global DIED_SCREAMING_P
LOGGER.error(string)
DIED_SCREAMING_P = True
#sys.exit(1)
def main():
## Deal with incoming.
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-v', '--verbose', action='store_true',
help='More verbose output')
parser.add_argument('-u', '--users',
help='The users.yaml file to act on')
parser.add_argument('-g', '--groups',
help='The groups.yaml file to act on')
parser.add_argument("-r", "--repair", action="store_true",
help="Attempt to repair groups and update old permissions")
parser.add_argument("-o", "--output",
help="The file to output internal structure to (if repairing)")
args = parser.parse_args()
if args.verbose:
LOGGER.setLevel(logging.INFO)
LOGGER.info('Verbose: on')
## Ensure targets.
if not args.users:
die_screaming('need a users argument')
LOGGER.info('Will operate on users: ' + args.users)
if not args.groups:
die_screaming('need a groups argument')
LOGGER.info('Will operate on groups: ' + args.groups)
## Read.
users = None
with open(args.users) as mhandle:
users = yaml.safe_load(mhandle.read())
groups_linear = None
with open(args.groups) as mhandle:
groups_linear = yaml.safe_load(mhandle.read())
## Switch linear groups to lookup by URI.
groups_lookup = {}
for group in groups_linear:
groups_lookup[group['id']] = group['label']
violations = {
"uri": [],
"groups": [],
}
## Cycle through users and see if we find any violations.
for index, user in enumerate(users):
nick = user.get('nickname', '???')
## Update old authorizations type.
if args.repair:
if user.get("authorizations", {}).get("noctua-go", False):
print('REPAIR?: Update perms for ' + nick)
auths = user["authorizations"]["noctua-go"]
del user["authorizations"]["noctua-go"] # delete old way
user["authorizations"]["noctua"] = {
"go": auths
}
users[index] = user # save new back into list
## Does the user have noctua perms?
if user.get('authorizations', False):
auth = user.get('authorizations', {})
if auth.get('noctua-go', False) or \
(auth.get('noctua', False) and auth['noctua'].get('go', False)):
#print('Has perms: ' + user.get('nickname', '???'))
## 1: If so, do they have a URI?
if not user.get('uri', False):
die_screaming(user.get('nickname', '???') +\
' has no "uri"')
#print(nick + ' has no "uri"')
violations["uri"].append(nick)
else:
## 2: Is it an ORCID?
if user.get('uri', 'NIL').find('orcid') == -1:
die_screaming(user.get('nickname', '???') +\
' "uri" is not an ORCID.')
#print(nick + ' "uri" is not an ORCID.')
violations["uri"].append(nick)
## 3: If so, do they have a populated groups?
if not user.get('groups', False) or len(user["groups"]) == 0:
die_screaming(user.get('nickname', '???') +\
' has no "groups"')
#print(nick + ' has no "groups"')
if user.get("organization", False):
org = user["organization"]
print(nick + " could try org {}".format(org))
matching_groups = list(filter(lambda g: org == g["label"] or org == g["shorthand"], groups_linear))
if len(matching_groups) > 0:
print("REPAIR?: Use group: {}".format(matching_groups[0]["id"]))
if args.repair:
user["groups"] = [matching_groups[0]["id"]]
users[index] = user
else:
violations["groups"].append(nick)
else:
## 4: If so, are all entries in groups?
for gid in user.get('groups'):
if not groups_lookup.get(gid, False):
die_screaming(user.get('nickname', '???') +\
' has mistaken group entry: ' + gid)
#print(nick + ' has mistaken group entry: ' + gid)
violates_both = set(violations["uri"]).intersection(violations["groups"])
just_uri = set(violations["uri"]).difference(violates_both)
just_groups = set(violations["groups"]).difference(violates_both)
## Check privs.
for index, user in enumerate(users):
if user["nickname"] in just_uri or user["nickname"] in just_groups:
# If we have an auth with noctua-go with allow-edit set to True
if user.get("authorizations", {}).get("noctua", {}).get("go", {}).get("allow-edit", False):
print("REPAIR?: Revoke {} noctua-go edit privileges.".format(user["nickname"]))
if args.repair:
del user["authorizations"]
users[index] = user
print("\nNo URI, or no ORCID:")
print("===================")
print("\n".join(just_uri))
print("\nNo Groups:")
print("===================")
print("\n".join(just_groups))
print("\nBoth Bad:")
print("===================")
print("\n".join(violates_both))
#print(json.dumps(users))
#print(yaml.dump(users, default_flow_style=False))
#yaml.dump(data, default_flow_style=False)
if args.output:
with open(args.output, 'w+') as fhandle:
fhandle.write(json.dumps(users, sort_keys=True, indent=4))
## TODO: implement hard checks above later.
if DIED_SCREAMING_P:
print('Errors happened, alert the sheriff.')
sys.exit(1)
else:
print('Non-failing run.')
## You saw it coming...
if __name__ == '__main__':
main()
|
geneontology/go-site
|
scripts/sanity-check-users-and-groups.py
|
Python
|
bsd-3-clause
| 7,704 | 0.007269 |
import requests
from Norman.errors import HttpMethodError
class BaseAPI(object):
"""
"""
_content_type = "application/json"
def __init__(self):
pass
def _json_parser(self, json_response):
response = json_response.json()
return response
def exec_request(self, method, url, data=None):
method_map = {
'GET': requests.get,
'POST': requests.post,
'PUT': requests.put,
'DELETE': requests.delete
}
        payload = data
request = method_map.get(method)
if not request:
raise HttpMethodError(
"Request method not recognised or implemented")
response = request(
url=url, json=payload, verify=True)
return response.content
base = BaseAPI()
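# Illustrative call (hypothetical URL):
#   body = base.exec_request('GET', 'https://api.example.com/status')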
|
Olamyy/Norman
|
Norman/api/base.py
|
Python
|
bsd-3-clause
| 846 | 0.002364 |
from . import NetworkObject
import z3
class ErroneousAclWebProxy (NetworkObject):
"""A caching web proxy which enforces ACLs erroneously.
The idea here was to present something that is deliberately not path independent"""
def _init (self, node, network, context):
super(ErroneousAclWebProxy, self).init_fail(node)
self.proxy = node.z3Node
self.ctx = context
self.constraints = list ()
self.acls = list ()
network.SaneSend(self)
self._webProxyFunctions ()
self._webProxyConstraints ()
@property
def z3Node (self):
return self.proxy
def SetPolicy (self, policy):
"""Wrap add acls"""
self.AddAcls(policy)
def AddAcls(self, acls):
if not isinstance(acls, list):
acls = [acls]
self.acls.extend(acls)
@property
def ACLs (self):
return self.acls
def _addConstraints (self, solver):
self.constraints = list ()
self._webProxyFunctions ()
self._webProxyConstraints ()
solver.add(self.constraints)
def _webProxyConstraints (self):
eh = z3.Const('__webproxy_contraint_eh_%s'%(self.proxy), self.ctx.node)
eh2 = z3.Const('__webproxy_contraint_eh2_%s'%(self.proxy), self.ctx.node)
a = z3.Const('__webproxyfunc_cache_addr_%s'%(self.proxy), self.ctx.address)
i = z3.Const('__webproxyfunc_cache_body_%s'%(self.proxy), z3.IntSort())
p = z3.Const('__webproxy_req_packet_%s'%(self.proxy), self.ctx.packet)
p2 = z3.Const('__webproxy_req_packet_2_%s'%(self.proxy), self.ctx.packet)
p3 = z3.Const('__webproxy_res_packet_%s'%(self.proxy), self.ctx.packet)
e1 = z3.Const('__webproxy_e1_%s'%(self.proxy), self.ctx.node)
e2 = z3.Const('__webproxy_e2_%s'%(self.proxy), self.ctx.node)
e3 = z3.Const('__webproxy_e3_%s'%(self.proxy), self.ctx.node)
e4 = z3.Const('__webproxy_e4_%s'%(self.proxy), self.ctx.node)
e5 = z3.Const('__webproxy_e5_%s'%(self.proxy), self.ctx.node)
e6 = z3.Const('__webproxy_e6_%s'%(self.proxy), self.ctx.node)
# \forall e, p: send(w, e, p) \Rightarrow hostHasAddr(w, p.src)
# \forall e_1, p_1: send(w, e, p_1) \Rightarrow \exists e_2, p_2: recv(e_2, w, p_2) \land
# p_2.origin == p_1.origin \land p_2.dest == p_1.dest \land hostHasAddr(p_2.origin, p_2.src)
self.constraints.append(z3.ForAll([eh, p], z3.Implies(self.ctx.send(self.proxy, eh, p), \
self.ctx.hostHasAddr(self.proxy, self.ctx.packet.src(p)))))
cached_packet = z3.And(self.cached(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)), \
self.ctx.etime(self.proxy, p2, self.ctx.recv_event) > \
self.ctime(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)), \
self.ctx.etime(self.proxy, p, self.ctx.send_event) > \
self.ctx.etime(self.proxy, p2, self.ctx.recv_event), \
self.ctx.packet.body(p) == self.cresp(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)), \
self.ctx.packet.orig_body(p) == self.corigbody(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)), \
self.ctx.packet.dest(p) == self.ctx.packet.src(p2), \
self.ctx.dest_port(p) == self.ctx.src_port(p2), \
self.ctx.src_port(p) == self.ctx.dest_port(p2), \
self.ctx.packet.options(p) == 0, \
self.ctx.packet.origin(p) == self.corigin(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)))
request_constraints = [z3.Not(self.ctx.hostHasAddr(self.proxy, self.ctx.packet.dest(p2))), \
self.ctx.packet.origin(p2) == self.ctx.packet.origin(p),
self.ctx.packet.dest(p2) == self.ctx.packet.dest(p), \
self.ctx.packet.body(p2) == self.ctx.packet.body(p), \
self.ctx.packet.orig_body(p2) == self.ctx.packet.orig_body(p), \
self.ctx.packet.options(p) == 0, \
self.ctx.packet.seq(p2) == self.ctx.packet.seq(p), \
self.ctx.hostHasAddr(self.ctx.packet.origin(p2), self.ctx.packet.src(p2)), \
self.ctx.dest_port(p2) == self.ctx.dest_port(p), \
self.ctx.etime(self.proxy, p, self.ctx.send_event) > \
self.ctx.etime(self.proxy, p2, self.ctx.recv_event), \
self.ctx.hostHasAddr(self.proxy, self.ctx.packet.src(p))]
if len(self.acls) != 0:
acl_constraint = map(lambda (s, d): \
z3.Not(z3.And(self.ctx.packet.src(p2) == s, \
self.ctx.packet.dest(p2) == d)), self.acls)
request_constraints.extend(acl_constraint)
self.constraints.append(z3.ForAll([eh, p], z3.Implies(self.ctx.send(self.proxy, eh, p), \
z3.Or(\
z3.Exists([p2, eh2], \
z3.And(self.ctx.recv(eh2, self.proxy, p2), \
z3.Not(self.ctx.hostHasAddr(self.proxy, self.ctx.packet.src(p2))),\
z3.And(request_constraints))), \
z3.Exists([p2, eh2], \
z3.And(self.ctx.recv(eh2, self.proxy, p2), \
z3.Not(self.ctx.hostHasAddr(self.proxy, self.ctx.packet.src(p2))),\
cached_packet))))))
cache_conditions = \
z3.ForAll([a, i], \
z3.Implies(self.cached(a, i), \
z3.And(\
z3.Not(self.ctx.hostHasAddr (self.proxy, a)), \
z3.Exists([e1, e2, e3, p, p2, p3], \
z3.And(\
self.ctx.recv(e1, self.proxy, p2), \
self.ctx.packet.dest(p2) == a, \
self.ctx.packet.body(p2) == i, \
self.ctx.packet.body(p) == i, \
self.ctx.packet.dest(p) == a, \
self.ctx.dest_port(p) == self.ctx.dest_port(p2), \
self.creqpacket(a, i) == p2, \
self.creqopacket(a, i) == p, \
self.ctime(a, i) > self.ctx.etime(self.proxy, p2, self.ctx.recv_event), \
self.ctx.send(self.proxy, e2, p), \
self.ctime(a, i) > self.ctx.etime(self.proxy, p, self.ctx.send_event), \
self.ctx.recv(e3, self.proxy, p3), \
self.crespacket(a, i) == p3, \
self.ctx.src_port(p3) == self.ctx.dest_port(p), \
self.ctx.dest_port(p3) == self.ctx.src_port(p), \
self.ctx.packet.src(p3) == self.ctx.packet.dest(p), \
self.ctx.packet.dest(p3) == self.ctx.packet.src(p), \
z3.Exists([e5, e6], \
z3.And(
self.ctx.hostHasAddr (e5, a), \
self.ctx.recv(e6, e5, p), \
z3.ForAll([e4], \
z3.Or(self.ctx.etime(e4, p3, self.ctx.send_event) == 0, \
self.ctx.etime(e4, p3, self.ctx.send_event) > self.ctx.etime(e5, p, self.ctx.recv_event))))), \
self.cresp(a, i) == self.ctx.packet.body(p3), \
self.corigbody(a, i) == self.ctx.packet.orig_body(p3), \
self.corigin(a, i) == self.ctx.packet.origin(p3), \
self.ctime(a, i) == self.ctx.etime(self.proxy, p3, self.ctx.recv_event), \
*request_constraints)))))
self.constraints.append(cache_conditions)
def _webProxyFunctions (self):
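        # Uninterpreted functions modeling the cache, keyed on
        # (destination address, request body) -- summarized from their use above:
        #   cached(a, i)  -- whether a response for (a, i) is cached
        #   ctime(a, i)   -- time the entry was cached (0 when not cached)
        #   cresp(a, i)   -- cached response body
        #   corigin(a, i) -- origin node recorded for the cached response
        #   crespacket / creqpacket / creqopacket -- the response/request
        #   packets involved in populating the entry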
self.cached = z3.Function('__webproxy_cached_%s'%(self.proxy), self.ctx.address, z3.IntSort(), z3.BoolSort())
self.ctime = z3.Function('__webproxy_ctime_%s'%(self.proxy), self.ctx.address, z3.IntSort(), z3.IntSort())
self.cresp = z3.Function('__webproxy_cresp_%s'%(self.proxy), self.ctx.address, z3.IntSort(), z3.IntSort())
self.corigbody = z3.Function('__webproxy_corigbody_%s'%(self.proxy), self.ctx.address, z3.IntSort(), z3.IntSort())
self.corigin = z3.Function('__webproxy_corigin_%s'%(self.proxy), self.ctx.address, z3.IntSort(), self.ctx.node)
self.crespacket = z3.Function('__webproxy_crespacket_%s'%(self.proxy), self.ctx.address, z3.IntSort(), self.ctx.packet)
self.creqpacket = z3.Function('__webproxy_creqpacket_%s'%(self.proxy), self.ctx.address, z3.IntSort(), self.ctx.packet)
self.creqopacket = z3.Function('__webproxy_creqopacket_%s'%(self.proxy), self.ctx.address, z3.IntSort(), self.ctx.packet)
#self.corigbody = z3.Function('__webproxy_corigbody_%s'%(self.proxy), self.ctx.address, z3.IntSort(), self.ctx.packet)
a = z3.Const('__webproxyfunc_cache_addr_%s'%(self.proxy), self.ctx.address)
i = z3.Const('__webproxyfunc_cache_body_%s'%(self.proxy), z3.IntSort())
# Model cache as a function
# If not cached, cache time is 0
self.constraints.append(z3.ForAll([a, i], z3.Not(self.cached(a, i)) == (self.ctime(a, i) == 0)))
self.constraints.append(z3.ForAll([a, i], z3.Not(self.cached(a, i)) == (self.cresp(a, i) == 0)))
|
apanda/modeling
|
mcnet/components/erroneous_aclfull_proxy.py
|
Python
|
bsd-3-clause
| 10,082 | 0.019242 |
import sys
import random, string
import os
numberOfEmailsToGenerate = sys.argv[1]
try:
int(numberOfEmailsToGenerate)
print('Generating a CSV with ' + numberOfEmailsToGenerate + ' random emails')
    print('This may take some time if the CSV is large ...')
except:
sys.exit('Please pass a number as the first arg')
numberOfEmailsToGenerate = int(numberOfEmailsToGenerate)
# Delete ./generated.csv if it already exists, then create it fresh so that
# repeated runs do not append to an old file
os.system('rm -f ./generated.csv')
os.system('touch ./generated.csv')
for x in range(0, numberOfEmailsToGenerate):
randomString = ''.join(random.choice(string.lowercase) for i in range(20))
os.system('echo ' + randomString + '@email.com' ' >> ./generated.csv')
|
karuppiah7890/Mail-for-Good
|
utility/generateEmailCsv.py
|
Python
|
bsd-3-clause
| 664 | 0.004518 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Networking tools for network modules only
import re
import ast
import operator
import socket
import json
from itertools import chain
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils import basic
from ansible.module_utils.parsing.convert_bool import boolean
# Backwards compatibility for 3rd party modules
# TODO(pabelanger): With move to ansible.netcommon, we should clean this code
# up and have modules import it directly themselves.
from ansible.module_utils.common.network import ( # noqa: F401
to_bits, is_netmask, is_masklen, to_netmask, to_masklen, to_subnet, to_ipv6_network, VALID_MASKS
)
try:
from jinja2 import Environment, StrictUndefined
from jinja2.exceptions import UndefinedError
HAS_JINJA2 = True
except ImportError:
HAS_JINJA2 = False
OPERATORS = frozenset(['ge', 'gt', 'eq', 'neq', 'lt', 'le'])
ALIASES = frozenset([('min', 'ge'), ('max', 'le'), ('exactly', 'eq'), ('neq', 'ne')])
def to_list(val):
if isinstance(val, (list, tuple, set)):
return list(val)
elif val is not None:
return [val]
else:
return list()
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = to_text(item).split('\n')
yield item
def transform_commands(module):
transform = ComplexList(dict(
command=dict(key=True),
output=dict(),
prompt=dict(type='list'),
answer=dict(type='list'),
newline=dict(type='bool', default=True),
sendonly=dict(type='bool', default=False),
check_all=dict(type='bool', default=False),
), module)
return transform(module.params['commands'])
def sort_list(val):
if isinstance(val, list):
return sorted(val)
return val
class Entity(object):
"""Transforms a dict to with an argument spec
This class will take a dict and apply an Ansible argument spec to the
values. The resulting dict will contain all of the keys in the param
with appropriate values set.
Example::
argument_spec = dict(
command=dict(key=True),
display=dict(default='text', choices=['text', 'json']),
validate=dict(type='bool')
)
transform = Entity(module, argument_spec)
value = dict(command='foo')
result = transform(value)
print result
{'command': 'foo', 'display': 'text', 'validate': None}
Supported argument spec:
* key - specifies how to map a single value to a dict
* read_from - read and apply the argument_spec from the module
* required - a value is required
* type - type of value (uses AnsibleModule type checker)
* fallback - implements fallback function
* choices - set of valid options
* default - default value
"""
def __init__(self, module, attrs=None, args=None, keys=None, from_argspec=False):
args = [] if args is None else args
self._attributes = attrs or {}
self._module = module
for arg in args:
self._attributes[arg] = dict()
if from_argspec:
self._attributes[arg]['read_from'] = arg
if keys and arg in keys:
self._attributes[arg]['key'] = True
self.attr_names = frozenset(self._attributes.keys())
_has_key = False
for name, attr in iteritems(self._attributes):
if attr.get('read_from'):
if attr['read_from'] not in self._module.argument_spec:
module.fail_json(msg='argument %s does not exist' % attr['read_from'])
spec = self._module.argument_spec.get(attr['read_from'])
for key, value in iteritems(spec):
if key not in attr:
attr[key] = value
if attr.get('key'):
if _has_key:
module.fail_json(msg='only one key value can be specified')
_has_key = True
attr['required'] = True
def serialize(self):
return self._attributes
def to_dict(self, value):
obj = {}
for name, attr in iteritems(self._attributes):
if attr.get('key'):
obj[name] = value
else:
obj[name] = attr.get('default')
return obj
def __call__(self, value, strict=True):
if not isinstance(value, dict):
value = self.to_dict(value)
if strict:
unknown = set(value).difference(self.attr_names)
if unknown:
self._module.fail_json(msg='invalid keys: %s' % ','.join(unknown))
for name, attr in iteritems(self._attributes):
if value.get(name) is None:
value[name] = attr.get('default')
if attr.get('fallback') and not value.get(name):
fallback = attr.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
value[name] = fallback_strategy(*fallback_args, **fallback_kwargs)
except basic.AnsibleFallbackNotFound:
continue
if attr.get('required') and value.get(name) is None:
self._module.fail_json(msg='missing required attribute %s' % name)
if 'choices' in attr:
if value[name] not in attr['choices']:
self._module.fail_json(msg='%s must be one of %s, got %s' % (name, ', '.join(attr['choices']), value[name]))
if value[name] is not None:
value_type = attr.get('type', 'str')
type_checker = self._module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
type_checker(value[name])
elif value.get(name):
value[name] = self._module.params[name]
return value
class EntityCollection(Entity):
"""Extends ```Entity``` to handle a list of dicts """
def __call__(self, iterable, strict=True):
if iterable is None:
iterable = [super(EntityCollection, self).__call__(self._module.params, strict)]
if not isinstance(iterable, (list, tuple)):
self._module.fail_json(msg='value must be an iterable')
return [(super(EntityCollection, self).__call__(i, strict)) for i in iterable]
# these two are for backwards compatibility and can be removed once all of the
# modules that use them are updated
class ComplexDict(Entity):
def __init__(self, attrs, module, *args, **kwargs):
super(ComplexDict, self).__init__(module, attrs, *args, **kwargs)
class ComplexList(EntityCollection):
def __init__(self, attrs, module, *args, **kwargs):
super(ComplexList, self).__init__(module, attrs, *args, **kwargs)
def dict_diff(base, comparable):
""" Generate a dict object of differences
This function will compare two dict objects and return the difference
between them as a dict object. For scalar values, the key will reflect
    the updated value. If the key does not exist in `comparable`, then no
key will be returned. For lists, the value in comparable will wholly replace
the value in base for the key. For dicts, the returned value will only
return keys that are different.
:param base: dict object to base the diff on
:param comparable: dict object to compare against base
:returns: new dict object with differences
"""
if not isinstance(base, dict):
raise AssertionError("`base` must be of type <dict>")
if not isinstance(comparable, dict):
if comparable is None:
comparable = dict()
else:
raise AssertionError("`comparable` must be of type <dict>")
updates = dict()
for key, value in iteritems(base):
if isinstance(value, dict):
item = comparable.get(key)
if item is not None:
sub_diff = dict_diff(value, comparable[key])
if sub_diff:
updates[key] = sub_diff
else:
comparable_value = comparable.get(key)
if comparable_value is not None:
if sort_list(base[key]) != sort_list(comparable_value):
updates[key] = comparable_value
for key in set(comparable.keys()).difference(base.keys()):
updates[key] = comparable.get(key)
return updates
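# Illustrative: dict_diff({'a': 1, 'b': {'c': 2}}, {'a': 1, 'b': {'c': 3}, 'd': 4})
# returns {'b': {'c': 3}, 'd': 4}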
def dict_merge(base, other):
""" Return a new dict object that combines base and other
This will create a new dict object that is a combination of the key/value
pairs from base and other. When both keys exist, the value will be
selected from other. If the value is a list object, the two lists will
be combined and duplicate entries removed.
:param base: dict object to serve as base
:param other: dict object to combine with base
:returns: new combined dict object
"""
if not isinstance(base, dict):
raise AssertionError("`base` must be of type <dict>")
if not isinstance(other, dict):
raise AssertionError("`other` must be of type <dict>")
combined = dict()
for key, value in iteritems(base):
if isinstance(value, dict):
if key in other:
item = other.get(key)
if item is not None:
if isinstance(other[key], Mapping):
combined[key] = dict_merge(value, other[key])
else:
combined[key] = other[key]
else:
combined[key] = item
else:
combined[key] = value
elif isinstance(value, list):
if key in other:
item = other.get(key)
if item is not None:
try:
combined[key] = list(set(chain(value, item)))
except TypeError:
value.extend([i for i in item if i not in value])
combined[key] = value
else:
combined[key] = item
else:
combined[key] = value
else:
if key in other:
other_value = other.get(key)
if other_value is not None:
if sort_list(base[key]) != sort_list(other_value):
combined[key] = other_value
else:
combined[key] = value
else:
combined[key] = other_value
else:
combined[key] = value
for key in set(other.keys()).difference(base.keys()):
combined[key] = other.get(key)
return combined
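# Illustrative usage sketch (hypothetical values; note that the order of a
# merged list is not guaranteed, since duplicates are removed via a set):
# >>> dict_merge({'a': 1, 'b': [1, 2], 'c': {'d': 1}},
# ...            {'a': 2, 'b': [2, 3], 'c': {'e': 2}})
# {'a': 2, 'b': [1, 2, 3], 'c': {'d': 1, 'e': 2}}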
def param_list_to_dict(param_list, unique_key="name", remove_key=True):
"""Rotates a list of dictionaries to be a dictionary of dictionaries.
:param param_list: The aforementioned list of dictionaries
:param unique_key: The name of a key which is present and unique in all of param_list's dictionaries. The value
behind this key will be the key each dictionary can be found at in the new root dictionary
:param remove_key: If True, remove unique_key from the individual dictionaries before returning.
"""
param_dict = {}
for params in param_list:
params = params.copy()
if remove_key:
name = params.pop(unique_key)
else:
name = params.get(unique_key)
param_dict[name] = params
return param_dict
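# Illustrative usage sketch (hypothetical values):
# >>> param_list_to_dict([{'name': 'eth0', 'mtu': 1500},
# ...                     {'name': 'eth1', 'mtu': 9000}])
# {'eth0': {'mtu': 1500}, 'eth1': {'mtu': 9000}}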
def conditional(expr, val, cast=None):
match = re.match(r'^(.+)\((.+)\)$', str(expr), re.I)
if match:
op, arg = match.groups()
else:
op = 'eq'
if ' ' in str(expr):
raise AssertionError('invalid expression: cannot contain spaces')
arg = expr
if cast is None and val is not None:
arg = type(val)(arg)
elif callable(cast):
arg = cast(arg)
val = cast(val)
op = next((oper for alias, oper in ALIASES if op == alias), op)
if not hasattr(operator, op) and op not in OPERATORS:
raise ValueError('unknown operator: %s' % op)
func = getattr(operator, op)
return func(val, arg)
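# Illustrative usage sketch (hypothetical values; relies on the ALIASES and
# OPERATORS tables defined earlier in this module):
# >>> conditional('ge(2)', 5)
# True
# >>> conditional('eq(up)', 'up')
# True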
def ternary(value, true_val, false_val):
''' value ? true_val : false_val '''
if value:
return true_val
else:
return false_val
def remove_default_spec(spec):
for item in spec:
if 'default' in spec[item]:
del spec[item]['default']
def validate_ip_address(address):
try:
socket.inet_aton(address)
except socket.error:
return False
return address.count('.') == 3
def validate_ip_v6_address(address):
try:
socket.inet_pton(socket.AF_INET6, address)
except socket.error:
return False
return True
def validate_prefix(prefix):
if prefix and not 0 <= int(prefix) <= 32:
return False
return True
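# Illustrative usage sketch (hypothetical values):
# >>> validate_ip_address('192.0.2.1'), validate_ip_address('999.0.2.1')
# (True, False)
# >>> validate_ip_v6_address('2001:db8::1')
# True
# >>> validate_prefix('24'), validate_prefix('33')
# (True, False)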
def load_provider(spec, args):
provider = args.get('provider') or {}
for key, value in iteritems(spec):
if key not in provider:
if 'fallback' in value:
provider[key] = _fallback(value['fallback'])
elif 'default' in value:
provider[key] = value['default']
else:
provider[key] = None
if 'authorize' in provider:
        # Coerce authorize to a boolean if a string has somehow snuck in.
provider['authorize'] = boolean(provider['authorize'] or False)
args['provider'] = provider
return provider
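# Illustrative usage sketch (hypothetical spec/args):
# >>> spec = {'host': {}, 'port': {'default': 22}, 'password': {}}
# >>> args = {'provider': {'host': 'switch1'}}
# >>> load_provider(spec, args)
# {'host': 'switch1', 'port': 22, 'password': None}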
def _fallback(fallback):
strategy = fallback[0]
args = []
kwargs = {}
for item in fallback[1:]:
if isinstance(item, dict):
kwargs = item
else:
args = item
try:
return strategy(*args, **kwargs)
except basic.AnsibleFallbackNotFound:
pass
def generate_dict(spec):
"""
Generate dictionary which is in sync with argspec
:param spec: A dictionary that is the argspec of the module
:rtype: A dictionary
:returns: A dictionary in sync with argspec with default value
"""
obj = {}
if not spec:
return obj
for key, val in iteritems(spec):
if 'default' in val:
dct = {key: val['default']}
elif 'type' in val and val['type'] == 'dict':
dct = {key: generate_dict(val['options'])}
else:
dct = {key: None}
obj.update(dct)
return obj
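# Illustrative usage sketch (hypothetical argspec):
# >>> generate_dict({'mtu': {'default': 1500},
# ...                'vlan': {'type': 'dict', 'options': {'id': {'type': 'int'}}},
# ...                'name': {'type': 'str'}})
# {'mtu': 1500, 'vlan': {'id': None}, 'name': None}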
def parse_conf_arg(cfg, arg):
"""
Parse config based on argument
:param cfg: A text string which is a line of configuration.
:param arg: A text string which is to be matched.
:rtype: A text string
:returns: A text string if match is found
"""
match = re.search(r'%s (.+)(\n|$)' % arg, cfg, re.M)
if match:
result = match.group(1).strip()
else:
result = None
return result
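# Illustrative usage sketch (hypothetical config text):
# >>> parse_conf_arg('interface Ethernet1\n description uplink\n', 'description')
# 'uplink'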
def parse_conf_cmd_arg(cfg, cmd, res1, res2=None, delete_str='no'):
"""
Parse config based on command
:param cfg: A text string which is a line of configuration.
:param cmd: A text string which is the command to be matched
:param res1: A text string to be returned if the command is present
:param res2: A text string to be returned if the negate command
is present
:param delete_str: A text string to identify the start of the
negate command
:rtype: A text string
:returns: A text string if match is found
"""
match = re.search(r'\n\s+%s(\n|$)' % cmd, cfg)
if match:
return res1
if res2 is not None:
match = re.search(r'\n\s+%s %s(\n|$)' % (delete_str, cmd), cfg)
if match:
return res2
return None
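# Illustrative usage sketch (hypothetical config text):
# >>> parse_conf_cmd_arg('interface Ethernet1\n   shutdown\n',
# ...                    'shutdown', 'down', res2='up')
# 'down'
# >>> parse_conf_cmd_arg('interface Ethernet1\n   no shutdown\n',
# ...                    'shutdown', 'down', res2='up')
# 'up'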
def get_xml_conf_arg(cfg, path, data='text'):
"""
:param cfg: The top level configuration lxml Element tree object
:param path: The relative xpath w.r.t to top level element (cfg)
to be searched in the xml hierarchy
:param data: The type of data to be returned for the matched xml node.
Valid values are text, tag, attrib, with default as text.
:return: Returns the required type for the matched xml node or else None
"""
match = cfg.xpath(path)
if len(match):
if data == 'tag':
result = getattr(match[0], 'tag')
elif data == 'attrib':
result = getattr(match[0], 'attrib')
else:
result = getattr(match[0], 'text')
else:
result = None
return result
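# Illustrative usage sketch (hypothetical XML; uses lxml, which provides the
# xpath support this function expects):
# >>> from lxml import etree
# >>> cfg = etree.fromstring('<config><mtu>1500</mtu></config>')
# >>> get_xml_conf_arg(cfg, 'mtu')
# '1500'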
def remove_empties(cfg_dict):
"""
Generate final config dictionary
:param cfg_dict: A dictionary parsed in the facts system
:rtype: A dictionary
:returns: A dictionary by eliminating keys that have null values
"""
final_cfg = {}
if not cfg_dict:
return final_cfg
for key, val in iteritems(cfg_dict):
dct = None
if isinstance(val, dict):
child_val = remove_empties(val)
if child_val:
dct = {key: child_val}
elif (isinstance(val, list) and val
and all([isinstance(x, dict) for x in val])):
child_val = [remove_empties(x) for x in val]
if child_val:
dct = {key: child_val}
elif val not in [None, [], {}, (), '']:
dct = {key: val}
if dct:
final_cfg.update(dct)
return final_cfg
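# Illustrative usage sketch (hypothetical values):
# >>> remove_empties({'name': 'eth0', 'desc': None, 'vlans': [],
# ...                 'opts': {'speed': '', 'mtu': 1500}})
# {'name': 'eth0', 'opts': {'mtu': 1500}}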
def validate_config(spec, data):
"""
    Validate the input data against the AnsibleModule spec format
    :param spec: Ansible argument spec
    :param data: Data to be validated
    :return: Validated and type-coerced data
"""
params = basic._ANSIBLE_ARGS
basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': data}))
validated_data = basic.AnsibleModule(spec).params
basic._ANSIBLE_ARGS = params
return validated_data
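# Illustrative usage sketch (hypothetical spec/data): validate and type-coerce
# candidate data against an argspec without running a full module:
# spec = {'mtu': dict(type='int', default=1500)}
# validate_config(spec, {'mtu': '9000'})  # -> {'mtu': 9000}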
def search_obj_in_list(name, lst, key='name'):
for item in lst:
if item.get(key) == name:
return item
return None
class Template:
def __init__(self):
if not HAS_JINJA2:
raise ImportError("jinja2 is required but does not appear to be installed. "
"It can be installed using `pip install jinja2`")
self.env = Environment(undefined=StrictUndefined)
self.env.filters.update({'ternary': ternary})
def __call__(self, value, variables=None, fail_on_undefined=True):
variables = variables or {}
if not self.contains_vars(value):
return value
try:
value = self.env.from_string(value).render(variables)
except UndefinedError:
if not fail_on_undefined:
return None
raise
if value:
try:
return ast.literal_eval(value)
except Exception:
return str(value)
else:
return None
def contains_vars(self, data):
if isinstance(data, string_types):
for marker in (self.env.block_start_string, self.env.variable_start_string, self.env.comment_start_string):
if marker in data:
return True
return False
|
kustodian/ansible
|
lib/ansible/module_utils/network/common/utils.py
|
Python
|
gpl-3.0
| 21,313 | 0.001314 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_storage_class import V1beta1StorageClass
class TestV1beta1StorageClass(unittest.TestCase):
""" V1beta1StorageClass unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1StorageClass(self):
"""
Test V1beta1StorageClass
"""
model = kubernetes.client.models.v1beta1_storage_class.V1beta1StorageClass()
if __name__ == '__main__':
unittest.main()
|
djkonro/client-python
|
kubernetes/test/test_v1beta1_storage_class.py
|
Python
|
apache-2.0
| 891 | 0.003367 |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from .....testing import assert_equal
from ..histogrammatching import HistogramMatching
def test_HistogramMatching_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputVolume=dict(argstr='%s',
position=-3,
),
numberOfHistogramLevels=dict(argstr='--numberOfHistogramLevels %d',
),
numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d',
),
outputVolume=dict(argstr='%s',
hash_files=False,
position=-1,
),
referenceVolume=dict(argstr='%s',
position=-2,
),
terminal_output=dict(nohash=True,
),
threshold=dict(argstr='--threshold ',
),
)
inputs = HistogramMatching.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_HistogramMatching_outputs():
output_map = dict(outputVolume=dict(position=-1,
),
)
outputs = HistogramMatching.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
FCP-INDI/nipype
|
nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py
|
Python
|
bsd-3-clause
| 1,372 | 0.019679 |
from ..errors import ErrorFolderNotFound, ErrorInvalidOperation, ErrorNoPublicFolderReplicaAvailable
from ..util import MNS, create_element
from .common import EWSAccountService, folder_ids_element, parse_folder_elem, shape_element
class GetFolder(EWSAccountService):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/getfolder-operation"""
SERVICE_NAME = "GetFolder"
element_container_name = f"{{{MNS}}}Folders"
ERRORS_TO_CATCH_IN_RESPONSE = EWSAccountService.ERRORS_TO_CATCH_IN_RESPONSE + (
ErrorFolderNotFound,
ErrorNoPublicFolderReplicaAvailable,
ErrorInvalidOperation,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.folders = [] # A hack to communicate parsing args to _elems_to_objs()
def call(self, folders, additional_fields, shape):
"""Take a folder ID and returns the full information for that folder.
:param folders: a list of Folder objects
:param additional_fields: the extra fields that should be returned with the folder, as FieldPath objects
:param shape: The set of attributes to return
:return: XML elements for the folders, in stable order
"""
# We can't easily find the correct folder class from the returned XML. Instead, return objects with the same
# class as the folder instance it was requested with.
self.folders = list(folders) # Convert to a list, in case 'folders' is a generator. We're iterating twice.
return self._elems_to_objs(
self._chunked_get_elements(
self.get_payload,
items=self.folders,
additional_fields=additional_fields,
shape=shape,
)
)
def _elems_to_objs(self, elems):
for folder, elem in zip(self.folders, elems):
if isinstance(elem, Exception):
yield elem
continue
yield parse_folder_elem(elem=elem, folder=folder, account=self.account)
def get_payload(self, folders, additional_fields, shape):
payload = create_element(f"m:{self.SERVICE_NAME}")
payload.append(
shape_element(
tag="m:FolderShape", shape=shape, additional_fields=additional_fields, version=self.account.version
)
)
payload.append(folder_ids_element(folders=folders, version=self.account.version))
return payload
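# Illustrative usage sketch (not part of the original module; `account` is an
# assumed authenticated exchangelib Account and 'IdOnly' an assumed shape):
# folders = list(GetFolder(account=account).call(
#     folders=[account.inbox, account.sent],
#     additional_fields=[],
#     shape='IdOnly',
# ))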
|
ecederstrand/exchangelib
|
exchangelib/services/get_folder.py
|
Python
|
bsd-2-clause
| 2,503 | 0.004395 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Bar(Model):
"""
The URIs that are used to perform a retrieval of a public blob, queue or
table object.
:param recursive_point: Recursive Endpoints
:type recursive_point: :class:`Endpoints
<fixtures.acceptancetestsstoragemanagementclient.models.Endpoints>`
"""
_attribute_map = {
'recursive_point': {'key': 'RecursivePoint', 'type': 'Endpoints'},
}
def __init__(self, recursive_point=None, **kwargs):
self.recursive_point = recursive_point
|
jkonecki/autorest
|
AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/StorageManagementClient/storagemanagementclient/models/bar.py
|
Python
|
mit
| 1,021 | 0.000979 |
#
# gPrime - a web-based genealogy program
#
# Copyright (c) 2015 Gramps Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from .forms import Form
class NameForm(Form):
"""
A form for listing, viewing, and editing user settings.
"""
table = "Person"
def __init__(self, handler, instance, handle, row):
super().__init__(handler, instance)
self.tview = self._("Name")
self.view = "Name"
self.row = row
self.handle = handle
if int(row) == 1:
self.path = "primary_name"
else:
self.path = "alternate_name.%s" % (int(self.row) - 2)
self.edit_fields = []
if int(row) == 1:
for field in [
'primary_name.type',
'primary_name.first_name',
'primary_name.call',
'primary_name.nick',
'primary_name.famnick',
'primary_name.private',
'primary_name.date',
'primary_name.suffix',
'primary_name.title',
'primary_name.group_as',
'primary_name.sort_as',
'primary_name.display_as',
]:
self.edit_fields.append(field)
else:
for field in [
'alternate_names.%s.type',
'alternate_names.%s.first_name',
'alternate_names.%s.call',
'alternate_names.%s.nick',
'alternate_names.%s.famnick',
'alternate_names.%s.private',
'alternate_names.%s.date',
'alternate_names.%s.suffix',
'alternate_names.%s.title',
'alternate_names.%s.group_as',
'alternate_names.%s.sort_as',
'alternate_names.%s.display_as',
]:
self.edit_fields.append(field % (int(self.row) - 2))
|
sam-m888/gprime
|
gprime/app/forms/nameform.py
|
Python
|
gpl-2.0
| 2,701 | 0.00074 |
import calendar
import os
import subprocess
import tempfile
import shutil
import stat
import datetime
import time
import csv
from collections import defaultdict
from io import StringIO
import pytz
from pyquery import PyQuery as pq
from django import http
from django.core.cache import cache
from django.utils.timezone import utc
from django.shortcuts import render, get_object_or_404, redirect
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.conf import settings
from django.db import transaction
from django.contrib import messages
from funfactory.urlresolvers import reverse
from sorl.thumbnail import get_thumbnail
from . import forms
from .models import Visitor, Location, VisitorCount
from .utils import json_view, non_mortals_required
from peekaboo.base.utils import ajax_login_required
def robots_txt(request):
return http.HttpResponse(
'User-agent: *\n'
'%s: /' % ('Allow' if settings.ENGAGE_ROBOTS else 'Disallow'),
mimetype='text/plain',
)
@login_required
def home(request):
context = {
'count_users': User.objects.all().count(),
'count_locations': Location.objects.all().count(),
}
return render(request, 'main/home.html', context)
@non_mortals_required
def log_start(request):
data = {}
return render(request, 'main/log-start.html', data)
@non_mortals_required
def log(request, location):
location = get_object_or_404(Location, slug=location)
data = {
'edit_form': forms.SignInForm(),
'location': location,
}
request.session['default-location'] = location.slug
return render(request, 'main/log.html', data)
@json_view
@non_mortals_required
@ajax_login_required
def log_entries(request, location):
data = {
'latest': None,
'created': [],
'modified': []
}
location = get_object_or_404(Location, slug=location)
thumbnail_geometry = request.GET.get('thumbnail_geometry', '100')
def format_date(dt):
dt_date = dt.strftime('%m/%d/%Y')
dt_time = dt.strftime('%H:%M')
dt_tz = dt.tzname() or 'UTC'
return ' '.join([dt_date, dt_time, dt_tz])
qs = Visitor.objects.filter(location=location)
if request.GET.get('latest'):
latest = datetime.datetime.utcfromtimestamp(
float(request.GET['latest'])
)
latest = latest.replace(tzinfo=utc)
# because latest is potentially lacking in microseconds
# add some to prevent fetching it again
latest += datetime.timedelta(seconds=1)
recently_created = qs.filter(created__gte=latest)
else:
latest = None
recently_created = qs
def make_row(visitor):
row = {
'id': visitor.pk,
'created': format_date(visitor.created),
'created_iso': visitor.created.isoformat(),
'modified_iso': visitor.modified.isoformat(),
'job_title': visitor.job_title,
'name': visitor.get_name(formal=True),
'thumbnail': None,
'visiting': visitor.visiting,
'company': visitor.company,
}
if visitor.picture and os.path.isfile(visitor.picture.path):
thumbnail = get_thumbnail(
visitor.picture,
thumbnail_geometry
)
row['thumbnail'] = {
'url': thumbnail.url,
'width': thumbnail.width,
'height': thumbnail.height,
}
row['picture_url'] = (
reverse('main:log_entry_picture', args=(visitor.pk,)) +
'?width=600&height=400'
)
return row
first = None
for visitor in recently_created.order_by('-created')[:100]:
row = make_row(visitor)
data['created'].append(row)
if first is None:
first = max(visitor.created, visitor.modified)
data['created'].reverse()
# now how about those recently updated
if latest:
recently_modified = qs.filter(
created__lt=latest,
modified__gt=latest
)
for visitor in recently_modified.order_by('modified'):
row = make_row(visitor)
assert row not in data['created']
data['modified'].append(row)
first = visitor.modified
if first:
data['latest'] = calendar.timegm(first.utctimetuple())
# from time import sleep
# sleep(1)
# from pprint import pprint
# pprint(data)
return data
@json_view
@csrf_exempt
@non_mortals_required
def log_entry(request, pk):
visitor = get_object_or_404(Visitor, pk=pk)
thumbnail_geometry = request.GET.get('thumbnail_geometry', '100')
if request.method == 'POST':
form = forms.SignInEditForm(request.POST, instance=visitor)
if form.is_valid():
form.save()
data = form.cleaned_data
data['name'] = visitor.get_name(formal=True)
else:
return {'errors': form.errors}
else:
data = {
'first_name': visitor.first_name,
'last_name': visitor.last_name,
'job_title': visitor.job_title,
'company': visitor.company,
'visiting': visitor.visiting,
'thumbnail_url': None,
}
if visitor.picture:
thumbnail = get_thumbnail(
visitor.picture,
thumbnail_geometry
)
data['thumbnail'] = {
'url': thumbnail.url,
'width': thumbnail.width,
'height': thumbnail.height,
}
return data
@json_view
@csrf_exempt
@non_mortals_required
def log_entry_picture(request, pk, format):
visitor = get_object_or_404(Visitor, pk=pk)
if not visitor.picture:
return http.HttpResponseBadRequest("Doesn't have a picture")
geometry = (
'%sx%s' %
(request.GET.get('width', 600),
         request.GET.get('height', 500))
)
picture = get_thumbnail(
visitor.picture,
geometry
)
return http.HttpResponse(picture.read(), mimetype='image/jpeg')
@login_required
@json_view
@require_POST
@non_mortals_required
def delete_entry(request, pk):
visitor = get_object_or_404(Visitor, pk=pk)
visitor.delete()
# XXX delete all images too??
return {'deleted': True}
@non_mortals_required
def print_entry(request, pk):
visitor = get_object_or_404(Visitor, pk=pk)
data = {
'visitor': visitor,
'print': request.GET.get('print', False)
}
response = render(request, 'main/print-entry.html', data)
if request.GET.get('iframe'):
response['X-Frame-Options'] = 'SAMEORIGIN'
response["Access-Control-Allow-Origin"] = "*"
return response
@non_mortals_required
def print_entry_pdf(request, pk):
visitor = get_object_or_404(Visitor, pk=pk)
data = {
'visitor': visitor,
}
response = render(request, 'main/print-entry.pdf.html', data)
html = response.content
tmp_dir = os.path.join(
tempfile.gettempdir(),
'peekaboopdfs'
)
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
input_file = os.path.join(tmp_dir, 'visitor-%s.html' % visitor.pk)
output_file = os.path.join(tmp_dir, 'visitor-%s.debug.pdf' % visitor.pk)
if os.path.isfile(output_file):
os.remove(output_file)
dom = pq(html)
copied_media_files = []
for img in dom('img'):
src = img.attrib['src']
if settings.STATIC_URL in src:
source = os.path.join(
settings.STATIC_ROOT,
src.replace(settings.STATIC_URL, '')
)
else:
source = os.path.join(
settings.MEDIA_ROOT,
src.replace(settings.MEDIA_URL, '')
)
if not os.path.isfile(source):
raise IOError("Couldn't find %s (Tried: %s)" % (
img.attrib['src'],
source
))
filename = os.path.basename(source)
destination = os.path.join(
tmp_dir, filename
)
if os.path.isfile(destination):
age = time.time() - os.stat(destination)[stat.ST_MTIME]
if settings.DEBUG or age > 60 * 60:
shutil.copyfile(source, destination)
else:
shutil.copyfile(source, destination)
if settings.STATIC_URL not in src:
copied_media_files.append(destination)
html = html.replace(img.attrib['src'], filename)
with open(input_file, 'w') as f:
f.write(html)
_here = os.path.dirname(__file__)
rasterize_full_path = os.path.join(
_here,
'rasterize.js'
)
pdf_program = getattr(
settings,
'PDF_PROGRAM',
'phantomjs --debug=true %s' % rasterize_full_path
)
if 'rasterize.js' in pdf_program:
cmd = (
pdf_program +
' "%(input_file)s"'
' "%(output_file)s"'
' "10.2cm*5.7cm"'
)
else:
raise NotImplementedError(pdf_program)
cmd = cmd % {
'input_file': input_file,
'output_file': output_file,
'orientation': 'landscape',
}
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = proc.communicate()
if settings.DEBUG_PDF_PROGRAM:
stderr_output_file = output_file + '.stderr.log'
with open(stderr_output_file, 'w') as f:
f.write('COMMAND:\n')
f.write(cmd)
f.write('\n\n')
f.write(err)
stdout_output_file = output_file + '.stdout.log'
with open(stdout_output_file, 'w') as f:
f.write('COMMAND:\n')
f.write(cmd)
f.write('\n\n')
f.write(err)
print "For your debugging pleasures, created..."
print input_file
print output_file
print stdout_output_file
print stderr_output_file
print
if os.path.isfile(output_file):
# response['Content-Disposition'] = (
# 'filename="%s.pdf"' % os.path.basename(output_file)
# )
response = http.HttpResponse(mimetype='application/pdf')
# so we can print from an iframe
response['X-Frame-Options'] = 'SAMEORIGIN'
response.write(open(output_file).read())
if not settings.DEBUG_PDF_PROGRAM:
os.remove(input_file)
os.remove(output_file)
for media_file in copied_media_files:
os.remove(media_file)
return response
return http.HttpResponse("PDF could not be created")
@non_mortals_required
def stats_start(request):
data = {}
return render(request, 'main/stats-start.html', data)
@non_mortals_required
def stats(request, location=None):
if location == 'ALL':
location = None
if location:
location = get_object_or_404(Location, slug=location)
request.session['default-location'] = location.slug
_months = defaultdict(int)
visitors = VisitorCount.objects.all()
active_visitors = Visitor.objects.all()
if location:
visitors = visitors.filter(location=location)
active_visitors = active_visitors.filter(location=location)
_rows = defaultdict(list)
for v in active_visitors.order_by('created'):
_row_key = v.created.strftime('%Y-%m-%d')
before = _rows.get(_row_key, {'count': 0})
_rows[_row_key] = {
'year': v.created.year,
'month': v.created.month,
'day': v.created.day,
'date': v.created,
'count': 1 + before['count']
}
for vc in visitors.order_by('year', 'month', 'day'):
date = datetime.date(vc.year, vc.month, vc.day)
count = vc.count
_row_key = date.strftime('%Y-%m-%d')
before = _rows.get(_row_key, {'count': 0})
count = before['count'] + vc.count
_rows[_row_key] = {
'year': vc.year,
'month': vc.month,
'day': vc.day,
'date': date,
'count': count,
}
_month_key = date.strftime('%Y-%m')
_months[_month_key] += count
for v in active_visitors.order_by('created'):
_month_key = v.created.strftime('%Y-%m')
_months[_month_key] += 1
months = []
for key in sorted(_months.keys()):
y, m = [int(x) for x in key.split('-')]
date = datetime.date(y, m, 1)
months.append({
'year': date.year,
'month': date.month,
'date': date,
'count': _months[key],
})
rows = []
for _row_key in sorted(_rows):
rows.append(_rows[_row_key])
context = {
'location': location,
'days': int(settings.RECYCLE_MINIMUM_HOURS / 24.0),
'rows': rows,
'months': months,
}
return render(request, 'main/stats.html', context)
def debugger(request):
r = http.HttpResponse()
r.write('absolute_uri: %s\n' % request.build_absolute_uri())
r.write('DEBUG: %s\n\n' % settings.DEBUG)
if request.is_secure():
r.write('request.is_secure()\n')
r.write(
'Expect SITE_URL to contain HTTPS: %s\n' % (
settings.SITE_URL,
)
)
r.write(
'Expect SESSION_COOKIE_SECURE to be True: %s\n' % (
settings.SESSION_COOKIE_SECURE,
)
)
else:
r.write('NOT request.is_secure()\n')
r.write(
'Expect SITE_URL to contain HTTP: %s\n' % (
settings.SITE_URL,
)
)
r.write(
'Expect SESSION_COOKIE_SECURE to be False: %s\n' % (
settings.SESSION_COOKIE_SECURE,
)
)
if cache.get('foo'):
r.write('\nCache seems to work!\n')
else:
r.write('\nReload to see if cache works\n')
cache.set('foo', 'bar', 60)
r['content-type'] = 'text/plain'
return r
@transaction.commit_on_success
@non_mortals_required
def csv_upload(request):
context = {}
def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),
dialect=dialect, **kwargs)
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
yield [unicode(cell, 'utf-8') for cell in row]
def utf_8_encoder(unicode_csv_data):
for line in unicode_csv_data:
yield line.encode('utf-8')
if request.method == 'POST':
form = forms.CSVUploadForm(request.POST, request.FILES)
if form.is_valid():
created = 0
location = form.cleaned_data['location']
tz = pytz.timezone(location.timezone)
if form.cleaned_data['format'] == 'eventbrite':
stream = StringIO(
unicode(form.cleaned_data['file'].read(), 'utf-8'),
newline='\r'
)
reader = unicode_csv_reader(stream)
first = True
for i, row in enumerate(reader):
if first:
first = False
continue
visitor = Visitor(
location=location,
first_name=row[0], # Name
job_title=row[2], # Title
)
if form.cleaned_data['date']:
date = form.cleaned_data['date']
date = date.replace(tzinfo=None)
date = tz.localize(date)
# Stagger the entries by 1 second each
# so they are loaded in the order they appeared
# in the CSV.
visitor.created = date + datetime.timedelta(seconds=i)
visitor.save()
created += 1
else:
raise NotImplementedError(form.cleaned_data['format'])
messages.success(
request,
'Created %d records from the CSV upload' % (created,)
)
return redirect('main:home')
else:
initial = {
'format': 'eventbrite', # will change once there are more choices
}
if request.session.get('default-location'):
try:
initial['location'] = Location.objects.get(
slug=request.session['default-location']
).id
except Location.DoesNotExist:
pass
form = forms.CSVUploadForm(initial=initial)
context['form'] = form
return render(request, 'main/csv_upload.html', context)
|
mozilla/peekaboo
|
peekaboo/main/views.py
|
Python
|
mpl-2.0
| 17,096 | 0 |
"""
Progress Tab Serializers
"""
from rest_framework import serializers
from rest_framework.reverse import reverse
class GradedTotalSerializer(serializers.Serializer):
earned = serializers.FloatField()
possible = serializers.FloatField()
class SubsectionSerializer(serializers.Serializer):
display_name = serializers.CharField()
due = serializers.DateTimeField()
format = serializers.CharField()
graded = serializers.BooleanField()
graded_total = GradedTotalSerializer()
# TODO: override serializer
percent_graded = serializers.FloatField()
problem_scores = serializers.SerializerMethodField()
show_correctness = serializers.CharField()
show_grades = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
def get_url(self, subsection):
relative_path = reverse('jump_to', args=[self.context['course_key'], subsection.location])
request = self.context['request']
return request.build_absolute_uri(relative_path)
def get_problem_scores(self, subsection):
problem_scores = [
{
'earned': score.earned,
'possible': score.possible,
}
for score in subsection.problem_scores.values()
]
return problem_scores
def get_show_grades(self, subsection):
return subsection.show_grades(self.context['staff_access'])
class ChapterSerializer(serializers.Serializer):
"""
Serializer for chapters in coursewaresummary
"""
display_name = serializers.CharField()
subsections = SubsectionSerializer(source='sections', many=True)
class CertificateDataSerializer(serializers.Serializer):
cert_status = serializers.CharField()
cert_web_view_url = serializers.CharField()
download_url = serializers.CharField()
msg = serializers.CharField()
title = serializers.CharField()
class CreditRequirementSerializer(serializers.Serializer):
"""
Serializer for credit requirement objects
"""
display_name = serializers.CharField()
min_grade = serializers.SerializerMethodField()
status = serializers.CharField()
status_date = serializers.DateTimeField()
def get_min_grade(self, requirement):
if requirement['namespace'] == 'grade':
return requirement['criteria']['min_grade'] * 100
else:
return None
class CreditCourseRequirementsSerializer(serializers.Serializer):
"""
Serializer for credit_course_requirements
"""
dashboard_url = serializers.SerializerMethodField()
eligibility_status = serializers.CharField()
requirements = CreditRequirementSerializer(many=True)
def get_dashboard_url(self, _):
relative_path = reverse('dashboard')
request = self.context['request']
return request.build_absolute_uri(relative_path)
class VerificationDataSerializer(serializers.Serializer):
"""
Serializer for verification data object
"""
link = serializers.URLField()
status = serializers.CharField()
status_date = serializers.DateTimeField()
class ProgressTabSerializer(serializers.Serializer):
"""
Serializer for progress tab
"""
certificate_data = CertificateDataSerializer()
credit_course_requirements = CreditCourseRequirementsSerializer()
credit_support_url = serializers.URLField()
courseware_summary = ChapterSerializer(many=True)
enrollment_mode = serializers.CharField()
studio_url = serializers.CharField()
user_timezone = serializers.CharField()
verification_data = VerificationDataSerializer()
|
stvstnfrd/edx-platform
|
lms/djangoapps/course_home_api/progress/v1/serializers.py
|
Python
|
agpl-3.0
| 3,620 | 0.000276 |
## p7.py - parallel processing microframework
## (c) 2017 by mobarski (at) gmail (dot) com
## licence: MIT
## version: ex4 (simple fan-in of subprocess outputs)
from __future__ import print_function
# CONFIG ###################################################################################
HEAD_LEN_IN = 2
HEAD_LEN_OUT = 100
BUFSIZE = 4096
CMD = "python -c 'import sys; sys.stdout.write(sys.stdin.read())'"
N = 4
# END OF CONFIG ############################################################################
import subprocess
import threading
import shlex
import sys
from select import select
from time import time
IN = sys.stdin
OUT = sys.stdout
OUT = open('test/out.txt','wb')
LOG = sys.stderr
ctx = {}
args = shlex.split(CMD)
PIPE = subprocess.PIPE
for i in range(N):
ctx[i] = {}
proc = subprocess.Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=BUFSIZE)
ctx[i]['proc'] = proc
# metadata
ctx[i]['pid'] = proc.pid
ctx[i]['t_start'] = time()
ctx[i]['head_cnt_in'] = 0
ctx[i]['head_cnt_out'] = 0
def pump_input():
while True:
for i in range(N):
p = ctx[i]['proc']
head = IN.read(HEAD_LEN_IN)
p.stdin.write(head)
ctx[i]['head_cnt_in'] += 1
if len(head)<HEAD_LEN_IN: # End Of File
break
tail = IN.readline()
p.stdin.write(tail)
else: continue # not EOF
# EOF -> close all input streams
for i in range(N):
ctx[i]['proc'].stdin.close()
break
def pump_output():
done = set()
while True:
for i in range(N):
if i in done: continue
p = ctx[i]['proc']
head = p.stdout.read(HEAD_LEN_OUT)
OUT.write(head)
ctx[i]['head_cnt_out'] += 1
if len(head)<HEAD_LEN_OUT: # End Of File
done.add(i)
p.wait() # End Of Process
ctx[i]['t_stop'] = time()
ctx[i]['run_time'] = ctx[i]['t_stop'] - ctx[i]['t_start']
continue
tail = p.stdout.readline()
OUT.write(tail)
if len(done)==N:
return
# RUN DATA PUMPS
input_pump = threading.Thread(target=pump_input)
output_pump = threading.Thread(target=pump_output)
input_pump.start()
output_pump.start()
input_pump.join()
output_pump.join()
from pprint import pprint
pprint(ctx)
|
mobarski/sandbox
|
parallel/p7ex4.py
|
Python
|
mit
| 2,120 | 0.032075 |
# -*- coding: utf-8 -*-
"""
flango.template
~~~~~~~~~~~~~~
The template module provides a simple template system that compiles
templates to Python code, much like the Django and Tornado template
modules.
Usage
-----
Well, you can view the tests file directly for usage examples.
Basically::
>>> import template
>>> template.Template('Hello, {{ name }}').render(name='flango')
Hello, flango
If, else, for...::
>>> template.Template('''
... {% for i in l %}
... {% if i > 3 %}
... {{ i }}
... {% else %}
... less than 3
... {% endif %}
    ... {% endfor %}
    ... ''').render(l=[2, 4])
less than 3
4
Then, user-defined class objects also work well::
>>> class A(object):
...
... def __init__(self, a, b):
... self.a = a
... self.b = b
...
>>> o = A("I am o.a", [1, 2, 3])
>>> template.Template('''
... {{ o.a }}
... {% for i in o.b %}
... {{ i }}
... {% endfor %}
... ''').render(o=o)
I am o.a
1
2
3
And wow, functions may surprise you::
>>> template.Template('{{ abs(-3) }}').render()
'3'
>>> template.Template('{{ len([1, 2, 3]) }}').render()
'3'
>>> template.Template('{{ [1, 2, 3].index(2) }}').render()
'1'
and complex functions like lambda expressions work too::
>>> template.Template('{{ list(map(lambda x: x * 2, [1, 2, 3])) }}').render()
'[2, 4, 6]'
and lastly, template inheritance with extends and include::
{% extends 'base.html' %}
{% include 'included.html' %}
Hacking with fun and joy.
"""
import re
import os
import collections
# LRU Cache capacity:
_CACHE_CAPACITY = 128
class Scanner(object):
""" Scanner is a inner class of Template which provide
custom template source reading operations.
"""
def __init__(self, source):
# pattern for variable, function, block, statement.
self.pattern = re.compile(r'''
{{\s*(?P<var>.+?)\s*}} # variable: {{ name }} or function like: {{ abs(-2) }}
| # or
{%\s*(?P<endtag>end(if|for|while|block))\s*%} # endtag: {% endfor %}
| # or
{%\s*(?P<statement>(?P<keyword>\w+)\s*(.+?))\s*%} # statement: {% for i in range(10) %}
''', re.VERBOSE)
# the pre-text before token.
self.pretext = ''
# the remaining text which have not been processed.
self.remain = source
def next_token(self):
""" Get the next token which match the pattern semantic.
return `None` if there is no more tokens, otherwise,
return matched regular expression group of token `t`, get
the pre-text and the remain text at the same time.
"""
t = self.pattern.search(self.remain)
if not t:
return None
self.pretext = self.remain[:t.start()]
self.remain = self.remain[t.end():]
return t
@property
def empty(self):
""" Return `True` if the source have been processed."""
return self.remain == ''
class BaseNode(object):
""" Base abstract class for nodes.
    Subclasses of BaseNode must implement the `generate` interface
    to emit the Python intermediate code for their content.
"""
def __init__(self, text, indent, block):
self.text = text
self.indent = indent
self.block = block
def generate(self):
raise NotImplementedError()
class TextNode(BaseNode):
""" Node for normal text. """
def generate(self):
return '{0}_stdout.append(\'\'\'{1}\'\'\')\n'.format(' '*self.indent, self.text)
class VariableNode(BaseNode):
""" Node for variables: such as {{ name }}. """
def generate(self):
return '{0}_stdout.append({1})\n'.format(' '*self.indent, self.text)
class KeyNode(BaseNode):
""" Node for keywords like if else... """
def generate(self):
return '{0}{1}\n'.format(' '*self.indent, self.text)
class TemplateException(Exception):
pass
class Template(object):
""" Main class for compiled template instance.
A initialized template instance will parse and compile
all the template source to Python intermediate code,
and instance function `render` will use Python builtin function
`exec` to execute the intermediate code in Python
runtime.
As function `exec` own very strong power and the ability to
execute all the python code in the runtime with given
namespace dict, so this template engine can perform all
the python features even lambda function. But, function
`exec` also has a huge problem in security, so be careful
and be serious, and I am very serious too.
"""
def __init__(self, source, path='', autoescape=False):
if not source:
raise ValueError('Invalid parameter')
self.scanner = Scanner(source)
# path for extends and include
self.path = path
self.nodes = []
# parent template
self.parent = None
self.autoescape = autoescape
self._parse()
# compiled intermediate code.
self.intermediate = self._compile()
def _parse(self):
python_keywords = ['if', 'for', 'while', 'try', 'else', 'elif', 'except', 'finally']
indent = 0
block_stack = []
def block_stack_top():
return block_stack[-1] if block_stack else None
while not self.scanner.empty:
token = self.scanner.next_token()
if not token:
self.nodes.append(TextNode(self.scanner.remain, indent, block_stack_top()))
break
# get the pre-text before token.
if self.scanner.pretext:
self.nodes.append(TextNode(self.scanner.pretext, indent, block_stack_top()))
variable, endtag, tag, statement, keyword, suffix = token.groups()
if variable:
node_text = 'escape(str({0}))'.format(variable) if self.autoescape else variable
self.nodes.append(VariableNode(node_text, indent, block_stack_top()))
elif endtag:
if tag != 'block':
indent -= 1
continue
# block placeholder in parent template nodes
if not self.parent:
node_text = 'endblock%{0}'.format(block_stack_top())
self.nodes.append(KeyNode(node_text, indent, block_stack_top()))
block_stack.pop()
elif statement:
if keyword == 'include':
filename = re.sub(r'\'|\"', '', suffix)
nodes = Loader(self.path).load(filename).nodes
for node in nodes:
node.indent += indent
self.nodes.extend(nodes)
elif keyword == 'extends':
if self.nodes:
raise TemplateException('Template syntax error: extends tag must be '
'at the beginning of the file.')
filename = re.sub(r'\'|\"', '', suffix)
self.parent = Loader(self.path).load(filename)
elif keyword == 'block':
block_stack.append(suffix)
if not self.parent:
node_text = 'block%{0}'.format(suffix)
self.nodes.append(KeyNode(node_text, indent, block_stack_top()))
elif keyword in python_keywords:
node_text = '{0}:'.format(statement)
if keyword in ['else', 'elif', 'except', 'finally']:
key_indent = indent - 1
else:
key_indent = indent
indent += 1
self.nodes.append(KeyNode(node_text, key_indent, block_stack_top()))
else:
raise TemplateException('Invalid keyword: {0}.'.format(keyword))
else:
raise TemplateException('Template syntax error.')
def _compile(self):
block = {}
if self.parent:
generate_code = ''.join(node.generate() for node in self.parent.nodes)
pattern = re.compile(r'block%(?P<start_block>\w+)(?P<block_code>.*?)endblock%(?P<end_block>\w+)', re.S)
for node in self.nodes:
block.setdefault(node.block, []).append(node.generate())
for token in pattern.finditer(generate_code):
block_name = token.group('start_block')
if block_name != token.group('end_block'):
raise TemplateException('Template syntax error.')
block_code = ''.join(block[block_name]) if block_name in block.keys() else token.group('block_code')
generate_code = generate_code.replace(token.group(), block_code)
else:
generate_code = ''.join(node.generate() for node in self.nodes)
return compile(generate_code, '<string>', 'exec')
def render(self, **context):
        # `context['_stdout']` is a Python list that collects all of
        # the output produced by the compiled template code.
context.update({'_stdout': [], 'escape': escape})
exec(self.intermediate, context)
return re.sub(r'(\s+\n)+', r'\n', ''.join(map(str, context['_stdout'])))
class LRUCache(object):
""" Simple LRU cache for template instance caching.
    In fact, collections.OrderedDict alone or @functools.lru_cache
    would work equally well.
"""
def __init__(self, capacity):
self.capacity = capacity
self.cache = collections.OrderedDict()
def get(self, key):
""" Return -1 if catched KeyError exception."""
try:
value = self.cache.pop(key)
self.cache[key] = value
return value
except KeyError:
return -1
def set(self, key, value):
try:
self.cache.pop(key)
except KeyError:
if len(self.cache) >= self.capacity:
self.cache.popitem(last=False)
self.cache[key] = value
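# Illustrative LRUCache usage sketch (hypothetical values):
# cache = LRUCache(capacity=2)
# cache.set('a', 1); cache.set('b', 2); cache.set('c', 3)
# cache.get('a')  # -> -1, 'a' was evicted as the least recently used entry
# cache.get('c')  # -> 3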
class Loader(object):
""" A template Loader which loads the environments of
main application, or just give the template system a root
directory to search the template files.
loader = template.Loader("home/to/root/of/templates/")
loader.load("index.html").render()
Loader class use a LRU cache system to cache the recently used
templates for performance consideration.
"""
def __init__(self, path='', engine=Template, cache_capacity=_CACHE_CAPACITY):
self.path = path
self.engine = engine
self.cache = LRUCache(capacity=cache_capacity)
def load(self, filename):
if not self.path.endswith(os.sep) and self.path != '':
self.path = self.path + os.sep
p = ''.join([self.path, filename])
cache_instance = self.cache.get(p)
if cache_instance != -1:
return cache_instance
if not os.path.isfile(p):
raise TemplateException('Template file {0} is not exist.'.format(p))
with open(p) as f:
self.cache.set(p, self.engine(f.read(), path=self.path))
return self.cache.get(p)
def escape(content):
""" Escapes a string's HTML. """
return content.replace('&', '&').replace('<', '<').replace('>', '>')\
.replace('"', '"').replace("'", ''')
|
hziling/template
|
template.py
|
Python
|
mit
| 11,977 | 0.002004 |
"""Django middlewares."""
try:
# Python 2.x
from urlparse import urlsplit, urlunsplit
except ImportError:
# Python 3.x
from urllib.parse import urlsplit
from urllib.parse import urlunsplit
from django.conf import settings
from django.http import HttpResponsePermanentRedirect
try:
# Django 1.10
from django.utils.deprecation import MiddlewareMixin
except ImportError:
# Django <1.10
class MiddlewareMixin(object):
def __init__(self, get_response=None):
self.get_response = get_response
super(MiddlewareMixin, self).__init__()
def __call__(self, request):
response = None
if hasattr(self, 'process_request'):
response = self.process_request(request)
if not response:
response = self.get_response(request)
if hasattr(self, 'process_response'):
response = self.process_response(request, response)
return response
class SSLifyMiddleware(MiddlewareMixin):
"""Force all requests to use HTTPs. If we get an HTTP request, we'll just
force a redirect to HTTPs.
.. note::
You can also disable this middleware when testing by setting
``settings.SSLIFY_DISABLE`` to True.
"""
@staticmethod
def process_request(request):
# If the user has explicitly disabled SSLify, do nothing.
if getattr(settings, 'SSLIFY_DISABLE', False):
return None
# Evaluate callables that can disable SSL for the current request
per_request_disables = getattr(settings, 'SSLIFY_DISABLE_FOR_REQUEST', [])
for should_disable in per_request_disables:
if should_disable(request):
return None
# If we get here, proceed as normal.
if not request.is_secure():
url = request.build_absolute_uri(request.get_full_path())
url_split = urlsplit(url)
scheme = 'https' if url_split.scheme == 'http' else url_split.scheme
ssl_port = getattr(settings, 'SSLIFY_PORT', 443)
url_secure_split = (scheme, "%s:%d" % (url_split.hostname or '', ssl_port)) + url_split[2:]
secure_url = urlunsplit(url_secure_split)
return HttpResponsePermanentRedirect(secure_url)
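# Illustrative configuration sketch (these are the settings this middleware
# reads; the health-check lambda is a hypothetical example):
#
# MIDDLEWARE = ['sslify.middleware.SSLifyMiddleware', ...]
# SSLIFY_DISABLE = DEBUG                # skip HTTPS redirects entirely
# SSLIFY_PORT = 443                     # port used when building the HTTPS URL
# SSLIFY_DISABLE_FOR_REQUEST = [
#     lambda request: request.path.startswith('/health/'),
# ]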
|
rdegges/django-sslify
|
sslify/middleware.py
|
Python
|
unlicense
| 2,315 | 0.001296 |
import argparse
import codecs
import sys
from .auth import parse_authentication
from .confluence_api import create_confluence_api
from .confluence import ConfluencePageManager
from .constants import DEFAULT_CONFLUENCE_API_VERSION
def main():
parser = argparse.ArgumentParser(description='Dumps Confluence page in storage format')
parser.add_argument('page_id', type=str, help='Configuration file')
parser.add_argument('-u', '--url', type=str, help='Confluence Url')
auth_group = parser.add_mutually_exclusive_group(required=True)
auth_group.add_argument('-a', '--auth', type=str, help='Base64 encoded user:password string')
auth_group.add_argument('-U', '--user', type=str, help='Username (prompt password)')
parser.add_argument('-o', '--output', type=str, help='Output file|stdout|stderr', default='stdout')
args = parser.parse_args()
auth = parse_authentication(args.auth, args.user)
confluence_api = create_confluence_api(DEFAULT_CONFLUENCE_API_VERSION, args.url, auth)
page_manager = ConfluencePageManager(confluence_api)
page = page_manager.load(args.page_id)
if args.output.lower() == 'stdout':
f = sys.stdout
elif args.output.lower() == 'stderr':
f = sys.stderr
else:
f = codecs.open(args.output, 'w', encoding='utf-8')
with f:
f.write(page.body)
if __name__ == '__main__':
main()
|
dopuskh3/confluence-publisher
|
conf_publisher/page_dumper.py
|
Python
|
mit
| 1,397 | 0.003579 |
#!/usr/bin/env python3
import fnmatch
import os
import re
import ntpath
import sys
import argparse
def get_private_declare(content):
priv_declared = []
srch = re.compile('private.*')
priv_srch_declared = srch.findall(content)
priv_srch_declared = sorted(set(priv_srch_declared))
priv_dec_str = ''.join(priv_srch_declared)
srch = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ ,\}\]\)";]')
priv_split = srch.findall(priv_dec_str)
priv_split = sorted(set(priv_split))
priv_declared += priv_split;
srch = re.compile('params \[.*\]|PARAMS_[0-9].*|EXPLODE_[0-9]_PVT.*|DEFAULT_PARAM.*|KEY_PARAM.*|IGNORE_PRIVATE_WARNING.*')
priv_srch_declared = srch.findall(content)
priv_srch_declared = sorted(set(priv_srch_declared))
priv_dec_str = ''.join(priv_srch_declared)
srch = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ ,\}\]\)";]')
priv_split = srch.findall(priv_dec_str)
priv_split = sorted(set(priv_split))
priv_declared += priv_split;
srch = re.compile('(?i)[\s]*local[\s]+(_[\w\d]*)[\s]*=.*')
priv_local = srch.findall(content)
priv_local_declared = sorted(set(priv_local))
priv_declared += priv_local_declared;
return priv_declared
def check_privates(filepath):
bad_count_file = 0
def pushClosing(t):
closingStack.append(closing.expr)
closing << Literal( closingFor[t[0]] )
def popClosing():
closing << closingStack.pop()
with open(filepath, 'r') as file:
content = file.read()
priv_use = []
priv_use = []
# Regex search privates
srch = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ =,\^\-\+\/\*\%\}\]\)";]')
priv_use = srch.findall(content)
priv_use = sorted(set(priv_use))
# Private declaration search
priv_declared = get_private_declare(content)
if '_this' in priv_declared: priv_declared.remove('_this')
if '_this' in priv_use: priv_use.remove('_this')
if '_x' in priv_declared: priv_declared.remove('_x')
if '_x' in priv_use: priv_use.remove('_x')
if '_forEachIndex' in priv_declared: priv_declared.remove('_forEachIndex')
if '_forEachIndex' in priv_use: priv_use.remove('_forEachIndex')
if '_foreachIndex' in priv_declared: priv_declared.remove('_foreachIndex')
if '_foreachIndex' in priv_use: priv_use.remove('_foreachIndex')
if '_foreachindex' in priv_declared: priv_declared.remove('_foreachindex')
if '_foreachindex' in priv_use: priv_use.remove('_foreachindex')
missing = []
for s in priv_use:
if s.lower() not in map(str.lower,priv_declared):
if s.lower() not in map(str.lower,missing):
missing.append(s)
if len(missing) > 0:
print (filepath)
private_output = 'private[';
first = True
for bad_priv in missing:
if first:
first = False
private_output = private_output + '"' + bad_priv
else:
private_output = private_output + '", "' + bad_priv
private_output = private_output + '"];';
print (private_output)
for bad_priv in missing:
print ('\t' + bad_priv)
bad_count_file = bad_count_file + 1
return bad_count_file
def main():
print("#########################")
print("# Search your Privates #")
print("#########################")
sqf_list = []
bad_count = 0
parser = argparse.ArgumentParser()
parser.add_argument('-m','--module', help='only search specified module addon folder', required=False, default=".")
args = parser.parse_args()
for root, dirnames, filenames in os.walk('../addons' + '/' + args.module):
for filename in fnmatch.filter(filenames, '*.sqf'):
sqf_list.append(os.path.join(root, filename))
for filename in sqf_list:
bad_count = bad_count + check_privates(filename)
print ("Bad Count {0}".format(bad_count))
if __name__ == "__main__":
main()
|
kerckasha/ACE3
|
tools/search_privates.py
|
Python
|
gpl-2.0
| 4,384 | 0.021908 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
class HelperShift(models.Model):
"""
n-m relation between helper and shift.
This model then can be used by other apps to "attach" more data with OneToOne fields and signals.
The fields `present` and `manual_presence` belong to the gifts app. They are directly inserted here as it
would be too complicated to add another model for just two booleans. Additionally, this has the advantage
that the `present` flag can directly used by other apps.
Columns:
:helper: The helper
:shift: The shift
:timestamp: Timestamp when the helper registered for this shift
:present: Flag set when the helper is there (manually or automatically)
:manual_presence: `present` flag was manually set
"""
class Meta:
unique_together = ('helper', 'shift',)
helper = models.ForeignKey(
'Helper',
on_delete=models.CASCADE,
)
shift = models.ForeignKey(
'Shift',
on_delete=models.CASCADE,
)
timestamp = models.DateTimeField(
auto_now_add=True,
verbose_name=_("Registration time for this shift")
)
present = models.BooleanField(
default=False,
verbose_name=_("Present"),
help_text=_("Helper was at shift")
)
manual_presence = models.BooleanField(
default=False,
editable=False,
verbose_name=_("Presence was manually set"),
)
def __str__(self):
return "{} - {} - {}".format(self.helper.event, self.helper, self.shift)
|
helfertool/helfertool
|
src/registration/models/helpershift.py
|
Python
|
agpl-3.0
| 1,614 | 0.002478 |
""" Tests the implementation of the solution to the Euclidean Minimum Spanning
Tree (EMST) problem """
import pytest
from exhaustive_search.point import Point
from exhaustive_search.euclidean_mst import solve, edist
def compare_solutions(actual, expected):
assert len(actual) == len(expected), "expected %d to equal %d" % (len(actual), len(expected))
assert sorted(actual, key=keyfunc) == sorted(expected, key=keyfunc)
def keyfunc(tpl):
left, right = tpl
return edist(left, right)
def test_empty_mst_list():
""" the (E)MST solution to an empty list is an empty list """
assert solve([]) == [], __doc__
def test_non_list():
""" this function should reject non-lists (invalid inputs) by raising
a TypeError """
with pytest.raises(TypeError):
solve(True)
def test_list_of_one():
""" the (E)MST solution to a list of one is an empty list """
assert solve([Point(0, 0)]) == [], __doc__
def test_list_of_two():
""" the (E)MST solution to a list of two points (i.e. [a, b]) is a list
containing a tuple of points (i.e. [(a, b)]) """
one, two = Point(3, 1), Point(1, 3)
actual = solve([one, two])
compare_solutions(actual, [(one, two)])
def test_triangle():
""" Given a list of points L:
        L = [Point(0, 0), Point(3, 0), Point(6, 0)]
The solution is:
[(Point(0, 0), Point(3, 0)), (Point(3, 0), Point(6, 0))]
"""
graph = [Point(0, 0), Point(3, 0), Point(6, 0)]
actual = solve(graph)
compare_solutions(actual, [(Point(0, 0), Point(3, 0)), (Point(3, 0), Point(6, 0))])
for result in actual:
left, right = result
if left == Point(0, 0) or left == Point(6, 0):
assert right == Point(3, 0), \
"expected right (%s) to == %s (left is %s)" % (right, Point(3, 0), left)
else:
assert right == Point(0, 0) or right == Point(6, 0), \
"expected right (%s) to == %s or %s" % (right, Point(0, 0), Point(6, 0))
|
ciarand/exhausting-search-homework
|
test/test_euclidean_mst.py
|
Python
|
isc
| 2,000 | 0.0055 |
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Oliver J. Backhouse <olbackhouse@gmail.com>
# George H. Booth <george.booth@kcl.ac.uk>
#
import unittest
import numpy as np
from pyscf.agf2 import aux, _agf2
class KnownValues(unittest.TestCase):
@classmethod
def setUpClass(self):
self.nmo = 100
self.nocc = 20
self.nvir = 80
self.naux = 400
np.random.seed(1)
@classmethod
def tearDownClass(self):
del self.nmo, self.nocc, self.nvir, self.naux
np.random.seed()
def test_c_ragf2(self):
xija = np.random.random((self.nmo, self.nocc, self.nocc, self.nvir))
gf_occ = aux.GreensFunction(np.random.random(self.nocc), np.eye(self.nmo, self.nocc))
gf_vir = aux.GreensFunction(np.random.random(self.nvir), np.eye(self.nmo, self.nvir))
vv1, vev1 = _agf2.build_mats_ragf2_outcore(xija, gf_occ.energy, gf_vir.energy)
vv2, vev2 = _agf2.build_mats_ragf2_incore(xija, gf_occ.energy, gf_vir.energy)
self.assertAlmostEqual(np.max(np.absolute(vv1-vv2)), 0.0, 10)
self.assertAlmostEqual(np.max(np.absolute(vev1-vev2)), 0.0, 10)
def test_c_dfragf2(self):
qxi = np.random.random((self.naux, self.nmo*self.nocc)) / self.naux
qja = np.random.random((self.naux, self.nocc*self.nvir)) / self.naux
gf_occ = aux.GreensFunction(np.random.random(self.nocc), np.eye(self.nmo, self.nocc))
gf_vir = aux.GreensFunction(np.random.random(self.nvir), np.eye(self.nmo, self.nvir))
vv1, vev1 = _agf2.build_mats_dfragf2_outcore(qxi, qja, gf_occ.energy, gf_vir.energy)
vv2, vev2 = _agf2.build_mats_dfragf2_incore(qxi, qja, gf_occ.energy, gf_vir.energy)
self.assertAlmostEqual(np.max(np.absolute(vv1-vv2)), 0.0, 10)
self.assertAlmostEqual(np.max(np.absolute(vev1-vev2)), 0.0, 10)
def test_c_uagf2(self):
xija = np.random.random((2, self.nmo, self.nocc, self.nocc, self.nvir))
gf_occ = (aux.GreensFunction(np.random.random(self.nocc), np.eye(self.nmo, self.nocc)),
aux.GreensFunction(np.random.random(self.nocc), np.eye(self.nmo, self.nocc)))
gf_vir = (aux.GreensFunction(np.random.random(self.nvir), np.eye(self.nmo, self.nvir)),
aux.GreensFunction(np.random.random(self.nvir), np.eye(self.nmo, self.nvir)))
vv1, vev1 = _agf2.build_mats_uagf2_outcore(xija, (gf_occ[0].energy, gf_occ[1].energy), (gf_vir[0].energy, gf_vir[1].energy))
vv2, vev2 = _agf2.build_mats_uagf2_incore(xija, (gf_occ[0].energy, gf_occ[1].energy), (gf_vir[0].energy, gf_vir[1].energy))
self.assertAlmostEqual(np.max(np.absolute(vv1-vv2)), 0.0, 10)
self.assertAlmostEqual(np.max(np.absolute(vev1-vev2)), 0.0, 10)
def test_c_dfuagf2(self):
qxi = np.random.random((2, self.naux, self.nmo*self.nocc)) / self.naux
qja = np.random.random((2, self.naux, self.nocc*self.nvir)) / self.naux
gf_occ = (aux.GreensFunction(np.random.random(self.nocc), np.eye(self.nmo, self.nocc)),
aux.GreensFunction(np.random.random(self.nocc), np.eye(self.nmo, self.nocc)))
gf_vir = (aux.GreensFunction(np.random.random(self.nvir), np.eye(self.nmo, self.nvir)),
aux.GreensFunction(np.random.random(self.nvir), np.eye(self.nmo, self.nvir)))
vv1, vev1 = _agf2.build_mats_dfuagf2_outcore(qxi, qja, (gf_occ[0].energy, gf_occ[1].energy), (gf_vir[0].energy, gf_vir[1].energy))
vv2, vev2 = _agf2.build_mats_dfuagf2_incore(qxi, qja, (gf_occ[0].energy, gf_occ[1].energy), (gf_vir[0].energy, gf_vir[1].energy))
self.assertAlmostEqual(np.max(np.absolute(vv1-vv2)), 0.0, 10)
self.assertAlmostEqual(np.max(np.absolute(vev1-vev2)), 0.0, 10)
if __name__ == '__main__':
print('AGF2 C implementations')
unittest.main()
|
sunqm/pyscf
|
pyscf/agf2/test/test_c_agf2.py
|
Python
|
apache-2.0
| 4,413 | 0.004759 |
#!/usr/bin/python
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(PauseAction(3000))
sequence.append(KeyComboAction("F10"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("space"))
sequence.append(PauseAction(3000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_8"))
sequence.append(utils.AssertPresentationAction(
"1. Review current line",
["BRAILLE LINE: 'Start $l'",
" VISIBLE: 'Start $l', cursor=1",
"SPEECH OUTPUT: 'Start'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("space"))
sequence.append(utils.AssertPresentationAction(
"2. Activate timer",
["BRAILLE LINE: 'gnome-clocks application Clocks frame Pause push button'",
" VISIBLE: 'Pause push button', cursor=1",
"BRAILLE LINE: 'gnome-clocks application Clocks frame Pause push button'",
" VISIBLE: 'Pause push button', cursor=1",
"SPEECH OUTPUT: 'Clocks frame'",
"SPEECH OUTPUT: 'Pause push button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_8"))
sequence.append(utils.AssertPresentationAction(
"3. Review current line",
["BRAILLE LINE: 'Pause Reset $l'",
" VISIBLE: 'Pause Reset $l', cursor=1",
"SPEECH OUTPUT: 'Pause Reset'"]))
sequence.append(PauseAction(5000))
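# The timer keeps counting down during the pauses, so the expected digits
# are matched with regular expressions rather than fixed strings.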
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_7"))
sequence.append(utils.AssertPresentationAction(
"4. Review previous line",
["BRAILLE LINE: '00 ∶ 04 ∶ 5[0-9] \\$l'",
" VISIBLE: '00 ∶ 04 ∶ 5[0-9] \\$l', cursor=1",
"SPEECH OUTPUT: '00 ∶ 04 ∶ 5[0-9]'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_7"))
sequence.append(utils.AssertPresentationAction(
"5. Review previous line",
["BRAILLE LINE: '& y World & y Alarm & y Stopwatch &=y Timer $l'",
" VISIBLE: '& y World & y Alarm & y Stopwatc', cursor=1",
"SPEECH OUTPUT: 'not selected World not selected Alarm not selected Stopwatch selected Timer'"]))
sequence.append(PauseAction(5000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_9"))
sequence.append(utils.AssertPresentationAction(
"6. Review next line",
["BRAILLE LINE: '00 ∶ 04 ∶ 4[0-9] \\$l'",
" VISIBLE: '00 ∶ 04 ∶ 4[0-9] \\$l', cursor=1",
"SPEECH OUTPUT: '00 ∶ 04 ∶ 4[0-9]'"]))
sequence.append(PauseAction(5000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_8"))
sequence.append(utils.AssertPresentationAction(
"7. Review current line",
["BRAILLE LINE: '00 ∶ 04 ∶ 3[0-9] \\$l'",
" VISIBLE: '00 ∶ 04 ∶ 3[0-9] \\$l', cursor=1",
"SPEECH OUTPUT: '00 ∶ 04 ∶ 3[0-9]'"]))
sequence.append(PauseAction(5000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_8"))
sequence.append(utils.AssertPresentationAction(
"8. Review current line",
["BRAILLE LINE: '00 ∶ 04 ∶ 2[0-9] \\$l'",
" VISIBLE: '00 ∶ 04 ∶ 2[0-9] \\$l', cursor=1",
"SPEECH OUTPUT: '00 ∶ 04 ∶ 2[0-9]'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
GNOME/orca
|
test/keystrokes/gnome-clocks/timer_flat_review.py
|
Python
|
lgpl-2.1
| 3,398 | 0.000896 |
from django.views.generic import ListView, DetailView, CreateView, \
DeleteView, UpdateView, \
ArchiveIndexView, DateDetailView, \
DayArchiveView, MonthArchiveView, \
TodayArchiveView, WeekArchiveView, \
YearArchiveView
from baseapp.models import Class_Studying
from django.contrib import messages
class Class_StudyingView(object):
model = Class_Studying
def get_template_names(self):
"""Nest templates within class_studying directory."""
tpl = super(Class_StudyingView, self).get_template_names()[0]
app = self.model._meta.app_label
mdl = 'class_studying'
self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))
return [self.template_name]
class Class_StudyingDateView(Class_StudyingView):
date_field = 'created_date'
month_format = '%m'
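# The date-based archive views below filter on 'created_date'; with
# month_format '%m' they expect numeric months in their URLs.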
class Class_StudyingBaseListView(Class_StudyingView):
paginate_by = 10
class Class_StudyingArchiveIndexView(
Class_StudyingDateView, Class_StudyingBaseListView, ArchiveIndexView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingCreateView(Class_StudyingView, CreateView):
def get_success_url(self):
from django.core.urlresolvers import reverse
messages.add_message(
self.request,
messages.SUCCESS,"Successfully created."
)
return reverse('baseapp_class_studying_list')
class Class_StudyingDateDetailView(Class_StudyingDateView, DateDetailView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingDayArchiveView(
Class_StudyingDateView, Class_StudyingBaseListView, DayArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingDeleteView(Class_StudyingView, DeleteView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingDetailView(Class_StudyingView, DetailView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingListView(Class_StudyingBaseListView, ListView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingMonthArchiveView(
Class_StudyingDateView, Class_StudyingBaseListView, MonthArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingTodayArchiveView(
Class_StudyingDateView, Class_StudyingBaseListView, TodayArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingUpdateView(Class_StudyingView, UpdateView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingWeekArchiveView(
Class_StudyingDateView, Class_StudyingBaseListView, WeekArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_class_studying_list')
class Class_StudyingYearArchiveView(
Class_StudyingDateView, Class_StudyingBaseListView, YearArchiveView):
make_object_list = True
|
tnemis/staging-server
|
baseapp/views/class_studying_views.py
|
Python
|
mit
| 3,912 | 0.005624 |
### extends 'class_empty.py'
### block ClassImports
# NOTICE: Do not edit anything here, it is generated code
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXRA(gxapi_cy.WrapRA):
"""
GXRA class.
The `GXRA <geosoft.gxapi.GXRA>` class is used to access ASCII files sequentially or
by line number. The files are opened in read-only mode, so no
write operations are defined
"""
def __init__(self, handle=0):
super(GXRA, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXRA <geosoft.gxapi.GXRA>`
:returns: A null `GXRA <geosoft.gxapi.GXRA>`
:rtype: GXRA
"""
return GXRA()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# Miscellaneous
@classmethod
def create(cls, file):
"""
Creates `GXRA <geosoft.gxapi.GXRA>`
:param file: Name of the file
:type file: str
:returns: `GXRA <geosoft.gxapi.GXRA>` Object
:rtype: GXRA
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapRA._create(GXContext._get_tls_geo(), file.encode())
return GXRA(ret_val)
@classmethod
def create_sbf(cls, sbf, file):
"""
Creates `GXRA <geosoft.gxapi.GXRA>` on an `GXSBF <geosoft.gxapi.GXSBF>`
:param sbf: Storage
:param file: Name of the file
:type sbf: GXSBF
:type file: str
:returns: `GXRA <geosoft.gxapi.GXRA>` Object
:rtype: GXRA
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** This method allows you to open an `GXRA <geosoft.gxapi.GXRA>` in a structured file
storage (an `GXSBF <geosoft.gxapi.GXSBF>`). SBFs can be created inside other data
containers, such as workspaces, maps, images and databases.
This lets you store application specific information together
with the data to which it applies.
.. seealso::
sbf.gxh
"""
ret_val = gxapi_cy.WrapRA._create_sbf(GXContext._get_tls_geo(), sbf, file.encode())
return GXRA(ret_val)
def gets(self, strbuff):
"""
Get next full line from `GXRA <geosoft.gxapi.GXRA>`
:param strbuff: Buffer in which to place string
:type strbuff: str_ref
:returns: 0 - Ok
1 - End of file
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val, strbuff.value = self._gets(strbuff.value.encode())
return ret_val
def len(self):
"""
Returns the total number of lines in `GXRA <geosoft.gxapi.GXRA>`
:returns: # of lines in the `GXRA <geosoft.gxapi.GXRA>`.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = self._len()
return ret_val
def line(self):
"""
Returns current line #, 0 is the first
:returns: The current read line location.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** This will be the next line read.
"""
ret_val = self._line()
return ret_val
def seek(self, line):
"""
Position next read to specified line #
:param line: Line #, 0 is the first.
:type line: int
        :returns: 0 if the requested line is within the range of lines;
                  1 if it is outside the range, in which case the line pointer is not moved.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = self._seek(line)
return ret_val
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer
|
GeosoftInc/gxpy
|
geosoft/gxapi/GXRA.py
|
Python
|
bsd-2-clause
| 5,242 | 0.00744 |