| text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) |
|---|---|---|---|---|---|---|
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tools configuration plugin."""
import os
import gettext
_ = lambda m: gettext.dgettext(message=m, domain='ovirt-engine-setup')
from otopi import constants as otopicons
from otopi import util
from otopi import filetransaction
from otopi import plugin
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common \
import constants as oengcommcons
@util.export
class Plugin(plugin.PluginBase):
"""Tools configuration plugin."""
# 'section' is not used here, left for reference - hopefully
# one day the code will be merged with the generation code
TOOLS_CONFIG = (
{
'dir': '{engine_sysconf}/isouploader.conf.d',
'section': 'ISOUploader',
},
{
'dir': '{engine_sysconf}/imageuploader.conf.d',
'section': 'ImageUploader',
},
{
'dir': '{engine_sysconf}/logcollector.conf.d',
'section': 'LogCollector',
},
)
def _entry_filename(self, entry):
return os.path.join(
entry['dir'],
'10-engine-setup.conf'
).format(
engine_sysconf=(
oenginecons.FileLocations.
OVIRT_ENGINE_SYSCONFDIR
),
)
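# Illustrative note (not in the original module): with the stock sysconfdir
# '/etc/ovirt-engine', the first TOOLS_CONFIG entry above resolves to
# '/etc/ovirt-engine/isouploader.conf.d/10-engine-setup.conf'.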
def _content_with_renamed_fqdn(self, config):
with open(config, 'r') as f:
content = []
for line in f:
line = line.rstrip('\n')
if line.startswith('engine='):
line = (
'engine=%s:%s'
) % (
self.environment[
osetupcons.RenameEnv.FQDN
],
self.environment[
oengcommcons.ConfigEnv.PUBLIC_HTTPS_PORT
],
)
content.append(line)
return content
def __init__(self, context):
super(Plugin, self).__init__(context=context)
@plugin.event(
stage=plugin.Stages.STAGE_SETUP,
)
def _setup(self):
for entry in self.TOOLS_CONFIG:
self.environment[
osetupcons.RenameEnv.FILES_TO_BE_MODIFIED
].append(self._entry_filename(entry))
@plugin.event(
stage=plugin.Stages.STAGE_MISC,
)
def _misc(self):
for entry in self.TOOLS_CONFIG:
name = self._entry_filename(entry)
self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
filetransaction.FileTransaction(
name=name,
content=self._content_with_renamed_fqdn(name),
modifiedList=self.environment[
otopicons.CoreEnv.MODIFIED_FILES
],
)
)
# vim: expandtab tabstop=4 shiftwidth=4
| phoenixsbk/kvmmgr | packaging/setup/plugins/ovirt-engine-rename/ovirt-engine/tools.py | Python | apache-2.0 | 3,586 | 0.002231 |
#!/usr/bin/env python
#
# email.py
# TurboHvZ
#
# Copyright (C) 2008 Ross Light
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Support for email
:Variables:
cell_providers : dict
Dictionary of supported cell phone providers. Each value is a
``(name, sms_domain)`` pair.
"""
import turbogears
import turbomail
from turbomail.message import Message
__author__ = 'Ross Light'
__date__ = 'April 16, 2008'
__docformat__ = 'reStructuredText'
__all__ = ['GenshiMessage',
'sendmail',
'send_generic_mail',
'send_sms',]
cell_providers = \
{
'att': (_("AT&T"), 'mms.att.net'),
'nextel': (_("Nextel"), 'messaging.nextel.com'),
'sprint': (_("Sprint"), 'messaging.sprintpcs.com'),
't-mobile': (_("T-Mobile"), 'tmomail.net'),
'verizon': (_("Verizon"), 'vtext.com'),
'virgin': (_("Virgin Mobile"), 'vmobl.com'),
'boost': (_("Boost"), 'myboostmobile.com'),
}
class GenshiMessage(Message):
"""A message created from a Genshi template."""
def __init__(self, sender, recipient, subject, template, variables={}, **kw):
"""
Store the additional template and variable information.
:Parameters:
template : str
A dot-path to a valid Genshi template.
variables : dict
A dictionary containing named variables to pass to the
template engine.
"""
self.plain_only = kw.pop('plain_only', False)
self._template = template
self._variables = dict(sender=sender,
recipient=recipient,
subject=subject)
self._variables.update(variables)
super(GenshiMessage, self).__init__(sender, recipient, subject, **kw)
def _process(self):
"""Automatically generate the plain and rich text content."""
turbogears.view.base.load_engines()
data = dict()
for (i, j) in self._variables.iteritems():
if callable(j):
data[i] = j()
else:
data[i] = j
engine = turbogears.view.engines.get('genshi')
encoding = turbogears.config.get('genshi.encoding', 'utf-8')
data['email_format'] = 'plain'
self.plain = engine.render(data, template=self._template,
format="text")
self.plain = self._clean_plain(self.plain)
self.plain = self.plain.decode(encoding)
if not self.plain_only:
data['email_format'] = 'rich'
self.rich = engine.render(data, template=self._template)
self.rich = self.rich.decode(encoding)
return super(GenshiMessage, self)._process()
@staticmethod
def _clean_plain(text):
text = text.strip()
lines = []
for line in text.splitlines():
line = line.strip()
try:
last_line = lines[-1]
except IndexError:
last_line = None
if line or last_line:
# Only allow one blank line between text chunks
lines.append(line)
return '\r\n'.join(lines)
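# Hedged illustration of _clean_plain (example input invented): runs of blank
# lines collapse to a single blank line and the result is CRLF-joined, e.g.
#   GenshiMessage._clean_plain('Hello\n\n\n\nWorld') == 'Hello\r\n\r\nWorld'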
def sendmail(recipient, subject, template, variables={}, **kw):
"""
Conveniently sends an email.
This will immediately return if mail has been turned off. The sender is
set to the value of the configuration value ``hvz.webmaster_email``.
:Returns: The newly created message
:ReturnType: turbomail.message.Message
"""
if not turbogears.config.get('mail.on', False):
# Mail has been turned off, ignore it.
return
variables = variables.copy()
variables.setdefault('message_format', 'email')
from_address = turbogears.config.get('hvz.webmaster_email')
new_message = GenshiMessage(from_address, recipient, subject,
template, variables, **kw)
turbomail.enqueue(new_message)
return new_message
def send_generic_mail(recipients, subject, message):
"""
Conveniently sends a custom email.
This will immediately return if mail has been turned off. The sender is
set to the value of the configuration value ``hvz.webmaster_email``.
:Returns: The newly created message
:ReturnType: turbomail.message.Message
"""
return sendmail(recipients, subject, "hvz.templates.mail.generic",
dict(subject=subject,
content=message,))
def send_sms(numbers, subject, template, variables={}):
"""
Sends a text message.
:Parameters:
numbers : tuple or list of tuple
Numbers to send to. Each item must be a ``(number, provider)``
pair where number is a ten-digit US phone number.
subject : unicode
Subject to send with
template : unicode
Template to use
variables : dict
Variables to pass to template
:Returns: The newly created message
:ReturnType: turbomail.message.Message
"""
def _make_address(item):
number, provider = item
if len(number) != 10:
raise ValueError('Number is not a valid US phone number')
provider_name, provider_domain = cell_providers[provider]
return number + '@' + provider_domain
if not turbogears.config.get('hvz.notify_sms', True):
# SMS has been turned off, ignore it.
return
if isinstance(numbers, tuple):
numbers = [numbers]
addresses = [_make_address(item) for item in numbers]
variables = variables.copy()
variables.setdefault('message_format', 'sms')
return sendmail(addresses, subject, template, variables, plain_only=True)
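# Hedged usage sketch (assumes a configured TurboGears app and an existing
# template; the number, provider and text below are invented):
#   send_sms([('5551234567', 'verizon')], u'Game on!',
#            'hvz.templates.mail.generic', dict(content=u'Tag!'))
# builds '5551234567@vtext.com' from cell_providers and enqueues the message
# through sendmail()/turbomail.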
| zombiezen/turbohvz | hvz/email.py | Python | gpl-3.0 | 6,348 | 0.002205 |
import unittest
from unittest import mock
from betfairlightweight import APIClient
from betfairlightweight import resources
from betfairlightweight.endpoints.scores import Scores
from betfairlightweight.exceptions import APIError
from tests.tools import create_mock_json
class ScoresInit(unittest.TestCase):
def test_base_endpoint_init(self):
client = APIClient("username", "password", "app_key")
scores = Scores(client)
assert scores.connect_timeout == 3.05
assert scores._error == APIError
assert scores.client == client
assert scores.URI == "ScoresAPING/v1.0/"
class ScoresTest(unittest.TestCase):
def setUp(self):
client = APIClient("username", "password", "app_key", "UK")
self.scores = Scores(client)
@mock.patch("betfairlightweight.endpoints.scores.Scores.request")
def test_list_race_details(self, mock_response):
mock = create_mock_json("tests/resources/list_race_details.json")
mock_response.return_value = (mock.Mock(), mock.json(), 1.5)
response = self.scores.list_race_details()
assert mock.json.call_count == 1
mock_response.assert_called_with("ScoresAPING/v1.0/listRaceDetails", {}, None)
assert isinstance(response[0], resources.RaceDetails)
assert len(response) == 475
@mock.patch("betfairlightweight.endpoints.scores.Scores.request")
def test_list_available_events(self, mock_response):
mock = create_mock_json("tests/resources/availableevents.json")
mock_response.return_value = (mock.Mock(), mock.json(), 1.3)
response = self.scores.list_available_events()
assert mock.json.call_count == 1
mock_response.assert_called_with(
"ScoresAPING/v1.0/listAvailableEvents", {}, None
)
assert all(isinstance(event, resources.AvailableEvent) for event in response)
@mock.patch("betfairlightweight.endpoints.scores.Scores.request")
def test_list_scores(self, mock_response):
mock = create_mock_json("tests/resources/score.json")
mock_response.return_value = (mock.Mock(), mock.json(), 1.3)
mock_update_keys = mock.Mock()
response = self.scores.list_scores(mock_update_keys)
assert mock.json.call_count == 1
mock_response.assert_called_with(
"ScoresAPING/v1.0/listScores", {"updateKeys": mock_update_keys}, None
)
assert all(isinstance(event, resources.Score) for event in response)
@mock.patch("betfairlightweight.endpoints.scores.Scores.request")
def test_list_incidents(self, mock_response):
mock = create_mock_json("tests/resources/incidents.json")
mock_response.return_value = (mock.Mock(), mock.json(), 1.3)
mock_update_keys = mock.Mock()
response = self.scores.list_incidents(mock_update_keys)
assert mock.json.call_count == 1
mock_response.assert_called_with(
"ScoresAPING/v1.0/listIncidents", {"updateKeys": mock_update_keys}, None
)
assert all(isinstance(event, resources.Incidents) for event in response)
def test_url(self):
assert self.scores.url == "%s%s" % (
self.scores.client.api_uri,
"scores/json-rpc/v1",
)
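# Illustrative invocation (module path assumed from the repo layout):
#   python -m unittest tests.test_scores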
| liampauling/betfairlightweight | tests/test_scores.py | Python | mit | 3,269 | 0.00153 |
from sys import platform as sys_plat
import platform
import os
from ctypes import *
if sys_plat == "win32":
def find_win_dll(arch):
""" Finds the highest versioned windows dll for the specified architecture. """
dlls = []
filename = 'VimbaC.dll'
# look in local working directory first
if os.path.isfile(filename):
dlls.append(filename)
if not dlls:
if 'VIMBA_HOME' in os.environ:
candidate = os.environ['VIMBA_HOME'] + r'\VimbaC\Bin\Win%i\VimbaC.dll' % (arch)
if os.path.isfile(candidate):
dlls.append(candidate)
if not dlls:
bases = [
r'C:\Program Files\Allied Vision Technologies\AVTVimba_%i.%i\VimbaC\Bin\Win%i\VimbaC.dll',
r'C:\Program Files\Allied Vision\Vimba_%i.%i\VimbaC\Bin\Win%i\VimbaC.dll'
]
for base in bases:
for major in range(4):
for minor in range(10):
candidate = base % (major, minor, arch)
if os.path.isfile(candidate):
dlls.append(candidate)
if not dlls:
raise IOError("VimbaC.dll not found.")
return dlls[-1]
if '64' in platform.architecture()[0]:
vimbaC_path = find_win_dll(64)
else:
vimbaC_path = find_win_dll(32)
dll_loader = windll
else:
dll_loader = cdll
def find_so(platform, genicam_path):
vimbaC_found = False
for tlPath in [p for p in os.environ.get(genicam_path).split(":") if p]:
vimba_dir = "/".join(tlPath.split("/")[1:-3])
vimbaC_path = "/" + vimba_dir + "/VimbaC/DynamicLib/" + platform + "/libVimbaC.so"
if os.path.isfile(vimbaC_path):
vimbaC_found = True
break
if not vimbaC_found:
raise OSError('No libVimbaC.so found')
return vimbaC_path
if 'x86_64' in os.uname()[4]:
assert os.environ.get(
"GENICAM_GENTL64_PATH"), "you need your GENICAM_GENTL64_PATH environment set. Make sure you have Vimba installed, and you have loaded the /etc/profile.d/ scripts"
vimbaC_path = find_so('x86_64bit', "GENICAM_GENTL64_PATH")
elif 'x86_32' in os.uname()[4]:
print("Warning: x86_32 reached!")
assert os.environ.get(
"GENICAM_GENTL32_PATH"), "you need your GENICAM_GENTL32_PATH environment set. Make sure you have Vimba installed, and you have loaded the /etc/profile.d/ scripts"
vimbaC_path = find_so('x86_32bit', 'GENICAM_GENTL32_PATH')
elif 'arm' in os.uname()[4]:
assert os.environ.get(
"GENICAM_GENTL32_PATH"), "you need your GENICAM_GENTL32_PATH environment set. Make sure you have Vimba installed, and you have loaded the /etc/profile.d/ scripts"
vimbaC_path = find_so('arm_32bit', 'GENICAM_GENTL32_PATH')
elif 'aarch64' in os.uname()[4]:
assert os.environ.get(
"GENICAM_GENTL64_PATH"), "you need your GENICAM_GENTL64_PATH environment set. Make sure you have Vimba installed, and you have loaded the /etc/profile.d/ scripts"
vimbaC_path = find_so('arm_64bit', "GENICAM_GENTL64_PATH")
else:
raise ValueError("Pymba currently doesn't support %s" % os.uname()[4])
# Callback function type
if sys_plat == "win32":
CALLBACK_FUNCTYPE = WINFUNCTYPE
else:
CALLBACK_FUNCTYPE = CFUNCTYPE
class NiceStructure(Structure):
def __repr__(self):
field_names = (field[0] for field in self._fields_)
return '{}({})'.format(
type(self).__name__,
", ".join("=".join((field, str(getattr(self, field))))
for field in field_names)
)
class VmbVersionInfo(NiceStructure):
_fields_ = [
('major', c_uint32),
('minor', c_uint32),
('patch', c_uint32)]
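# Hedged illustration of NiceStructure's repr (field values invented):
#   repr(VmbVersionInfo(1, 4, 0)) == 'VmbVersionInfo(major=1, minor=4, patch=0)'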
class VmbInterfaceInfo(NiceStructure):
_fields_ = [
# Unique identifier for each interface
('interfaceIdString', c_char_p),
# Interface type, see VmbInterfaceType
('interfaceType', c_uint32),
# Interface name, given by the transport layer
('interfaceName', c_char_p),
# Serial number
('serialString', c_char_p),
# Used access mode, see VmbAccessModeType
('permittedAccess', c_uint32)]
class VmbCameraInfo(NiceStructure):
_fields_ = [
# Unique identifier for each camera
('cameraIdString', c_char_p),
# Name of the camera
('cameraName', c_char_p),
# Model name
('modelName', c_char_p),
# Serial number
('serialString', c_char_p),
# Used access mode, see VmbAccessModeType
('permittedAccess', c_uint32),
# Unique value for each interface or bus
('interfaceIdString', c_char_p)]
class VmbFeatureInfo(NiceStructure):
_fields_ = [
('name', c_char_p),
('featureDataType', c_uint32),
('featureFlags', c_uint32),
('category', c_char_p),
('displayName', c_char_p),
('pollingTime', c_uint32),
('unit', c_char_p),
('representation', c_char_p),
('visibility', c_uint32),
('tooltip', c_char_p),
('description', c_char_p),
('sfncNamespace', c_char_p),
('isStreamable', c_bool),
('hasAffectedFeatures', c_bool),
('hasSelectedFeatures', c_bool)]
class VmbFrame(Structure):
_fields_ = [
# ---- IN ----
# Comprises image and ancillary data
('buffer', c_void_p),
# Size of the data buffer
('bufferSize', c_uint32),
# User context filled during queuing
('context', c_void_p * 4),
# ---- OUT ----
# Resulting status of the receive operation
('receiveStatus', c_int32),
# Resulting flags of the receive operation
('receiveFlags', c_uint32),
# Size of the image data inside the data buffer
('imageSize', c_uint32),
# Size of the ancillary data inside the data buffer
('ancillarySize', c_uint32),
# Pixel format of the image
('pixelFormat', c_uint32),
# Width of an image
('width', c_uint32),
# Height of an image
('height', c_uint32),
# Horizontal offset of an image
('offsetX', c_uint32),
# Vertical offset of an image
('offsetY', c_uint32),
# Unique ID of this frame in this stream
('frameID', c_uint64),
# Timestamp of the data transfer
('timestamp', c_uint64)]
class VimbaFeaturePersistSettings(NiceStructure):
_fields_ = [
('persistType', c_uint32),
('maxIterations', c_uint32),
('loggingLevel', c_uint32)]
_vimba_lib = dll_loader.LoadLibrary(vimbaC_path)
# ----- The below function signatures are defined in VimbaC.h -----
# callback for frame queue
vmb_frame_callback_func = CALLBACK_FUNCTYPE(None,
c_void_p,
POINTER(VmbFrame))
# Callback for Invalidation events
vmb_feature_invalidation_callback_fun = CALLBACK_FUNCTYPE(None,
c_void_p, # const VmbHandle_t handle
c_char_p, # const char* name
c_void_p) # void* pUserContext
vmb_version_query = _vimba_lib.VmbVersionQuery
vmb_version_query.restype = c_int32
vmb_version_query.argtypes = (POINTER(VmbVersionInfo),
c_uint32)
vmb_startup = _vimba_lib.VmbStartup
vmb_startup.restype = c_int32
vmb_shutdown = _vimba_lib.VmbShutdown
vmb_cameras_list = _vimba_lib.VmbCamerasList
vmb_cameras_list.restype = c_int32
vmb_cameras_list.argtypes = (POINTER(VmbCameraInfo),
c_uint32,
POINTER(c_uint32),
c_uint32)
vmb_camera_info_query = _vimba_lib.VmbCameraInfoQuery
vmb_camera_info_query.restype = c_int32
vmb_camera_info_query.argtypes = (c_char_p,
POINTER(VmbCameraInfo),
c_uint32)
vmb_camera_open = _vimba_lib.VmbCameraOpen
vmb_camera_open.restype = c_int32
vmb_camera_open.argtypes = (c_char_p,
c_uint32,
c_void_p)
vmb_camera_close = _vimba_lib.VmbCameraClose
vmb_camera_close.restype = c_int32
vmb_camera_close.argtypes = (c_void_p,)
vmb_features_list = _vimba_lib.VmbFeaturesList
vmb_features_list.restype = c_int32
vmb_features_list.argtypes = (c_void_p,
POINTER(VmbFeatureInfo),
c_uint32,
POINTER(c_uint32),
c_uint32)
vmb_feature_info_query = _vimba_lib.VmbFeatureInfoQuery
vmb_feature_info_query.restype = c_int32
vmb_feature_info_query.argtypes = (c_void_p,
c_char_p,
POINTER(VmbFeatureInfo),
c_uint32)
# todo VmbFeatureListAffected
# todo VmbFeatureListSelected
# todo VmbFeatureAccessQuery
vmb_feature_int_get = _vimba_lib.VmbFeatureIntGet
vmb_feature_int_get.restype = c_int32
vmb_feature_int_get.argtypes = (c_void_p,
c_char_p,
POINTER(c_int64))
vmb_feature_int_set = _vimba_lib.VmbFeatureIntSet
vmb_feature_int_set.restype = c_int32
vmb_feature_int_set.argtypes = (c_void_p,
c_char_p,
c_int64)
vmb_feature_int_range_query = _vimba_lib.VmbFeatureIntRangeQuery
vmb_feature_int_range_query.restype = c_int32
vmb_feature_int_range_query.argtypes = (c_void_p,
c_char_p,
POINTER(c_int64),
POINTER(c_int64))
# todo VmbFeatureIntIncrementQuery
vmb_feature_float_get = _vimba_lib.VmbFeatureFloatGet
vmb_feature_float_get.restype = c_int32
vmb_feature_float_get.argtypes = (c_void_p,
c_char_p,
POINTER(c_double))
vmb_feature_float_set = _vimba_lib.VmbFeatureFloatSet
vmb_feature_float_set.restype = c_int32
vmb_feature_float_set.argtypes = (c_void_p,
c_char_p,
c_double)
vmb_feature_float_range_query = _vimba_lib.VmbFeatureFloatRangeQuery
vmb_feature_float_range_query.restype = c_int32
vmb_feature_float_range_query.argtypes = (c_void_p,
c_char_p,
POINTER(c_double),
POINTER(c_double))
# todo VmbFeatureFloatIncrementQuery
vmb_feature_enum_get = _vimba_lib.VmbFeatureEnumGet
vmb_feature_enum_get.restype = c_int32
vmb_feature_enum_get.argtypes = (c_void_p,
c_char_p,
POINTER(c_char_p))
vmb_feature_enum_set = _vimba_lib.VmbFeatureEnumSet
vmb_feature_enum_set.restype = c_int32
vmb_feature_enum_set.argtypes = (c_void_p,
c_char_p,
c_char_p)
vmb_feature_enum_range_query = _vimba_lib.VmbFeatureEnumRangeQuery
vmb_feature_enum_range_query.restype = c_int32
vmb_feature_enum_range_query.argtypes = (c_void_p,
c_char_p,
POINTER(c_char_p),
c_uint32,
POINTER(c_uint32))
# todo VmbFeatureEnumIsAvailable
# todo VmbFeatureEnumAsInt
# todo VmbFeatureEnumAsString
# todo VmbFeatureEnumEntryGet
vmb_feature_string_get = _vimba_lib.VmbFeatureStringGet
vmb_feature_string_get.restype = c_int32
vmb_feature_string_get.argtypes = (c_void_p,
c_char_p,
c_char_p,
c_uint32,
POINTER(c_uint32))
vmb_feature_string_set = _vimba_lib.VmbFeatureStringSet
vmb_feature_string_set.restype = c_int32
vmb_feature_string_set.argtypes = (c_void_p,
c_char_p,
c_char_p)
# todo VmbFeatureStringMaxlengthQuery
vmb_feature_bool_get = _vimba_lib.VmbFeatureBoolGet
vmb_feature_bool_get.restype = c_int32
vmb_feature_bool_get.argtypes = (c_void_p,
c_char_p,
POINTER(c_bool))
vmb_feature_bool_set = _vimba_lib.VmbFeatureBoolSet
vmb_feature_bool_set.restype = c_int32
vmb_feature_bool_set.argtypes = (c_void_p,
c_char_p,
c_bool)
vmb_feature_command_run = _vimba_lib.VmbFeatureCommandRun
vmb_feature_command_run.restype = c_int32
vmb_feature_command_run.argtypes = (c_void_p,
c_char_p)
vmb_feature_command_is_done = _vimba_lib.VmbFeatureCommandIsDone
vmb_feature_command_is_done.restype = c_int32
vmb_feature_command_is_done.argtypes = (c_void_p,
c_char_p,
POINTER(c_bool))
# todo VmbFeatureRawGet
# todo VmbFeatureRawSet
# todo VmbFeatureRawLengthQuery
vmb_feature_invalidation_register = _vimba_lib.VmbFeatureInvalidationRegister
vmb_feature_invalidation_register.restype = c_int32
vmb_feature_invalidation_register.argtypes = (c_void_p,
c_char_p,
vmb_feature_invalidation_callback_fun,
c_void_p)
vmb_feature_invalidation_unregister = _vimba_lib.VmbFeatureInvalidationUnregister
vmb_feature_invalidation_unregister.restype = c_int32
vmb_feature_invalidation_unregister.argtypes = (c_void_p,
c_char_p,
vmb_feature_invalidation_callback_fun)
vmb_frame_announce = _vimba_lib.VmbFrameAnnounce
vmb_frame_announce.restype = c_int32
vmb_frame_announce.argtypes = (c_void_p,
POINTER(VmbFrame),
c_uint32)
vmb_frame_revoke = _vimba_lib.VmbFrameRevoke
vmb_frame_revoke.restype = c_int32
vmb_frame_revoke.argtypes = (c_void_p,
POINTER(VmbFrame))
vmb_frame_revoke_all = _vimba_lib.VmbFrameRevokeAll
vmb_frame_revoke_all.restype = c_int32
vmb_frame_revoke_all.argtypes = (c_void_p,)
vmb_capture_start = _vimba_lib.VmbCaptureStart
vmb_capture_start.restype = c_int32
vmb_capture_start.argtypes = (c_void_p,)
vmb_capture_end = _vimba_lib.VmbCaptureEnd
vmb_capture_end.restype = c_int32
vmb_capture_end.argtypes = (c_void_p,)
vmb_capture_frame_queue = _vimba_lib.VmbCaptureFrameQueue
vmb_capture_frame_queue.restype = c_int32
vmb_capture_frame_queue.argtypes = (c_void_p,
POINTER(VmbFrame),
c_void_p)
vmb_capture_frame_wait = _vimba_lib.VmbCaptureFrameWait
vmb_capture_frame_wait.restype = c_int32
vmb_capture_frame_wait.argtypes = (c_void_p,
POINTER(VmbFrame),
c_uint32)
vmb_capture_queue_flush = _vimba_lib.VmbCaptureQueueFlush
vmb_capture_queue_flush.restype = c_int32
vmb_capture_queue_flush.argtypes = (c_void_p,)
vmb_interfaces_list = _vimba_lib.VmbInterfacesList
vmb_interfaces_list.restype = c_int32
vmb_interfaces_list.argtypes = (POINTER(VmbInterfaceInfo),
c_uint32,
POINTER(c_uint32),
c_uint32)
vmb_interface_open = _vimba_lib.VmbInterfaceOpen
vmb_interface_open.restype = c_int32
vmb_interface_open.argtypes = (c_char_p,
c_void_p)
vmb_interface_close = _vimba_lib.VmbInterfaceClose
vmb_interface_close.restype = c_int32
vmb_interface_close.argtypes = (c_void_p,)
vmb_ancillary_data_open = _vimba_lib.VmbAncillaryDataOpen
vmb_ancillary_data_open.restype = c_int32
vmb_ancillary_data_open.argtypes = (POINTER(VmbFrame), POINTER(c_void_p))
vmb_ancillary_data_close = _vimba_lib.VmbAncillaryDataClose
vmb_ancillary_data_close.restype = c_int32
vmb_ancillary_data_close.argtypes = (c_void_p, )
# todo VmbMemoryRead
# todo VmbMemoryWrite
vmb_registers_read = _vimba_lib.VmbRegistersRead
vmb_registers_read.restype = c_int32
vmb_registers_read.argtypes = (c_void_p,
c_uint32,
POINTER(c_uint64),
POINTER(c_uint64),
POINTER(c_uint32))
vmb_registers_write = _vimba_lib.VmbRegistersWrite
vmb_registers_write.restype = c_int32
vmb_registers_write.argtypes = (c_void_p,
c_uint32,
POINTER(c_uint64),
POINTER(c_uint64),
POINTER(c_uint32))
vmb_camera_settings_load = _vimba_lib.VmbCameraSettingsLoad
vmb_camera_settings_load.restype = c_int32
vmb_camera_settings_load.argtypes = (c_void_p,
c_char_p,
POINTER(VimbaFeaturePersistSettings),
c_uint32)
vmb_camera_settings_save = _vimba_lib.VmbCameraSettingsSave
vmb_camera_settings_save.restype = c_int32
vmb_camera_settings_save.argtypes = (c_void_p,
c_char_p,
POINTER(VimbaFeaturePersistSettings),
c_uint32)
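# Minimal hedged usage sketch of the raw bindings above (not part of the
# original module; return values are Vimba error codes, 0 meaning success):
#   if vmb_startup() == 0:
#       version = VmbVersionInfo()
#       vmb_version_query(byref(version), sizeof(version))
#       vmb_shutdown()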
| morefigs/pymba | pymba/vimba_c.py | Python | mit | 17,849 | 0.001345 |
"""Contains tests for oweb.views.updates.item_update"""
# Python imports
from unittest import skip
# Django imports
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.contrib.auth.models import User
# app imports
from oweb.tests import OWebViewTests
from oweb.models.account import Account
from oweb.models.research import Research
from oweb.models.ship import Ship
from oweb.models.planet import Planet, Moon
from oweb.models.building import Building
from oweb.models.defense import Defense
@override_settings(AUTH_USER_MODEL='auth.User')
class OWebViewsItemUpdateTests(OWebViewTests):
def test_login_required(self):
"""Unauthenticated users should be redirected to oweb:app_login"""
r = self.client.get(reverse('oweb:item_update'))
self.assertRedirects(r,
reverse('oweb:app_login'),
status_code=302,
target_status_code=200)
def test_account_owner(self):
"""Can somebody update an item he doesn't posess?"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
res_pre = Research.objects.filter(account=acc).first()
self.client.login(username='test02', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'research',
'item_id': res_pre.id,
'item_level': res_pre.level + 1 },
HTTP_REFERER=reverse('oweb:account_research',
args=[acc.id]))
self.assertEqual(r.status_code, 403)
self.assertTemplateUsed(r, 'oweb/403.html')
def test_no_post(self):
"""What if no POST data is supplied?"""
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'))
self.assertEqual(r.status_code, 500)
self.assertTemplateUsed(r, 'oweb/500.html')
def test_research_update(self):
"""Does ``item_update()`` correctly update researches?
Basically the Django ORM can be trusted, but since there is some logic
involved in determining the correct field to update, this test is
included.
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
res_pre = Research.objects.filter(account=acc).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'research',
'item_id': res_pre.id,
'item_level': res_pre.level + 1 },
HTTP_REFERER=reverse('oweb:account_research',
args=[acc.id]))
self.assertRedirects(r,
reverse('oweb:account_research', args=[acc.id]),
status_code=302,
target_status_code=200)
res_post = Research.objects.get(pk=res_pre.pk)
self.assertEqual(res_pre.level + 1, res_post.level)
def test_ship_update(self):
"""Does ``item_update()`` correctly update ships?
Basically the Django ORM can be trusted, but since there is some logic
involved in determining the correct field to update, this test is
included.
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
ship_pre = Ship.objects.filter(account=acc).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'ship',
'item_id': ship_pre.id,
'item_level': ship_pre.count + 1338 },
HTTP_REFERER=reverse('oweb:account_ships',
args=[acc.id]))
self.assertRedirects(r,
reverse('oweb:account_ships', args=[acc.id]),
status_code=302,
target_status_code=200)
ship_post = Ship.objects.get(pk=ship_pre.pk)
self.assertEqual(ship_pre.count + 1338, ship_post.count)
def test_building_update(self):
"""Does ``item_update()`` correctly update buildings?
Basically the Django ORM can be trusted, but since there is some logic
involved in determining the correct field to update, this test is
included.
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
p = Planet.objects.filter(account=acc).first()
b_pre = Building.objects.filter(astro_object=p).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'building',
'item_id': b_pre.id,
'item_level': b_pre.level - 1 },
HTTP_REFERER=reverse('oweb:planet_buildings',
args=[p.id]))
self.assertRedirects(r,
reverse('oweb:planet_buildings', args=[p.id]),
status_code=302,
target_status_code=200)
b_post = Building.objects.get(pk=b_pre.pk)
self.assertEqual(b_pre.level - 1, b_post.level)
def test_moon_building_update(self):
"""Does ``item_update()`` correctly update moon buildings?
Basically the Django ORM can be trusted, but since there is some logic
involved in determining the correct field to update, this test is
included.
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
p = Planet.objects.filter(account=acc).values_list('id', flat=True)
m = Moon.objects.filter(planet__in=p).first()
b_pre = Building.objects.filter(astro_object=m).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'moon_building',
'item_id': b_pre.id,
'item_level': b_pre.level + 2 },
HTTP_REFERER=reverse('oweb:moon_buildings',
args=[m.id]))
self.assertRedirects(r,
reverse('oweb:moon_buildings', args=[m.id]),
status_code=302,
target_status_code=200)
b_post = Building.objects.get(pk=b_pre.pk)
self.assertEqual(b_pre.level + 2, b_post.level)
def test_defense_update(self):
"""Does ``item_update()`` correctly update defense devices?
Basically the Django ORM can be trusted, but since there is some logic
involved in determining the correct field to update, this test is
included.
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
p = Planet.objects.filter(account=acc).first()
d_pre = Defense.objects.filter(astro_object=p).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'defense',
'item_id': d_pre.id,
'item_level': d_pre.count - 1 },
HTTP_REFERER=reverse('oweb:planet_defense',
args=[p.id]))
self.assertRedirects(r,
reverse('oweb:planet_defense', args=[p.id]),
status_code=302,
target_status_code=200)
d_post = Defense.objects.get(pk=d_pre.pk)
self.assertEqual(d_pre.count - 1, d_post.count)
def test_moon_defense_update(self):
"""Does ``item_update()`` correctly update moon defense devices?
Basically the Django ORM can be trusted, but since there is some logic
involved in determining the correct field to update, this test is
included.
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
p = Planet.objects.filter(account=acc).values_list('id', flat=True)
m = Moon.objects.filter(planet__in=p).first()
d_pre = Defense.objects.filter(astro_object=m).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'moon_defense',
'item_id': d_pre.id,
'item_level': d_pre.count - 10000 },
HTTP_REFERER=reverse('oweb:moon_defense',
args=[m.id]))
self.assertRedirects(r,
reverse('oweb:moon_defense', args=[m.id]),
status_code=302,
target_status_code=200)
d_post = Defense.objects.get(pk=d_pre.pk)
self.assertEqual(0, d_post.count)
def test_unknown_item_type(self):
"""Does ``item_update()`` correctly handle unknown item_types?"""
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={
'item_type': 'foobar',
'item_id': 1,
'item_level': 1
})
self.assertEqual(r.status_code, 500)
self.assertTemplateUsed(r, 'oweb/500.html')
| Mischback/django-oweb | oweb/tests/views/item_update.py | Python | mit | 10,246 | 0.001952 |
"""Emoji config functions"""
import json
import os
import re
from logging import getLogger
from card_py_bot import BASEDIR
__log__ = getLogger(__name__)
# Path where the emoji_config.json will be stored
EMOJI_CONFIG_PATH = os.path.join(BASEDIR, "emoji_config.json")
# Dictionary that is keyed by the Discord short emoji id and the web id
# of each emoji
MANA_ID_DICT = {
":15m:": "15",
":13m:": "13",
":wbm:": "White or Black",
":Energy:": "Energy",
":10m:": "10",
":7m:": "7",
":Untap:": "Untap",
":brm:": "Black or Red",
":bpm:": "Phyrexian Black",
":rgm:": "Red or Green",
":9m:": "9",
":8m:": "8",
":1m:": "1",
":gum:": "Green or Blue",
":2wm:": "Two or White",
":wpm:": "Phyrexian White",
":4m:": "4",
":12m:": "12",
":rm:": "Red",
":bm:": "Black",
":wum:": "White or Blue",
":rwm:": "Red or White",
":2bm:": "Two or Blue",
":gpm:": "Phyrexian Green",
":gm:": "Green",
":14m:": "14",
":bgm:": "Black or Green",
":3m:": "3",
":5m:": "5",
":Tap:": "Tap",
":1000000m:": "1000000",
":upm:": "Phyrexian Blue",
":2gm:": "Two or Green",
":rpm:": "Phyrexian Red",
":2m:": "2", ":6m:": "6",
":2rm:": "Two or Red",
":gwm:": "Green or White",
":wm:": "White",
":um:": "Blue",
":16m:": "16",
":urm:": "Blue or Red",
":ubm:": "Blue or Black",
":11m:": "11"
}
def get_emoji_config_string() -> str:
"""Return a string of all the mana ids (in order) for config setup
in discord"""
config_string = "?save_setup\n"
for short_emoji_id in MANA_ID_DICT:
config_string += ("\\\\{}\n".format(short_emoji_id))
return config_string
EMOJI_CONFIG_STRING = get_emoji_config_string()
def create_config_json() -> dict:
"""Create and save a blank default config json also return the dict that
created the json"""
emoji_config = dict()
for short_emoji_id in MANA_ID_DICT:
emoji_config[short_emoji_id] = {
"web_id": MANA_ID_DICT[short_emoji_id],
"discord_raw_id": None
}
with open(EMOJI_CONFIG_PATH, "w") as file:
json.dump(emoji_config, file, indent=4)
return emoji_config
def load_mana_dict() -> dict:
"""Load the emoji config into a mana dict"""
try:
with open(EMOJI_CONFIG_PATH, "r") as file:
emoji_config = json.load(file)
except FileNotFoundError:
emoji_config = create_config_json()
mana_dict = dict()
for short_emoji_id in emoji_config:
emoji = emoji_config[short_emoji_id]
if not emoji["discord_raw_id"]:
mana_dict[emoji["web_id"]] = "ERROR: NO ID Configured " \
"for {}".format(emoji["web_id"])
else:
mana_dict[emoji["web_id"]] = emoji["discord_raw_id"]
__log__.debug("WOTC Magic mana to Discord emoji "
"dictionary constructed: {}".format(mana_dict))
return mana_dict
MANA_DICT = load_mana_dict()
def parse_raw_emoji_id(raw_emoji_id: str) -> str:
"""Parse a raw emoji id to short emoji id"""
m = re.search(":[A-Za-z0-9]*:", raw_emoji_id)
return m.group(0)
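# Illustrative parse (raw Discord emoji ids look like '<:name:digits>'; the
# id below is invented):
#   parse_raw_emoji_id('<:wm:123456789012345678>') == ':wm:'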
def save_emoji_config(raw_emoji_ids):
"""Save the emoji mana config"""
try:
with open(EMOJI_CONFIG_PATH, "r") as file:
emoji_config = json.load(file)
except FileNotFoundError:
# if no mana config file is found initialize a new one
emoji_config = create_config_json()
for raw_emoji_id in raw_emoji_ids:
short_emoji_id = parse_raw_emoji_id(raw_emoji_id)
if short_emoji_id in MANA_ID_DICT:
emoji_config[short_emoji_id] = {
"web_id": MANA_ID_DICT[short_emoji_id],
"discord_raw_id": raw_emoji_id
}
else:
raise KeyError("Short Discord emoji id is unknown: "
"{}".format(short_emoji_id))
with open(EMOJI_CONFIG_PATH, "w") as file:
json.dump(emoji_config, file, indent=4)
# update MANA_DICT global
global MANA_DICT
MANA_DICT = load_mana_dict()
| nklapste/card-py-bot | card_py_bot/config.py | Python | mit | 4,162 | 0.00024 |
from __future__ import unicode_literals
import json
import requests
import six
from datetime import datetime
from six.moves.urllib.parse import parse_qs
from xml.etree.ElementTree import Element, SubElement, tostring
from xml.parsers.expat import ExpatError
from .auth import OAuth2Credentials
from .exceptions import (
XeroBadRequest,
XeroExceptionUnknown,
XeroForbidden,
XeroInternalError,
XeroNotAvailable,
XeroNotFound,
XeroNotImplemented,
XeroRateLimitExceeded,
XeroTenantIdNotSet,
XeroUnauthorized,
)
from .utils import isplural, json_load_object_hook, singular
class BaseManager(object):
DECORATED_METHODS = (
"get",
"save",
"filter",
"all",
"put",
"delete",
"get_history",
"put_history",
"get_attachments",
"get_attachment_data",
"put_attachment_data",
)
OBJECT_DECORATED_METHODS = {
"Invoices": ["email", "online_invoice"],
}
DATETIME_FIELDS = (
"UpdatedDateUTC",
"Updated",
"FullyPaidOnDate",
"DateTimeUTC",
"CreatedDateUTC",
"JournalDate",
)
DATE_FIELDS = (
"DueDate",
"Date",
"PaymentDate",
"StartDate",
"EndDate",
"PeriodLockDate",
"DateOfBirth",
"OpeningBalanceDate",
"PaymentDueDate",
"ReportingDate",
"DeliveryDate",
"ExpectedArrivalDate",
)
BOOLEAN_FIELDS = (
"IsSupplier",
"IsCustomer",
"IsDemoCompany",
"PaysTax",
"IsAuthorisedToApproveTimesheets",
"IsAuthorisedToApproveLeave",
"HasHELPDebt",
"AustralianResidentForTaxPurposes",
"TaxFreeThresholdClaimed",
"HasSFSSDebt",
"EligibleToReceiveLeaveLoading",
"IsExemptFromTax",
"IsExemptFromSuper",
"SentToContact",
"IsSubscriber",
"HasAttachments",
"ShowOnCashBasisReports",
"IncludeInEmails",
"SentToContact",
"CanApplyToRevenue",
"CanApplyToLiabilities",
"CanApplyToExpenses",
"CanApplyToEquity",
"CanApplyToAssets",
"IsReconciled",
"EnablePaymentsToAccount",
"ShowInExpenseClaims",
"DiscountEnteredAsPercent",
"IsPurchased",
"IsSold",
"IsTrackedAsInventory",
)
DECIMAL_FIELDS = (
"Hours",
"NumberOfUnit",
)
INTEGER_FIELDS = (
"FinancialYearEndDay",
"FinancialYearEndMonth",
)
NO_SEND_FIELDS = (
"UpdatedDateUTC",
"HasValidationErrors",
"IsDiscounted",
"DateString",
"HasErrors",
"DueDateString",
"HasAccount",
)
OPERATOR_MAPPINGS = {
"gt": ">",
"lt": "<",
"lte": "<=",
"gte": ">=",
"ne": "!=",
}
def __init__(self):
pass
def dict_to_xml(self, root_elm, data):
for key in data.keys():
# Xero will complain if we send back these fields.
if key in self.NO_SEND_FIELDS:
continue
sub_data = data[key]
elm = SubElement(root_elm, key)
# Key references a dict. Unroll the dict
# as it's own XML node with subnodes
if isinstance(sub_data, dict):
self.dict_to_xml(elm, sub_data)
# Key references a list/tuple
elif isinstance(sub_data, list) or isinstance(sub_data, tuple):
# key name is a plural. This means each item
# in the list needs to be wrapped in an XML
# node that is a singular version of the list name.
if isplural(key):
for d in sub_data:
self.dict_to_xml(SubElement(elm, singular(key)), d)
# key name isn't a plural. Just insert the content
# as an XML node with subnodes
else:
for d in sub_data:
self.dict_to_xml(elm, d)
# Normal element - just insert the data.
else:
if key in self.BOOLEAN_FIELDS:
val = "true" if sub_data else "false"
elif key in self.DATE_FIELDS:
val = sub_data.strftime("%Y-%m-%dT%H:%M:%S")
else:
val = six.text_type(sub_data)
elm.text = val
return root_elm
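# Hedged illustration of dict_to_xml (payload invented; isplural()/singular()
# come from .utils): {'Name': 'Acme', 'Phones': [{'PhoneType': 'MOBILE'}]}
# rendered under Element('Contact') yields
# <Contact><Name>Acme</Name><Phones><Phone><PhoneType>MOBILE</PhoneType></Phone></Phones></Contact>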
def _prepare_data_for_save(self, data):
if isinstance(data, list) or isinstance(data, tuple):
root_elm = Element(self.name)
for d in data:
sub_elm = SubElement(root_elm, self.singular)
self.dict_to_xml(sub_elm, d)
else:
root_elm = self.dict_to_xml(Element(self.singular), data)
# In python3 this seems to return a bytestring
return six.u(tostring(root_elm))
def _parse_api_response(self, response, resource_name):
data = json.loads(response.text, object_hook=json_load_object_hook)
assert data["Status"] == "OK", (
"Expected the API to say OK but received %s" % data["Status"]
)
try:
return data[resource_name]
except KeyError:
return data
def _get_data(self, func):
""" This is the decorator for our DECORATED_METHODS.
Each of the decorated methods must return:
uri, params, method, body, headers, singleobject
"""
def wrapper(*args, **kwargs):
timeout = kwargs.pop("timeout", None)
uri, params, method, body, headers, singleobject = func(*args, **kwargs)
if headers is None:
headers = {}
headers["Content-Type"] = "application/xml"
if isinstance(self.credentials, OAuth2Credentials):
if self.credentials.tenant_id:
headers["Xero-tenant-id"] = self.credentials.tenant_id
else:
raise XeroTenantIdNotSet
# Use the JSON API by default, but remember we might request a PDF (application/pdf)
# so don't force the Accept header.
if "Accept" not in headers:
headers["Accept"] = "application/json"
# Set a user-agent so Xero knows the traffic is coming from pyxero
# or individual user/partner
headers["User-Agent"] = self.user_agent
response = getattr(requests, method)(
uri,
data=body,
headers=headers,
auth=self.credentials.oauth,
params=params,
timeout=timeout,
)
if response.status_code == 200:
# If we haven't got XML or JSON, assume we're being returned a
# binary file
if not response.headers["content-type"].startswith("application/json"):
return response.content
return self._parse_api_response(response, self.name)
elif response.status_code == 204:
return response.content
elif response.status_code == 400:
try:
raise XeroBadRequest(response)
except (ValueError, ExpatError):
raise XeroExceptionUnknown(
response, msg="Unable to parse Xero API response"
)
elif response.status_code == 401:
raise XeroUnauthorized(response)
elif response.status_code == 403:
raise XeroForbidden(response)
elif response.status_code == 404:
raise XeroNotFound(response)
elif response.status_code == 429:
limit_reason = response.headers.get("X-Rate-Limit-Problem") or "unknown"
payload = {"oauth_problem": ["rate limit exceeded: " + limit_reason],
"oauth_problem_advice": ["please wait before retrying the xero api, "
"the limit exceeded is: " + limit_reason]}
raise XeroRateLimitExceeded(response, payload)
elif response.status_code == 500:
raise XeroInternalError(response)
elif response.status_code == 501:
raise XeroNotImplemented(response)
elif response.status_code == 503:
# Two 503 responses are possible. Rate limit errors
# return encoded content; offline errors don't.
# If you parse the response text and there's nothing
# encoded, it must be a not-available error.
payload = parse_qs(response.text)
if payload:
raise XeroRateLimitExceeded(response, payload)
else:
raise XeroNotAvailable(response)
else:
raise XeroExceptionUnknown(response)
return wrapper
def _get(self, id, headers=None, params=None):
uri = "/".join([self.base_url, self.name, id])
uri_params = self.extra_params.copy()
uri_params.update(params if params else {})
return uri, uri_params, "get", None, headers, True
def _get_history(self, id):
uri = "/".join([self.base_url, self.name, id, "history"]) + "/"
return uri, {}, "get", None, None, False
def _get_attachments(self, id):
"""Retrieve a list of attachments associated with this Xero object."""
uri = "/".join([self.base_url, self.name, id, "Attachments"]) + "/"
return uri, {}, "get", None, None, False
def _get_attachment_data(self, id, filename):
"""
Retrieve the contents of a specific attachment (identified by filename).
"""
uri = "/".join([self.base_url, self.name, id, "Attachments", filename])
return uri, {}, "get", None, None, False
def get_attachment(self, id, filename, file):
"""
Retrieve the contents of a specific attachment (identified by filename).
Writes data to file object, returns length of data written.
"""
data = self.get_attachment_data(id, filename)
file.write(data)
return len(data)
def _email(self, id):
uri = "/".join([self.base_url, self.name, id, "Email"])
return uri, {}, "post", None, None, True
def _online_invoice(self, id):
uri = "/".join([self.base_url, self.name, id, "OnlineInvoice"])
return uri, {}, "get", None, None, True
def save_or_put(self, data, method="post", headers=None, summarize_errors=True):
uri = "/".join([self.base_url, self.name])
body = self._prepare_data_for_save(data)
params = self.extra_params.copy()
if not summarize_errors:
params["summarizeErrors"] = "false"
return uri, params, method, body, headers, False
def _save(self, data):
return self.save_or_put(data, method="post")
def _put(self, data, summarize_errors=True):
return self.save_or_put(data, method="put", summarize_errors=summarize_errors)
def _delete(self, id):
uri = "/".join([self.base_url, self.name, id])
return uri, {}, "delete", None, None, False
def _put_history_data(self, id, details):
"""Add a history note to the Xero object."""
uri = "/".join([self.base_url, self.name, id, "history"])
details_data = {"Details": details}
root_elm = Element("HistoryRecord")
self.dict_to_xml(root_elm, details_data)
data = six.u(tostring(root_elm))
return uri, {}, "put", data, None, False
def _put_history(self, id, details):
"""Upload a history note to the Xero object."""
return self._put_history_data(id, details)
def _put_attachment_data(
self, id, filename, data, content_type, include_online=False
):
"""Upload an attachment to the Xero object."""
uri = "/".join([self.base_url, self.name, id, "Attachments", filename])
params = {"IncludeOnline": "true"} if include_online else {}
headers = {"Content-Type": content_type, "Content-Length": str(len(data))}
return uri, params, "put", data, headers, False
def put_attachment(self, id, filename, file, content_type, include_online=False):
"""Upload an attachment to the Xero object (from file object)."""
return self.put_attachment_data(
id, filename, file.read(), content_type, include_online=include_online
)
def prepare_filtering_date(self, val):
if isinstance(val, datetime):
val = val.strftime("%a, %d %b %Y %H:%M:%S GMT")
else:
val = '"%s"' % val
return {"If-Modified-Since": val}
def _filter(self, **kwargs):
params = self.extra_params.copy()
headers = None
uri = "/".join([self.base_url, self.name])
if kwargs:
if "since" in kwargs:
val = kwargs["since"]
headers = self.prepare_filtering_date(val)
del kwargs["since"]
# Accept IDs parameter for Invoices and Contacts endpoints
if "IDs" in kwargs:
params["IDs"] = ",".join(kwargs["IDs"])
del kwargs["IDs"]
def get_filter_params(key, value):
last_key = key.split("_")[-1]
if last_key.endswith("ID"):
return 'Guid("%s")' % six.text_type(value)
if key in self.BOOLEAN_FIELDS:
return "true" if value else "false"
elif key in self.DATE_FIELDS:
return "DateTime(%s,%s,%s)" % (value.year, value.month, value.day)
elif key in self.DATETIME_FIELDS:
return value.isoformat()
else:
return '"%s"' % six.text_type(value)
def generate_param(key, value):
parts = key.split("__")
field = key.replace("_", ".")
fmt = "%s==%s"
if len(parts) == 2:
# support filters:
# Name__Contains=John becomes Name.Contains("John")
if parts[1] in ["contains", "startswith", "endswith"]:
field = parts[0]
fmt = "".join(["%s.", parts[1], "(%s)"])
elif parts[1] in ["tolower", "toupper"]:
field = parts[0]
fmt = "".join(["%s.", parts[1], "()==%s"])
elif parts[1] in self.OPERATOR_MAPPINGS:
field = parts[0]
key = field
fmt = "%s" + self.OPERATOR_MAPPINGS[parts[1]] + "%s"
elif parts[1] in ["isnull"]:
sign = "=" if value else "!"
return "%s%s=null" % (parts[0], sign)
field = field.replace("_", ".")
return fmt % (field, get_filter_params(key, value))
# Move any known parameter names to the query string
KNOWN_PARAMETERS = ["order", "offset", "page", "includeArchived"]
for param in KNOWN_PARAMETERS:
if param in kwargs:
params[param] = kwargs.pop(param)
filter_params = []
if "raw" in kwargs:
raw = kwargs.pop("raw")
filter_params.append(raw)
# Treat any remaining arguments as filter predicates
# Xero will break if you search without a check for null in the first position:
# http://developer.xero.com/documentation/getting-started/http-requests-and-responses/#title3
sortedkwargs = sorted(
six.iteritems(kwargs), key=lambda item: -1 if "isnull" in item[0] else 0
)
for key, value in sortedkwargs:
filter_params.append(generate_param(key, value))
if filter_params:
params["where"] = "&&".join(filter_params)
return uri, params, "get", None, headers, False
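# Hedged example of the predicate grammar above (field and value invented):
#   _filter(Name__contains='John') sets params['where'] to
#   'Name.contains("John")'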
def _all(self):
uri = "/".join([self.base_url, self.name])
return uri, {}, "get", None, None, False
| freakboy3742/pyxero | xero/basemanager.py | Python | bsd-3-clause | 16,413 | 0.001036 |
def pbj_while(slices):
output = ''
while (slices > 0):
slices = slices - 2
if slices >= 2:
output += 'I am making a sandwich! I have bread for {0} more sandwiches.\n'.format(slices / 2)
else:
# 'break' prevents odd slice counts from appending the closing line twice
output += 'I am making a sandwich! But, this is my last sandwich.'
break
return output
print pbj_while(int(raw_input('How many slices of bread do you have? ')))
| hannahkwarren/CLaG-Sp2016 | code-exercises-etc/section_xx_-misc/4-2.py | Python | mit | 419 | 0.004773 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Troy Melhase
# Distributed under the terms of the GNU General Public License v2
# Author: Troy Melhase <troy@gci.net>
import sys
from PyQt4.QtCore import QVariant
from PyQt4.QtGui import (QApplication, QFrame, QIcon,
QStandardItem, QStandardItemModel)
from profit.lib import BasicHandler
from profit.lib import Signals, tickerIdRole
from profit.lib.widgets.ui_breadfan_train import Ui_BreadFanTrainTree
class SessionTreeItem(QStandardItem):
""" Session tree item.
"""
iconNameMap = {
'account':'identity',
'connection':'server',
'messages':'view_text',
'orders':'klipper_dock',
'portfolio':'bookcase',
'strategy':'services',
'tickers':'view_detailed',
}
def __init__(self, text):
""" Constructor.
@param text value for item display
"""
QStandardItem.__init__(self, text)
self.setEditable(False)
self.setIcon(self.lookupIcon(text))
hint = self.sizeHint()
hint.setHeight(20)
self.setSizeHint(hint)
def lookupIcon(self, key):
""" Locates icon for given key.
@param key item text
@return QIcon instance
"""
try:
name = self.iconNameMap[key]
icon = QIcon(':images/icons/%s.png' % name)
except (KeyError, ):
style = QApplication.style()
icon = style.standardIcon(style.SP_DirIcon)
return icon
class SessionTreeTickerItem(SessionTreeItem):
""" Specalized session tree item for ticker symbols.
"""
def lookupIcon(self, key):
""" Locates icon for given key.
@param key ticker symbol
@return QIcon instance
"""
return QIcon(':images/tickers/%s.png' % key.lower())
def setTickerId(self, tickerId):
""" Sets item data for ticker id.
@param tickerId id for ticker as integer
@return None
"""
self.setData(QVariant(tickerId), tickerIdRole)
class SessionTreeModel(QStandardItemModel):
def __init__(self, session, parent=None):
""" Constructor.
@param session Session instance
@param parent ancestor object
"""
QStandardItemModel.__init__(self)
self.session = session
root = self.invisibleRootItem()
for key, values in session.items():
item = SessionTreeItem(key)
root.appendRow(item)
for value in values:
if key == 'tickers':
subitem = SessionTreeTickerItem(value)
subitem.setTickerId(values[value])
else:
subitem = SessionTreeItem(value)
item.appendRow(subitem)
class BreadFanTrainTree(QFrame, Ui_BreadFanTrainTree):
""" Tree view of a Session object.
"""
def __init__(self, parent=None):
""" Constructor.
@param parent ancestor of this widget
"""
QFrame.__init__(self, parent)
self.setupUi(self)
connect = self.connect
def setupBasic(self, sender, newSignal):
self.connect(sender, newSignal, self.on_newBreadNet)
def on_newBreadNet(self, net):
print '### signaled network', net
## fill and trainAlgorithm combo
algorNames = [name for name in net.train_meta]
algorCombo = self.trainAlgorithm
algorCombo.clear()
algorCombo.addItems(algorNames)
if net.trained in algorNames:
algorCombo.setCurrentIndex(algorNames.index(net.trained))
## fill params
## don't touch fill training data sources
## set training progress bar value
| InfiniteAlpha/profitpy | profit/neuralnetdesigner/train_test.py | Python | gpl-2.0 | 3,757 | 0.003194 |
"""
Tests of neo.io.igorproio
"""
import unittest
try:
import igor
HAVE_IGOR = True
except ImportError:
HAVE_IGOR = False
from neo.io.igorproio import IgorIO
from neo.test.iotest.common_io_test import BaseTestIO
@unittest.skipUnless(HAVE_IGOR, "requires igor")
class TestIgorIO(BaseTestIO, unittest.TestCase):
ioclass = IgorIO
entities_to_download = [
'igor'
]
entities_to_test = [
'igor/mac-version2.ibw',
'igor/win-version2.ibw'
]
if __name__ == "__main__":
unittest.main()
| samuelgarcia/python-neo | neo/test/iotest/test_igorio.py | Python | bsd-3-clause | 543 | 0 |
# "Copyright (c) 2000-2003 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement
# is hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY
# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
#
# @author Kamin Whitehouse
#
import sys, math, Queue
import pytos.tools.Drain as Drain
import pytos.Comm as Comm
from struct import *
class Straw( object ) :
def __init__( self , app ) :
self.app=app
self.linkLatency = .1
if "StrawM" not in app._moduleNames:
raise Exception("The StrawM module is not compiled into the application.")
def read(self, nodeID, strawID, start, size):
data=[] #store the data in here
response=None
while response==None:
print "pinging node %d" % nodeID
response = self.app.StrawM.msgDataSize.peek(address=nodeID, timeout=3) #find num bytes/msg
dataSize = response[0].value['value'].value
numHops = self.app.enums.DRAIN_MAX_TTL - response[0].getParentMsg(self.app.enums.AM_DRAINMSG).ttl
self.app.StrawM.sendPeriod.poke(self.linkLatency * numHops * 1000, address=nodeID, responseDesired=False)
msgs = [0 for i in range(int(math.ceil(size/float(dataSize))))] #keep track of straw msgs in here
msgQueue = Comm.MessageQueue(10)
Drain.getDrainObject(self.app)[0].register(self.app.msgs.StrawMsg, msgQueue)
print "Sucking %d bytes from node %d through Straw %d:" % (size, nodeID, strawID)
while msgs.count(1) < len(msgs):
subStart = msgs.index(0) * dataSize
try:
subSize = min(size, (msgs.index(1, subStart)*dataSize - subStart) )
except:
subSize = size - subStart
response = []
#while response == []:
self.app.StrawM.read(strawID, subStart, subSize, address=nodeID)
sys.stdout.write("%d-%d: " % (subStart, subStart+subSize))
numPrintedChars=0
while True :
try:
(addr, msg) = msgQueue.get(block=True, timeout=self.linkLatency * numHops * 4)
if msg.parentMsg.source == nodeID :#and msgs[msg.startIndex//dataSize] == 0:
msgs[msg.startIndex//dataSize] = 1
data[msg.startIndex:msg.startIndex+dataSize-1] = msg.data[:]
strg = ""
for i in range(numPrintedChars) :
strg += "\b"
strg += "%s/%s" % (msgs.count(1),len(msgs))
sys.stdout.write(strg)
sys.stdout.flush()
numPrintedChars = len(strg)-numPrintedChars
except Queue.Empty:
print ""
break
#now, pack the data so that it can be easily unpacked
for i in range(len(data)):
data[i] = pack('B',data[i])
return ''.join(data[0:size])
| ekiwi/tinyos-1.x | contrib/ucb/tools/python/pytos/tools/Straw.py | Python | bsd-3-clause | 3,841 | 0.012757 |
from webhelpers import *
from datetime import datetime
def time_ago( x ):
return date.distance_of_time_in_words( x, datetime.utcnow() )
def iff( a, b, c ):
if a:
return b
else:
return c
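# Illustrative: iff(True, 'yes', 'no') returns 'yes'; iff(0, 'yes', 'no')
# returns 'no'.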
| dbcls/dbcls-galaxy | lib/galaxy/web/framework/helpers/__init__.py | Python | mit | 220 | 0.045455 |
# coding: utf8
Paises=(
(4, 'AF', 'AFG', 93, 'Afganistán', 'Asia', '', 'AFN', 'Afgani afgano'),
(8, 'AL', 'ALB', 355, 'Albania', 'Europa', '', 'ALL', 'Lek albanés'),
(10, 'AQ', 'ATA', 672, 'Antártida', 'Antártida', '', '', ''),
(12, 'DZ', 'DZA', 213, 'Argelia', 'África', '', 'DZD', 'Dinar algerino'),
(16, 'AS', 'ASM', 1684, 'Samoa Americana', 'Oceanía', '', '', ''),
(20, 'AD', 'AND', 376, 'Andorra', 'Europa', '', 'EUR', 'Euro'),
(24, 'AO', 'AGO', 244, 'Angola', 'África', '', 'AOA', 'Kwanza angoleño'),
(28, 'AG', 'ATG', 1268, 'Antigua y Barbuda', 'América', 'El Caribe', '', ''),
(31, 'AZ', 'AZE', 994, 'Azerbaiyán', 'Asia', '', 'AZM', 'Manat azerbaiyano'),
(32, 'AR', 'ARG', 54, 'Argentina', 'América', 'América del Sur', 'ARS', 'Peso argentino'),
(36, 'AU', 'AUS', 61, 'Australia', 'Oceanía', '', 'AUD', 'Dólar australiano'),
(40, 'AT', 'AUT', 43, 'Austria', 'Europa', '', 'EUR', 'Euro'),
(44, 'BS', 'BHS', 1242, 'Bahamas', 'América', 'El Caribe', 'BSD', 'Dólar bahameño'),
(48, 'BH', 'BHR', 973, 'Bahréin', 'Asia', '', 'BHD', 'Dinar bahreiní'),
(50, 'BD', 'BGD', 880, 'Bangladesh', 'Asia', '', 'BDT', 'Taka de Bangladesh'),
(51, 'AM', 'ARM', 374, 'Armenia', 'Asia', '', 'AMD', 'Dram armenio'),
(52, 'BB', 'BRB', 1246, 'Barbados', 'América', 'El Caribe', 'BBD', 'Dólar de Barbados'),
(56, 'BE', 'BEL', 32, 'Bélgica', 'Europa', '', 'EUR', 'Euro'),
(60, 'BM', 'BMU', 1441, 'Bermudas', 'América', 'El Caribe', 'BMD', 'Dólar de Bermuda'),
(64, 'BT', 'BTN', 975, 'Bhután', 'Asia', '', 'BTN', 'Ngultrum de Bután'),
(68, 'BO', 'BOL', 591, 'Bolivia', 'América', 'América del Sur', 'BOB', 'Boliviano'),
(70, 'BA', 'BIH', 387, 'Bosnia y Herzegovina', 'Europa', '', 'BAM', 'Marco convertible de Bosnia-Herzegovina'),
(72, 'BW', 'BWA', 267, 'Botsuana', 'África', '', 'BWP', 'Pula de Botsuana'),
(74, 'BV', 'BVT', 0, 'Isla Bouvet', '', '', '', ''),
(76, 'BR', 'BRA', 55, 'Brasil', 'América', 'América del Sur', 'BRL', 'Real brasileño'),
(84, 'BZ', 'BLZ', 501, 'Belice', 'América', 'América Central', 'BZD', 'Dólar de Belice'),
(86, 'IO', 'IOT', 0, 'Territorio Británico del Océano Índico', '', '', '', ''),
(90, 'SB', 'SLB', 677, 'Islas Salomón', 'Oceanía', '', 'SBD', 'Dólar de las Islas Salomón'),
(92, 'VG', 'VGB', 1284, 'Islas Vírgenes Británicas', 'América', 'El Caribe', '', ''),
(96, 'BN', 'BRN', 673, 'Brunéi', 'Asia', '', 'BND', 'Dólar de Brunéi'),
(100, 'BG', 'BGR', 359, 'Bulgaria', 'Europa', '', 'BGN', 'Lev búlgaro'),
(104, 'MM', 'MMR', 95, 'Myanmar', 'Asia', '', 'MMK', 'Kyat birmano'),
(108, 'BI', 'BDI', 257, 'Burundi', 'África', '', 'BIF', 'Franco burundés'),
(112, 'BY', 'BLR', 375, 'Bielorrusia', 'Europa', '', 'BYR', 'Rublo bielorruso'),
(116, 'KH', 'KHM', 855, 'Camboya', 'Asia', '', 'KHR', 'Riel camboyano'),
(120, 'CM', 'CMR', 237, 'Camerún', 'África', '', '', ''),
(124, 'CA', 'CAN', 1, 'Canadá', 'América', 'América del Norte', 'CAD', 'Dólar canadiense'),
(132, 'CV', 'CPV', 238, 'Cabo Verde', 'África', '', 'CVE', 'Escudo caboverdiano'),
(136, 'KY', 'CYM', 1345, 'Islas Caimán', 'América', 'El Caribe', 'KYD', 'Dólar caimano de Islas Caimán'),
(140, 'CF', 'CAF', 236, 'República Centroafricana', 'África', '', '', ''),
(144, 'LK', 'LKA', 94, 'Sri Lanka', 'Asia', '', 'LKR', 'Rupia de Sri Lanka'),
(148, 'TD', 'TCD', 235, 'Chad', 'África', '', '', ''),
(152, 'CL', 'CHL', 56, 'Chile', 'América', 'América del Sur', 'CLP', 'Peso chileno'),
(156, 'CN', 'CHN', 86, 'China', 'Asia', '', 'CNY', 'Yuan Renminbi de China'),
(158, 'TW', 'TWN', 886, 'Taiwán', 'Asia', '', 'TWD', 'Dólar taiwanés'),
(162, 'CX', 'CXR', 61, 'Isla de Navidad', 'Oceanía', '', '', ''),
(166, 'CC', 'CCK', 61, 'Islas Cocos', 'Oceanía', '', '', ''),
(170, 'CO', 'COL', 57, 'Colombia', 'América', 'América del Sur', 'COP', 'Peso colombiano'),
(174, 'KM', 'COM', 269, 'Comoras', 'África', '', 'KMF', 'Franco comoriano de Comoras'),
(175, 'YT', 'MYT', 262, 'Mayotte', 'África', '', '', ''),
(178, 'CG', 'COG', 242, 'Congo', 'África', '', '', ''),
(180, 'CD', 'COD', 243, 'República Democrática del Congo', 'África', '', 'CDF', 'Franco congoleño'),
(184, 'CK', 'COK', 682, 'Islas Cook', 'Oceanía', '', '', ''),
(188, 'CR', 'CRI', 506, 'Costa Rica', 'América', 'América Central', 'CRC', 'Colón costarricense'),
(191, 'HR', 'HRV', 385, 'Croacia', 'Europa', '', 'HRK', 'Kuna croata'),
(192, 'CU', 'CUB', 53, 'Cuba', 'América', 'El Caribe', 'CUP', 'Peso cubano'),
(196, 'CY', 'CYP', 357, 'Chipre', 'Europa', '', 'CYP', 'Libra chipriota'),
(203, 'CZ', 'CZE', 420, 'República Checa', 'Europa', '', 'CZK', 'Koruna checa'),
(204, 'BJ', 'BEN', 229, 'Benín', 'África', '', '', ''),
(208, 'DK', 'DNK', 45, 'Dinamarca', 'Europa', '', 'DKK', 'Corona danesa'),
(212, 'DM', 'DMA', 1767, 'Dominica', 'América', 'El Caribe', '', ''),
(214, 'DO', 'DOM', 1809, 'República Dominicana', 'América', 'El Caribe', 'DOP', 'Peso dominicano'),
(218, 'EC', 'ECU', 593, 'Ecuador', 'América', 'América del Sur', '', ''),
(222, 'SV', 'SLV', 503, 'El Salvador', 'América', 'América Central', 'SVC', 'Colón salvadoreño'),
(226, 'GQ', 'GNQ', 240, 'Guinea Ecuatorial', 'África', '', '', ''),
(231, 'ET', 'ETH', 251, 'Etiopía', 'África', '', 'ETB', 'Birr etíope'),
(232, 'ER', 'ERI', 291, 'Eritrea', 'África', '', 'ERN', 'Nakfa eritreo'),
(233, 'EE', 'EST', 372, 'Estonia', 'Europa', '', 'EEK', 'Corona estonia'),
(234, 'FO', 'FRO', 298, 'Islas Feroe', 'Europa', '', '', ''),
(238, 'FK', 'FLK', 500, 'Islas Malvinas', 'América', 'América del Sur', 'FKP', 'Libra malvinense'),
(239, 'GS', 'SGS', 0, 'Islas Georgias del Sur y Sandwich del Sur', 'América', 'América del Sur', '', ''),
(242, 'FJ', 'FJI', 679, 'Fiyi', 'Oceanía', '', 'FJD', 'Dólar fijiano'),
(246, 'FI', 'FIN', 358, 'Finlandia', 'Europa', '', 'EUR', 'Euro'),
(248, 'AX', 'ALA', 0, 'Islas Gland', 'Europa', '', '', ''),
(250, 'FR', 'FRA', 33, 'Francia', 'Europa', '', 'EUR', 'Euro'),
(254, 'GF', 'GUF', 0, 'Guayana Francesa', 'América', 'América del Sur', '', ''),
(258, 'PF', 'PYF', 689, 'Polinesia Francesa', 'Oceanía', '', '', ''),
(260, 'TF', 'ATF', 0, 'Territorios Australes Franceses', '', '', '', ''),
(262, 'DJ', 'DJI', 253, 'Yibuti', 'África', '', 'DJF', 'Franco yibutiano'),
(266, 'GA', 'GAB', 241, 'Gabón', 'África', '', '', ''),
(268, 'GE', 'GEO', 995, 'Georgia', 'Europa', '', 'GEL', 'Lari georgiano'),
(270, 'GM', 'GMB', 220, 'Gambia', 'África', '', 'GMD', 'Dalasi gambiano'),
(275, 'PS', 'PSE', 0, 'Palestina', 'Asia', '', '', ''),
(276, 'DE', 'DEU', 49, 'Alemania', 'Europa', '', 'EUR', 'Euro'),
(288, 'GH', 'GHA', 233, 'Ghana', 'África', '', 'GHC', 'Cedi ghanés'),
(292, 'GI', 'GIB', 350, 'Gibraltar', 'Europa', '', 'GIP', 'Libra de Gibraltar'),
(296, 'KI', 'KIR', 686, 'Kiribati', 'Oceanía', '', '', ''),
(300, 'GR', 'GRC', 30, 'Grecia', 'Europa', '', 'EUR', 'Euro'),
(304, 'GL', 'GRL', 299, 'Groenlandia', 'América', 'América del Norte', '', ''),
(308, 'GD', 'GRD', 1473, 'Granada', 'América', 'El Caribe', '', ''),
(312, 'GP', 'GLP', 0, 'Guadalupe', 'América', 'El Caribe', '', ''),
(316, 'GU', 'GUM', 1671, 'Guam', 'Oceanía', '', '', ''),
(320, 'GT', 'GTM', 502, 'Guatemala', 'América', 'América Central', 'GTQ', 'Quetzal guatemalteco'),
(324, 'GN', 'GIN', 224, 'Guinea', 'África', '', 'GNF', 'Franco guineano'),
(328, 'GY', 'GUY', 592, 'Guyana', 'América', 'América del Sur', 'GYD', 'Dólar guyanés'),
(332, 'HT', 'HTI', 509, 'Haití', 'América', 'El Caribe', 'HTG', 'Gourde haitiano'),
(334, 'HM', 'HMD', 0, 'Islas Heard y McDonald', 'Oceanía', '', '', ''),
(336, 'VA', 'VAT', 39, 'Ciudad del Vaticano', 'Europa', '', '', ''),
(340, 'HN', 'HND', 504, 'Honduras', 'América', 'América Central', 'HNL', 'Lempira hondureño'),
(344, 'HK', 'HKG', 852, 'Hong Kong', 'Asia', '', 'HKD', 'Dólar de Hong Kong'),
(348, 'HU', 'HUN', 36, 'Hungría', 'Europa', '', 'HUF', 'Forint húngaro'),
(352, 'IS', 'ISL', 354, 'Islandia', 'Europa', '', 'ISK', 'Króna islandesa'),
(356, 'IN', 'IND', 91, 'India', 'Asia', '', 'INR', 'Rupia india'),
(360, 'ID', 'IDN', 62, 'Indonesia', 'Asia', '', 'IDR', 'Rupiah indonesia'),
(364, 'IR', 'IRN', 98, 'Irán', 'Asia', '', 'IRR', 'Rial iraní'),
(368, 'IQ', 'IRQ', 964, 'Iraq', 'Asia', '', 'IQD', 'Dinar iraquí'),
(372, 'IE', 'IRL', 353, 'Irlanda', 'Europa', '', 'EUR', 'Euro'),
(376, 'IL', 'ISR', 972, 'Israel', 'Asia', '', 'ILS', 'Nuevo shéquel israelí'),
(380, 'IT', 'ITA', 39, 'Italia', 'Europa', '', 'EUR', 'Euro'),
(384, 'CI', 'CIV', 225, 'Costa de Marfil', 'África', '', '', ''),
(388, 'JM', 'JAM', 1876, 'Jamaica', 'América', 'El Caribe', 'JMD', 'Dólar jamaicano'),
(392, 'JP', 'JPN', 81, 'Japón', 'Asia', '', 'JPY', 'Yen japonés'),
(398, 'KZ', 'KAZ', 7, 'Kazajstán', 'Asia', '', 'KZT', 'Tenge kazajo'),
(400, 'JO', 'JOR', 962, 'Jordania', 'Asia', '', 'JOD', 'Dinar jordano'),
(404, 'KE', 'KEN', 254, 'Kenia', 'África', '', 'KES', 'Chelín keniata'),
(408, 'KP', 'PRK', 850, 'Corea del Norte', 'Asia', '', 'KPW', 'Won norcoreano'),
(410, 'KR', 'KOR', 82, 'Corea del Sur', 'Asia', '', 'KRW', 'Won surcoreano'),
(414, 'KW', 'KWT', 965, 'Kuwait', 'Asia', '', 'KWD', 'Dinar kuwaití'),
(417, 'KG', 'KGZ', 996, 'Kirguistán', 'Asia', '', 'KGS', 'Som kirguís de Kirguistán'),
(418, 'LA', 'LAO', 856, 'Laos', 'Asia', '', 'LAK', 'Kip lao'),
(422, 'LB', 'LBN', 961, 'Líbano', 'Asia', '', 'LBP', 'Libra libanesa'),
(426, 'LS', 'LSO', 266, 'Lesotho', 'África', '', 'LSL', 'Loti lesotense'),
(428, 'LV', 'LVA', 371, 'Letonia', 'Europa', '', 'LVL', 'Lat letón'),
(430, 'LR', 'LBR', 231, 'Liberia', 'África', '', 'LRD', 'Dólar liberiano'),
(434, 'LY', 'LBY', 218, 'Libia', 'África', '', 'LYD', 'Dinar libio'),
(438, 'LI', 'LIE', 423, 'Liechtenstein', 'Europa', '', '', ''),
(440, 'LT', 'LTU', 370, 'Lituania', 'Europa', '', 'LTL', 'Litas lituano'),
(442, 'LU', 'LUX', 352, 'Luxemburgo', 'Europa', '', 'EUR', 'Euro'),
(446, 'MO', 'MAC', 853, 'Macao', 'Asia', '', 'MOP', 'Pataca de Macao'),
(450, 'MG', 'MDG', 261, 'Madagascar', 'África', '', 'MGA', 'Ariary malgache'),
(454, 'MW', 'MWI', 265, 'Malaui', 'África', '', 'MWK', 'Kwacha malauiano'),
(458, 'MY', 'MYS', 60, 'Malasia', 'Asia', '', 'MYR', 'Ringgit malayo'),
(462, 'MV', 'MDV', 960, 'Maldivas', 'Asia', '', 'MVR', 'Rufiyaa maldiva'),
(466, 'ML', 'MLI', 223, 'Malí', 'África', '', '', ''),
(470, 'MT', 'MLT', 356, 'Malta', 'Europa', '', 'MTL', 'Lira maltesa'),
(474, 'MQ', 'MTQ', 0, 'Martinica', 'América', 'El Caribe', '', ''),
(478, 'MR', 'MRT', 222, 'Mauritania', 'África', '', 'MRO', 'Ouguiya mauritana'),
(480, 'MU', 'MUS', 230, 'Mauricio', 'África', '', 'MUR', 'Rupia mauricia'),
(484, 'MX', 'MEX', 52, 'México', 'América', 'América del Norte', 'MXN', 'Peso mexicano'),
(492, 'MC', 'MCO', 377, 'Mónaco', 'Europa', '', '', ''),
(496, 'MN', 'MNG', 976, 'Mongolia', 'Asia', '', 'MNT', 'Tughrik mongol'),
(498, 'MD', 'MDA', 373, 'Moldavia', 'Europa', '', 'MDL', 'Leu moldavo'),
(499, 'ME', 'MNE', 382, 'Montenegro', 'Europa', '', '', ''),
(500, 'MS', 'MSR', 1664, 'Montserrat', 'América', 'El Caribe', '', ''),
(504, 'MA', 'MAR', 212, 'Marruecos', 'África', '', 'MAD', 'Dirham marroquí'),
(508, 'MZ', 'MOZ', 258, 'Mozambique', 'África', '', 'MZM', 'Metical mozambiqueño'),
(512, 'OM', 'OMN', 968, 'Omán', 'Asia', '', 'OMR', 'Rial omaní'),
(516, 'NA', 'NAM', 264, 'Namibia', 'África', '', 'NAD', 'Dólar namibio'),
(520, 'NR', 'NRU', 674, 'Nauru', 'Oceanía', '', '', ''),
(524, 'NP', 'NPL', 977, 'Nepal', 'Asia', '', 'NPR', 'Rupia nepalesa'),
(528, 'NL', 'NLD', 31, 'Países Bajos', 'Europa', '', 'EUR', 'Euro'),
(530, 'AN', 'ANT', 599, 'Antillas Holandesas', 'América', 'El Caribe', 'ANG', 'Florín antillano neerlandés'),
(533, 'AW', 'ABW', 297, 'Aruba', 'América', 'El Caribe', 'AWG', 'Florín arubeño'),
(540, 'NC', 'NCL', 687, 'Nueva Caledonia', 'Oceanía', '', '', ''),
(548, 'VU', 'VUT', 678, 'Vanuatu', 'Oceanía', '', 'VUV', 'Vatu vanuatense'),
(554, 'NZ', 'NZL', 64, 'Nueva Zelanda', 'Oceanía', '', 'NZD', 'Dólar neozelandés'),
(558, 'NI', 'NIC', 505, 'Nicaragua', 'América', 'América Central', 'NIO', 'Córdoba nicaragüense'),
(562, 'NE', 'NER', 227, 'Níger', 'África', '', '', ''),
(566, 'NG', 'NGA', 234, 'Nigeria', 'África', '', 'NGN', 'Naira nigeriana'),
(570, 'NU', 'NIU', 683, 'Niue', 'Oceanía', '', '', ''),
(574, 'NF', 'NFK', 0, 'Isla Norfolk', 'Oceanía', '', '', ''),
(578, 'NO', 'NOR', 47, 'Noruega', 'Europa', '', 'NOK', 'Corona noruega'),
(580, 'MP', 'MNP', 1670, 'Islas Marianas del Norte', 'Oceanía', '', '', ''),
(581, 'UM', 'UMI', 0, 'Islas Ultramarinas de Estados Unidos', '', '', '', ''),
(583, 'FM', 'FSM', 691, 'Micronesia', 'Oceanía', '', '', ''),
(584, 'MH', 'MHL', 692, 'Islas Marshall', 'Oceanía', '', '', ''),
(585, 'PW', 'PLW', 680, 'Palaos', 'Oceanía', '', '', ''),
(586, 'PK', 'PAK', 92, 'Pakistán', 'Asia', '', 'PKR', 'Rupia pakistaní'),
(591, 'PA', 'PAN', 507, 'Panamá', 'América', 'América Central', 'PAB', 'Balboa panameña'),
(598, 'PG', 'PNG', 675, 'Papúa Nueva Guinea', 'Oceanía', '', 'PGK', 'Kina de Papúa Nueva Guinea'),
(600, 'PY', 'PRY', 595, 'Paraguay', 'América', 'América del Sur', 'PYG', 'Guaraní paraguayo'),
(604, 'PE', 'PER', 51, 'Perú', 'América', 'América del Sur', 'PEN', 'Nuevo sol peruano'),
(608, 'PH', 'PHL', 63, 'Filipinas', 'Asia', '', 'PHP', 'Peso filipino'),
(612, 'PN', 'PCN', 870, 'Islas Pitcairn', 'Oceanía', '', '', ''),
(616, 'PL', 'POL', 48, 'Polonia', 'Europa', '', 'PLN', 'zloty polaco'),
(620, 'PT', 'PRT', 351, 'Portugal', 'Europa', '', 'EUR', 'Euro'),
(624, 'GW', 'GNB', 245, 'Guinea-Bissau', 'África', '', '', ''),
(626, 'TL', 'TLS', 670, 'Timor Oriental', 'Asia', '', '', ''),
(630, 'PR', 'PRI', 1, 'Puerto Rico', 'América', 'El Caribe', '', ''),
(634, 'QA', 'QAT', 974, 'Qatar', 'Asia', '', 'QAR', 'Rial qatarí'),
(638, 'RE', 'REU', 262, 'Reunión', 'África', '', '', ''),
(642, 'RO', 'ROU', 40, 'Rumania', 'Europa', '', 'RON', 'Leu rumano'),
(643, 'RU', 'RUS', 7, 'Rusia', 'Asia', '', 'RUB', 'Rublo ruso'),
(646, 'RW', 'RWA', 250, 'Ruanda', 'África', '', 'RWF', 'Franco ruandés'),
(654, 'SH', 'SHN', 290, 'Santa Helena', 'África', '', 'SHP', 'Libra de Santa Helena'),
(659, 'KN', 'KNA', 1869, 'San Cristóbal y Nieves', 'América', 'El Caribe', '', ''),
(660, 'AI', 'AIA', 1264, 'Anguila', 'América', 'El Caribe', '', ''),
(662, 'LC', 'LCA', 1758, 'Santa Lucía', 'América', 'El Caribe', '', ''),
(666, 'PM', 'SPM', 508, 'San Pedro y Miquelón', 'América', 'América del Norte', '', ''),
(670, 'VC', 'VCT', 1784, 'San Vicente y las Granadinas', 'América', 'El Caribe', '', ''),
(674, 'SM', 'SMR', 378, 'San Marino', 'Europa', '', '', ''),
(678, 'ST', 'STP', 239, 'Santo Tomé y Príncipe', 'África', '', 'STD', 'Dobra de Santo Tomé y Príncipe'),
(682, 'SA', 'SAU', 966, 'Arabia Saudí', 'Asia', '', 'SAR', 'Riyal saudí'),
(686, 'SN', 'SEN', 221, 'Senegal', 'África', '', '', ''),
(688, 'RS', 'SRB', 381, 'Serbia', 'Europa', '', '', ''),
(690, 'SC', 'SYC', 248, 'Seychelles', 'África', '', 'SCR', 'Rupia de Seychelles'),
(694, 'SL', 'SLE', 232, 'Sierra Leona', 'África', '', 'SLL', 'Leone de Sierra Leona'),
(702, 'SG', 'SGP', 65, 'Singapur', 'Asia', '', 'SGD', 'Dólar de Singapur'),
(703, 'SK', 'SVK', 421, 'Eslovaquia', 'Europa', '', 'SKK', 'Corona eslovaca'),
(704, 'VN', 'VNM', 84, 'Vietnam', 'Asia', '', 'VND', 'Dong vietnamita'),
(705, 'SI', 'SVN', 386, 'Eslovenia', 'Europa', '', '', ''),
(706, 'SO', 'SOM', 252, 'Somalia', 'África', '', 'SOS', 'Chelín somalí'),
(710, 'ZA', 'ZAF', 27, 'Sudáfrica', 'África', '', 'ZAR', 'Rand sudafricano'),
(716, 'ZW', 'ZWE', 263, 'Zimbabue', 'África', '', 'ZWL', 'Dólar zimbabuense'),
(724, 'ES', 'ESP', 34, 'España', 'Europa', '', 'EUR', 'Euro'),
(732, 'EH', 'ESH', 0, 'Sahara Occidental', 'África', '', '', ''),
(736, 'SD', 'SDN', 249, 'Sudán', 'África', '', 'SDD', 'Dinar sudanés'),
(740, 'SR', 'SUR', 597, 'Surinam', 'América', 'América del Sur', 'SRD', 'Dólar surinamés'),
(744, 'SJ', 'SJM', 0, 'Svalbard y Jan Mayen', 'Europa', '', '', ''),
(748, 'SZ', 'SWZ', 268, 'Suazilandia', 'África', '', 'SZL', 'Lilangeni suazi'),
(752, 'SE', 'SWE', 46, 'Suecia', 'Europa', '', 'SEK', 'Corona sueca'),
(756, 'CH', 'CHE', 41, 'Suiza', 'Europa', '', 'CHF', 'Franco suizo'),
(760, 'SY', 'SYR', 963, 'Siria', 'Asia', '', 'SYP', 'Libra siria'),
(762, 'TJ', 'TJK', 992, 'Tayikistán', 'Asia', '', 'TJS', 'Somoni tayik de Tayikistán'),
(764, 'TH', 'THA', 66, 'Tailandia', 'Asia', '', 'THB', 'Baht tailandés'),
(768, 'TG', 'TGO', 228, 'Togo', 'África', '', '', ''),
(772, 'TK', 'TKL', 690, 'Tokelau', 'Oceanía', '', '', ''),
(776, 'TO', 'TON', 676, 'Tonga', 'Oceanía', '', 'TOP', "Pa'anga tongano"),
(780, 'TT', 'TTO', 1868, 'Trinidad y Tobago', 'América', 'El Caribe', 'TTD', 'Dólar de Trinidad y Tobago'),
(784, 'AE', 'ARE', 971, 'Emiratos Árabes Unidos', 'Asia', '', 'AED', 'Dirham de los Emiratos Árabes Unidos'),
(788, 'TN', 'TUN', 216, 'Túnez', 'África', '', 'TND', 'Dinar tunecino'),
(792, 'TR', 'TUR', 90, 'Turquía', 'Asia', '', 'TRY', 'Lira turca'),
(795, 'TM', 'TKM', 993, 'Turkmenistán', 'Asia', '', 'TMM', 'Manat turcomano'),
(796, 'TC', 'TCA', 1649, 'Islas Turcas y Caicos', 'América', 'El Caribe', '', ''),
(798, 'TV', 'TUV', 688, 'Tuvalu', 'Oceanía', '', '', ''),
(800, 'UG', 'UGA', 256, 'Uganda', 'África', '', 'UGX', 'Chelín ugandés'),
(804, 'UA', 'UKR', 380, 'Ucrania', 'Europa', '', 'UAH', 'Grivna ucraniana'),
(807, 'MK', 'MKD', 389, 'Macedonia', 'Europa', '', 'MKD', 'Denar macedonio'),
(818, 'EG', 'EGY', 20, 'Egipto', 'África', '', 'EGP', 'Libra egipcia'),
(826, 'GB', 'GBR', 44, 'Reino Unido', 'Europa', '', 'GBP', 'Libra esterlina de Gran Bretaña'),
(834, 'TZ', 'TZA', 255, 'Tanzania', 'África', '', 'TZS', 'Chelín tanzano'),
(840, 'US', 'USA', 1, 'Estados Unidos', 'América', 'América del Norte', 'USD', 'Dólar estadounidense'),
(850, 'VI', 'VIR', 1340, 'Islas Vírgenes de los Estados Unidos', 'América', 'El Caribe', '', ''),
(854, 'BF', 'BFA', 226, 'Burkina Faso', 'África', '', '', ''),
(858, 'UY', 'URY', 598, 'Uruguay', 'América', 'América del Sur', 'UYU', 'Peso uruguayo'),
(860, 'UZ', 'UZB', 998, 'Uzbekistán', 'Asia', '', 'UZS', 'Som uzbeko'),
(862, 'VE', 'VEN', 58, 'Venezuela', 'América', 'América del Sur', 'VEB', 'Bolívar venezolano'),
(876, 'WF', 'WLF', 681, 'Wallis y Futuna', 'Oceanía', '', '', ''),
(882, 'WS', 'WSM', 685, 'Samoa', 'Oceanía', '', 'WST', 'Tala samoana'),
(887, 'YE', 'YEM', 967, 'Yemen', 'Asia', '', 'YER', 'Rial yemení de Yemen'),
(894, 'ZM', 'ZMB', 260, 'Zambia', 'África', '', 'ZMK', 'Kwacha zambiano')
)
|
jredrejo/bancal
|
web2py/applications/bancal/modules/paises.py
|
Python
|
gpl-3.0
| 18,468 | 0.016688 |
"""Resolwe collection model."""
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.indexes import GinIndex
from django.contrib.postgres.search import SearchVectorField
from django.db import models, transaction
from resolwe.permissions.models import PermissionObject, PermissionQuerySet
from .base import BaseModel, BaseQuerySet
from .utils import DirtyError, bulk_duplicate, validate_schema
class BaseCollection(BaseModel):
"""Template for Postgres model for storing a collection."""
class Meta(BaseModel.Meta):
"""BaseCollection Meta options."""
abstract = True
#: detailed description
description = models.TextField(blank=True)
settings = models.JSONField(default=dict)
#: collection descriptor schema
descriptor_schema = models.ForeignKey(
"flow.DescriptorSchema", blank=True, null=True, on_delete=models.PROTECT
)
#: collection descriptor
descriptor = models.JSONField(default=dict)
#: indicate whether `descriptor` doesn't match `descriptor_schema` (is dirty)
descriptor_dirty = models.BooleanField(default=False)
#: tags for categorizing objects
tags = ArrayField(models.CharField(max_length=255), default=list)
#: field used for full-text search
search = SearchVectorField(null=True)
def save(self, *args, **kwargs):
"""Perform descriptor validation and save object."""
if self.descriptor_schema:
try:
validate_schema(self.descriptor, self.descriptor_schema.schema)
self.descriptor_dirty = False
except DirtyError:
self.descriptor_dirty = True
elif self.descriptor and self.descriptor != {}:
raise ValueError(
"`descriptor_schema` must be defined if `descriptor` is given"
)
        super().save(*args, **kwargs)
class CollectionQuerySet(BaseQuerySet, PermissionQuerySet):
"""Query set for ``Collection`` objects."""
@transaction.atomic
def duplicate(self, contributor):
"""Duplicate (make a copy) ``Collection`` objects."""
return bulk_duplicate(collections=self, contributor=contributor)
class Collection(BaseCollection, PermissionObject):
"""Postgres model for storing a collection."""
class Meta(BaseCollection.Meta):
"""Collection Meta options."""
permissions = (
("view", "Can view collection"),
("edit", "Can edit collection"),
("share", "Can share collection"),
("owner", "Is owner of the collection"),
)
indexes = [
models.Index(name="idx_collection_name", fields=["name"]),
GinIndex(
name="idx_collection_name_trgm",
fields=["name"],
opclasses=["gin_trgm_ops"],
),
models.Index(name="idx_collection_slug", fields=["slug"]),
GinIndex(name="idx_collection_tags", fields=["tags"]),
GinIndex(name="idx_collection_search", fields=["search"]),
]
#: manager
objects = CollectionQuerySet.as_manager()
#: duplication date and time
duplicated = models.DateTimeField(blank=True, null=True)
def is_duplicate(self):
"""Return True if collection is a duplicate."""
return bool(self.duplicated)
def duplicate(self, contributor):
"""Duplicate (make a copy)."""
return bulk_duplicate(
collections=self._meta.model.objects.filter(pk=self.pk),
contributor=contributor,
)[0]
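# Illustrative duplication call (added sketch; assumes an existing collection
# and a `contributor` user instance):
#
#   copy = Collection.objects.get(slug="my-collection").duplicate(contributor)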
|
genialis/resolwe
|
resolwe/flow/models/collection.py
|
Python
|
apache-2.0
| 3,575 | 0.000559 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .lecroyWRXIA import *
class lecroyWR204MXIA(lecroyWRXIA):
"Lecroy WaveRunner 204MXi-A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'WaveRunner 204MXi-A')
        super(lecroyWR204MXIA, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 1e9
self._init_channels()
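# Illustrative instantiation (added sketch; the VISA resource string is
# hypothetical and assumes the class is exported through the ivi.lecroy package):
#
#   import ivi
#   scope = ivi.lecroy.lecroyWR204MXIA("TCPIP0::192.168.1.10::INSTR")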
|
Diti24/python-ivi
|
ivi/lecroy/lecroyWR204MXIA.py
|
Python
|
mit
| 1,653 | 0.001815 |
#!/usr/bin/env python3
#This program calculates the entropy for a file, files in a folder or disk image
#The user sets a reporting threshold
#########################COPYRIGHT INFORMATION############################
#Copyright (C) 2011 dougkoster@hotmail.com #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
#
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
#
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see http://www.gnu.org/licenses/. #
#########################COPYRIGHT INFORMATION############################
#import modules
from easygui import *
from get_case_number import *
from get_output_location import *
from select_file_to_process import *
from select_folder_to_process import *
from parted import *
from mmls import *
from mount import *
from mount_ewf import *
from get_ntuser_paths import *
from get_usrclass_paths import *
from get_system_paths import *
from done import *
from unix2dos import *
from check_for_folder import *
from mount_encase_v6_l01 import *
from calculate_md5 import *
import os
import codecs
from os.path import join
import re
import io
import sys
import string
import subprocess
import pickle
import datetime
import base64
### process_folder #######################################################################################
def process_folder(folder_to_process, export_file, outfile):
for root,dirs,files in os.walk(folder_to_process):
for file_name in files:
abs_file_path = os.path.join(root,file_name)
quoted_abs_file_path = '"'+abs_file_path+'"'
file_name_print = file_name.encode('utf-8')
abs_file_path_print = abs_file_path.encode('utf-8')
#clean up printable variables
file_name_print = re.sub('b\'','',str(file_name_print))
file_name_print = re.sub("'",'',file_name_print)
abs_file_path_print = re.sub('b\'','',str(abs_file_path_print))
abs_file_path_print = re.sub("'",'', abs_file_path_print)
#don't process link files
if not (os.path.islink(abs_file_path)):
#get file size
try:
file_size = os.path.getsize(abs_file_path)
				except:
					file_size = 0 #keep file_size bound so the check below cannot raise NameError
					print("Could not get filesize for file: " + abs_file_path)
					outfile.write("Could not get filesize for file: " + abs_file_path + "\n")
if(file_size):
try:
ent = calc_entropy(abs_file_path)
print("Filename: " + file_name + "\t" + "Entropy: " + ent)
export_file.write(ent + "," + str(file_name_print) + "," + str(file_size) + "," + str(abs_file_path_print) + "\n")
except:
print("Could not get entropy for file: " + abs_file_path)
outfile.write("Could not get entropy for file: " + str(abs_file_path_print) + "\n")
else:
print("File: " + file_name + " has 0 file size....skipping")
outfile.write("File: " + file_name + "has 0 file size....skipping\n")
else:
print("File: " + file_name + " is link file....skipping")
outfile.write("File: " + file_name + "is link file....skipping\n")
##########################################################################################################
### calc_entropy #########################################################################################
def calc_entropy(file_to_process):
if(re.search("'", file_to_process)):
entropy = subprocess.check_output(['ent ' + '"' + file_to_process + '"' + " | grep Entropy | awk '{print $3}'"], shell=True)
else:
entropy = subprocess.check_output(['ent ' + "'" + file_to_process + "'" + " | grep Entropy | awk '{print $3}'"], shell=True)
entropy = entropy.strip()
entropy_string = entropy.decode(encoding='UTF-8')
return entropy_string
##########################################################################################################
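#illustrative call (added): the returned value depends on the file contents and
#requires the external 'ent' tool, e.g. calc_entropy('/bin/ls') -> '5.912415'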
def entropy_mr(item_to_process, case_number, root_folder_path, evidence):
print("The item to process is: " + item_to_process)
print("The case_name is: " + case_number)
print("The output folder is: " + root_folder_path)
print("The evidence to process is: " + evidence)
evidence_no_quotes = evidence
evidence = '"' + evidence + '"'
#get datetime
now = datetime.datetime.now()
#set Mount Point
mount_point = "/mnt/" + "MantaRay_" + now.strftime("%Y-%m-%d_%H_%M_%S_%f")
#create output folder path
folder_path = root_folder_path + "/" + "Entropy"
check_for_folder(folder_path, "NONE")
#open a log file for output
log_file = folder_path + "/Entropy_logfile.txt"
outfile = open(log_file, 'wt+')
#open file to write output
exp_file = folder_path + "/" + case_number +"_entropy.csv"
export_file = open(exp_file, 'a+', encoding='latin-1', errors="ignore")
#export_file = open(exp_file, 'a')
if(item_to_process == "Single File"):
ent = calc_entropy(evidence)
print(ent)
elif(item_to_process == "Directory"):
folder_to_process = evidence_no_quotes
process_folder(folder_to_process, export_file, outfile)
elif(item_to_process =="EnCase Logical Evidence File"):
file_to_process = evidence
mount_point = mount_encase_v6_l01(case_number, file_to_process, outfile)
process_folder(mount_point, export_file, outfile)
#umount
if(os.path.exists(mount_point)):
subprocess.call(['sudo umount -f ' + mount_point], shell=True)
os.rmdir(mount_point)
elif(item_to_process == "Bit-Stream Image"):
Image_Path = evidence
#process every file on every partition
#get datetime
now = datetime.datetime.now()
#set Mount Point
mount_point = "/mnt/" + now.strftime("%Y-%m-%d_%H_%M_%S_%f")
#check if Image file is in Encase format
if re.search(".E01", Image_Path):
#strip out single quotes from the quoted path
#no_quotes_path = Image_Path.replace("'","")
#print("THe no quotes path is: " + no_quotes_path)
#call mount_ewf function
Image_Path = mount_ewf(Image_Path, outfile,mount_point)
#call mmls function
partition_info_dict, temp_time = mmls(outfile, Image_Path)
partition_info_dict_temp = partition_info_dict
#get filesize of mmls_output.txt
file_size = os.path.getsize("/tmp/mmls_output_" + temp_time + ".txt")
#if filesize of mmls output is 0 then run parted
if(file_size == 0):
print("mmls output was empty, running parted")
outfile.write("mmls output was empty, running parted")
#call parted function
partition_info_dict, temp_time = parted(outfile, Image_Path)
else:
#read through the mmls output and look for GUID Partition Tables (used on MACS)
mmls_output_file = open("/tmp/mmls_output_" + temp_time + ".txt", 'r')
for line in mmls_output_file:
if re.search("GUID Partition Table", line):
print("We found a GUID partition table, need to use parted")
outfile.write("We found a GUID partition table, need to use parted\n")
#call parted function
partition_info_dict, temp_time = parted(outfile, Image_Path)
mmls_output_file.close()
#loop through the dictionary containing the partition info (filesystem is VALUE, offset is KEY)
for key,value in sorted(partition_info_dict.items()):
#disable auto-mount in nautilis - this stops a nautilis window from popping up everytime the mount command is executed
cmd_false = "sudo gsettings set org.gnome.desktop.media-handling automount false && sudo gsettings set org.gnome.desktop.media-handling automount-open false"
try:
subprocess.call([cmd_false], shell=True)
except:
print("Autmount false failed")
#call mount sub-routine
success_code, loopback_device_mount = mount(value,str(key),Image_Path, outfile, mount_point)
if(success_code):
print("Could not mount partition with filesystem: " + value + " at offset:" + str(key))
outfile.write("Could not mount partition with filesystem: " + value + " at offset:" + str(key))
else:
print("We just mounted filesystem: " + value + " at offset:" + str(key) + "\n")
outfile.write("We just mounted filesystem: " + value + " at offset:" + str(key) + "\n")
#call entropy function for each mount_point
process_folder(mount_point, export_file, outfile)
print("We just finished calculating the entropy for every file...sorting output")
#unmount and remove mount points
if(os.path.exists(mount_point)):
subprocess.call(['sudo umount -f ' + mount_point], shell=True)
os.rmdir(mount_point)
#unmount loopback device if this image was HFS+ - need to run losetup -d <loop_device> before unmounting
if not (loopback_device_mount == "NONE"):
losetup_d_command = "losetup -d " + loopback_device_mount
subprocess.call([losetup_d_command], shell=True)
#delete /tmp files created for each partition
if (os.path.exists("/tmp/mmls_output_" + temp_time + ".txt")):
os.remove("/tmp/mmls_output_" + temp_time + ".txt")
#close output file
export_file.close()
#sort output file
sort_command = "strings -a " + "'" + exp_file + "'" + " |sort -t\| -r -k 2n > " + "'" + folder_path + "'" + "/" + case_number +"_entropy_sorted.csv"
subprocess.call([sort_command], shell=True)
#write header row to export_file
#sed_command = "sed -i '1i\ Entropy,File Name,File Size,MD5,File Path' " + "'" + folder_path + "'" + "/" + case_number +"_entropy_sorted.csv"
sed_command = "sed -i '1i\ Entropy,File Name,File Size,FilePath' " + "'" + folder_path + "'" + "/" + case_number +"_entropy_sorted.csv"
subprocess.call([sed_command], shell=True)
#remove original output file
os.remove(exp_file)
#remove mount points created for this program
if(os.path.exists(mount_point)):
os.rmdir(mount_point)
if(os.path.exists(mount_point+"_ewf")):
subprocess.call(['sudo umount -f ' + mount_point + "_ewf"], shell=True)
os.rmdir(mount_point+"_ewf")
|
mantarayforensics/mantaray
|
Tools/Python/entropy_mr.py
|
Python
|
gpl-3.0
| 10,303 | 0.028827 |
#!/usr/bin/python3
import os
import sys
from merge_utils import *
xml_out = etree.Element("packages")
funtoo_staging_w = GitTree("funtoo-staging", "master", "repos@localhost:ports/funtoo-staging.git", root="/var/git/dest-trees/funtoo-staging", pull=False, xml_out=xml_out)
#funtoo_staging_w = GitTree("funtoo-staging-unfork", "master", "repos@localhost:ports/funtoo-staging-unfork.git", root="/var/git/dest-trees/funtoo-staging-unfork", pull=False, xml_out=None)
xmlfile="/home/ports/public_html/packages.xml"
nopush=False
funtoo_overlay = GitTree("funtoo-overlay", "master", "repos@localhost:funtoo-overlay.git", pull=True)
# We treat our Gentoo staging overlay specially, so it's listed separately. This overlay contains all Gentoo
# ebuilds, in a git repository. We use a special file in the funtoo-overlay/funtoo/scripts directory (next to
# this file) to provide a SHA1 of the commit of the gentoo-staging overlay that we want to use as a basis
# for our merges. Let's grab the SHA1 hash from that file:
p = os.path.join(funtoo_overlay.root,"funtoo/scripts/commit-staged")
if os.path.exists(p):
a = open(p,"r")
commit = a.readlines()[0].strip()
print("Using commit: %s" % commit)
else:
commit = None
gentoo_staging_r = GitTree("gentoo-staging", "master", "repos@localhost:ports/gentoo-staging.git", commit=commit, pull=True)
# These overlays are monitored for changes -- if there are changes in these overlays, we regenerate the entire
# tree. If there aren't changes in these overlays, we don't.
shards = {
"perl" : GitTree("gentoo-perl-shard", "1fc10379b04cb4aaa29e824288f3ec22badc6b33", "repos@localhost:gentoo-perl-shard.git", pull=True),
"kde" : GitTree("gentoo-kde-shard", "cd4e1129ddddaa21df367ecd4f68aab894e57b31", "repos@localhost:gentoo-kde-shard.git", pull=True),
"gnome" : GitTree("gentoo-gnome-shard", "ffabb752f8f4e23a865ffe9caf72f950695e2f26", "repos@localhost:ports/gentoo-gnome-shard.git", pull=True),
"x11" : GitTree("gentoo-x11-shard", "12c1bdf9a9bfd28f48d66bccb107c17b5f5af577", "repos@localhost:ports/gentoo-x11-shard.git", pull=True),
"office" : GitTree("gentoo-office-shard", "9a702057d23e7fa277e9626344671a82ce59442f", "repos@localhost:ports/gentoo-office-shard.git", pull=True),
"core" : GitTree("gentoo-core-shard", "56e5b9edff7dc27e828b71010d019dcbd8e176fd", "repos@localhost:gentoo-core-shard.git", pull=True)
}
# perl: 1fc10379b04cb4aaa29e824288f3ec22badc6b33 (Updated 6 Dec 2016)
# kde: cd4e1129ddddaa21df367ecd4f68aab894e57b31 (Updated 25 Dec 2016)
# gnome: ffabb752f8f4e23a865ffe9caf72f950695e2f26 (Updated 20 Sep 2016)
# x11: 12c1bdf9a9bfd28f48d66bccb107c17b5f5af577 (Updated 24 Dec 2016)
# office: 9a702057d23e7fa277e9626344671a82ce59442f (Updated 29 Nov 2016)
# core: 56e5b9edff7dc27e828b71010d019dcbd8e176fd (Updated 17 Dec 2016)
# funtoo-toolchain: b97787318b7ffcfeaacde82cd21ddd5e207ad1f4 (Updated 25 Dec 2016)
funtoo_overlays = {
"funtoo_media" : GitTree("funtoo-media", "master", "repos@localhost:funtoo-media.git", pull=True),
"plex_overlay" : GitTree("funtoo-plex", "master", "https://github.com/Ghent/funtoo-plex.git", pull=True),
#"gnome_fixups" : GitTree("gnome-3.16-fixups", "master", "repos@localhost:ports/gnome-3.16-fixups.git", pull=True),
"gnome_fixups" : GitTree("gnome-3.20-fixups", "master", "repos@localhost:ports/gnome-3.20-fixups.git", pull=True),
"funtoo_toolchain" : GitTree("funtoo-toolchain", "b97787318b7ffcfeaacde82cd21ddd5e207ad1f4", "repos@localhost:funtoo-toolchain-overlay.git", pull=True),
"ldap_overlay" : GitTree("funtoo-ldap", "master", "repos@localhost:funtoo-ldap-overlay.git", pull=True),
"deadbeef_overlay" : GitTree("deadbeef-overlay", "master", "https://github.com/damex/deadbeef-overlay.git", pull=True),
"gambas_overlay" : GitTree("gambas-overlay", "master", "https://github.com/damex/gambas-overlay.git", pull=True),
"wmfs_overlay" : GitTree("wmfs-overlay", "master", "https://github.com/damex/wmfs-overlay.git", pull=True),
"flora" : GitTree("flora", "master", "repos@localhost:flora.git", pull=True),
}
# These are other overlays that we merge into the Funtoo tree. However, we just pull in the most recent versions
# of these when we regenerate our tree.
other_overlays = {
"foo_overlay" : GitTree("foo-overlay", "master", "https://github.com/slashbeast/foo-overlay.git", pull=True),
"bar_overlay" : GitTree("bar-overlay", "master", "git://github.com/adessemond/bar-overlay.git", pull=True),
"squeezebox_overlay" : GitTree("squeezebox", "master", "git://anongit.gentoo.org/user/squeezebox.git", pull=True),
"pantheon_overlay" : GitTree("pantheon", "master", "https://github.com/pimvullers/elementary.git", pull=True),
"pinsard_overlay" : GitTree("pinsard", "master", "https://github.com/apinsard/sapher-overlay.git", pull=True),
"sabayon_for_gentoo" : GitTree("sabayon-for-gentoo", "master", "git://github.com/Sabayon/for-gentoo.git", pull=True),
"tripsix_overlay" : GitTree("tripsix", "master", "https://github.com/666threesixes666/tripsix.git", pull=True),
"faustoo_overlay" : GitTree("faustoo", "master", "https://github.com/fmoro/faustoo.git", pull=True),
"wltjr_overlay" : GitTree("wltjr", "master", "https://github.com/Obsidian-StudiosInc/os-xtoo", pull=True),
"vmware_overlay" : GitTree("vmware", "master", "git://anongit.gentoo.org/proj/vmware.git", pull=True)
}
funtoo_changes = False
if funtoo_overlay.changes:
funtoo_changes = True
elif gentoo_staging_r.changes:
funtoo_changes = True
else:
for fo in funtoo_overlays:
if funtoo_overlays[fo].changes:
funtoo_changes = True
break
# This next code regenerates the contents of the funtoo-staging tree. Funtoo's tree is itself composed of
# many different overlays which are merged in an automated fashion. This code does it all.
pull = True
if nopush:
push = False
else:
push = "master"
# base_steps define the initial steps that prepare our destination tree for writing. Checking out the correct
# branch, copying almost the full entirety of Gentoo's portage tree to our destination tree, and copying over
# funtoo overlay licenses, metadata, and also copying over GLSA's.
base_steps = [
GitCheckout("master"),
SyncFromTree(gentoo_staging_r, exclude=[
"/metadata/cache/**",
"ChangeLog",
"dev-util/metro",
"skel.ChangeLog",
]),
]
# Steps related to generating system profiles. These can be quite order-dependent and should be handled carefully.
# Generally, the funtoo_overlay sync should be first, then the gentoo_staging_r SyncFiles, which overwrites some stub
# files in the funtoo overlay.
profile_steps = [
SyncDir(funtoo_overlay.root, "profiles", "profiles", exclude=["categories", "updates"]),
CopyAndRename("profiles/funtoo/1.0/linux-gnu/arch/x86-64bit/subarch", "profiles/funtoo/1.0/linux-gnu/arch/pure64/subarch", lambda x: os.path.basename(x) + "-pure64"),
SyncFiles(gentoo_staging_r.root, {
"profiles/package.mask":"profiles/package.mask/00-gentoo",
"profiles/arch/amd64/package.use.mask":"profiles/funtoo/1.0/linux-gnu/arch/x86-64bit/package.use.mask/01-gentoo",
"profiles/features/multilib/package.use.mask":"profiles/funtoo/1.0/linux-gnu/arch/x86-64bit/package.use.mask/02-gentoo",
"profiles/arch/amd64/use.mask":"profiles/funtoo/1.0/linux-gnu/arch/x86-64bit/use.mask/01-gentoo",
"profiles/arch/x86/package.use.mask":"profiles/funtoo/1.0/linux-gnu/arch/x86-32bit/package.use.mask/01-gentoo",
"profiles/arch/x86/use.mask":"profiles/funtoo/1.0/linux-gnu/arch/x86-32bit/use.mask/01-gentoo",
"profiles/default/linux/package.use.mask":"profiles/funtoo/1.0/linux-gnu/package.use.mask/01-gentoo",
"profiles/default/linux/use.mask":"profiles/funtoo/1.0/linux-gnu/use.mask/01-gentoo",
"profiles/arch/amd64/no-multilib/package.use.mask":"profiles/funtoo/1.0/linux-gnu/arch/pure64/package.use.mask/01-gentoo",
"profiles/arch/amd64/no-multilib/package.mask":"profiles/funtoo/1.0/linux-gnu/arch/pure64/package.mask/01-gentoo",
"profiles/arch/amd64/no-multilib/use.mask":"profiles/funtoo/1.0/linux-gnu/arch/pure64/use.mask/01-gentoo"
}),
SyncFiles(funtoo_overlays["deadbeef_overlay"].root, {
"profiles/package.mask":"profiles/package.mask/deadbeef-mask"
}),
SyncFiles(funtoo_overlays["wmfs_overlay"].root, {
"profiles/package.mask":"profiles/package.mask/wmfs-mask"
}) ]
profile_steps += [
SyncFiles(funtoo_overlays["funtoo_toolchain"].root, {
"profiles/package.mask/funtoo-toolchain":"profiles/funtoo/1.0/linux-gnu/build/current/package.mask/funtoo-toolchain",
}),
SyncFiles(funtoo_overlays["funtoo_toolchain"].root, {
"profiles/package.mask/funtoo-toolchain":"profiles/funtoo/1.0/linux-gnu/build/stable/package.mask/funtoo-toolchain",
"profiles/package.mask/funtoo-toolchain-experimental":"profiles/funtoo/1.0/linux-gnu/build/experimental/package.mask/funtoo-toolchain",
}),
RunSed(["profiles/base/make.defaults"], ["/^PYTHON_TARGETS=/d", "/^PYTHON_SINGLE_TARGET=/d"]),
]
# Steps related to copying ebuilds. Note that order can make a difference here when multiple overlays are
# providing identical catpkgs.
# Ebuild additions -- these are less-risky changes because ebuilds are only added, and not replaced.
ebuild_additions = [
InsertEbuilds(other_overlays["bar_overlay"], select="all", skip=["app-emulation/qemu"], replace=False),
InsertEbuilds(other_overlays["squeezebox_overlay"], select="all", skip=None, replace=False),
InsertEbuilds(funtoo_overlays["deadbeef_overlay"], select="all", skip=None, replace=False),
InsertEbuilds(funtoo_overlays["gambas_overlay"], select="all", skip=None, replace=False),
InsertEbuilds(funtoo_overlays["wmfs_overlay"], select="all", skip=None, replace=False),
InsertEbuilds(funtoo_overlays["flora"], select="all", skip=None, replace=True, merge=True),
]
# Ebuild modifications -- these changes need to be treated more carefully as ordering can be important
# for wholesale replacing as well as merging.
ebuild_modifications = [
InsertEbuilds(other_overlays["vmware_overlay"], select=[ "app-emulation/vmware-modules" ], skip=None, replace=True, merge=True),
InsertEbuilds(other_overlays["pantheon_overlay"], select=[ "x11-libs/granite", "x11-libs/bamf", "x11-themes/plank-theme-pantheon", "pantheon-base/plank", "x11-wm/gala"], skip=None, replace=True, merge=True),
InsertEbuilds(other_overlays["faustoo_overlay"], select="all", skip=None, replace=True, merge=True),
InsertEbuilds(other_overlays["foo_overlay"], select="all", skip=["sys-fs/mdev-bb", "sys-fs/mdev-like-a-boss", "media-sound/deadbeef", "media-video/handbrake"], replace=["app-shells/rssh"]),
InsertEbuilds(funtoo_overlays["plex_overlay"], select=[ "media-tv/plex-media-server" ], skip=None, replace=True),
InsertEbuilds(other_overlays["sabayon_for_gentoo"], select=["app-admin/equo", "app-admin/matter", "sys-apps/entropy", "sys-apps/entropy-server", "sys-apps/entropy-client-services","app-admin/rigo", "sys-apps/rigo-daemon", "sys-apps/magneto-core", "x11-misc/magneto-gtk", "x11-misc/magneto-gtk3", "x11-themes/numix-icon-theme", "kde-misc/magneto-kde", "app-misc/magneto-loader", "media-video/kazam" ], replace=True),
InsertEbuilds(other_overlays["tripsix_overlay"], select=["media-sound/rakarrack"], skip=None, replace=True, merge=False),
InsertEbuilds(other_overlays["pinsard_overlay"], select=["app-portage/chuse", "dev-python/iwlib", "media-sound/pytify", "x11-wm/qtile"], skip=None, replace=True, merge=True),
InsertEbuilds(other_overlays["wltjr_overlay"], select=["mail-filter/assp", "mail-mta/netqmail"], skip=None, replace=True, merge=False),
]
ebuild_modifications += [
InsertEbuilds(funtoo_overlays["funtoo_media"], select="all", skip=None, replace=True),
InsertEbuilds(funtoo_overlays["ldap_overlay"], select="all", skip=["net-nds/openldap"], replace=True),
]
# Steps related to eclass copying:
eclass_steps = [
SyncDir(funtoo_overlays["deadbeef_overlay"].root,"eclass"),
]
# General tree preparation steps -- finishing touches. This is where you should put steps that require all ebuilds
# from all trees to all be inserted (like AutoGlobMask calls) as well as misc. copying of files like licenses and
# updates files. It also contains misc. tweaks like mirror fixups and Portage tree minification.
treeprep_steps = [
SyncDir(funtoo_overlays["plex_overlay"].root,"licenses"),
]
master_steps = [
InsertEbuilds(shards["perl"], select="all", skip=None, replace=True),
InsertEclasses(shards["perl"], select=re.compile(".*\.eclass")),
InsertEbuilds(shards["x11"], select="all", skip=None, replace=True),
InsertEbuilds(shards["office"], select="all", skip=None, replace=True),
InsertEbuilds(shards["kde"], select="all", skip=None, replace=True),
InsertEclasses(shards["kde"], select=re.compile(".*\.eclass")),
InsertEbuilds(shards["gnome"], select="all", skip=None, replace=True),
InsertEbuilds(funtoo_overlays["gnome_fixups"], select="all", skip=None, replace=True),
InsertEbuilds(shards["core"], select="all", skip=None, replace=True),
InsertEclasses(shards["core"], select=re.compile(".*\.eclass")),
InsertEbuilds(funtoo_overlays["funtoo_toolchain"], select="all", skip=None, replace=True, merge=False),
InsertEbuilds(funtoo_overlay, select="all", skip=None, replace=True),
SyncDir(funtoo_overlay.root, "eclass"),
SyncDir(funtoo_overlay.root,"licenses"),
SyncDir(funtoo_overlay.root,"metadata"),
SyncFiles(funtoo_overlay.root, {
"COPYRIGHT.txt":"COPYRIGHT.txt",
"LICENSE.txt":"LICENSE.txt",
"README.rst":"README.rst",
"header.txt":"header.txt",
}),
]
treeprep_steps += [
MergeUpdates(funtoo_overlay.root),
AutoGlobMask("dev-lang/python", "python*_pre*", "funtoo-python_pre"),
ThirdPartyMirrors(),
ProfileDepFix(),
Minify(),
# Set name of repository as "gentoo". Unset masters.
RunSed(["metadata/layout.conf"], ["s/^repo-name = .*/repo-name = gentoo/", "/^masters =/d"]),
RunSed(["profiles/repo_name"], ["s/.*/gentoo/"])
]
all_steps = [ base_steps, profile_steps, ebuild_additions, eclass_steps, master_steps, ebuild_modifications, treeprep_steps ]
for step in all_steps:
funtoo_staging_w.run(step)
funtoo_staging_w.gitCommit(message="glorious funtoo updates",branch=push)
if xmlfile:
a=open(xmlfile,"wb")
etree.ElementTree(xml_out).write(a, encoding='utf-8', xml_declaration=True, pretty_print=True)
a.close()
print("merge-funtoo-staging.py completed successfully.")
sys.exit(0)
# vim: ts=4 sw=4 noet
|
apinsard/funtoo-overlay
|
funtoo/scripts/merge-funtoo-staging.py
|
Python
|
gpl-2.0
| 14,296 | 0.020425 |
#exponent
#find 2^n
n = input("Enter n: ")
print 2**n
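# Example: entering 10 prints 1024 (2**10).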
|
yusufshakeel/Python-Project
|
example/expo.py
|
Python
|
mit
| 55 | 0.036364 |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015 Percy Li
# See LICENSE for details.
import struct
import threading
import copy
class FrameBuffer(object):
def __init__(self,decoder = None):
self.data_buffer = bytes([])
self.decoder = decoder
def set_decoder (self,decoder = None):
self.decoder = decoder
def pop_frame (self):
if self.decoder is not None:
fe,df = self.decoder(self.data_buffer)
self.data_buffer = self.data_buffer[fe:]
if df:
#logger.ods ('Decoded frame : \n ' + str(df) ,lv='dev',cat = 'foundation.tcp')
pass
return df # None if fail
else:
            # If no decoder was specified, return a single byte from the head of the buffer.
return self.pop_buffer()
def push_frame (self,frame):
if hasattr(frame,'marshal'):
self.data_buffer += frame.marshal()
return
if isinstance(frame,bytes):
self.data_buffer += frame
return
def append_buffer (self,buf):
self.data_buffer += buf
def pop_buffer (self):
if self.get_buffer_length() == 0:
return None
_head = self.data_buffer[0]
self.data_buffer = self.data_buffer[1:]
return _head
def get_buffer (self):
return self.data_buffer
def clear_buffer (self):
self.data_buffer = bytes([])
def get_buffer_length(self):
return len(self.data_buffer)
# thread safe version
class TSFrameBuffer(FrameBuffer):
def __init__(self,decoder = None):
FrameBuffer.__init__(self,decoder)
self.internal_lock = threading.RLock()
def set_decoder (self,decoder = None):
with self.internal_lock:
return FrameBuffer.set_decoder(self,decoder)
def pop_frame (self):
with self.internal_lock:
return FrameBuffer.pop_frame(self)
def push_frame (self,frame):
with self.internal_lock:
return FrameBuffer.push_frame(self,frame)
def append_buffer (self,buf):
with self.internal_lock:
return FrameBuffer.append_buffer(self,buf)
def pop_buffer (self):
with self.internal_lock:
return FrameBuffer.pop_buffer(self)
def get_buffer (self):
with self.internal_lock:
return copy.deepcopy( FrameBuffer.get_buffer(self) )
def clear_buffer (self):
with self.internal_lock:
return FrameBuffer.clear_buffer(self)
def get_buffer_length(self):
with self.internal_lock:
return FrameBuffer.get_buffer_length(self)
if __name__ == '__main__':
fb = FrameBuffer()
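    # Minimal smoke test (added illustration): with no decoder installed,
    # pop_frame() falls back to returning single bytes from the buffer head.
    fb.append_buffer(bytes([1, 2, 3]))
    assert fb.pop_frame() == 1
    assert fb.get_buffer_length() == 2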
|
lzjever/pullot
|
pullot/framebuffer.py
|
Python
|
mit
| 2,966 | 0.020566 |
import re
def pythonize_camelcase_name(name):
"""
GetProperty -> get_property
"""
def repl(match):
return '_' + match.group(0).lower()
s = re.sub(r'([A-Z])', repl, name)
if s.startswith('_'):
return s[1:]
else:
return s
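# Doctest-style illustrations (added; behavior follows from the regex above):
#
#   >>> pythonize_camelcase_name('GetProperty')
#   'get_property'
#   >>> pythonize_camelcase_name('setURLHandler')
#   'set_u_r_l_handler'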
|
fredreichbier/babbisch-ooc
|
babbisch_ooc/wraplib/utils.py
|
Python
|
mit
| 279 | 0.007168 |
'''
Given: A protein string P of length at most 1000 aa.
Return: The total weight of P. Consult the monoisotopic mass table.
'''
def weight(protein):
# Build mass table from mass_table.txt
mass = {}
with open("mass_table.txt", "r") as m:
for line in m:
            lst = line.split() # robust to any run of whitespace between columns
mass[lst[0]] = float(lst[1].rstrip())
# Calculate the mass of protein
total = 0
for aa in protein:
total += mass[aa]
return total
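# Rosalind's sample case for PRTM (requires mass_table.txt next to the script):
# weight("SKADYEK") evaluates to approximately 821.392.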
|
jr55662003/My_Rosalind_solution
|
ProteinMass/PRTM.py
|
Python
|
gpl-3.0
| 450 | 0.026667 |
# coding: utf-8
# license: GPLv3
from enemies import *
from hero import *
def annoying_input_int(message =''):
answer = None
while answer == None:
try:
answer = int(input(message))
except ValueError:
            print('You entered invalid characters')
return answer
def game_tournament(hero, dragon_list):
for dragon in dragon_list:
        print('A', dragon._color, 'dragon appears!')
        while dragon.is_alive() and hero.is_alive():
            print('Question:', dragon.question())
            answer = annoying_input_int('Answer: ')
            if dragon.check_answer(answer):
                hero.attack(dragon)
                print('Correct! \n** the dragon screams in pain **')
            else:
                dragon.attack(hero)
                print('Wrong! \n** you take a hit... **')
        if dragon.is_alive():
            break
        print('The', dragon._color, 'dragon is defeated!\n')
    if hero.is_alive():
        print('Congratulations! You won!')
        print('Your accumulated experience:', hero._experience)
    else:
        print('Unfortunately, you lost...')
def start_game():
try:
        print('Welcome to the arithmetic role-playing game with dragons!')
        print('Please introduce yourself: ', end = '')
hero = Hero(input())
dragon_number = 3
dragon_list = generate_dragon_list(dragon_number)
assert(len(dragon_list) == 3)
        print('There are', dragon_number, 'dragons in your way!')
game_tournament(hero, dragon_list)
except EOFError:
        print('The input stream has ended. Sorry, it is no longer possible to accept answers.')
|
mipt-cs-on-python3/arithmetic_dragons
|
tournament.py
|
Python
|
gpl-3.0
| 1,950 | 0.004271 |
from django.core.urlresolvers import reverse
import django.http
import django.utils.simplejson as json
import functools
def make_url(request, reversible):
return request.build_absolute_uri(reverse(reversible))
def json_output(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
output = func(*args, **kwargs)
return django.http.HttpResponse(json.dumps(output),
content_type="application/json")
return wrapper
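# Illustrative view (added sketch; assumes a URL pattern named "index"):
#
#   @json_output
#   def api_index(request):
#       return {"url": make_url(request, "index")}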
|
ukch/online_sabacc
|
src/sabacc/api/viewhelpers.py
|
Python
|
gpl-3.0
| 494 | 0 |
import time
from netCDF4 import Dataset
from oceansar.ocs_io import NETCDFHandler
class ProcFile(NETCDFHandler):
""" Processed raw data file generated by the OASIS Simulator
:param file_name: File name
:param mode: Access mode (w = write, r = read, r+ = read + append)
:param proc_dim: Processed raw data dimensions
:param format: netCDF format
.. note::
Refer to netCDF4 Python library for details on access mode and
available formats
"""
def __init__(self, file_name, mode, proc_dim=None, format='NETCDF4'):
self.__file__ = Dataset(file_name, mode, format)
# If writing, define file
if mode == 'w':
# Set file attributes
self.__file__.description = 'OCEANSAR Processed SLC Data File'
self.__file__.history = 'Created ' + time.ctime(time.time())
self.__file__.source = 'OCEANSAR Simulator'
# Dimensions
if not proc_dim:
raise ValueError('Processed raw data dimensions are needed when creating a new file!')
self.__file__.createDimension('ch_dim', proc_dim[0])
self.__file__.createDimension('pol_dim', proc_dim[1])
self.__file__.createDimension('az_dim', proc_dim[2])
self.__file__.createDimension('rg_dim', proc_dim[3])
num_ch = self.__file__.createVariable('num_ch', 'i4')
# Variables
slc_r = self.__file__.createVariable('slc_r',
'f8',
('ch_dim',
'pol_dim',
'az_dim',
'rg_dim'))
slc_i = self.__file__.createVariable('slc_i',
'f8',
('ch_dim',
'pol_dim',
'az_dim',
'rg_dim'))
slc_r.units = '[]'
slc_i.units = '[]'
inc_angle = self.__file__.createVariable('inc_angle', 'f8')
inc_angle.units = '[deg]'
f0 = self.__file__.createVariable('f0', 'f8')
f0.units = '[Hz]'
ant_L = self.__file__.createVariable('ant_L', 'f8')
ant_L.units = '[m]'
prf = self.__file__.createVariable('prf', 'f8')
prf.units = '[Hz]'
v_ground = self.__file__.createVariable('v_ground', 'f8')
v_ground.units = '[m/s]'
orbit_alt = self.__file__.createVariable('orbit_alt', 'f8')
orbit_alt.units = '[m]'
sr0 = self.__file__.createVariable('sr0', 'f8')
sr0.units = '[m]'
rg_sampling = self.__file__.createVariable('rg_sampling', 'f8')
rg_sampling.units = '[Hz]'
rg_bw = self.__file__.createVariable('rg_bw', 'f8')
rg_bw.units = '[Hz]'
b_ati = self.__file__.createVariable('b_ati', 'f8', 'ch_dim')
b_ati.units = '[m]'
b_xti = self.__file__.createVariable('b_xti', 'f8', 'ch_dim')
b_xti.units = '[m]'
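# Illustrative creation of a new file (added sketch; the file name and the
# dimensions -- 1 channel, 2 polarizations, 100 azimuth x 200 range samples --
# are hypothetical):
#
#   proc = ProcFile('slc_data.nc', 'w', proc_dim=(1, 2, 100, 200))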
|
pakodekker/oceansar
|
oceansar/ocs_io/processed.py
|
Python
|
gpl-3.0
| 3,344 | 0.000299 |
"""Regularizations.
Each regularization method is implemented as a subclass of
:class:`Regularizer`,
where the constructor takes the hyperparameters, and the `__call__` method
constructs the symbolic loss expression given a parameter.
These are made for use with :meth:`Model.regularize`, but can also be used
directly in the :meth:`loss` method of :class:`.Model` subclasses.
"""
import theano
import theano.tensor as T
from theano.ifelse import ifelse
class Regularizer: pass
class L2(Regularizer):
"""L2 loss."""
def __init__(self, penalty=0.01):
self.penalty = penalty
def __call__(self, p):
return T.sqrt(T.sqr(p).sum()) * T.as_tensor_variable(self.penalty)
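# Illustrative use (added sketch; assumes `W` is a Theano shared parameter and
# `base_loss` an existing scalar loss expression):
#
#   reg_term = L2(penalty=0.01)(W)
#   loss = base_loss + reg_term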
class StateNorm(Regularizer):
"""Squared norm difference between recurrent states.
Note that this method seems to be unstable if the initial hidden state is
initialized to zero.
David Krueger & Roland Memisevic (2016).
`Regularizing RNNs by stabilizing activations. <http://arxiv.org/pdf/1511.08400v7.pdf>`_
"""
def __init__(self, penalty=50.0):
self.penalty = penalty
def __call__(self, p, p_mask):
"""Compute the squared norm difference of a sequence.
Example
-------
>>> def loss(self, outputs, outputs_mask):
... # loss() definition from a custom Model subclass
... loss = super().loss()
... pred_states, pred_symbols = self(outputs, outputs_mask)
... # Include transition from initial state
... pred_states = T.concatenate([initial_state, pred_states],
... axis=0)
... return loss + StateNorm(50.0)(pred_states, outputs_mask)
"""
mask = p_mask[:-1]
l2 = T.sqrt(T.sqr(p).sum(axis=2))
diff = (l2[1:] - l2[:-1]) * mask
return (self.penalty * T.sqr(diff).sum() /
mask.sum().astype(theano.config.floatX))
|
robertostling/bnas
|
bnas/regularize.py
|
Python
|
gpl-3.0
| 1,950 | 0.002564 |
# Print the version split into three components
import sys
verfile = sys.argv[1]
f = open(verfile)
version = f.read()
l = [a[0] for a in version.split('.') if a[0] in '0123456789']
# If no revision, '0' is added
if len(l) == 2:
l.append('0')
for i in l:
print i,
f.close()
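# Example: a version file containing "3.0.0" prints "3 0 0". Note that only the
# first character of each component is kept, which strips suffixes like "rc1"
# but would also truncate a multi-digit component such as "10" to "1".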
|
cpcloud/PyTables
|
mswindows/get_pytables_version.py
|
Python
|
bsd-3-clause
| 300 | 0.003333 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import json
from warnings import warn
from uuid import uuid4
# External imports
# Bokeh imports
from .state import curstate
from ..util.serialization import make_id
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
HTML_MIME_TYPE = 'text/html'
JS_MIME_TYPE = 'application/javascript'
LOAD_MIME_TYPE = 'application/vnd.bokehjs_load.v0+json'
EXEC_MIME_TYPE = 'application/vnd.bokehjs_exec.v0+json'
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class CommsHandle(object):
'''
'''
_json = {}
def __init__(self, comms, cell_doc):
self._cellno = None
try:
from IPython import get_ipython
ip = get_ipython()
hm = ip.history_manager
p_prompt = list(hm.get_tail(1, include_latest=True))[0][1]
self._cellno = p_prompt
except Exception as e:
log.debug("Could not get Notebook cell number, reason: %s", e)
self._comms = comms
self._doc = cell_doc
# Our internal copy of the doc is in perpetual "hold". Events from the
        # originating doc will be triggered and collected in it. Events are
# processed/cleared when push_notebook is called for this comms handle
self._doc.hold()
def _repr_html_(self):
if self._cellno is not None:
return "<p><code><Bokeh Notebook handle for <strong>In[%s]</strong>></code></p>" % str(self._cellno)
else:
return "<p><code><Bokeh Notebook handle></code></p>"
@property
def comms(self):
return self._comms
@property
def doc(self):
return self._doc
    # Adding this method makes curdoc dispatch Document model changed
    # events to this CommsHandle. If we find that the event is for a
    # model in our internal copy of the doc, then trigger the internal
    # doc with the event so that it is collected (until a call to
    # push_notebook processes and clears collected events)
def _document_model_changed(self, event):
if event.model._id in self.doc._all_models:
self.doc._trigger_on_change(event)
def install_notebook_hook(notebook_type, load, show_doc, show_app, overwrite=False):
''' Install a new notebook display hook.
Bokeh comes with support for Jupyter notebooks built-in. However, there are
other kinds of notebooks in use by different communities. This function
provides a mechanism for other projects to instruct Bokeh how to display
content in other notebooks.
This function is primarily of use to developers wishing to integrate Bokeh
with new notebook types.
Args:
notebook_type (str) :
            A name for the notebook type, e.g. ``'Jupyter'`` or ``'Zeppelin'``
If the name has previously been installed, a ``RuntimeError`` will
be raised, unless ``overwrite=True``
load (callable) :
A function for loading BokehJS in a notebook type. The function
will be called with the following arguments:
.. code-block:: python
load(
resources, # A Resources object for how to load BokehJS
verbose, # Whether to display verbose loading banner
hide_banner, # Whether to hide the output banner entirely
load_timeout # Time after which to report a load fail error
)
show_doc (callable) :
A function for displaying Bokeh standalone documents in the
notebook type. This function will be called with the following
arguments:
.. code-block:: python
show_doc(
obj, # the Bokeh object to display
state, # current bokeh.io "state"
notebook_handle # whether a notebook handle was requested
)
If the notebook platform is capable of supporting in-place updates
to plots then this function may return an opaque notebook handle
that can be used for that purpose. The handle will be returned by
            ``show()``, and can be used as appropriate to update plots, etc.,
by additional functions in the library that installed the hooks.
show_app (callable) :
A function for displaying Bokeh applications in the notebook
type. This function will be called with the following arguments:
.. code-block:: python
show_app(
app, # the Bokeh Application to display
state, # current bokeh.io "state"
notebook_url # URL to the current active notebook page
)
overwrite (bool, optional) :
Whether to allow an existing hook to be overwritten by a new
definition (default: False)
Returns:
None
Raises:
RuntimeError
If ``notebook_type`` is already installed and ``overwrite=False``
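    Example:
        A sketch of registering a hook for a hypothetical notebook type
        (the ``'zeppelin'`` name and the stub functions below are
        illustrative assumptions, not part of Bokeh):
        .. code-block:: python
            def load(resources, verbose, hide_banner, load_timeout):
                ...  # inject BokehJS into the notebook page
            def show_doc(obj, state, notebook_handle):
                ...  # display a standalone document
            def show_app(app, state, notebook_url):
                ...  # display a Bokeh server application
            install_notebook_hook('zeppelin', load, show_doc, show_app)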
'''
if notebook_type in _HOOKS and not overwrite:
raise RuntimeError("hook for notebook type %r already exists" % notebook_type)
_HOOKS[notebook_type] = dict(load=load, doc=show_doc, app=show_app)
def push_notebook(document=None, state=None, handle=None):
''' Update Bokeh plots in a Jupyter notebook output cells with new data
or property values.
    When working in the notebook, the ``show`` function can be passed the
argument ``notebook_handle=True``, which will cause it to return a
handle object that can be used to update the Bokeh output later. When
``push_notebook`` is called, any property updates (e.g. plot titles or
data source values, etc.) since the last call to ``push_notebook`` or
the original ``show`` call are applied to the Bokeh output in the
previously rendered Jupyter output cell.
Several example notebooks can be found in the GitHub repository in
the :bokeh-tree:`examples/howto/notebook_comms` directory.
Args:
document (Document, optional) :
A :class:`~bokeh.document.Document` to push from. If None,
uses ``curdoc()``. (default: None)
state (State, optional) :
            A :class:`State` object. If None, then the current default
            state (set by ``output_file``, etc.) is used. (default: None)
        handle (CommsHandle, optional) :
            A handle as returned by a previous call to
            ``show(..., notebook_handle=True)``. If None, the most
            recently created handle is used, if available. (default: None)
Returns:
None
Examples:
        Typical usage is similar to this:
.. code-block:: python
from bokeh.plotting import figure
from bokeh.io import output_notebook, push_notebook, show
output_notebook()
plot = figure()
plot.circle([1,2,3], [4,6,5])
handle = show(plot, notebook_handle=True)
# Update the plot title in the earlier cell
plot.title.text = "New Title"
push_notebook(handle=handle)
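        Data source updates are pushed the same way; a sketch (reusing
        ``plot`` and ``handle`` from above, the renderer name ``r`` is
        illustrative):
        .. code-block:: python
            r = plot.circle([1,2,3], [4,6,5])
            r.data_source.data['y'] = [7, 8, 9]
            push_notebook(handle=handle)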
'''
from ..protocol import Protocol
if state is None:
state = curstate()
if not document:
document = state.document
if not document:
warn("No document to push")
return
if handle is None:
handle = state.last_comms_handle
if not handle:
warn("Cannot find a last shown plot to update. Call output_notebook() and show(..., notebook_handle=True) before push_notebook()")
return
events = list(handle.doc._held_events)
# This is to avoid having an exception raised for attempting to create a
    # PATCH-DOC with no events. In the notebook, we just want to silently
# ignore calls to push_notebook when there are no new events
if len(events) == 0:
return
handle.doc._held_events = []
msg = Protocol("1.0").create("PATCH-DOC", events)
handle.comms.send(msg.header_json)
handle.comms.send(msg.metadata_json)
handle.comms.send(msg.content_json)
for header, payload in msg.buffers:
handle.comms.send(json.dumps(header))
handle.comms.send(buffers=[payload])
def run_notebook_hook(notebook_type, action, *args, **kw):
''' Run an installed notebook hook with supplied arguments.
Args:
        notebook_type (str) :
Name of an existing installed notebook hook
        action (str) :
Name of the hook action to execute, ``'doc'`` or ``'app'``
All other arguments and keyword arguments are passed to the hook action
exactly as supplied.
Returns:
Result of the hook action, as-is
Raises:
        RuntimeError
If the hook or specific action is not installed
'''
if notebook_type not in _HOOKS:
raise RuntimeError("no display hook installed for notebook type %r" % notebook_type)
if _HOOKS[notebook_type][action] is None:
raise RuntimeError("notebook hook for %r did not install %r action" % notebook_type, action)
return _HOOKS[notebook_type][action](*args, **kw)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def destroy_server(server_id):
''' Given a UUID id of a div removed or replaced in the Jupyter
notebook, destroy the corresponding server sessions and stop it.
'''
server = curstate().uuid_to_server.get(server_id, None)
if server is None:
log.debug("No server instance found for uuid: %r" % server_id)
return
try:
for session in server.get_sessions():
session.destroy()
server.stop()
del curstate().uuid_to_server[server_id]
except Exception as e:
log.debug("Could not destroy server for id %r: %s" % (server_id, e))
def get_comms(target_name):
''' Create a Jupyter comms object for a specific target, that can
be used to update Bokeh documents in the Jupyter notebook.
Args:
target_name (str) : the target name the Comms object should connect to
    Returns:
Jupyter Comms
'''
# NOTE: must defer all IPython imports inside functions
from ipykernel.comm import Comm
return Comm(target_name=target_name, data={})
def install_jupyter_hooks():
'''
'''
install_notebook_hook('jupyter', load_notebook, show_doc, show_app)
def load_notebook(resources=None, verbose=False, hide_banner=False, load_timeout=5000):
''' Prepare the IPython notebook for displaying Bokeh plots.
Args:
resources (Resource, optional) :
how and where to load BokehJS from (default: CDN)
verbose (bool, optional) :
whether to report detailed settings (default: False)
hide_banner (bool, optional):
whether to hide the Bokeh banner (default: False)
load_timeout (int, optional) :
Timeout in milliseconds when plots assume load timed out (default: 5000)
.. warning::
Clearing the output cell containing the published BokehJS
resources HTML code may cause Bokeh CSS styling to be removed.
Returns:
None
'''
global _NOTEBOOK_LOADED
from .. import __version__
from ..core.templates import NOTEBOOK_LOAD
from ..util.serialization import make_id
from ..resources import CDN
from ..util.compiler import bundle_all_models
if resources is None:
resources = CDN
if not hide_banner:
if resources.mode == 'inline':
js_info = 'inline'
css_info = 'inline'
else:
js_info = resources.js_files[0] if len(resources.js_files) == 1 else resources.js_files
css_info = resources.css_files[0] if len(resources.css_files) == 1 else resources.css_files
warnings = ["Warning: " + msg['text'] for msg in resources.messages if msg['type'] == 'warn']
if _NOTEBOOK_LOADED and verbose:
warnings.append('Warning: BokehJS previously loaded')
element_id = make_id()
html = NOTEBOOK_LOAD.render(
element_id = element_id,
verbose = verbose,
js_info = js_info,
css_info = css_info,
bokeh_version = __version__,
warnings = warnings,
)
else:
element_id = None
_NOTEBOOK_LOADED = resources
custom_models_js = bundle_all_models()
nb_js = _loading_js(resources, element_id, custom_models_js, load_timeout, register_mime=True)
jl_js = _loading_js(resources, element_id, custom_models_js, load_timeout, register_mime=False)
if not hide_banner:
publish_display_data({'text/html': html})
publish_display_data({
JS_MIME_TYPE : nb_js,
LOAD_MIME_TYPE : jl_js
})
def publish_display_data(*args, **kw):
'''
'''
# This import MUST be deferred or it will introduce a hard dependency on IPython
from IPython.display import publish_display_data
return publish_display_data(*args, **kw)
def show_app(app, state, notebook_url, port=0):
    ''' Embed a Bokeh server application in a Jupyter Notebook output cell.
Args:
app (Application or callable) :
A Bokeh Application to embed inline in a Jupyter notebook.
state (State) :
** Unused **
notebook_url (str or callable) :
The URL of the notebook server that is running the embedded app.
            If ``notebook_url`` is a string, it is parsed to construct
            the origin and full server URLs.
If notebook_url is a callable, it must accept one parameter,
which will be the server port, or None. If passed a port,
the callable must generate the server URL, otherwise if passed
None, it must generate the origin URL for the server.
port (int) :
            The port the embedded server will listen on.
By default the port is 0, which results in the server listening
on a random dynamic port.
Returns:
None
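    Example:
        A sketch of a ``notebook_url`` callable for a proxied deployment
        (the host and ``/proxy`` path scheme here are assumptions):
        .. code-block:: python
            def proxied_url(port):
                if port is None:
                    return "localhost:8888"  # origin URL
                return "http://localhost:8888/proxy/%d" % port
            show_app(app, curstate(), proxied_url)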
'''
logging.basicConfig()
from tornado.ioloop import IOLoop
from ..server.server import Server
loop = IOLoop.current()
if callable(notebook_url):
origin = notebook_url(None)
else:
origin = _origin_url(notebook_url)
server = Server({"/": app}, io_loop=loop, port=port, allow_websocket_origin=[origin])
server_id = uuid4().hex
curstate().uuid_to_server[server_id] = server
server.start()
if callable(notebook_url):
url = notebook_url(server.port)
else:
url = _server_url(notebook_url, server.port)
logging.debug("Server URL is %s" % url)
logging.debug("Origin URL is %s" % origin)
from ..embed import server_document
script = server_document(url, resources=None)
publish_display_data({
HTML_MIME_TYPE: script,
EXEC_MIME_TYPE: ""
}, metadata={
EXEC_MIME_TYPE: {"server_id": server_id}
})
def show_doc(obj, state, notebook_handle):
'''
'''
from ..embed.notebook import notebook_content
comms_target = make_id() if notebook_handle else None
(script, div, cell_doc) = notebook_content(obj, comms_target)
publish_display_data({HTML_MIME_TYPE: div})
publish_display_data({JS_MIME_TYPE: script, EXEC_MIME_TYPE: ""}, metadata={EXEC_MIME_TYPE: {"id": obj._id}})
    # Comms handling relies on the fact that the cell_doc returned by
    # notebook_content has models with the same IDs as the original curdoc
# they were copied from
if comms_target:
handle = CommsHandle(get_comms(comms_target), cell_doc)
state.document.on_change_dispatch_to(handle)
state.last_comms_handle = handle
return handle
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
_HOOKS = {}
_NOTEBOOK_LOADED = None
def _loading_js(resources, element_id, custom_models_js, load_timeout=5000, register_mime=True):
'''
'''
from ..core.templates import AUTOLOAD_NB_JS
return AUTOLOAD_NB_JS.render(
elementid = element_id,
js_urls = resources.js_files,
css_urls = resources.css_files,
js_raw = resources.js_raw + [custom_models_js],
css_raw = resources.css_raw_str,
force = True,
timeout = load_timeout,
register_mime = register_mime
)
def _origin_url(url):
'''
'''
if url.startswith("http"):
url = url.split("//")[1]
return url
def _server_url(url, port):
'''
'''
if url.startswith("http"):
return '%s:%d%s' % (url.rsplit(':', 1)[0], port, "/")
else:
return 'http://%s:%d%s' % (url.split(':')[0], port, "/")
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
Karel-van-de-Plassche/bokeh
|
bokeh/io/notebook.py
|
Python
|
bsd-3-clause
| 18,064 | 0.005314 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
''' Lyndor runs from here - contains the main functions '''
import sys, time, os
import module.message as message
import module.save as save
import module.cookies as cookies
import module.read as read
import install
import module.move as move
import module.draw as draw
import module.rename as rename
import module.exercise_file as exercise_file
from colorama import Fore, init
def main():
''' Main function '''
init()
message.animate_characters(Fore.LIGHTYELLOW_EX, draw.ROCKET, 0.02)
message.spinning_cursor()
message.print_line('\r1. Paste course url or\n' +
'2. Press enter for Bulk Download')
url = input()
print('')
start_time = time.time() #start time counter begins
if url == "":
# If user press Enter (i.e. url empty), get urls from Bulkdownload.txt
urls = read.bulk_download()
if not urls:
sys.exit(message.colored_message(Fore.LIGHTRED_EX, 'Please paste urls in Bulk Download.txt\n'))
for url in urls:
schedule_download(url)
else:
# begin regular download
schedule_download(url)
try:
end_time = time.time()
message.animate_characters(Fore.LIGHTGREEN_EX, draw.COW, 0.02)
message.colored_message(Fore.LIGHTGREEN_EX, "\nThe whole process took {}\n".format(move.hms_string(end_time - start_time)))
except KeyboardInterrupt:
sys.exit(message.colored_message(Fore.LIGHTRED_EX, "\n- Program Interrupted!!\n"))
def schedule_download(url):
''' Look for the scheduled time in settings.json '''
if not read.aria2_installed:
tip = '☝🏻 Tip: Install aria2c for faster downloads, read README.md to learn more.'
message.carriage_return_animate(tip)
if read.download_time == '':
# If download time not set, begin download
download_course(url)
return
else:
counter = True
message.colored_message(Fore.LIGHTGREEN_EX, 'Download time set to: ' + read.download_time + '\
in settings.json, you can change or remove this time in settings.json\n')
try:
while counter:
if time.strftime("%H:%M") == read.download_time:
download_course(url)
return
            print(f'Download will start at: {read.download_time}; leave this window open.')
time.sleep(60)
except KeyboardInterrupt:
sys.exit(message.colored_message(Fore.LIGHTRED_EX, "\n- Program Interrupted!!\n"))
def download_course(url):
''' download course '''
# Check for a valid url
if url.find('.html') == -1:
sys.exit(message.animate_characters(Fore.LIGHTRED_EX, draw.ANONYMOUS, 0.02))
url = url[:url.find(".html")+5] #strip any extra text after .html in the url
# Folder/File paths
lynda_folder_path = read.location + '/'
course_folder_path = save.course_path(url, lynda_folder_path)
desktop_folder_path = install.get_path("Desktop")
download_folder_path = install.get_path("Downloads")
# Read preferences
use_cookie_for_download = read.course_download_pref
if use_cookie_for_download in ['cookies', 'cookie'] or read.exfile_download_method == 'aria2':
cookie_path = cookies.find_cookie(desktop_folder_path, download_folder_path)
downloading_from_cookie = message.return_colored_message(Fore.LIGHTBLUE_EX, '🍪 Downloading videos using cookies.txt')
message.carriage_return_animate(downloading_from_cookie)
else:
cookie_path = ''
usr_pass_message = message.return_colored_message(Fore.LIGHTGREEN_EX, '⛺ Using username and password combination for download\n')
message.carriage_return_animate(usr_pass_message)
try:
# main operations ->
save.course(url, lynda_folder_path) # Create course folder
save.info_file(url, course_folder_path) # Gather information
save.chapters(url, course_folder_path) # Create chapter folders
save.contentmd(url) # Create content.md
save.videos(url, cookie_path, course_folder_path) # Download videos
rename.videos(course_folder_path) # rename videos
rename.subtitles(course_folder_path) # rename subtitles
move.vid_srt_to_chapter(url, course_folder_path) # Move videos and subtitles to chapter folders
# Download exercise files
if save.check_exercise_file(url):
print('\nExercise file is available to download')
if not read.download_exercise_file:
            # if the user does not want to download the ex-file
print("settings.json says you do not want to download ex-file -> 'download_exercise_file': false")
else:
# if user wants to download ex-file
if read.course_download_pref == 'regular-login':
exercise_file.download(url, course_folder_path, cookie_path)
elif read.exfile_download_pref == 'library-login':
if read.card_number == '':
print('\nTo download ex-file via library login -> Please save library card details in settings.json')
else:
exercise_file.download(url, course_folder_path, cookie_path)
else:
print('\nThe exercise file can only be downloaded through one of the below combinations:')
print('~ Regular login: username + password or')
print('~ Library login: card number, pin and org. url\n')
else: # if exercise file not present
print('This course does not include Exercise files.')
except KeyboardInterrupt:
sys.exit(message.colored_message(Fore.LIGHTRED_EX, "\n- Program Interrupted!!\n"))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit(message.colored_message(Fore.LIGHTRED_EX, "\n- Program Interrupted!!\n"))
|
ankitsejwal/Lyndor
|
run.py
|
Python
|
mit
| 6,128 | 0.006538 |
{
'name': "Sale only available products on Website",
'summary': """Sale only available products on Website""",
'version': '1.0.0',
'author': 'IT-Projects LLC, Ivan Yelizariev',
'license': 'GPL-3',
'category': 'Custom',
'website': 'https://yelizariev.github.io',
'images': ['images/available.png'],
'price': 9.00,
'currency': 'EUR',
'depends': ['website_sale'],
'data': [
'website_sale_available_views.xml',
],
'installable': True,
}
|
ufaks/website-addons
|
website_sale_available/__openerp__.py
|
Python
|
lgpl-3.0
| 501 | 0 |
""" JobRunningWaitingRatioPolicy
Policy that calculates the efficiency following the formula:
( running ) / ( running + waiting + staging )
if the denominator is smaller than 10, it does not take any decision.
"""
from DIRAC import S_OK
from DIRAC.ResourceStatusSystem.PolicySystem.PolicyBase import PolicyBase
__RCSID__ = '$Id: JobRunningWaitingRatioPolicy.py 60769 2013-01-18 11:50:36Z ubeda $'
class JobRunningWaitingRatioPolicy( PolicyBase ):
"""
The JobRunningWaitingRatioPolicy class is a policy that checks the efficiency of the
jobs according to what is on JobDB.
Evaluates the JobRunningWaitingRatioPolicy results given by the JobCommand.JobCommand
"""
@staticmethod
def _evaluate( commandResult ):
""" _evaluate
efficiency < 0.5 :: Banned
efficiency < 0.9 :: Degraded
"""
result = {
'Status' : None,
'Reason' : None
}
if not commandResult[ 'OK' ]:
result[ 'Status' ] = 'Error'
result[ 'Reason' ] = commandResult[ 'Message' ]
return S_OK( result )
commandResult = commandResult[ 'Value' ]
if not commandResult:
result[ 'Status' ] = 'Unknown'
result[ 'Reason' ] = 'No values to take a decision'
return S_OK( result )
commandResult = commandResult[ 0 ]
if not commandResult:
result[ 'Status' ] = 'Unknown'
result[ 'Reason' ] = 'No values to take a decision'
return S_OK( result )
running = float( commandResult[ 'Running' ] )
waiting = float( commandResult[ 'Waiting' ] )
staging = float( commandResult[ 'Staging' ] )
total = running + waiting + staging
    #we want a minimum number of jobs to take a decision ( at least 10 jobs )
if total < 10:
result[ 'Status' ] = 'Unknown'
result[ 'Reason' ] = 'Not enough jobs to take a decision'
return S_OK( result )
efficiency = running / total
if efficiency < 0.4:
result[ 'Status' ] = 'Banned'
elif efficiency < 0.65:
result[ 'Status' ] = 'Degraded'
else:
result[ 'Status' ] = 'Active'
result[ 'Reason' ] = 'Job Running / Waiting ratio of %.2f' % efficiency
return S_OK( result )
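# Worked example (illustrative): for Running = 6, Waiting = 3, Staging = 1
# the total is 10 jobs, so a decision is taken; efficiency = 6 / 10 = 0.6,
# which falls in the 0.4 <= efficiency < 0.65 band, so the returned Status
# is 'Degraded' with Reason 'Job Running / Waiting ratio of 0.60'.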
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
Sbalbp/DIRAC
|
ResourceStatusSystem/Policy/JobRunningWaitingRatioPolicy.py
|
Python
|
gpl-3.0
| 2,447 | 0.042092 |
complexe = importeur.salle.creer_etendue("complexe")
complexe.origine = (20, 20)
obstacle = importeur.salle.obstacles["falaise"]
coords = [
(20, 20),
(21, 20),
(22, 20),
(23, 20),
(24, 20),
(25, 20),
(20, 21),
(20, 22),
(20, 23),
(20, 24),
(20, 25),
(19, 25),
(19, 26),
(18, 26),
(17, 26),
(19, 27),
(17, 27),
(17, 28),
(18, 28),
(19, 28),
(20, 28),
(21, 28),
(22, 28),
(23, 28),
(24, 28),
(24, 27),
(24, 26),
(23, 26),
(23, 25),
(23, 24),
(24, 24),
(25, 24),
(25, 23),
(23, 23),
(25, 22),
(22, 22),
(23, 22),
(25, 21),
]
for coord in coords:
complexe.ajouter_obstacle(coord, obstacle)
complexe.trouver_contour()
|
vlegoff/tsunami
|
src/test/boostrap/salle/etendue/complexe.py
|
Python
|
bsd-3-clause
| 772 | 0 |
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from framework import api_select
def create_jobs(api_type):
api = api_select.api(__file__, api_type)
api.flow_job()
api.job('passwd_args', exec_time=0.5, max_fails=0, expect_invocations=1, expect_order=1,
params=(('s1', 'no-secret', 'desc'), ('passwd', 'p2', 'desc'), ('PASS', 'p3', 'desc')))
return api
if __name__ == '__main__':
create_jobs(api_select.ApiType.JENKINS)
|
lhupfeldt/jenkinsflow
|
demo/jobs/hide_password_jobs.py
|
Python
|
bsd-3-clause
| 545 | 0.00367 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Installation()
result.template = "object/installation/faction_perk/turret/shared_block_sm.iff"
result.attribute_template_id = -1
result.stfName("turret_n","block_small")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/installation/faction_perk/turret/shared_block_sm.py
|
Python
|
mit
| 460 | 0.047826 |
# -*- coding: utf-8 -*-
try:
    f1 = open("input.txt","r",encoding="utf-8")
except IOError:
    print("Could not find the input file input.txt")
try:
    f2 = open("output.txt","w",encoding="utf-8")
except IOError:
    print("Could not open the output file output.txt")
import re # import the regular expressions module
# --- regular expression for level-2 headings of the form: == xxx ==
zagolovok_level2 = re.compile("==.*==") # greedy quantifier .*
# --- regular expressions for internal links of the form [[id**|**]], [[club**|**]], [[public**|**]]
#ssylka_inner_tpl = re.compile("\[\[.*?\|.*?\]\]") # [[ | ]] non-greedy quantifier .*?
ssylka_inner_id = re.compile("\[\[id.*?\|.*?\]\]") # id
ssylka_inner_club = re.compile("\[\[club.*?\|.*?\]\]") # club
ssylka_inner_public = re.compile("\[\[public.*?\|.*?\]\]") # public
# --- regular expression for external links of the form [http**|**]
ssylka_outer = re.compile("\[http.*?\|.*?\]")
# --- regular expression for inserting a line break (if the line ends with ":" + whitespace)
perenos = re.compile(":\s*$")
# --------
for stroka in f1.readlines(): # read the input file line by line
    # ---- Replace headings
    if re.match(zagolovok_level2, stroka):
        stroka = stroka.replace("==","##",1)
        stroka = stroka.replace("==", "")
    # ---- Replace bold and italic text ----
    stroka = stroka.replace("'''",'**') # bold - should be reworked into regular expressions!
    stroka = stroka.replace("''",'*') # italics - should be reworked into regular expressions!
    # ---- Replace internal links (id, club, public) ----
    iskomoe = (re.findall(ssylka_inner_id, stroka) +
               re.findall(ssylka_inner_club, stroka) +
               re.findall(ssylka_inner_public, stroka)) # find all id, club, public links
    if iskomoe:
        for ssylka in iskomoe: # iterate over the links found in the line
            ssylka_id = ssylka.split("|")[0].replace('[[','') # extract the link id
            ssylka_name = ssylka.split("|")[1].replace(']]','') # extract the link name
            ssylka_new = ('['+ssylka_name+']('+'http://vk.com/'+ssylka_id+')')
            stroka = stroka.replace(ssylka, ssylka_new) # replace the old link with the new one
    # ---- Replace external links [http**|**] ----
    iskomoe2 = re.findall(ssylka_outer, stroka)
    if iskomoe2:
        for ssylka2 in iskomoe2:
            ssylka2_id = ssylka2.split("|")[0].replace('[http','http')
            ssylka2_name = ssylka2.split("|")[1].replace(']','')
            ssylka2_new = '['+ssylka2_name+']('+ssylka2_id+')'
            stroka = stroka.replace(ssylka2, ssylka2_new)
    # ---- Write the converted line to the output file ----
    if re.search(perenos, stroka):
        f2.write('\n' + stroka)
    else:
        f2.write(stroka)
# --------
f1.close()
f2.close()
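# Transformation example (illustrative): an input wiki line such as
#   == News ==
#   '''Important''': [[id12345|Ivan]] wrote [http://example.com|here]
# becomes
#   ## News
#   **Important**: [Ivan](http://vk.com/id12345) wrote [here](http://example.com)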
|
dimitrius-brest/katalog-poseleniy-RP
|
converter-vkwiki2md/convert2md.py
|
Python
|
cc0-1.0
| 3,623 | 0.019257 |
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
import datetime, time, requests, re, os
import bs4
from django.contrib.admin.views.decorators import staff_member_required
from decimal import *
# Create your views here.
from .models import Gas, Region, Station, Site, Ship, Harvester, Setup, APICheck
from .forms import GasForm, SiteForm, SiteAnalyzer
def about(request):
return render(request, 'home/about.html')
def home(request):
if request.method == "POST":
form = GasForm(data=request.POST)
if form.is_valid():
data = form.cleaned_data
harv = data['harvester']
cycle = harv.cycle
yld = harv.yld
ship = data['ship']
yld_bonus = ship.yld_bonus
if data['skill'] > 5:
skill = 5
if data['skill'] < 1:
skill = 1
else:
skill = data['skill']
cycle_bonus = skill * .05
else:
form = GasForm()
cycle = 40
yld = 20
cycle_bonus = 0.25
yld_bonus = 1
if cycle_bonus > .25:
cycle_bonus = Decimal(0.25)
c = cycle * (1 - cycle_bonus)
y = yld * (1 + yld_bonus)
gases = Gas.objects.all()
isk_min = {}
for gas in gases:
g = gas.name
vol = gas.volume
isk_min_val = ((Decimal(y) / Decimal(gas.volume)) * 2) * (60 / Decimal(c)) * Decimal(gas.last_price)
isk_mthree = Decimal(gas.last_price) / Decimal(gas.volume)
isk_min[g] = [isk_min_val, isk_mthree]
u = APICheck.objects.get(id=1)
context = {'isk_min': isk_min, 'form': form, 'updated': str(u.updated)}
return render(request, "home/home.html", context)
def sites(request):
if request.method == "POST":
form = SiteForm(data=request.POST)
if form.is_valid():
data = form.cleaned_data
harv = data['harvester']
cycle = Decimal(harv.cycle)
yld = Decimal(harv.yld)
ship = data['ship']
yld_bonus = Decimal(ship.yld_bonus)
cargo = Decimal(ship.cargo)
num = Decimal(data['num'])
if data['skill'] > 5:
skill = 5
if data['skill'] < 1:
skill = 1
else:
skill = data['skill']
cycle_bonus = skill * .05
extra_data = data['extra_data']
else:
form = SiteForm()
cycle = Decimal(40)
yld = Decimal(20)
cycle_bonus = Decimal(0.25)
yld_bonus = Decimal(1)
num = Decimal(1)
cargo = 10000
extra_data = False
c = cycle * (Decimal(1) - Decimal(cycle_bonus))
y = yld * (Decimal(1) + Decimal(yld_bonus))
sites = Site.objects.all()
sites_calc = {}
for site in sites:
p_price = site.p_gas.last_price
s_price = site.s_gas.last_price
p_vol = site.p_gas.volume
s_vol = site.s_gas.volume
p_isk_min = ((Decimal(y) / Decimal(p_vol)) * 2) * (60 / Decimal(c)) * Decimal(p_price) * num
s_isk_min = ((Decimal(y) / Decimal(s_vol)) * 2) * (60 / Decimal(c)) * Decimal(s_price) * num
if p_isk_min < s_isk_min:
best_gas = site.s_gas
best_gas_isk_min = s_isk_min
best_qty = site.s_qty
other_gas = site.p_gas
other_gas_isk_min = p_isk_min
other_qty = site.p_qty
else:
best_gas = site.p_gas
best_gas_isk_min = p_isk_min
best_qty = site.p_qty
other_gas = site.s_gas
other_gas_isk_min = s_isk_min
other_qty = site.s_qty
p_units_min = ((y / best_gas.volume) * 2) * (60 / c) * num
s_units_min = ((y / other_gas.volume) * 2) * (60 / c) * num
time_to_clear = (best_qty / p_units_min) + (other_qty / s_units_min)
isk_pres = (p_price * site.p_qty) + (s_price * site.s_qty)
site_isk_min = Decimal(isk_pres) / Decimal(time_to_clear)
#extra data calculations
primary_time_to_clear = (best_qty / p_units_min)
secondary_time_to_clear = (other_qty / s_units_min)
#blue_loot_isk
#time to kill site
ships_needed = ((site.p_qty * p_vol) + (site.s_qty * s_vol)) / (cargo)
sites_calc[site.name] = [isk_pres, best_gas, best_gas_isk_min, other_gas, other_gas_isk_min, site_isk_min, time_to_clear, primary_time_to_clear, secondary_time_to_clear, ships_needed]
u = APICheck.objects.get(id=1)
context = {'form': form, 'sites_calc': sites_calc, 'updated': str(u.updated), 'extra_data': extra_data}
return render(request, "home/sites.html", context)
def site_an(request):
if request.method == 'POST':
form = SiteAnalyzer(data=request.POST)
if form.is_valid():
data = form.cleaned_data
scan = data['scan']
num = Decimal(data['num'])
ship = data['ship']
harvester = data['harvester']
skill = Decimal(data['skill'])
show_data = True
else:
form = SiteAnalyzer()
show_data = False
skill = 0
yld = 0
num = 1
ship = Ship.objects.get(id=1)
harvester = Harvester.objects.get(id=1)
cycle_bonus = skill * Decimal(0.05)
yld = harvester.yld
c = harvester.cycle * (1 - cycle_bonus)
y = yld * (1 + ship.yld_bonus) * num
#parse Dscan
sites = []
proc_sites = []
if show_data == True:
#print(scan)
scan_re = re.compile(r'Gas Site *(\S* \S* \S*) *')
scan_re_b = re.compile(r'(Instrumental Core Reservoir|Ordinary Perimeter Reservoir|Minor Perimeter Reservoir|Bountiful Frontier Reservoir|Barren Perimeter Reservoir|Token Perimeter Reservoir|Sizable Perimeter Reservoir|Vast Frontier Reservoir|Vital Core Reservoir)')
scan_results = scan_re.findall(scan)
if scan_results == []:
scan_results = scan_re_b.findall(scan)
print(scan_results)
for res in scan_results:
sites.append(res)
for s in sites:
site = Site.objects.get(name=s)
site_name = site.name
site_isk = (site.p_gas.last_price * site.p_qty) + (site.s_gas.last_price * site.s_qty)
#ninja scanning
#determine best gas
p_isk_min = ((Decimal(y) / Decimal(site.p_gas.volume)) * 2) * (60 / Decimal(c)) * Decimal(site.p_gas.last_price)
s_isk_min = ((Decimal(y) / Decimal(site.s_gas.volume)) * 2) * (60 / Decimal(c)) * Decimal(site.s_gas.last_price)
if p_isk_min >= s_isk_min:
first_cloud = site.p_gas
first_qty = site.p_qty
sec_cloud = site.s_gas
sec_qty = site.s_qty
if p_isk_min <= s_isk_min:
first_cloud = site.s_gas
first_qty = site.s_qty
sec_cloud = site.p_gas
sec_qty = site.p_qty
#calculate how much you can get in 15 minutes
units_15 = ((Decimal(y) / Decimal(first_cloud.volume)) * 2) * (60 / Decimal(c)) * 15
if units_15 <= first_qty:
ninja_isk = units_15 * first_cloud.last_price
if ninja_isk > site_isk:
ninja_isk = site_isk
m_per_s = (units_15 / num) * first_cloud.volume
#if it is more than the qty in the best cloud, calculate the remaining time
if units_15 > first_qty:
min_left = 15 - (first_qty / (units_15 / 15))
sec_units_min = ((Decimal(y) / Decimal(sec_cloud.volume)) * 2) * (60 / Decimal(c))
rem_units = sec_units_min * min_left
ninja_isk = (rem_units * sec_cloud.last_price) + (first_qty * first_cloud.last_price)
if ninja_isk > site_isk:
ninja_isk = site_isk
m_per_s = ((units_15 / num) * first_cloud.volume) + ((rem_units / num) * sec_cloud.volume)
if m_per_s * num > (site.p_qty * site.p_gas.volume) + (site.s_qty * site.s_gas.volume):
m_per_s = ((site.p_qty * site.p_gas.volume) + (site.s_qty * site.s_gas.volume)) / num
sipm = ninja_isk / 15 / num
nips = ninja_isk / num
if site_name == 'Ordinary Perimeter Reservoir':
sipm = 0
m_per_s = 0
nips = 0
ninja_isk = 0
ninja_si = (site_name, site_isk, sipm, first_cloud.name, m_per_s, nips, ninja_isk)
#print(ninja_si)
proc_sites.append(ninja_si)
t_site_isk = 0
t_sipm = 0
t_sipm_c = 0
t_m_per_s = 0
t_nips = 0
t_ninja_isk = 0
for s in proc_sites:
t_site_isk = t_site_isk + s[1]
t_sipm = t_sipm + s[2]
if s[0] != "Ordinary Perimeter Reservoir":
t_sipm_c = t_sipm_c + 1
t_m_per_s = t_m_per_s + s[4]
t_nips = t_nips + s[5]
t_ninja_isk = t_ninja_isk + s[6]
ships = t_m_per_s / ship.cargo
if t_sipm_c == 0:
t_sipm_c = 1
if t_site_isk == 0:
t_site_isk = 1
percent = (t_ninja_isk / t_site_isk) * 100
totals = (t_site_isk, t_sipm / t_sipm_c, t_m_per_s, t_nips, t_ninja_isk, ships, percent)
t_min = t_sipm_c * 15
u = APICheck.objects.get(id=1)
#site clearing
#take sites
#isk present, blue loot isk present, time to fully clear site, rat dps, rat ehp
context = {'show_data': show_data, 'form': form, 'sites': sites, 'proc_sites': proc_sites, 'totals': totals, 't_min': t_min, 'updated': str(u.updated)}
return render(request, "home/site_an.html", context)
def pull_prices(request):
tag_re = re.compile(r'<.*>(.*)</.*>')
gs = Gas.objects.all()
id_str = ''
for g in gs:
gid = g.item_id
id_str = id_str+'&typeid='+gid
#r = Region.objects.get(id=1)
#r = r.region_id
r = '10000002'
url = 'http://api.eve-central.com/api/marketstat?'+id_str+'®ionlimit='+r
xml_raw = requests.get(url)
    path = 'data/prices.xml'  # defined up front so the read below always has a path
    if xml_raw.status_code == requests.codes.ok:
        xml = open(path, 'w')
        xml.write(xml_raw.text)
        xml.close()
        status = 'OK'
    else:
        status = 'Error'
xml_file = open(path, 'r')
xml = xml_file.read()
soup = bs4.BeautifulSoup(xml, 'xml')
types = soup.find_all('type')
for t in types:
t_dict = dict(t.attrs)
type_id = t_dict['id']
buy = t.buy
        avg = buy.find_all('max')  # despite the name, this takes the max buy price
avg_in = tag_re.search(str(avg))
avg_in = avg_in.group(1)
avg_price = Decimal(avg_in)
avg_price = round(avg_price, 2)
g = Gas.objects.get(item_id=type_id)
g.last_price = avg_price
g.save()
gases = Gas.objects.all()
a, c = APICheck.objects.get_or_create(id=1)
a.save()
context = {'status': status, 'gases': gases}
return render(request, "home/pull_prices.html", context)
@staff_member_required
def wipe_db(request):
s = Site.objects.all()
s.delete()
g = Gas.objects.all()
g.delete()
r = Region.objects.all()
r.delete()
s = Station.objects.all()
s.delete()
s = Ship.objects.all()
s.delete()
h = Harvester.objects.all()
h.delete()
s = Setup.objects.all()
s.delete()
return HttpResponseRedirect(reverse('home:home'))
@staff_member_required
def setup_site(request):
try:
s = Setup.objects.get(id=1)
if s==1:
return HttpResponseRedirect(reverse('home:home'))
except:
g = Gas(name='Fullerite-C28',item_id='30375', volume='2')
g.save()
g = Gas(name='Fullerite-C32',item_id='30376', volume='5')
g.save()
g = Gas(name='Fullerite-C320',item_id='30377', volume='5')
g.save()
g = Gas(name='Fullerite-C50',item_id='30370', volume='1')
g.save()
g = Gas(name='Fullerite-C540',item_id='30378', volume='10')
g.save()
g = Gas(name='Fullerite-C60',item_id='30371', volume='1')
g.save()
g = Gas(name='Fullerite-C70',item_id='30372', volume='1')
g.save()
g = Gas(name='Fullerite-C72',item_id='30373', volume='2')
g.save()
g = Gas(name='Fullerite-C84',item_id='30374', volume='2')
g.save()
r = Region(name='The Forge', region_id='10000002')
r.save()
s = Station(name='Jita IV - Moon 4 - Caldari Navy Assembly Plant ( Caldari Administrative Station )',station_id='60003760')
s.save()
s = Ship(name='Venture',cargo=5000,yld_bonus=1.00)
s.save()
s = Ship(name='Prospect',cargo=10000,yld_bonus=1.00)
s.save()
h = Harvester(name='Gas Cloud Harvester I',harv_id='25266',cycle=30,yld=10)
h.save()
h = Harvester(name='\'Crop\' Gas Cloud Harvester',harv_id='25540',cycle=30,yld=10)
h.save()
h = Harvester(name='\'Plow\' Gas Cloud Harvester',harv_id='25542',cycle=30,yld=10)
h.save()
h = Harvester(name='Gas Cloud Harvester II',harv_id='25812',cycle=40,yld=20)
h.save()
h = Harvester(name='Syndicate Gas Cloud Harvester',harv_id='28788',cycle=30,yld=10)
h.save()
c50 = Gas.objects.get(name='Fullerite-C50')
c60 = Gas.objects.get(name='Fullerite-C60')
c70 = Gas.objects.get(name='Fullerite-C70')
c72 = Gas.objects.get(name='Fullerite-C72')
c84 = Gas.objects.get(name='Fullerite-C84')
c28 = Gas.objects.get(name='Fullerite-C28')
c32 = Gas.objects.get(name='Fullerite-C32')
c320 = Gas.objects.get(name='Fullerite-C320')
c540 = Gas.objects.get(name='Fullerite-C540')
s = Site(name='Barren Perimeter Reservoir',p_gas=c50,s_gas=c60,p_qty=3000,s_qty=1500)
s.save()
s = Site(name='Token Perimeter Reservoir',p_gas=c60,s_gas=c70,p_qty=3000,s_qty=1500)
s.save()
s = Site(name='Ordinary Perimeter Reservoir',p_gas=c72,s_gas=c84,p_qty=3000,s_qty=1500)
s.save()
s = Site(name='Sizable Perimeter Reservoir',p_gas=c84,s_gas=c50,p_qty=3000,s_qty=1500)
s.save()
s = Site(name='Minor Perimeter Reservoir',p_gas=c70,s_gas=c72,p_qty=3000,s_qty=1500)
s.save()
s = Site(name='Bountiful Frontier Reservoir',p_gas=c28,s_gas=c32,p_qty=5000,s_qty=1000)
s.save()
s = Site(name='Vast Frontier Reservoir',p_gas=c32,s_gas=c28,p_qty=5000,s_qty=1000)
s.save()
s = Site(name='Instrumental Core Reservoir',p_gas=c320,s_gas=c540,p_qty=6000,s_qty=500)
s.save()
s = Site(name='Vital Core Reservoir',p_gas=c540,s_gas=c320,p_qty=6000,s_qty=500)
s.save()
try:
os.mkdir('data/')
except:
pass
s = Setup(setup=1)
s.save()
return HttpResponseRedirect(reverse('home:home'))
|
inspectorbean/gasbuddy
|
home/views.py
|
Python
|
mit
| 14,960 | 0.008757 |
from quanthistling.tests import *
class TestBookController(TestController):
def test_index(self):
response = self.app.get(url(controller='book', action='index'))
# Test response...
|
FrankNagel/qlc
|
src/webapp/quanthistling/quanthistling/tests/functional/test_book.py
|
Python
|
gpl-3.0
| 203 | 0.004926 |
"""The WaveBlocks Project
Compute some observables like norm, kinetic and potential energy
of Hagedorn wavepackets. This class implements the mixed case
where the bra does not equal the ket.
@author: R. Bourquin
@copyright: Copyright (C) 2014, 2016 R. Bourquin
@license: Modified BSD License
"""
from functools import partial
from numpy import squeeze, sum
from WaveBlocksND.Observables import Observables
__all__ = ["ObservablesMixedHAWP"]
class ObservablesMixedHAWP(Observables):
r"""This class implements the mixed case observable computation
:math:`\langle \Psi | \cdot | \Psi^{\prime} \rangle` for Hagedorn
wavepackets :math:`\Psi` where the bra :math:`\Psi` does not equal
the ket :math:`\Psi^{\prime}`.
"""
def __init__(self, *, innerproduct=None, gradient=None):
r"""Initialize a new :py:class:`ObservablesMixedHAWP` instance for observable computation of Hagedorn wavepackets.
"""
        self._innerproduct = innerproduct
        self._gradient = gradient
def set_innerproduct(self, innerproduct):
r"""Set the innerproduct.
:param innerproduct: An inner product for computing the integrals. The inner product is used
for the computation of all brakets
:math:`\langle \Psi | \cdot | \Psi^{\prime} \rangle`.
:type innerproduct: A :py:class:`InnerProduct` subclass instance.
.. note:: Make sure to use an inhomogeneous inner product here.
"""
self._innerproduct = innerproduct
def set_gradient(self, gradient):
r"""Set the gradient.
:param gradient: A gradient operator. The gradient is only used for the computation of the kinetic
energy :math:`\langle \Psi | T | \Psi^{\prime} \rangle`.
:type gradient: A :py:class:`Gradient` subclass instance.
"""
self._gradient = gradient
def overlap(self, pacbra, packet, *, component=None, summed=False):
r"""Calculate the overlap :math:`\langle \Psi | \Psi^{\prime} \rangle` of the wavepackets
:math:`\Psi` and :math:`\Psi^{\prime}`.
:param pacbra: The wavepacket :math:`\Psi` which takes part in the overlap integral.
:type pacbra: A :py:class:`HagedornWavepacketBase` subclass instance.
:param packet: The wavepacket :math:`\Psi^{\prime}` which takes part in the overlap integral.
:type packet: A :py:class:`HagedornWavepacketBase` subclass instance.
:param component: The index :math:`i` of the components :math:`\Phi_i` of :math:`\Psi`
and :math:`\Phi_i^{\prime}` of :math:`\Psi^{\prime}` whose overlap is
computed. The default value is ``None`` which means to compute the
overlaps with all :math:`N` components involved.
:type component: Integer or ``None``.
:param summed: Whether to sum up the overlaps :math:`\langle \Phi_i | \Phi_i^{\prime} \rangle`
of the individual components :math:`\Phi_i` and :math:`\Phi_i^{\prime}`.
:type summed: Boolean, default is ``False``.
:return: The overlap of :math:`\Psi` with :math:`\Psi^{\prime}` or the overlap of :math:`\Phi_i`
with :math:`\Phi_i^{\prime}` or a list with the :math:`N` overlaps of all components.
(Depending on the optional arguments.)
"""
return self._innerproduct.quadrature(pacbra, packet, diag_component=component, diagonal=True, summed=summed)
def norm(self, wavepacket, *, component=None, summed=False):
r"""Calculate the :math:`L^2` norm :math:`\langle \Psi | \Psi \rangle` of the wavepacket :math:`\Psi`.
:param wavepacket: The wavepacket :math:`\Psi` of which we compute the norm.
:type wavepacket: A :py:class:`HagedornWavepacketBase` subclass instance.
:param component: The index :math:`i` of the component :math:`\Phi_i` whose norm is computed.
The default value is ``None`` which means to compute the norms of all :math:`N` components.
:type component: int or ``None``.
:param summed: Whether to sum up the norms :math:`\langle \Phi_i | \Phi_i \rangle` of the
individual components :math:`\Phi_i`.
:type summed: Boolean, default is ``False``.
:return: The norm of :math:`\Psi` or the norm of :math:`\Phi_i` or a list with the :math:`N`
norms of all components. (Depending on the optional arguments.)
.. note:: This method just redirects to a call to :py:meth:`HagedornWavepacketBase.norm`.
"""
return wavepacket.norm(component=component, summed=summed)
def kinetic_overlap_energy(self, pacbra, packet, *, component=None, summed=False):
r"""Compute the kinetic energy overlap :math:`\langle \Psi | T | \Psi^{\prime} \rangle`
of the different components :math:`\Phi_i` and :math:`\Phi_i^{\prime}` of the
wavepackets :math:`\Psi` and :math:`\Psi^{\prime}`.
:param pacbra: The wavepacket :math:`\Psi` which takes part in the kinetic energy integral.
:type pacbra: A :py:class:`HagedornWavepacketBase` subclass instance.
:param packet: The wavepacket :math:`\Psi^{\prime}` which takes part in the kinetic energy integral.
:type packet: A :py:class:`HagedornWavepacketBase` subclass instance.
:param component: The index :math:`i` of the components :math:`\Phi_i` of :math:`\Psi`
and :math:`\Phi_i^{\prime}` of :math:`\Psi^{\prime}` which take part in the
kinetic energy integral. If set to ``None`` the computation is performed for
all :math:`N` components of :math:`\Psi` and :math:`\Psi^{\prime}`.
:type component: Integer or ``None``.
:param summed: Whether to sum up the kinetic energies :math:`E_i` of the individual
components :math:`\Phi_i` and :math:`\Phi_i^{\prime}`.
:type summed: Boolean, default is ``False``.
:return: A list of the kinetic energy overlap integrals of the individual components or
the overall kinetic energy overlap of the wavepackets. (Depending on the optional arguments.)
"""
Nbra = pacbra.get_number_components()
Nket = packet.get_number_components()
if not Nbra == Nket:
# TODO: Drop this requirement, should be easy when zip(...) exhausts
raise ValueError("Number of components in bra (%d) and ket (%d) differs!" % (Nbra, Nket))
if component is None:
components = range(Nbra)
else:
components = [component]
ekin = []
for n in components:
gradpacbra = self._gradient.apply_gradient(pacbra, component=n)
gradpacket = self._gradient.apply_gradient(packet, component=n)
Q = [self._innerproduct.quadrature(gpb, gpk, diag_component=n) for gpb, gpk in zip(gradpacbra, gradpacket)]
ekin.append(0.5 * sum(Q))
if summed is True:
ekin = sum(ekin)
elif component is not None:
# Do not return a list for specific single components
ekin = ekin[0]
return ekin
def kinetic_energy(self, wavepacket, *, component=None, summed=False):
r"""Compute the kinetic energy :math:`E_{\text{kin}} := \langle \Psi | T | \Psi \rangle`
of the different components :math:`\Phi_i` of the wavepacket :math:`\Psi`.
:param wavepacket: The wavepacket :math:`\Psi` of which we compute the kinetic energy.
:type wavepacket: A :py:class:`HagedornWavepacketBase` subclass instance.
:param component: The index :math:`i` of the component :math:`\Phi_i` whose
kinetic energy we compute. If set to ``None`` the
computation is performed for all :math:`N` components.
:type component: Integer or ``None``.
:param summed: Whether to sum up the kinetic energies :math:`E_i` of the individual
components :math:`\Phi_i`.
:type summed: Boolean, default is ``False``.
:return: A list of the kinetic energies of the individual components or the
overall kinetic energy of the wavepacket. (Depending on the optional arguments.)
.. note:: This method just expands to a call of the :py:meth:`ObservablesMixedHAWP.kinetic_overlap_energy`
method. Better use :py:meth:`ObservablesHAWP.kinetic_energy`.
"""
return self.kinetic_overlap_energy(wavepacket, wavepacket, component=component, summed=summed)
def potential_overlap_energy(self, pacbra, packet, potential, *, component=None, summed=False):
r"""Compute the potential energy overlap :math:`\langle \Psi | V(x) | \Psi^{\prime} \rangle`
of the different components :math:`\Phi_i` and :math:`\Phi_i^{\prime}` of the
wavepackets :math:`\Psi` and :math:`\Psi^{\prime}`.
:param pacbra: The wavepacket :math:`\Psi` which takes part in the potential energy integral.
:type pacbra: A :py:class:`HagedornWavepacketBase` subclass instance.
:param packet: The wavepacket :math:`\Psi^{\prime}` which takes part in the potential energy integral.
:type packet: A :py:class:`HagedornWavepacketBase` subclass instance.
:param potential: The potential :math:`V(x)`. (Actually, not the potential object itself
but one of its ``V.evaluate_*`` methods.)
:param component: The index :math:`i` of the components :math:`\Phi_i` of :math:`\Psi`
and :math:`\Phi_i^{\prime}` of :math:`\Psi^{\prime}` which take part in the
potential energy integral. If set to ``None`` the computation is performed for
all :math:`N` components of :math:`\Psi` and :math:`\Psi^{\prime}`.
:type component: Integer or ``None``.
:param summed: Whether to sum up the potential energies :math:`E_i` of the individual
components :math:`\Phi_i` and :math:`\Phi_i^{\prime}`.
:type summed: Boolean, default is ``False``.
:return: A list of the potential energy overlap integrals of the individual components or
the overall potential energy overlap of the wavepackets. (Depending on the optional arguments.)
"""
Nbra = pacbra.get_number_components()
Nket = packet.get_number_components()
if not Nbra == Nket:
# TODO: Drop this requirement, should be easy when zip(...) exhausts
raise ValueError("Number of components in bra (%d) and ket (%d) differs!" % (Nbra, Nket))
# TODO: Better take 'V' instead of 'V.evaluate_at' as argument?
# f = partial(potential.evaluate_at, as_matrix=True)
f = partial(potential, as_matrix=True)
# Compute the brakets for each component
if component is not None:
Q = self._innerproduct.quadrature(pacbra, packet, operator=f, diag_component=component, eval_at_once=True)
Q = [squeeze(Q)]
else:
Q = self._innerproduct.quadrature(pacbra, packet, operator=f, eval_at_once=True)
Q = list(map(squeeze, Q))
# And don't forget the summation in the matrix multiplication of 'operator' and 'ket'
# TODO: Should this go inside the innerproduct?
epot = [sum(Q[i * Nket:(i + 1) * Nket]) for i in range(Nbra)]
if summed is True:
epot = sum(epot)
elif component is not None:
# Do not return a list for specific single components
epot = epot[0]
return epot
def potential_energy(self, wavepacket, potential, *, component=None, summed=False):
r"""Compute the potential energy :math:`E_{\text{pot}} := \langle \Psi | V(x) | \Psi \rangle`
of the different components :math:`\Phi_i` of the wavepacket :math:`\Psi`.
:param wavepacket: The wavepacket :math:`\Psi` of which we compute the potential energy.
:type wavepacket: A :py:class:`HagedornWavepacketBase` subclass instance.
:param potential: The potential :math:`V(x)`. (Actually, not the potential object itself
but one of its ``V.evaluate_*`` methods.)
:param component: The index :math:`i` of the component :math:`\Phi_i` whose
potential energy we compute. If set to ``None`` the
computation is performed for all :math:`N` components.
:type component: Integer or ``None``.
:param summed: Whether to sum up the potential energies :math:`E_i` of the individual
components :math:`\Phi_i`.
:type summed: Boolean, default is ``False``.
:return: A list of the potential energies of the individual components or the
overall potential energy of the wavepacket. (Depending on the optional arguments.)
.. note:: This method just expands to a call of the :py:meth:`ObservablesMixedHAWP.potential_overlap_energy`
method. Better use :py:meth:`ObservablesHAWP.potential_energy`.
"""
return self.potential_overlap_energy(wavepacket, wavepacket, potential, component=component, summed=summed)
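# A minimal usage sketch (illustrative; assumes two compatible Hagedorn
# wavepackets `pacbra` and `packet`, an inhomogeneous inner product `ip`,
# a gradient operator `grad` and a potential `V`, all constructed elsewhere):
#
#   obs = ObservablesMixedHAWP()
#   obs.set_innerproduct(ip)
#   obs.set_gradient(grad)
#   overlap = obs.overlap(pacbra, packet, summed=True)
#   ekin = obs.kinetic_overlap_energy(pacbra, packet, summed=True)
#   epot = obs.potential_overlap_energy(pacbra, packet, V.evaluate_at, summed=True)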
|
WaveBlocks/WaveBlocksND
|
WaveBlocksND/ObservablesMixedHAWP.py
|
Python
|
bsd-3-clause
| 13,370 | 0.006806 |
#!/usr/bin/env python3
#
# hyperv_wmi_generator.py: generates most of the WMI type mapping code
#
# Copyright (C) 2011 Matthias Bolte <matthias.bolte@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see
# <http://www.gnu.org/licenses/>.
#
import sys
import os
import os.path
separator = "/*" + ("*" * 50) + "*\n"
wmi_classes_by_name = {}
class WmiClass:
"""Represents WMI class and provides methods to generate C code."""
def __init__(self, name, properties, uri_info):
self.name = name
self.properties = properties
self.uri_info = uri_info
def generate_classes_header(self):
"""Generate C header code and return it as string
Declares:
<class_name>_Data - used as hypervObject->data
<class_name>_TypeInfo - used as wsman XmlSerializerInfo
<class_name> - "inherits" hypervObject struct
"""
name_upper = self.name.upper()
header = separator
header += " * %s\n" % self.name
header += " */\n"
header += "\n"
header += "#define %s_WQL_SELECT \\\n" % name_upper
header += " \"SELECT * FROM %s \"\n" % self.name
header += "\n"
header += "extern hypervWmiClassInfo *%s_WmiInfo;\n\n" % self.name
header += self._declare_data_structs()
header += self._declare_hypervObject_struct()
return header
def generate_classes_source(self):
"""Returns a C code string defining wsman data structs
Defines:
<class_name>_Data struct
<class_name>_WmiInfo - list holding metadata (e.g. request URIs) for the WMI class
"""
source = separator
source += " * %s\n" % self.name
source += " */\n"
source += "SER_START_ITEMS(%s_Data)\n" % self.name
for property in self.properties:
source += property.generate_classes_source(self.name)
source += "SER_END_ITEMS(%s_Data);\n\n" % self.name
# also generate typemap data while we're here
source += "hypervCimType %s_Typemap[] = {\n" % self.name
for property in self.properties:
source += property.generate_typemap()
source += ' { "", "", 0 },\n' # null terminated
source += '};\n\n'
source += self._define_WmiInfo_struct()
source += "\n\n"
return source
def generate_classes_typedef(self):
"""Returns C string for typedefs"""
typedef = "typedef struct _%s %s;\n" % (self.name, self.name)
typedef += "typedef struct _%s_Data %s_Data;\n" % (self.name, self.name)
typedef += "G_DEFINE_AUTOPTR_CLEANUP_FUNC(%s, hypervFreeObject);\n" % self.name
typedef += "\n"
return typedef
def _declare_data_structs(self):
"""Returns string C code declaring data structs.
The *_Data structs are used as hypervObject->data. Each one has
corresponding *_TypeInfo that is used for wsman unserialization of
response XML into the *_Data structs.
"""
header = "#define %s_RESOURCE_URI \\\n" % self.name.upper()
header += " \"%s\"\n" % self.uri_info.resourceUri
header += "\n"
header += "struct _%s_Data {\n" % self.name
for property in self.properties:
header += property.generate_classes_header()
header += "};\n\n"
header += "SER_DECLARE_TYPE(%s_Data);\n" % self.name
return header
def _declare_hypervObject_struct(self):
"""Return string for C code declaring hypervObject instance"""
header = "\n/* must match hypervObject */\n"
header += "struct _%s {\n" % self.name
header += " %s_Data *data;\n" % self.name
header += " hypervWmiClassInfo *info;\n"
header += " %s *next;\n" % self.name
header += "};\n"
header += "\n\n\n"
return header
def _define_WmiInfo_struct(self):
"""Return string for C code defining *_WmiInfo struct
This struct holds info with meta-data needed to make wsman requests for the WMI class.
"""
source = "hypervWmiClassInfo *%s_WmiInfo = &(hypervWmiClassInfo) {\n" % self.name
source += " .name = \"%s\",\n" % self.name
source += " .rootUri = %s,\n" % self.uri_info.rootUri
source += " .resourceUri = %s_RESOURCE_URI,\n" % self.name.upper()
source += " .serializerInfo = %s_Data_TypeInfo,\n" % self.name
source += " .propertyInfo = %s_Typemap\n" % self.name
source += "};\n"
return source
class ClassUriInfo:
"""Prepares URI information needed for wsman requests."""
def __init__(self, wmi_name):
if wmi_name.startswith("Msvm_"):
self.rootUri = "ROOT_VIRTUALIZATION_V2"
baseUri = "http://schemas.microsoft.com/wbem/wsman/1/wmi/root/virtualization/v2"
else:
self.rootUri = "ROOT_CIMV2"
baseUri = "http://schemas.microsoft.com/wbem/wsman/1/wmi/root/cimv2"
self.resourceUri = "%s/%s" % (baseUri, wmi_name)
class Property:
typemap = {
"boolean": "BOOL",
"string": "STR",
"datetime": "STR",
"int8": "INT8",
"sint8": "INT8",
"int16": "INT16",
"sint16": "INT16",
"int32": "INT32",
"sint32": "INT32",
"int64": "INT64",
"sint64": "INT64",
"uint8": "UINT8",
"uint16": "UINT16",
"uint32": "UINT32",
"uint64": "UINT64"
}
def __init__(self, type, name, is_array):
if type not in Property.typemap:
report_error("unhandled property type %s" % type)
self.type = type
self.name = name
self.is_array = is_array
def generate_classes_header(self):
if self.is_array:
return " XML_TYPE_DYN_ARRAY %s;\n" % self.name
else:
return " XML_TYPE_%s %s;\n" \
% (Property.typemap[self.type], self.name)
def generate_classes_source(self, class_name):
if self.is_array:
return " SER_NS_DYN_ARRAY(%s_RESOURCE_URI, \"%s\", 0, 0, %s),\n" \
% (class_name.upper(), self.name, self.type)
else:
return " SER_NS_%s(%s_RESOURCE_URI, \"%s\", 1),\n" \
% (Property.typemap[self.type], class_name.upper(), self.name)
def generate_typemap(self):
return ' { "%s", "%s", %s },\n' % (self.name, self.type.lower(), str(self.is_array).lower())
def open_file(filename):
return open(filename, "wt")
def report_error(message):
print("error: " + message)
sys.exit(1)
def parse_class(block, number):
# expected format: class <name> : <optional parent>
header_items = block[0][1].split()
if len(header_items) not in [2, 4]:
report_error("line %d: invalid block header" % (number))
assert header_items[0] == "class"
name = header_items[1]
if name in wmi_classes_by_name:
report_error("class '%s' has already been defined" % name)
if len(header_items) == 4:
parent_class = header_items[3]
if parent_class not in wmi_classes_by_name:
report_error("nonexistent parent class specified: %s" % parent_class)
properties = wmi_classes_by_name[parent_class].properties.copy()
else:
properties = []
for line in block[1:]:
# expected format: <type> <name>
items = line[1].split()
if len(items) != 2:
report_error("line %d: invalid property" % line[0])
if items[1].endswith("[]"):
items[1] = items[1][:-2]
is_array = True
else:
is_array = False
properties.append(Property(type=items[0], name=items[1], is_array=is_array))
wmi_classes_by_name[name] = WmiClass(name, properties, ClassUriInfo(name))
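# Input format sketch (illustrative; mirrors hyperv_wmi_generator.input):
#
#   class Msvm_ComputerSystem
#       string   ElementName
#       uint16   EnabledState
#       string   Roles[]
#   end
#
# A derived class may extend a parent's properties with
# "class Child : Parent".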
def main():
if len(sys.argv) != 3:
report_error("usage: %s srcdir builddir" % sys.argv[0])
input_filename = os.path.join(sys.argv[1], "hyperv", "hyperv_wmi_generator.input")
output_dirname = os.path.join(sys.argv[2], "hyperv")
classes_typedef = open_file(os.path.join(output_dirname, "hyperv_wmi_classes.generated.typedef"))
classes_header = open_file(os.path.join(output_dirname, "hyperv_wmi_classes.generated.h"))
classes_source = open_file(os.path.join(output_dirname, "hyperv_wmi_classes.generated.c"))
# parse input file
number = 0
block = None
for line in open(input_filename, "rt").readlines():
number += 1
if "#" in line:
line = line[:line.index("#")]
line = line.lstrip().rstrip()
if len(line) < 1:
continue
if line.startswith("class"):
if block is not None:
report_error("line %d: nested block found" % (number))
else:
block = []
if block is not None:
if line == "end":
if block[0][1].startswith("class"):
parse_class(block, number)
block = None
else:
block.append((number, line))
# write output files
notice = "/* Generated by hyperv_wmi_generator.py */\n\n\n\n"
classes_typedef.write(notice)
classes_header.write(notice)
classes_source.write(notice)
classes_typedef.write("void hypervFreeObject(void *object);\n\n\n")
names = sorted(wmi_classes_by_name.keys())
for name in names:
cls = wmi_classes_by_name[name]
classes_typedef.write(cls.generate_classes_typedef())
classes_header.write(cls.generate_classes_header())
classes_source.write(cls.generate_classes_source())
if __name__ == "__main__":
main()
|
crobinso/libvirt
|
scripts/hyperv_wmi_generator.py
|
Python
|
lgpl-2.1
| 10,347 | 0.001546 |
#!/usr/bin/python
from PyQt4 import QtCore, QtGui
class Bubble(QtGui.QLabel):
def __init__(self,text):
super(Bubble,self).__init__(text)
self.setContentsMargins(5,5,5,5)
def paintEvent(self, e):
p = QtGui.QPainter(self)
p.setRenderHint(QtGui.QPainter.Antialiasing,True)
p.drawRoundedRect(0,0,self.width()-1,self.height()-1,5,5)
super(Bubble,self).paintEvent(e)
class MyWidget(QtGui.QWidget):
def __init__(self,text,left=True):
super(MyWidget,self).__init__()
hbox = QtGui.QHBoxLayout()
label = Bubble(text)
if left is not True:
hbox.addSpacerItem(QtGui.QSpacerItem(1,1,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Preferred))
hbox.addWidget(label)
if left is True:
hbox.addSpacerItem(QtGui.QSpacerItem(1,1,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Preferred))
hbox.setContentsMargins(0,0,0,0)
self.setLayout(hbox)
self.setContentsMargins(0,0,0,0)
if __name__ == '__main__':
a = QtGui.QApplication([])
w = QtGui.QWidget()
vbox = QtGui.QVBoxLayout()
vbox.addWidget(MyWidget("Left side.. and also check everything needed to fuck around\n\n\n"))
vbox.addWidget(MyWidget("Right side",left=False))
vbox.addWidget(MyWidget("Left side"))
vbox.addWidget(MyWidget("Left side"))
w.setLayout(vbox)
w.show()
a.exec_()
|
shrinidhi666/rbhus
|
tests/conversationBox.py
|
Python
|
gpl-3.0
| 1,444 | 0.025623 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RSva(RPackage):
"""Surrogate Variable Analysis."""
homepage = "https://www.bioconductor.org/packages/sva/"
git = "https://git.bioconductor.org/packages/sva.git"
version('3.24.4', commit='ed2ebb6e33374dc9ec50e6ea97cc1d9aef836c73')
depends_on('r@3.4.0:3.4.9', when='@3.24.4')
depends_on('r-mgcv', type=('build', 'run'))
depends_on('r-genefilter', type=('build', 'run'))
depends_on('r-biocparallel', type=('build', 'run'))
depends_on('r-matrixstats', type=('build', 'run'))
depends_on('r-limma', type=('build', 'run'))
|
mfherbst/spack
|
var/spack/repos/builtin/packages/r-sva/package.py
|
Python
|
lgpl-2.1
| 1,827 | 0.000547 |
import logging
import sys
import traceback
from django.conf import settings
from django.core.cache import cache
try:
from django.utils.module_loading import import_string
except ImportError:
# compatibility with django < 1.7
from django.utils.module_loading import import_by_path
import_string = import_by_path
from mohawk import Receiver
from mohawk.exc import BadHeaderValue, HawkFail, TokenExpired
from rest_framework.authentication import BaseAuthentication
from rest_framework.exceptions import AuthenticationFailed
from hawkrest.util import get_auth_header, is_hawk_request
log = logging.getLogger(__name__)
# Number of seconds until a Hawk message expires.
default_message_expiration = 60
def default_credentials_lookup(cr_id):
if cr_id not in settings.HAWK_CREDENTIALS:
raise LookupError('No Hawk ID of {id}'.format(id=cr_id))
return settings.HAWK_CREDENTIALS[cr_id]
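# A minimal sketch of the Django settings consumed by
# default_credentials_lookup above; the ID and key are placeholder values:
#
# HAWK_CREDENTIALS = {
#     'some-client-id': {
#         'id': 'some-client-id',
#         'key': 'a long random secret shared with the client',
#         'algorithm': 'sha256',
#     },
# }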
def default_user_lookup(request, credentials):
return HawkAuthenticatedUser(), None
class HawkAuthentication(BaseAuthentication):
def hawk_credentials_lookup(self, cr_id):
lookup = default_credentials_lookup
lookup_name = getattr(
settings,
'HAWK_CREDENTIALS_LOOKUP',
None)
if lookup_name:
log.debug('Using custom credentials lookup from: {}'
.format(lookup_name))
lookup = import_string(lookup_name)
return lookup(cr_id)
def hawk_user_lookup(self, request, credentials):
lookup = default_user_lookup
lookup_name = getattr(
settings,
'HAWK_USER_LOOKUP',
None)
if lookup_name:
log.debug('Using custom user lookup from: {}'
.format(lookup_name))
lookup = import_string(lookup_name)
return lookup(request, credentials)
def authenticate(self, request):
# In case there is an exception, tell others that the view passed
# through Hawk authorization. The META dict is used because
# middleware may not get an identical request object.
# A dot-separated key is to work around potential environ var
# pollution of META.
request.META['hawk.receiver'] = None
http_authorization = get_auth_header(request)
if not http_authorization:
log.debug('no authorization header in request')
return None
elif not is_hawk_request(request):
log.debug('ignoring non-Hawk authorization header: {} '
.format(http_authorization))
return None
try:
receiver = Receiver(
lambda cr_id: self.hawk_credentials_lookup(cr_id),
http_authorization,
request.build_absolute_uri(),
request.method,
content=request.body,
seen_nonce=(seen_nonce
if getattr(settings, 'USE_CACHE_FOR_HAWK_NONCE',
True)
else None),
content_type=request.META.get('CONTENT_TYPE', ''),
timestamp_skew_in_seconds=getattr(settings,
'HAWK_MESSAGE_EXPIRATION',
default_message_expiration))
except HawkFail as e:
etype, val, tb = sys.exc_info()
log.debug(traceback.format_exc())
log.warning('access denied: {etype.__name__}: {val}'
.format(etype=etype, val=val))
# The exception message is sent to the client as part of the
# 401 response, so we're intentionally vague about the original
# exception type/value, to avoid assisting attackers.
msg = 'Hawk authentication failed'
if isinstance(e, BadHeaderValue):
msg += ': The request header was malformed'
elif isinstance(e, TokenExpired):
msg += ': The token has expired. Is your system clock correct?'
raise AuthenticationFailed(msg)
# Pass our receiver object to the middleware so the request header
# doesn't need to be parsed again.
request.META['hawk.receiver'] = receiver
return self.hawk_user_lookup(request, receiver.resource.credentials)
def authenticate_header(self, request):
return 'Hawk'
# Added for Django compatibility, allowing use of this class as a
# normal Django authentication backend as well (for views outside
# Django Rest Framework)
def get_user(self, user_id):
return HawkAuthenticatedUser()
class HawkAuthenticatedUser(object):
"""
A real-ish user like AbstractBaseUser but not a real Django model.
This passes the DRF is_authenticated permission check but it may cause
other problems. If you need to work with a real Django model user
you might need to subclass HawkAuthentication.
"""
is_active = True
def get_full_name(self):
return str(self.__class__.__name__)
def get_short_name(self):
return str(self.__class__.__name__)
def get_username(self):
return str(self.__class__.__name__)
def clean(self):
# There's nothing to clean, since the name is `self.__class__.__name__`.
pass
def save(self, *args, **kwargs):
raise NotImplementedError()
def natural_key(self):
return str(self.__class__.__name__)
def is_anonymous(self):
return False
def is_authenticated(self):
return True
def set_password(self, password):
raise NotImplementedError()
def check_password(self, password):
raise NotImplementedError()
def set_unusable_password(self):
pass
def has_usable_password(self):
return False
def get_session_auth_hash(self):
raise NotImplementedError()
# -----------------------------------------------
# These methods are in older Django versions only:
# -----------------------------------------------
def get_previous_by_last_login(self, *args, **kw):
raise NotImplementedError()
def get_next_by_last_login(self, *args, **kw):
raise NotImplementedError()
def seen_nonce(id, nonce, timestamp):
"""
Returns True if the Hawk nonce has been seen already.
"""
key = '{id}:{n}:{ts}'.format(id=id, n=nonce, ts=timestamp)
if cache.get(key):
log.warning('replay attack? already processed nonce {k}'
.format(k=key))
return True
else:
log.debug('caching nonce {k}'.format(k=key))
cache.set(key, True,
# We only need the nonce until the message itself expires.
# This also adds a little bit of padding.
timeout=getattr(settings, 'HAWK_MESSAGE_EXPIRATION',
default_message_expiration) + 5)
return False
|
kumar303/hawkrest
|
hawkrest/__init__.py
|
Python
|
bsd-3-clause
| 7,011 | 0.000428 |
# -*- coding: UTF-8 -*-
# translation.py
#
# Copyright (C) 2013 Cleany
#
# Author(s): Cédric Gaspoz <cga@cleany.ch>
#
# This file is part of cleany.
#
# Cleany is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cleany is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cleany. If not, see <http://www.gnu.org/licenses/>.
# Stdlib imports
# Core Django imports
# Third-party app imports
from modeltranslation.translator import translator, TranslationOptions
# Cleany imports
#from .models import
# class AppellationTranslationOptions(TranslationOptions):
# fields = ('name', 'description',)
#
# translator.register(Appellation, AppellationTranslationOptions)
|
Degustare/cleany
|
cleany/maps/translation.py
|
Python
|
gpl-3.0
| 1,115 | 0.001795 |
"""
Misago-native rehash of Django's createsuperuser command that
works with the double authentication fields on the user model
"""
import sys
from getpass import getpass
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand
from django.db import DEFAULT_DB_ALIAS, IntegrityError
from django.utils.encoding import force_str
from django.utils.six.moves import input
from ...validators import validate_email, validate_password, validate_username
class NotRunningInTTYException(Exception):
pass
class Command(BaseCommand):
help = 'Used to create a superuser.'
def add_arguments(self, parser):
parser.add_argument('--username', dest='username', default=None,
help='Specifies the username for the superuser.')
        parser.add_argument('--email', dest='email', default=None,
                            help='Specifies the email address for the superuser.')
        parser.add_argument('--password', dest='password', default=None,
                            help='Specifies the password for the superuser.')
parser.add_argument('--noinput', action='store_false', dest='interactive',
default=True,
help=('Tells Misago to NOT prompt the user for input '
'of any kind. You must use --username with '
'--noinput, along with an option for any other '
'required field. Superusers created with '
'--noinput will not be able to log in until '
'they\'re given a valid password.'))
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help=('Specifies the database to use. '
'Default is "default".'))
def execute(self, *args, **options):
self.stdin = options.get('stdin', sys.stdin) # Used for testing
return super(Command, self).execute(*args, **options)
def handle(self, *args, **options):
username = options.get('username')
email = options.get('email')
password = options.get('password')
interactive = options.get('interactive')
verbosity = int(options.get('verbosity', 1))
# Validate initial inputs
if username is not None:
try:
username = username.strip()
validate_username(username)
except ValidationError as e:
self.stderr.write(e.messages[0])
username = None
if email is not None:
try:
email = email.strip()
validate_email(email)
except ValidationError as e:
self.stderr.write(e.messages[0])
email = None
if password is not None:
try:
password = password.strip()
validate_password(password)
except ValidationError as e:
self.stderr.write(e.messages[0])
password = None
if not interactive:
if username and email and password:
# Call User manager's create_superuser using our wrapper
self.create_superuser(username, email, password, verbosity)
else:
try:
if hasattr(self.stdin, 'isatty') and not self.stdin.isatty():
raise NotRunningInTTYException("Not running in a TTY")
# Prompt for username/password, and any other required fields.
# Enclose this whole thing in a try/except to trap for a
# keyboard interrupt and exit gracefully.
while not username:
try:
message = force_str("Enter displayed username: ")
raw_value = input(message).strip()
validate_username(raw_value)
username = raw_value
except ValidationError as e:
self.stderr.write(e.messages[0])
while not email:
try:
raw_value = input("Enter E-mail address: ").strip()
validate_email(raw_value)
email = raw_value
except ValidationError as e:
self.stderr.write(e.messages[0])
while not password:
try:
raw_value = getpass("Enter password: ").strip()
validate_password(raw_value)
repeat_raw_value = getpass("Repeat password: ").strip()
if raw_value != repeat_raw_value:
raise ValidationError(
"Entered passwords are different.")
password = raw_value
except ValidationError as e:
self.stderr.write(e.messages[0])
# Call User manager's create_superuser using our wrapper
self.create_superuser(username, email, password, verbosity)
except KeyboardInterrupt:
self.stderr.write("\nOperation cancelled.")
sys.exit(1)
except NotRunningInTTYException:
self.stdout.write(
"Superuser creation skipped due to not running in a TTY. "
"You can run `manage.py createsuperuser` in your project "
"to create one manually."
)
def create_superuser(self, username, email, password, verbosity):
try:
User = get_user_model()
user = User.objects.create_superuser(username, email, password,
set_default_avatar=True)
if verbosity >= 1:
message = "Superuser #%(pk)s has been created successfully."
self.stdout.write(message % {'pk': user.pk})
except ValidationError as e:
self.stderr.write(e.messages[0])
        except IntegrityError as e:
            # IntegrityError has no .messages attribute; report the raw error.
            self.stderr.write(str(e))
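# Example non-interactive invocation (placeholder values):
#   python manage.py createsuperuser --noinput \
#       --username=Admin --email=admin@example.com --password=pass1234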
|
1905410/Misago
|
misago/users/management/commands/createsuperuser.py
|
Python
|
gpl-2.0
| 6,267 | 0.001277 |
###ExonArray
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, string
import os.path
import unique
import time
import export
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
#add in code to prevent folder names from being included
dir_list2 = []
for file in dir_list:
if '.txt' in file: dir_list2.append(file)
return dir_list2
################# Begin Analysis
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def importAnnotations(filename):
firstLine = True
fn = filepath(filename)
rows = 0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line);
tab_delimited_data = string.split(data,'\t')
if rows > 10: sys.exit()
print tab_delimited_data#;sys.exit()
rows+=1
def correlateMethylationData(filename,betaLow=0.4,betaHigh=0.6,counts=-1):
    ### Takes a filtered pre-processed beta-value file as input
    ### The original body wrote to export objects that were never opened; they
    ### are opened here (the '-correlated.txt' output filename is an
    ### assumption) so the function runs without NameErrors. The per-probe
    ### high/low counts are computed but, as in the original, not yet used.
    export_object = open(filename[:-4]+'-correlated.txt','w')
    annot_export_object = None
    firstLine = True
    rows=0; filtered=0
    for line in open(filename,'rU').xreadlines():
        data = cleanUpLine(line);
        t = string.split(data,'\t')
        if firstLine:
            header = t
            if len(t)>5 and 'Illumina_name' in header:
                delimiter = -50
                annot_export_object = open(filepath('AltDatabase/ucsc/Hs/Illumina_methylation_genes.txt'),'w')
                annot_export_object.write(string.join([t[0]]+t[delimiter:],'\t')+'\n')
            else:
                delimiter = len(header)
            headers = t[1:delimiter]
            firstLine = False
            export_object.write(string.join([t[0]]+headers,'\t')+'\n')
        else:
            probeID = t[0]
            #try: beta_values = map(float,t[1:50])
            beta_values = map(lambda x: conFloat(x,t[1:delimiter]),t[1:delimiter])
            if '' in beta_values:
                print beta_values;sys.exit()
            high = sum(betaHighCount(x,betaHigh) for x in beta_values)
            low = sum(betaLowCount(x,betaLow) for x in beta_values)
        rows+=1
    export_object.close()
    if annot_export_object is not None:
        annot_export_object.close()
def importMethylationData(filename,betaLow=0.4,betaHigh=0.6,counts=-1, filter=None):
annot_file = filepath('AltDatabase/ucsc/Hs/Illumina_methylation_genes.txt')
export_object = open(filename[:-4]+'-filtered.txt','w')
print filename[:-4]+'-filtered.txt', counts
firstLine = True
rows=0; filtered=0
for line in open(filename,'rU').xreadlines():
data = cleanUpLine(line);
t = string.split(data,'\t')
#export_object.write(string.join(t,'\t')+'\n')
#"""
if firstLine:
header = t
if len(t)>5 and 'Illumina_name' in header:
delimiter = -50
annot_export_object = open(annot_file,'w')
annot_export_object.write(string.join([t[0]]+t[delimiter:],'\t')+'\n')
else:
delimiter = len(header)
headers = t[1:delimiter]
firstLine = False
export_object.write(string.join([t[0]]+headers,'\t')+'\n')
else:
probeID = t[0]
#try: beta_values = map(float,t[1:50])
beta_values = map(lambda x: conFloat(x,t[1:delimiter]),t[1:delimiter])
if '' in beta_values:
print beta_values;sys.exit()
high = sum(betaHighCount(x,betaHigh) for x in beta_values)
low = sum(betaLowCount(x,betaLow) for x in beta_values)
#if rows<50: print high, low, max(beta_values), min(beta_values)
#else:sys.exit()
#export_object.write(string.join(t[:delimiter])+'\n')
if high>=counts and low>=counts:
#if (high-low) > 0.2:
#if rows<50: print 1
if filter!=None:
if probeID in filter: proceed=True; probeID = str(filter[probeID])+':'+probeID
else: proceed = False
else: proceed = True
if proceed:
filtered+=1
export_object.write(string.join([probeID]+map(str,beta_values),'\t')+'\n')
if 'Illumina_name' in header:
annot_export_object.write(string.join([t[0]]+t[delimiter:],'\t')+'\n')
rows+=1
#"""
export_object.close()
    if delimiter == -50: ### bug fix: delimiter is an int, so comparing to the string '-50' never matched
        annot_export_object.close()
print filtered, rows
def conFloat(x,betaValues):
try: x = float(x)
except Exception: x=None
    if x is None or x == 0:
floats=[]
for i in betaValues:
if i=='': pass
elif float(i)==0: pass
else: floats.append(float(i))
try: return min(floats)
except Exception: print betaValues;sys.exit()
else:
return x
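# Illustrative only: conFloat('', ['0.8', '', '0', '0.2']) returns 0.2 —
# missing ('') and zero beta values fall back to the smallest non-zero
# value observed for that probe across the row.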
def betaHighCount(x,betaHigh):
if x>betaHigh:
return 1
else: return 0
def betaLowCount(x,betaLow):
if x<betaLow:
return 1
else: return 0
def getIDsFromFile(filename):
filterIDs = {}
fn = filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line);
t = string.split(data,'\t')
filterIDs[string.lower(t[0])]=[]
return filterIDs
def getRegionType(filename,featureType=None,chromosome=None,filterIDs=None):
if filterIDs !=None:
filterIDs = getIDsFromFile(filterIDs)
firstLine = True
fn = filepath(filename)
count=0; filter_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line);
t = string.split(data,',')
if firstLine:
if len(t[2]) >0:
header = t
firstLine=False
chr_ind = header.index('CHR')
pos_ind = header.index('Coordinate_36')
tss_ind = header.index('UCSC_RefGene_Group')
gene_name = header.index('UCSC_RefGene_Name')
else:
probeID = t[0]
count+=1
try: gene_names = string.split(t[gene_name],';')
except Exception: gene_names = []
try:
if chromosome != None:
if t[chr_ind] == chromosome:
if filterIDs !=None:
for gene in gene_names:
if string.lower(gene) in filterIDs:
filter_db[probeID]=t[pos_ind]
else:
filter_db[probeID]=t[pos_ind]
if 'promoter' in string.lower(featureType):
if 'TSS' in t[tss_ind]:
if filterIDs !=None:
for gene in gene_names:
if string.lower(gene) in filterIDs:
filter_db[probeID]=t[pos_ind]
else:
filter_db[probeID]=t[pos_ind]
if 'mir' in string.lower(featureType) or 'micro' in string.lower(featureType):
if 'mir' in string.lower(t[gene_name]) or 'let' in string.lower(t[gene_name]):
if filterIDs !=None:
for gene in gene_names:
if string.lower(gene) in filterIDs:
filter_db[probeID]=t[pos_ind]
else:
filter_db[probeID]=t[pos_ind]
if filterIDs !=None:
for gene in gene_names:
if string.lower(gene) in filterIDs:
filter_db[probeID]=t[pos_ind]
except Exception:
pass
print len(filter_db), 'probes remaining'
return filter_db
if __name__ == '__main__':
import getopt
featureType = 'promoter'
featureType = 'all'
Species = 'Hs'
filter_db=None
chromosome=None
numRegulated = -1
analysis = 'filter'
filterIDs = None
################ Comand-line arguments ################
    if len(sys.argv[1:])<=1: ### Indicates an insufficient number of command-line arguments
        print "Warning! Please designate a methylation beta-value file as input in the command-line"
        print "Example: python methylation.py --i /Users/me/sample1.txt --a filter --t promoter"
sys.exit()
else:
analysisType = []
useMultiProcessing=False
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','a=','t=','r=','c=','f='])
for opt, arg in options:
if opt == '--i': input_file=arg
elif opt == '--a': analysis=arg
elif opt == '--t': featureType=arg
elif opt == '--r': numRegulated=int(arg)
elif opt == '--c': chromosome=arg
elif opt == '--f': filterIDs=arg
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
if analysis == 'filter':
filename = 'AltDatabase/ucsc/Hs/wgEncodeHaibMethyl450CpgIslandDetails.txt'
#input_file = '/Volumes/SEQ-DATA/PCBC/Methylation/Methylome70allBValues_aronowAnnotations.txt'
if featureType!= 'all' or chromosome != None or filterIDs!=None:
filter_db = getRegionType(filename,featureType=featureType,chromosome=chromosome,filterIDs=filterIDs)
importMethylationData(input_file,filter = filter_db,counts=numRegulated); sys.exit()
#importAnnotations(methylation_file);sys.exit()
if analysis == 'correlate':
### Performs all pairwise correlations between probes corresponding to a gene
correlateMethylationData(input_file)
|
kdaily/altanalyze
|
methylation.py
|
Python
|
apache-2.0
| 11,047 | 0.023264 |
#!/usr/bin/env python2.7
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Reference
#https://docs.python.org/2/library/unittest.html
#http://eli.thegreenplace.net/2011/08/02/python-unit-testing-parametrized-test-cases
#public domain license reference: http://eli.thegreenplace.net/pages/code
#Run
#python tika/tests/tests_params.py
import csv
import unittest
import tika.parser
class CreateTest(unittest.TestCase):
"test for file types"
def __init__(self, methodName='runTest', param1=None, param2=None):
super(CreateTest, self).__init__(methodName)
self.param1 = param1
@staticmethod
def parameterize(test_case, param1=None, param2=None):
testloader = unittest.TestLoader()
testnames = testloader.getTestCaseNames(test_case)
suite = unittest.TestSuite()
for name in testnames:
suite.addTest(test_case(name, param1=param1, param2=param2))
return suite
class RemoteTest(CreateTest):
def setUp(self):
self.param1 = tika.parser.from_file(self.param1)
def test_true(self):
self.assertTrue(self.param1)
def test_meta(self):
self.assertTrue(self.param1['metadata'])
def test_content(self):
self.assertTrue(self.param1['content'])
def test_url():
with open('tika/tests/arguments/test_remote_content.csv', 'r') as csvfile:
urlread = csv.reader(csvfile)
for url in urlread:
yield url[1]
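# Assumed layout of test_remote_content.csv (only the second column is used,
# and the header row is discarded in __main__ below):
#   name,url
#   sample-pdf,https://example.com/sample.pdf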
if __name__ == '__main__':
suite = unittest.TestSuite()
t_urls = list(test_url())
t_urls.pop(0) #remove header
for x in t_urls:
try:
suite.addTest(CreateTest.parameterize(RemoteTest,param1=x))
except IOError as e:
print(e.strerror)
unittest.TextTestRunner(verbosity=2).run(suite)
|
dongnizh/tika-python
|
tika/tests/tests_params.py
|
Python
|
apache-2.0
| 2,555 | 0.007828 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
import numpy as np
from op_test import OpTest
import os
paddle.enable_static()
paddle.seed(100)
class TestExponentialOp1(OpTest):
def setUp(self):
self.op_type = "exponential"
self.config()
self.attrs = {"lambda": self.lam}
self.inputs = {'X': np.empty([1024, 1024], dtype=self.dtype)}
self.outputs = {'Out': np.ones([1024, 1024], dtype=self.dtype)}
def config(self):
self.lam = 0.5
self.dtype = "float64"
def test_check_output(self):
self.check_output_customized(self.verify_output)
def verify_output(self, outs):
hist1, _ = np.histogram(outs[0], range=(0, 5))
hist1 = hist1.astype("float32")
hist1 = hist1 / float(outs[0].size)
data_np = np.random.exponential(1. / self.lam, [1024, 1024])
hist2, _ = np.histogram(data_np, range=(0, 5))
hist2 = hist2.astype("float32")
hist2 = hist2 / float(data_np.size)
self.assertTrue(
np.allclose(
hist1, hist2, rtol=0.02),
"actual: {}, expected: {}".format(hist1, hist2))
def test_check_grad_normal(self):
self.check_grad(
['X'],
'Out',
user_defined_grads=[np.zeros(
[1024, 1024], dtype=self.dtype)],
user_defined_grad_outputs=[
np.random.rand(1024, 1024).astype(self.dtype)
])
class TestExponentialOp2(TestExponentialOp1):
def config(self):
self.lam = 0.25
self.dtype = "float32"
class TestExponentialAPI(unittest.TestCase):
def test_static(self):
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
x_np = np.full([10, 10], -1.)
x = paddle.static.data(name="X", shape=[10, 10], dtype='float64')
x.exponential_(1.0)
exe = paddle.static.Executor()
out = exe.run(paddle.static.default_main_program(),
feed={"X": x_np},
fetch_list=[x])
self.assertTrue(np.min(out) >= 0)
def test_dygraph(self):
paddle.disable_static()
x = paddle.full([10, 10], -1., dtype='float32')
x.exponential_(0.5)
self.assertTrue(np.min(x.numpy()) >= 0)
paddle.enable_static()
def test_fixed_random_number(self):
# Test GPU Fixed random number, which is generated by 'curandStatePhilox4_32_10_t'
if not paddle.is_compiled_with_cuda():
return
        # Different GPUs generate different random values. Only test V100 here.
        if "V100" not in paddle.device.cuda.get_device_name():
return
if os.getenv("FLAGS_use_curand", None) in ('0', 'False', None):
return
print("Test Fixed Random number on V100 GPU------>")
paddle.disable_static()
paddle.set_device('gpu')
paddle.seed(2021)
x = paddle.empty([64, 3, 1024, 1024], dtype="float32")
x.exponential_(1.0)
x_np = x.numpy()
expect = [
0.80073667, 0.2249291, 0.07734892, 1.25392, 0.14013891, 0.45736602,
1.9735607, 0.30490234, 0.57100505, 0.8115938
]
self.assertTrue(np.allclose(x_np[0, 0, 0, 0:10], expect))
expect = [
1.4296371e+00, 9.5411777e-01, 5.2575850e-01, 2.4805880e-01,
1.2322118e-04, 8.4604341e-01, 2.1111444e-01, 1.4143821e+00,
2.8194717e-01, 1.1360573e+00
]
self.assertTrue(np.allclose(x_np[16, 1, 300, 200:210], expect))
expect = [
1.3448033, 0.35146526, 1.7380928, 0.32012638, 0.10396296,
0.51344526, 0.15308502, 0.18712929, 0.03888268, 0.20771872
]
self.assertTrue(np.allclose(x_np[32, 1, 600, 500:510], expect))
expect = [
0.5107464, 0.20970327, 2.1986802, 1.580056, 0.31036147, 0.43966478,
0.9056133, 0.30119267, 1.4797124, 1.4319834
]
self.assertTrue(np.allclose(x_np[48, 2, 900, 800:810], expect))
expect = [
3.4640615, 1.1019983, 0.41195083, 0.22681557, 0.291846, 0.53617656,
1.5791925, 2.4645927, 0.04094889, 0.9057725
]
self.assertTrue(np.allclose(x_np[63, 2, 1023, 1000:1010], expect))
x = paddle.empty([10, 10], dtype="float32")
x.exponential_(3.0)
x_np = x.numpy()
expect = [
0.02831675, 0.1691551, 0.6798956, 0.69347525, 0.0243443, 0.22180498,
0.30574575, 0.9839696, 0.2834912, 0.59420055
]
self.assertTrue(np.allclose(x_np[5, 0:10], expect))
x = paddle.empty([16, 2, 1024, 768], dtype="float64")
x.exponential_(0.25)
x_np = x.numpy()
expect = [
10.0541229, 12.67860643, 1.09850734, 7.35289643, 2.65471225,
3.86217432, 2.97902086, 2.92744479, 2.67927152, 0.19667352
]
self.assertTrue(np.allclose(x_np[0, 0, 0, 100:110], expect))
expect = [
0.68328125, 3.1454553, 0.92158376, 1.95842188, 1.05296941,
12.93242051, 5.20255978, 3.3588624, 1.57377174, 5.73194183
]
self.assertTrue(np.allclose(x_np[4, 0, 300, 190:200], expect))
expect = [
1.37973974, 3.45036798, 7.94625406, 1.62610973, 0.31032122,
4.13596493, 1.98494535, 1.13207041, 8.30592769, 2.81460147
]
self.assertTrue(np.allclose(x_np[8, 1, 600, 300:310], expect))
expect = [
2.27710811, 12.25003028, 2.96409124, 4.72405788, 0.67917249,
4.35856718, 0.46870976, 2.31120149, 9.61595826, 4.64446271
]
self.assertTrue(np.allclose(x_np[12, 1, 900, 500:510], expect))
expect = [
0.95883744, 1.57316361, 15.22524512, 20.49559882, 13.70008548,
3.29430143, 3.90390424, 0.9146657, 0.80972249, 0.33376219
]
self.assertTrue(np.allclose(x_np[15, 1, 1023, 750:760], expect))
x = paddle.empty([512, 768], dtype="float64")
x.exponential_(0.3)
x_np = x.numpy()
expect = [
8.79266704, 4.79596009, 2.75480243, 6.04670011, 0.35379556,
0.76864868, 3.17428251, 0.26556859, 12.22485885, 10.51690383
]
self.assertTrue(np.allclose(x_np[0, 200:210], expect))
expect = [
5.6341126, 0.52243418, 5.36410796, 6.83672002, 11.9243311,
5.85985566, 5.75169548, 0.13877972, 6.1348385, 3.82436519
]
self.assertTrue(np.allclose(x_np[300, 400:410], expect))
expect = [
4.94883581, 0.56345306, 0.85841585, 1.92287801, 6.10036656,
1.19524847, 3.64735434, 5.19618716, 2.57467974, 3.49152791
]
self.assertTrue(np.allclose(x_np[500, 700:710], expect))
x = paddle.empty([10, 10], dtype="float64")
x.exponential_(4.0)
x_np = x.numpy()
expect = [
0.15713826, 0.56395964, 0.0680941, 0.00316643, 0.27046853,
0.19852724, 0.12776634, 0.09642974, 0.51977551, 1.33739699
]
self.assertTrue(np.allclose(x_np[5, 0:10], expect))
paddle.enable_static()
if __name__ == "__main__":
unittest.main()
|
PaddlePaddle/Paddle
|
python/paddle/fluid/tests/unittests/test_exponential_op.py
|
Python
|
apache-2.0
| 7,870 | 0.000381 |
from OpenGL import GL
import numpy as np
import math
def drawLine(start, end, color, width=1):
GL.glLineWidth(width)
GL.glColor3f(*color)
GL.glBegin(GL.GL_LINES)
GL.glVertex3f(*start)
GL.glVertex3f(*end)
GL.glEnd()
def drawCircle(center, radius, color, rotation=np.array([0,0,0]), axis=np.array([1,1,1]), width=1, sections = 16):
GL.glLineWidth(width)
GL.glColor3f(*color)
GL.glPushMatrix()
GL.glTranslatef(*center)
    if not isinstance(rotation, int):
GL.glRotatef(rotation[0]*90,axis[0],0,0)
GL.glRotatef(rotation[1]*90,0,axis[1],0)
GL.glRotatef(rotation[2]*90,0,0,axis[2])
else:
GL.glRotatef(rotation*90,axis[0],axis[1],axis[2])
GL.glBegin(GL.GL_POLYGON)
steps = [i * ((math.pi*2)/sections) for i in range(sections)]
for i in steps:
        GL.glVertex3f(math.cos(i)*radius, math.sin(i)*radius, 0)  # glVertex3f takes exactly 3 coordinates; the stray 4th argument was a bug
GL.glEnd()
GL.glPopMatrix()
def makeDrawFunction(drawFunction, *args):
def closure():
drawFunction(*args)
return closure
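# Example usage (hypothetical values): bind the arguments now, issue the GL
# calls later from inside a render loop.
#   draw_x_axis = makeDrawFunction(drawLine, (0, 0, 0), (1, 0, 0), (1, 0, 0), 2)
#   ...
#   draw_x_axis()  # equivalent to drawLine((0, 0, 0), (1, 0, 0), (1, 0, 0), 2)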
|
g-rauhoeft/scrap-cap
|
motioncapture/gui/GL/Shapes.py
|
Python
|
mit
| 1,040 | 0.025 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import numpy as np
import argparse
from google.protobuf import text_format
#https://github.com/BVLC/caffe/issues/861#issuecomment-70124809
import matplotlib
matplotlib.use('Agg')
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(BASE_DIR))
from global_variables import *
from utilities_caffe import *
parser = argparse.ArgumentParser(description="Extract image embedding features for IMAGE input.")
parser.add_argument('--image', help='Path to input image (cropped)', required=True)
parser.add_argument('--iter_num', '-n', help='Use caffemodel trained after iter_num iterations', type=int, default=20000)
parser.add_argument('--caffemodel', '-c', help='Path to caffemodel (will ignore -n option if provided)', required=False)
parser.add_argument('--prototxt', '-p', help='Path to prototxt (if not at the default place)', required=False)
parser.add_argument('--gpu_index', help='GPU index (default=0).', type=int, default=0)
args = parser.parse_args()
image_embedding_caffemodel = os.path.join(g_image_embedding_testing_folder, 'snapshots%s_iter_%d.caffemodel'%(g_shapenet_synset_set_handle, args.iter_num))
image_embedding_prototxt = g_image_embedding_testing_prototxt
if args.caffemodel:
image_embedding_caffemodel = args.caffemodel
if args.prototxt:
image_embedding_prototxt = args.prototxt
print 'Image embedding for %s is:'%(args.image)
image_embedding_array = extract_cnn_features(img_filelist=args.image,
img_root='/',
prototxt=image_embedding_prototxt,
caffemodel=image_embedding_caffemodel,
feat_name='image_embedding',
caffe_path=g_caffe_install_path,
mean_file=g_mean_file)[0]
print image_embedding_array.tolist()
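# Example invocation (hypothetical paths):
#   python extract_image_embedding.py --image /path/to/cropped_chair.jpg -n 20000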
|
ShapeNet/JointEmbedding
|
src/image_embedding_testing/extract_image_embedding.py
|
Python
|
bsd-3-clause
| 1,866 | 0.010718 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2018-10-26 01:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('pttrack', '0006_referral_additional_fields_20180826'),
('followup', '0002_simplehistory_add_change_reason'),
]
operations = [
migrations.CreateModel(
name='FollowupRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('written_datetime', models.DateTimeField(auto_now_add=True)),
('last_modified', models.DateTimeField(auto_now=True)),
('completion_date', models.DateTimeField(blank=True, null=True)),
('due_date', models.DateField(help_text=b'MM/DD/YYYY or YYYY-MM-DD')),
('contact_instructions', models.TextField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.Provider')),
('author_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.ProviderType')),
('completion_author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='referral_followuprequest_completed', to='pttrack.Provider')),
('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.Patient')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='PatientContact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('written_datetime', models.DateTimeField(auto_now_add=True)),
('last_modified', models.DateTimeField(auto_now=True)),
('has_appointment', models.CharField(blank=True, choices=[(b'Y', b'Yes'), (b'N', b'No')], help_text=b'Did the patient make an appointment?', max_length=1, verbose_name=b'Appointment scheduled?')),
('pt_showed', models.CharField(blank=True, choices=[(b'Y', b'Yes'), (b'N', b'No')], help_text=b'Did the patient show up to the appointment?', max_length=1, null=True, verbose_name=b'Appointment attended?')),
('appointment_location', models.ManyToManyField(blank=True, help_text=b'Where did the patient make an appointment?', to='pttrack.ReferralLocation')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.Provider')),
('author_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.ProviderType')),
('contact_method', models.ForeignKey(help_text=b'What was the method of contact?', on_delete=django.db.models.deletion.CASCADE, to='pttrack.ContactMethod')),
('contact_status', models.ForeignKey(help_text=b'Did you make contact with the patient about this referral?', on_delete=django.db.models.deletion.CASCADE, to='followup.ContactResult')),
('followup_request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='referral.FollowupRequest')),
('no_apt_reason', models.ForeignKey(blank=True, help_text=b"If the patient didn't make an appointment, why not?", null=True, on_delete=django.db.models.deletion.CASCADE, to='followup.NoAptReason', verbose_name=b'No appointment reason')),
('no_show_reason', models.ForeignKey(blank=True, help_text=b"If the patient didn't go to the appointment, why not?", null=True, on_delete=django.db.models.deletion.CASCADE, to='followup.NoShowReason')),
('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.Patient')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Referral',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('written_datetime', models.DateTimeField(auto_now_add=True)),
('last_modified', models.DateTimeField(auto_now=True)),
('comments', models.TextField(blank=True)),
('status', models.CharField(choices=[(b'S', b'Successful'), (b'P', b'Pending'), (b'U', b'Unsuccessful')], default=b'P', max_length=50)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.Provider')),
('author_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.ProviderType')),
('kind', models.ForeignKey(help_text=b'The kind of care the patient should recieve at the referral location.', on_delete=django.db.models.deletion.CASCADE, to='pttrack.ReferralType')),
('location', models.ManyToManyField(to='pttrack.ReferralLocation')),
('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pttrack.Patient')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='patientcontact',
name='referral',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='referral.Referral'),
),
migrations.AddField(
model_name='followuprequest',
name='referral',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='referral.Referral'),
),
]
|
SaturdayNeighborhoodHealthClinic/osler
|
referral/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 5,790 | 0.004836 |
import os
from .base import * # NOQA
import dj_database_url
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
)
DATABASES = {'default': dj_database_url.config()}
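# dj_database_url.config() parses the DATABASE_URL environment variable, e.g.
# (placeholder credentials): DATABASE_URL=postgres://user:secret@host:5432/dbname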
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console', 'mail_admins'],
'level': 'DEBUG',
'propagate': True,
},
'django.request': {
'handlers': ['console', 'mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'django.db.backends': {
'handlers': ['console', 'mail_admins'],
'level': 'INFO',
'propagate': False,
},
# Catch All Logger -- Captures any other logging
'': {
'handlers': ['console', 'mail_admins'],
'level': 'DEBUG',
'propagate': True,
}
}
}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-host
EMAIL_HOST = os.environ.get('MAILGUN_SMTP_SERVER', None)
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-host-password
EMAIL_HOST_PASSWORD = os.environ.get('MAILGUN_SMTP_PASSWORD', None)
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-host-user
EMAIL_HOST_USER = os.environ.get('MAILGUN_SMTP_LOGIN', None)
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-port
EMAIL_PORT = os.environ.get('MAILGUN_SMTP_PORT', None)
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = '[Scorinator] '
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#email-use-tls
EMAIL_USE_TLS = True
# See: https://docs.djangoproject.com/en/1.3/ref/settings/#server-email
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL CONFIGURATION
|
kencochrane/scorinator
|
scorinator/scorinator/settings/prod.py
|
Python
|
apache-2.0
| 2,973 | 0.001345 |
"""
This code was Ported from CPython's sha512module.c
"""
import _struct as struct
SHA_BLOCKSIZE = 128
SHA_DIGESTSIZE = 64
def new_shaobject():
return {
'digest': [0]*8,
'count_lo': 0,
'count_hi': 0,
'data': [0]* SHA_BLOCKSIZE,
'local': 0,
'digestsize': 0
}
ROR64 = lambda x, y: (((x & 0xffffffffffffffff) >> (y & 63)) | (x << (64 - (y & 63)))) & 0xffffffffffffffff
Ch = lambda x, y, z: (z ^ (x & (y ^ z)))
Maj = lambda x, y, z: (((x | y) & z) | (x & y))
S = lambda x, n: ROR64(x, n)
R = lambda x, n: (x & 0xffffffffffffffff) >> n
Sigma0 = lambda x: (S(x, 28) ^ S(x, 34) ^ S(x, 39))
Sigma1 = lambda x: (S(x, 14) ^ S(x, 18) ^ S(x, 41))
Gamma0 = lambda x: (S(x, 1) ^ S(x, 8) ^ R(x, 7))
Gamma1 = lambda x: (S(x, 19) ^ S(x, 61) ^ R(x, 6))
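# Worked example: ROR64(0x1, 1) == 0x8000000000000000 — the low bit wraps
# around to bit 63, as expected for a 64-bit rotate-right.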
def sha_transform(sha_info):
W = []
d = sha_info['data']
for i in xrange(0,16):
W.append( (d[8*i]<<56) + (d[8*i+1]<<48) + (d[8*i+2]<<40) + (d[8*i+3]<<32) + (d[8*i+4]<<24) + (d[8*i+5]<<16) + (d[8*i+6]<<8) + d[8*i+7])
for i in xrange(16,80):
W.append( (Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16]) & 0xffffffffffffffff )
ss = sha_info['digest'][:]
def RND(a,b,c,d,e,f,g,h,i,ki):
t0 = (h + Sigma1(e) + Ch(e, f, g) + ki + W[i]) & 0xffffffffffffffff
t1 = (Sigma0(a) + Maj(a, b, c)) & 0xffffffffffffffff
d = (d + t0) & 0xffffffffffffffff
h = (t0 + t1) & 0xffffffffffffffff
return d & 0xffffffffffffffff, h & 0xffffffffffffffff
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],0,0x428a2f98d728ae22)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],1,0x7137449123ef65cd)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],2,0xb5c0fbcfec4d3b2f)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],3,0xe9b5dba58189dbbc)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],4,0x3956c25bf348b538)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],5,0x59f111f1b605d019)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],6,0x923f82a4af194f9b)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],7,0xab1c5ed5da6d8118)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],8,0xd807aa98a3030242)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],9,0x12835b0145706fbe)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],10,0x243185be4ee4b28c)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],11,0x550c7dc3d5ffb4e2)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],12,0x72be5d74f27b896f)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],13,0x80deb1fe3b1696b1)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],14,0x9bdc06a725c71235)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],15,0xc19bf174cf692694)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],16,0xe49b69c19ef14ad2)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],17,0xefbe4786384f25e3)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],18,0x0fc19dc68b8cd5b5)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],19,0x240ca1cc77ac9c65)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],20,0x2de92c6f592b0275)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],21,0x4a7484aa6ea6e483)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],22,0x5cb0a9dcbd41fbd4)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],23,0x76f988da831153b5)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],24,0x983e5152ee66dfab)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],25,0xa831c66d2db43210)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],26,0xb00327c898fb213f)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],27,0xbf597fc7beef0ee4)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],28,0xc6e00bf33da88fc2)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],29,0xd5a79147930aa725)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],30,0x06ca6351e003826f)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],31,0x142929670a0e6e70)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],32,0x27b70a8546d22ffc)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],33,0x2e1b21385c26c926)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],34,0x4d2c6dfc5ac42aed)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],35,0x53380d139d95b3df)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],36,0x650a73548baf63de)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],37,0x766a0abb3c77b2a8)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],38,0x81c2c92e47edaee6)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],39,0x92722c851482353b)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],40,0xa2bfe8a14cf10364)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],41,0xa81a664bbc423001)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],42,0xc24b8b70d0f89791)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],43,0xc76c51a30654be30)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],44,0xd192e819d6ef5218)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],45,0xd69906245565a910)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],46,0xf40e35855771202a)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],47,0x106aa07032bbd1b8)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],48,0x19a4c116b8d2d0c8)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],49,0x1e376c085141ab53)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],50,0x2748774cdf8eeb99)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],51,0x34b0bcb5e19b48a8)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],52,0x391c0cb3c5c95a63)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],53,0x4ed8aa4ae3418acb)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],54,0x5b9cca4f7763e373)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],55,0x682e6ff3d6b2b8a3)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],56,0x748f82ee5defb2fc)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],57,0x78a5636f43172f60)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],58,0x84c87814a1f0ab72)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],59,0x8cc702081a6439ec)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],60,0x90befffa23631e28)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],61,0xa4506cebde82bde9)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],62,0xbef9a3f7b2c67915)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],63,0xc67178f2e372532b)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],64,0xca273eceea26619c)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],65,0xd186b8c721c0c207)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],66,0xeada7dd6cde0eb1e)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],67,0xf57d4f7fee6ed178)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],68,0x06f067aa72176fba)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],69,0x0a637dc5a2c898a6)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],70,0x113f9804bef90dae)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],71,0x1b710b35131c471b)
ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],72,0x28db77f523047d84)
ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],73,0x32caab7b40c72493)
ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],74,0x3c9ebe0a15c9bebc)
ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],75,0x431d67c49c100d4c)
ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],76,0x4cc5d4becb3e42b6)
ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],77,0x597f299cfc657e2a)
ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],78,0x5fcb6fab3ad6faec)
ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],79,0x6c44198c4a475817)
dig = []
for i, x in enumerate(sha_info['digest']):
dig.append( (x + ss[i]) & 0xffffffffffffffff )
sha_info['digest'] = dig
def sha_init():
sha_info = new_shaobject()
sha_info['digest'] = [ 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179]
sha_info['count_lo'] = 0
sha_info['count_hi'] = 0
sha_info['local'] = 0
sha_info['digestsize'] = 64
return sha_info
def sha384_init():
sha_info = new_shaobject()
sha_info['digest'] = [ 0xcbbb9d5dc1059ed8, 0x629a292a367cd507, 0x9159015a3070dd17, 0x152fecd8f70e5939, 0x67332667ffc00b31, 0x8eb44a8768581511, 0xdb0c2e0d64f98fa7, 0x47b5481dbefa4fa4]
sha_info['count_lo'] = 0
sha_info['count_hi'] = 0
sha_info['local'] = 0
sha_info['digestsize'] = 48
return sha_info
def getbuf(s):
if isinstance(s, str):
return s
elif isinstance(s, unicode):
return str(s)
else:
return buffer(s)
def sha_update(sha_info, buffer):
count = len(buffer)
buffer_idx = 0
clo = (sha_info['count_lo'] + (count << 3)) & 0xffffffff
if clo < sha_info['count_lo']:
sha_info['count_hi'] += 1
sha_info['count_lo'] = clo
sha_info['count_hi'] += (count >> 29)
if sha_info['local']:
i = SHA_BLOCKSIZE - sha_info['local']
if i > count:
i = count
# copy buffer
for x in enumerate(buffer[buffer_idx:buffer_idx+i]):
sha_info['data'][sha_info['local']+x[0]] = struct.unpack('B', x[1])[0]
count -= i
buffer_idx += i
sha_info['local'] += i
if sha_info['local'] == SHA_BLOCKSIZE:
sha_transform(sha_info)
sha_info['local'] = 0
else:
return
while count >= SHA_BLOCKSIZE:
# copy buffer
sha_info['data'] = [struct.unpack('B',c)[0] for c in buffer[buffer_idx:buffer_idx + SHA_BLOCKSIZE]]
count -= SHA_BLOCKSIZE
buffer_idx += SHA_BLOCKSIZE
sha_transform(sha_info)
# copy buffer
pos = sha_info['local']
sha_info['data'][pos:pos+count] = [struct.unpack('B',c)[0] for c in buffer[buffer_idx:buffer_idx + count]]
sha_info['local'] = count
def sha_final(sha_info):
lo_bit_count = sha_info['count_lo']
hi_bit_count = sha_info['count_hi']
count = (lo_bit_count >> 3) & 0x7f
sha_info['data'][count] = 0x80;
count += 1
if count > SHA_BLOCKSIZE - 16:
# zero the bytes in data after the count
sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count))
sha_transform(sha_info)
# zero bytes in data
sha_info['data'] = [0] * SHA_BLOCKSIZE
else:
sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count))
sha_info['data'][112] = 0;
sha_info['data'][113] = 0;
sha_info['data'][114] = 0;
sha_info['data'][115] = 0;
sha_info['data'][116] = 0;
sha_info['data'][117] = 0;
sha_info['data'][118] = 0;
sha_info['data'][119] = 0;
sha_info['data'][120] = (hi_bit_count >> 24) & 0xff
sha_info['data'][121] = (hi_bit_count >> 16) & 0xff
sha_info['data'][122] = (hi_bit_count >> 8) & 0xff
sha_info['data'][123] = (hi_bit_count >> 0) & 0xff
sha_info['data'][124] = (lo_bit_count >> 24) & 0xff
sha_info['data'][125] = (lo_bit_count >> 16) & 0xff
sha_info['data'][126] = (lo_bit_count >> 8) & 0xff
sha_info['data'][127] = (lo_bit_count >> 0) & 0xff
sha_transform(sha_info)
dig = []
for i in sha_info['digest']:
dig.extend([ ((i>>56) & 0xff), ((i>>48) & 0xff), ((i>>40) & 0xff), ((i>>32) & 0xff), ((i>>24) & 0xff), ((i>>16) & 0xff), ((i>>8) & 0xff), (i & 0xff) ])
return ''.join([chr(i) for i in dig])
class sha512(object):
digest_size = digestsize = SHA_DIGESTSIZE
block_size = SHA_BLOCKSIZE
def __init__(self, s=None):
self._sha = sha_init()
if s:
sha_update(self._sha, getbuf(s))
def update(self, s):
sha_update(self._sha, getbuf(s))
def digest(self):
return sha_final(self._sha.copy())[:self._sha['digestsize']]
def hexdigest(self):
return ''.join([('0%x' % ord(i))[-2:] for i in self.digest()])
def copy(self):
new = sha512.__new__(sha512)
new._sha = self._sha.copy()
return new
class sha384(sha512):
digest_size = digestsize = 48
def __init__(self, s=None):
self._sha = sha384_init()
if s:
sha_update(self._sha, getbuf(s))
def copy(self):
new = sha384.__new__(sha384)
new._sha = self._sha.copy()
return new
def test():
import _sha512
a_str = "just a test string"
assert _sha512.sha512().hexdigest() == sha512().hexdigest()
assert _sha512.sha512(a_str).hexdigest() == sha512(a_str).hexdigest()
assert _sha512.sha512(a_str*7).hexdigest() == sha512(a_str*7).hexdigest()
s = sha512(a_str)
s.update(a_str)
assert _sha512.sha512(a_str+a_str).hexdigest() == s.hexdigest()
if __name__ == "__main__":
test()
|
aisk/grumpy
|
third_party/pypy/_sha512.py
|
Python
|
apache-2.0
| 14,181 | 0.062125 |
"""Configuration file parser.
A configuration file consists of sections, lead by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True):
Create the parser. When `defaults' is given, it is initialized into the
dictionary of intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given file section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
from collections.abc import MutableMapping
from collections import OrderedDict as _default_dict, ChainMap as _ChainMap
import functools
import io
import itertools
import re
import sys
import warnings
__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError",
"NoOptionError", "InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
self.args = (section, )
class DuplicateSectionError(Error):
"""Raised when a section is repeated in an input source.
Possible repetitions that raise this exception are: multiple creation
using the API or in strict parsers when a section is found more than once
in a single input file, string or dictionary.
"""
def __init__(self, section, source=None, lineno=None):
msg = [repr(section), " already exists"]
if source is not None:
message = ["While reading from ", repr(source)]
if lineno is not None:
message.append(" [line {0:2d}]".format(lineno))
message.append(": section ")
message.extend(msg)
msg = message
else:
msg.insert(0, "Section ")
Error.__init__(self, "".join(msg))
self.section = section
self.source = source
self.lineno = lineno
self.args = (section, source, lineno)
class DuplicateOptionError(Error):
"""Raised by strict parsers when an option is repeated in an input source.
Current implementation raises this exception only when an option is found
more than once in a single file, string or dictionary.
"""
def __init__(self, section, option, source=None, lineno=None):
msg = [repr(option), " in section ", repr(section),
" already exists"]
if source is not None:
message = ["While reading from ", repr(source)]
if lineno is not None:
message.append(" [line {0:2d}]".format(lineno))
message.append(": option ")
message.extend(msg)
msg = message
else:
msg.insert(0, "Option ")
Error.__init__(self, "".join(msg))
self.section = section
self.option = option
self.source = source
self.lineno = lineno
self.args = (section, option, source, lineno)
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
self.args = (option, section)
class InterpolationError(Error):
"""Base class for interpolation-related exceptions."""
def __init__(self, option, section, msg):
Error.__init__(self, msg)
self.option = option
self.section = section
self.args = (option, section, msg)
class InterpolationMissingOptionError(InterpolationError):
"""A string substitution required a setting which was not available."""
def __init__(self, option, section, rawval, reference):
msg = ("Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
InterpolationError.__init__(self, option, section, msg)
self.reference = reference
self.args = (option, section, rawval, reference)
class InterpolationSyntaxError(InterpolationError):
"""Raised when the source text contains invalid syntax.
Current implementation raises this exception when the source text into
which substitutions are made does not conform to the required syntax.
"""
class InterpolationDepthError(InterpolationError):
"""Raised when substitutions are nested too deeply."""
def __init__(self, option, section, rawval):
msg = ("Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
InterpolationError.__init__(self, option, section, msg)
self.args = (option, section, rawval)
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, source=None, filename=None):
# Exactly one of `source'/`filename' arguments has to be given.
# `filename' kept for compatibility.
if filename and source:
raise ValueError("Cannot specify both `filename' and `source'. "
"Use `source'.")
elif not filename and not source:
raise ValueError("Required argument `source' not given.")
elif filename:
source = filename
Error.__init__(self, 'Source contains parsing errors: %r' % source)
self.source = source
self.errors = []
self.args = (source, )
@property
def filename(self):
"""Deprecated, use `source'."""
warnings.warn(
"The 'filename' attribute will be removed in future versions. "
"Use 'source' instead.",
DeprecationWarning, stacklevel=2
)
return self.source
@filename.setter
def filename(self, value):
"""Deprecated, user `source'."""
warnings.warn(
"The 'filename' attribute will be removed in future versions. "
"Use 'source' instead.",
DeprecationWarning, stacklevel=2
)
self.source = value
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %r, line: %d\n%r' %
(filename, lineno, line))
self.source = filename
self.lineno = lineno
self.line = line
self.args = (filename, lineno, line)
# Used in parser getters to indicate the default behaviour when a specific
# option is not found is to raise an exception. Created to enable `None' as
# a valid fallback value.
_UNSET = object()
class Interpolation:
"""Dummy interpolation that passes the value through with no changes."""
def before_get(self, parser, section, option, value, defaults):
return value
def before_set(self, parser, section, option, value):
return value
def before_read(self, parser, section, option, value):
return value
def before_write(self, parser, section, option, value):
return value
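# The four hooks above define the interpolation contract: before_read and
# before_set normalize values on their way into the parser's storage, while
# before_get and before_write transform stored values on their way out.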
class BasicInterpolation(Interpolation):
"""Interpolation as implemented in the classic ConfigParser.
The option values can contain format strings which refer to other values in
the same section, or values in the special default section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand. If a user needs to use a bare % in
a configuration file, she can escape it by writing %%. Other % usage
is considered a user error and raises `InterpolationSyntaxError'."""
_KEYCRE = re.compile(r"%\(([^)]+)\)s")
def before_get(self, parser, section, option, value, defaults):
L = []
self._interpolate_some(parser, option, L, value, section, defaults, 1)
return ''.join(L)
def before_set(self, parser, section, option, value):
tmp_value = value.replace('%%', '') # escaped percent signs
tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax
if '%' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('%')))
return value
def _interpolate_some(self, parser, option, accum, rest, section, map,
depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("%")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "%":
accum.append("%")
rest = rest[2:]
elif c == "(":
m = self._KEYCRE.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
var = parser.optionxform(m.group(1))
rest = rest[m.end():]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rest, var)
if "%" in v:
self._interpolate_some(parser, option, accum, v,
section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%%' must be followed by '%%' or '(', "
"found: %r" % (rest,))
class ExtendedInterpolation(Interpolation):
"""Advanced variant of interpolation, supports the syntax used by
`zc.buildout'. Enables interpolation between sections."""
_KEYCRE = re.compile(r"\$\{([^}]+)\}")
def before_get(self, parser, section, option, value, defaults):
L = []
self._interpolate_some(parser, option, L, value, section, defaults, 1)
return ''.join(L)
def before_set(self, parser, section, option, value):
tmp_value = value.replace('$$', '') # escaped dollar signs
tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax
if '$' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('$')))
return value
def _interpolate_some(self, parser, option, accum, rest, section, map,
depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("$")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "$":
accum.append("$")
rest = rest[2:]
elif c == "{":
m = self._KEYCRE.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
path = m.group(1).split(':')
rest = rest[m.end():]
sect = section
opt = option
try:
if len(path) == 1:
opt = parser.optionxform(path[0])
v = map[opt]
elif len(path) == 2:
sect = path[0]
opt = parser.optionxform(path[1])
v = parser.get(sect, opt, raw=True)
else:
raise InterpolationSyntaxError(
option, section,
"More than one ':' found: %r" % (rest,))
except (KeyError, NoSectionError, NoOptionError):
raise InterpolationMissingOptionError(
option, section, rest, ":".join(path))
if "$" in v:
self._interpolate_some(parser, opt, accum, v, sect,
dict(parser.items(sect, raw=True)),
depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'$' must be followed by '$' or '{', "
"found: %r" % (rest,))
class LegacyInterpolation(Interpolation):
"""Deprecated interpolation used in old versions of ConfigParser.
Use BasicInterpolation or ExtendedInterpolation instead."""
_KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
def before_get(self, parser, section, option, value, vars):
rawval = value
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if value and "%(" in value:
replace = functools.partial(self._interpolation_replace,
parser=parser)
value = self._KEYCRE.sub(replace, value)
try:
value = value % vars
except KeyError as e:
raise InterpolationMissingOptionError(
option, section, rawval, e.args[0])
else:
break
if value and "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
def before_set(self, parser, section, option, value):
return value
@staticmethod
def _interpolation_replace(match, parser):
s = match.group(1)
if s is None:
return match.group()
else:
return "%%(%s)s" % parser.optionxform(s)
class RawConfigParser(MutableMapping):
"""ConfigParser that does not do interpolation."""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Interpolation algorithm to be used if the user does not specify another
_DEFAULT_INTERPOLATION = Interpolation()
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
# Possible boolean values in the configuration.
BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def __init__(self, defaults=None, dict_type=_default_dict,
allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, empty_lines_in_values=True,
default_section=DEFAULTSECT,
interpolation=_UNSET):
self._dict = dict_type
self._sections = self._dict()
self._defaults = self._dict()
self._proxies = self._dict()
self._proxies[default_section] = SectionProxy(self, default_section)
if defaults:
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
self._empty_lines_in_values = empty_lines_in_values
self.default_section=default_section
self._interpolation = interpolation
if self._interpolation is _UNSET:
self._interpolation = self._DEFAULT_INTERPOLATION
if self._interpolation is None:
self._interpolation = Interpolation()
def defaults(self):
return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return list(self._sections.keys())
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
"""
if section == self.default_section:
raise ValueError('Invalid section name: %r' % section)
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = self._dict()
self._proxies[section] = SectionProxy(self, section)
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section)
opts.update(self._defaults)
return list(opts.keys())
def read(self, filenames, encoding=None):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, str):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
except OSError:
continue
read_ok.append(filename)
return read_ok
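    # Illustrative sketch: read() skips unreadable files silently and reports
    # what it actually parsed, e.g.
    #
    #     parser = RawConfigParser()
    #     parsed = parser.read(['defaults.cfg', 'missing.cfg'])
    #     # parsed lists only the files that could be opened and parsed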
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The `f' argument must be iterable, returning one line at a time.
Optional second argument is the `source' specifying the name of the
file being read. If not given, it is taken from f.name. If `f' has no
`name' attribute, `<???>' is used.
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string."""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def read_dict(self, dictionary, source='<dict>'):
"""Read configuration from a dictionary.
Keys are section names, values are dictionaries with keys and values
that should be present in the section. If the used dictionary type
preserves order, sections and their keys will be added in order.
All types held in the dictionary are converted to strings during
reading, including section names, option names and keys.
Optional second argument is the `source' specifying the name of the
dictionary being read.
"""
elements_added = set()
for section, keys in dictionary.items():
section = str(section)
try:
self.add_section(section)
except (DuplicateSectionError, ValueError):
if self._strict and section in elements_added:
raise
elements_added.add(section)
for key, value in keys.items():
key = self.optionxform(str(key))
if value is not None:
value = str(value)
if self._strict and (section, key) in elements_added:
raise DuplicateOptionError(section, key, source)
elements_added.add((section, key))
self.set(section, key, value)
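    # Illustrative sketch: read_dict() accepts any section -> options mapping
    # and coerces names and values to strings, e.g.
    #
    #     parser.read_dict({'server': {'host': 'localhost', 'port': 8080}})
    #     parser.get('server', 'port')   # -> '8080'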
def readfp(self, fp, filename=None):
"""Deprecated, use read_file instead."""
warnings.warn(
"This method will be removed in future versions. "
"Use 'parser.read_file()' instead.",
DeprecationWarning, stacklevel=2
)
self.read_file(fp, source=filename)
def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET):
"""Get an option value for a given section.
If `vars' is provided, it must be a dictionary. The option is looked up
in `vars' (if provided), `section', and in `DEFAULTSECT' in that order.
If the key is not found and `fallback' is provided, it is used as
a fallback value. `None' can be provided as a `fallback' value.
If interpolation is enabled and the optional argument `raw' is False,
all interpolations are expanded in the return values.
Arguments `raw', `vars', and `fallback' are keyword only.
The section DEFAULT is special.
"""
try:
d = self._unify_values(section, vars)
except NoSectionError:
if fallback is _UNSET:
raise
else:
return fallback
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
if fallback is _UNSET:
raise NoOptionError(option, section)
else:
return fallback
if raw or value is None:
return value
else:
return self._interpolation.before_get(self, section, option, value,
d)
def _get(self, section, conv, option, **kwargs):
return conv(self.get(section, option, **kwargs))
def getint(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, int, option, raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
def getfloat(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, float, option, raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
def getboolean(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, self._convert_to_boolean, option,
raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
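    # Illustrative sketch: the typed getters take a keyword-only fallback, so
    # a missing section or option need not raise, e.g.
    #
    #     parser.getboolean('server', 'debug', fallback=False)
    #     parser.getint('server', 'port', fallback=8080)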
def items(self, section=_UNSET, raw=False, vars=None):
"""Return a list of (name, value) tuples for each option in a section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
        `vars' argument, which must be a dictionary whose contents override
any pre-existing defaults.
The section DEFAULT is special.
"""
if section is _UNSET:
return super().items()
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
value_getter = lambda option: self._interpolation.before_get(self,
section, option, d[option], d)
if raw:
value_getter = lambda option: d[option]
return [(option, value_getter(option)) for option in d.keys()]
def popitem(self):
"""Remove a section from the parser and return it as
a (section_name, section_proxy) tuple. If no section is present, raise
KeyError.
The section DEFAULT is never returned because it cannot be removed.
"""
for key in self.sections():
value = self[key]
del self[key]
return key, value
raise KeyError
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section.
If the specified `section' is None or an empty string, DEFAULT is
assumed. If the specified `section' does not exist, returns False."""
if not section or section == self.default_section:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
def set(self, section, option, value=None):
"""Set an option."""
if value:
value = self._interpolation.before_set(self, section, option,
value)
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
sectdict[self.optionxform(option)] = value
def write(self, fp, space_around_delimiters=True):
"""Write an .ini-format representation of the configuration state.
If `space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
if space_around_delimiters:
d = " {} ".format(self._delimiters[0])
else:
d = self._delimiters[0]
if self._defaults:
self._write_section(fp, self.default_section,
self._defaults.items(), d)
for section in self._sections:
self._write_section(fp, section,
self._sections[section].items(), d)
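    # Illustrative sketch: write() emits the DEFAULT section first (when it
    # has entries), then every named section, e.g.
    #
    #     with open('out.ini', 'w') as fp:
    #         parser.write(fp)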
def _write_section(self, fp, section_name, section_items, delimiter):
"""Write a single section to the specified `fp'."""
fp.write("[{}]\n".format(section_name))
for key, value in section_items:
value = self._interpolation.before_write(self, section_name, key,
value)
if value is not None or not self._allow_no_value:
value = delimiter + str(value).replace('\n', '\n\t')
else:
value = ""
fp.write("{}{}\n".format(key, value))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
del self._proxies[section]
return existed
def __getitem__(self, key):
if key != self.default_section and not self.has_section(key):
raise KeyError(key)
return self._proxies[key]
def __setitem__(self, key, value):
# To conform with the mapping protocol, overwrites existing values in
# the section.
# XXX this is not atomic if read_dict fails at any point. Then again,
# no update method in configparser is atomic in this implementation.
if key == self.default_section:
self._defaults.clear()
elif key in self._sections:
self._sections[key].clear()
self.read_dict({key: value})
def __delitem__(self, key):
if key == self.default_section:
raise ValueError("Cannot remove the default section.")
if not self.has_section(key):
raise KeyError(key)
self.remove_section(key)
def __contains__(self, key):
return key == self.default_section or self.has_section(key)
def __len__(self):
return len(self._sections) + 1 # the default section
def __iter__(self):
# XXX does it break when underlying container state changed?
return itertools.chain((self.default_section,), self._sections.keys())
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]'), plus key/value options, indicated by
`name' and `value' delimited with a specific substring (`=' or `:' by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#' and `;' by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
"""
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
else:
# empty line marks end of value
indent_level = sys.maxsize
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
elif sectname == self.default_section:
cursect = self._defaults
else:
cursect = self._dict()
self._sections[sectname] = cursect
self._proxies[sectname] = SectionProxy(self, sectname)
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
self._join_multiline_values()
def _join_multiline_values(self):
defaults = self.default_section, self._defaults
all_sections = itertools.chain((defaults,),
self._sections.items())
for section, options in all_sections:
for name, val in options.items():
if isinstance(val, list):
val = '\n'.join(val).rstrip()
options[name] = self._interpolation.before_read(self,
section,
name, val)
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def _unify_values(self, section, vars):
"""Create a sequence of lookups with 'vars' taking priority over
the 'section' which takes priority over the DEFAULTSECT.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
if value is not None:
value = str(value)
vardict[self.optionxform(key)] = value
return _ChainMap(vardict, sectiondict, self._defaults)
def _convert_to_boolean(self, value):
"""Return a boolean value translating from other types if necessary.
"""
if value.lower() not in self.BOOLEAN_STATES:
raise ValueError('Not a boolean: %s' % value)
return self.BOOLEAN_STATES[value.lower()]
def _validate_value_types(self, *, section="", option="", value=""):
"""Raises a TypeError for non-string values.
The only legal non-string value if we allow valueless
options is None, so we need to check if the value is a
string if:
- we do not allow valueless options, or
- we allow valueless options but the value is not None
For compatibility reasons this method is not used in classic set()
for RawConfigParsers. It is invoked in every case for mapping protocol
access and in ConfigParser.set().
"""
if not isinstance(section, str):
raise TypeError("section names must be strings")
if not isinstance(option, str):
raise TypeError("option keys must be strings")
if not self._allow_no_value or value:
if not isinstance(value, str):
raise TypeError("option values must be strings")
class ConfigParser(RawConfigParser):
"""ConfigParser implementing interpolation."""
_DEFAULT_INTERPOLATION = BasicInterpolation()
def set(self, section, option, value=None):
"""Set an option. Extends RawConfigParser.set by validating type and
interpolation syntax on the value."""
self._validate_value_types(option=option, value=value)
super().set(section, option, value)
def add_section(self, section):
"""Create a new section in the configuration. Extends
RawConfigParser.add_section by validating if the section name is
a string."""
self._validate_value_types(section=section)
super().add_section(section)
class SafeConfigParser(ConfigParser):
"""ConfigParser alias for backwards compatibility purposes."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(
"The SafeConfigParser class has been renamed to ConfigParser "
"in Python 3.2. This alias will be removed in future versions."
" Use ConfigParser directly instead.",
DeprecationWarning, stacklevel=2
)
class SectionProxy(MutableMapping):
"""A proxy for a single section from a parser."""
def __init__(self, parser, name):
"""Creates a view on a section of the specified `name` in `parser`."""
self._parser = parser
self._name = name
def __repr__(self):
return '<Section: {}>'.format(self._name)
def __getitem__(self, key):
if not self._parser.has_option(self._name, key):
raise KeyError(key)
return self._parser.get(self._name, key)
def __setitem__(self, key, value):
self._parser._validate_value_types(option=key, value=value)
return self._parser.set(self._name, key, value)
def __delitem__(self, key):
if not (self._parser.has_option(self._name, key) and
self._parser.remove_option(self._name, key)):
raise KeyError(key)
def __contains__(self, key):
return self._parser.has_option(self._name, key)
def __len__(self):
return len(self._options())
def __iter__(self):
return self._options().__iter__()
def _options(self):
if self._name != self._parser.default_section:
return self._parser.options(self._name)
else:
return self._parser.defaults()
def get(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.get(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getint(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getint(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getfloat(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getfloat(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getboolean(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getboolean(self._name, option, raw=raw, vars=vars,
fallback=fallback)
@property
def parser(self):
# The parser object of the proxy is read-only.
return self._parser
@property
def name(self):
# The name of the section on a proxy is read-only.
return self._name
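# Illustrative sketch: SectionProxy is what backs the mapping protocol, so a
# parser can be used like a dict of dicts:
#
#     parser = ConfigParser()
#     parser.read_string("[server]\nhost = localhost\n")
#     parser['server']['host']        # -> 'localhost'
#     'host' in parser['server']      # -> True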
if fallback is _UNSET:
raise
else:
return fallback
def getboolean(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, self._convert_to_boolean, option,
raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
def items(self, section=_UNSET, raw=False, vars=None):
"""Return a list of (name, value) tuples for each option in a section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special.
"""
if section is _UNSET:
return super().items()
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
value_getter = lambda option: self._interpolation.before_get(self,
section, option, d[option], d)
if raw:
value_getter = lambda option: d[option]
return [(option, value_getter(option)) for option in d.keys()]
def popitem(self):
"""Remove a section from the parser and return it as
a (section_name, section_proxy) tuple. If no section is present, raise
KeyError.
The section DEFAULT is never returned because it cannot be removed.
"""
for key in self.sections():
value = self[key]
del self[key]
return key, value
raise KeyError
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section.
If the specified `section' is None or an empty string, DEFAULT is
assumed. If the specified `section' does not exist, returns False."""
if not section or section == self.default_section:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
def set(self, section, option, value=None):
"""Set an option."""
if value:
value = self._interpolation.before_set(self, section, option,
value)
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
sectdict[self.optionxform(option)] = value
def write(self, fp, space_around_delimiters=True):
"""Write an .ini-format representation of the configuration state.
If `space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
if space_around_delimiters:
d = " {} ".format(self._delimiters[0])
else:
d = self._delimiters[0]
if self._defaults:
self._write_section(fp, self.default_section,
self._defaults.items(), d)
for section in self._sections:
self._write_section(fp, section,
self._sections[section].items(), d)
def _write_section(self, fp, section_name, section_items, delimiter):
"""Write a single section to the specified `fp'."""
fp.write("[{}]\n".format(section_name))
for key, value in section_items:
value = self._interpolation.before_write(self, section_name, key,
value)
if value is not None or not self._allow_no_value:
value = delimiter + str(value).replace('\n', '\n\t')
else:
value = ""
fp.write("{}{}\n".format(key, value))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
del self._proxies[section]
return existed
def __getitem__(self, key):
if key != self.default_section and not self.has_section(key):
raise KeyError(key)
return self._proxies[key]
def __setitem__(self, key, value):
# To conform with the mapping protocol, overwrites existing values in
# the section.
# XXX this is not atomic if read_dict fails at any point. Then again,
# no update method in configparser is atomic in this implementation.
if key == self.default_section:
self._defaults.clear()
elif key in self._sections:
self._sections[key].clear()
self.read_dict({key: value})
def __delitem__(self, key):
if key == self.default_section:
raise ValueError("Cannot remove the default section.")
if not self.has_section(key):
raise KeyError(key)
self.remove_section(key)
def __contains__(self, key):
return key == self.default_section or self.has_section(key)
def __len__(self):
return len(self._sections) + 1 # the default section
def __iter__(self):
# XXX does it break when underlying container state changed?
return itertools.chain((self.default_section,), self._sections.keys())
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]'), plus key/value options, indicated by
`name' and `value' delimited with a specific substring (`=' or `:' by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#' and `;' by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
"""
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
else:
# empty line marks end of value
indent_level = sys.maxsize
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
elif sectname == self.default_section:
cursect = self._defaults
else:
cursect = self._dict()
self._sections[sectname] = cursect
self._proxies[sectname] = SectionProxy(self, sectname)
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
self._join_multiline_values()
def _join_multiline_values(self):
defaults = self.default_section, self._defaults
all_sections = itertools.chain((defaults,),
self._sections.items())
for section, options in all_sections:
for name, val in options.items():
if isinstance(val, list):
val = '\n'.join(val).rstrip()
options[name] = self._interpolation.before_read(self,
section,
name, val)
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def _unify_values(self, section, vars):
"""Create a sequence of lookups with 'vars' taking priority over
the 'section' which takes priority over the DEFAULTSECT.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
if value is not None:
value = str(value)
vardict[self.optionxform(key)] = value
return _ChainMap(vardict, sectiondict, self._defaults)
def _convert_to_boolean(self, value):
"""Return a boolean value translating from other types if necessary.
"""
if value.lower() not in self.BOOLEAN_STATES:
raise ValueError('Not a boolean: %s' % value)
return self.BOOLEAN_STATES[value.lower()]
def _validate_value_types(self, *, section="", option="", value=""):
"""Raises a TypeError for non-string values.
The only legal non-string value if we allow valueless
options is None, so we need to check if the value is a
string if:
- we do not allow valueless options, or
- we allow valueless options but the value is not None
For compatibility reasons this method is not used in classic set()
for RawConfigParsers. It is invoked in every case for mapping protocol
access and in ConfigParser.set().
"""
if not isinstance(section, str):
raise TypeError("section names must be strings")
if not isinstance(option, str):
raise TypeError("option keys must be strings")
if not self._allow_no_value or value:
if not isinstance(value, str):
raise TypeError("option values must be strings")
class ConfigParser(RawConfigParser):
"""ConfigParser implementing interpolation."""
_DEFAULT_INTERPOLATION = BasicInterpolation()
def set(self, section, option, value=None):
"""Set an option. Extends RawConfigParser.set by validating type and
interpolation syntax on the value."""
self._validate_value_types(option=option, value=value)
super().set(section, option, value)
def add_section(self, section):
"""Create a new section in the configuration. Extends
RawConfigParser.add_section by validating if the section name is
a string."""
self._validate_value_types(section=section)
super().add_section(section)
class SafeConfigParser(ConfigParser):
"""ConfigParser alias for backwards compatibility purposes."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(
"The SafeConfigParser class has been renamed to ConfigParser "
"in Python 3.2. This alias will be removed in future versions."
" Use ConfigParser directly instead.",
DeprecationWarning, stacklevel=2
)
class SectionProxy(MutableMapping):
"""A proxy for a single section from a parser."""
def __init__(self, parser, name):
"""Creates a view on a section of the specified `name` in `parser`."""
self._parser = parser
self._name = name
def __repr__(self):
return '<Section: {}>'.format(self._name)
def __getitem__(self, key):
if not self._parser.has_option(self._name, key):
raise KeyError(key)
return self._parser.get(self._name, key)
def __setitem__(self, key, value):
self._parser._validate_value_types(option=key, value=value)
return self._parser.set(self._name, key, value)
def __delitem__(self, key):
if not (self._parser.has_option(self._name, key) and
self._parser.remove_option(self._name, key)):
raise KeyError(key)
def __contains__(self, key):
return self._parser.has_option(self._name, key)
def __len__(self):
return len(self._options())
def __iter__(self):
return self._options().__iter__()
def _options(self):
if self._name != self._parser.default_section:
return self._parser.options(self._name)
else:
return self._parser.defaults()
def get(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.get(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getint(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getint(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getfloat(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getfloat(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getboolean(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getboolean(self._name, option, raw=raw, vars=vars,
fallback=fallback)
@property
def parser(self):
# The parser object of the proxy is read-only.
return self._parser
@property
def name(self):
# The name of the section on a proxy is read-only.
return self._name
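# --- Illustrative usage sketch (editor's addition, not part of the original
# module). It exercises only the public API defined above; the section and
# option names are invented for demonstration.
if __name__ == '__main__':
    demo = ConfigParser()
    demo.read_string("""
[paths]
home = /tmp
cache = %(home)s/cache

[server]
port = 8080
debug = yes
""")
    assert demo.sections() == ['paths', 'server']
    assert demo.get('paths', 'cache') == '/tmp/cache'  # BasicInterpolation
    assert demo.getint('server', 'port') == 8080
    assert demo.getboolean('server', 'debug') is True
    # Mapping-protocol access goes through SectionProxy:
    assert demo['server']['port'] == '8080'
    assert demo.get('server', 'missing', fallback='n/a') == 'n/a'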
|
ArcherSys/ArcherSys
|
Lib/configparser.py
|
Python
|
mit
| 148,451 | 0.000626 |
"""
pystrix.ami.dahdi
=================
Provides classes meant to be fed to a `Manager` instance's `send_action()` function.
Specifically, this module provides implementations for features specific to the DAHDI technology.
Legal
-----
This file is part of pystrix.
pystrix is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU General Public License and
GNU Lesser General Public License along with this program. If not, see
<http://www.gnu.org/licenses/>.
(C) Ivrnet, inc., 2011
Authors:
- Neil Tallim <n.tallim@ivrnet.com>
The requests implemented by this module follow the definitions provided by
https://wiki.asterisk.org/
"""
from ami import (_Request, ManagerError)
import dahdi_events
import generic_transforms
class DAHDIDNDoff(_Request):
"""
Sets a DAHDI channel's DND status to off.
"""
def __init__(self, dahdi_channel):
"""
`dahdi_channel` is the channel to modify.
"""
_Request.__init__(self, 'DAHDIDNDoff')
self['DAHDIChannel'] = dahdi_channel
class DAHDIDNDon(_Request):
"""
Sets a DAHDI channel's DND status to on.
"""
def __init__(self, dahdi_channel):
"""
`dahdi_channel` is the channel to modify.
"""
_Request.__init__(self, 'DAHDIDNDon')
self['DAHDIChannel'] = dahdi_channel
class DAHDIDialOffhook(_Request):
"""
Dials a number on an off-hook DAHDI channel.
"""
def __init__(self, dahdi_channel, number):
"""
`dahdi_channel` is the channel to use and `number` is the number to dial.
"""
_Request.__init__(self, 'DAHDIDialOffhook')
self['DAHDIChannel'] = dahdi_channel
self['Number'] = number
class DAHDIHangup(_Request):
"""
Hangs up a DAHDI channel.
"""
def __init__(self, dahdi_channel):
"""
`dahdi_channel` is the channel to hang up.
"""
_Request.__init__(self, 'DAHDIHangup')
self['DAHDIChannel'] = dahdi_channel
class DAHDIRestart(_Request):
"""
Fully restarts all DAHDI channels.
"""
def __init__(self):
_Request.__init__(self, 'DAHDIRestart')
class DAHDIShowChannels(_Request):
"""
Provides the current status of all (or one) DAHDI channels through a series of
'DAHDIShowChannels' events, ending with a 'DAHDIShowChannelsComplete' event.
"""
_aggregates = (dahdi_events.DAHDIShowChannels_Aggregate,)
_synchronous_events_list = (dahdi_events.DAHDIShowChannels,)
_synchronous_events_finalising = (dahdi_events.DAHDIShowChannelsComplete,)
def __init__(self, dahdi_channel=None):
_Request.__init__(self, 'DAHDIShowChannels')
if not dahdi_channel is None:
self['DAHDIChannel'] = dahdi_channel
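# A minimal usage sketch, assuming an already connected and authenticated
# pystrix `Manager` instance named `manager` (its construction is outside the
# scope of this module); the channel number and digits are illustrative.
#
#     manager.send_action(DAHDIDNDon(1))                  # DND on for channel 1
#     manager.send_action(DAHDIDialOffhook(1, '5551234')) # dial on channel 1
#     manager.send_action(DAHDIShowChannels())            # emits DAHDIShowChannels events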
|
nhtdata/pystrix
|
pystrix/ami/dahdi.py
|
Python
|
gpl-3.0
| 3,238 | 0.004941 |
import RPi.GPIO as GPIO
import time
from array import *
#bipolar stepper driver pin configuration
out1 = 11
out2 = 13
out3 = 15
out4 = 16
#delay value
timeValue = 0.005
#stepper pin list
outs = [out1,out2,out3,out4]
#coil energizing sequence used to step the motor
matriz = [
[1,0,0,1],
[1,1,0,0],
[0,1,1,0],
[0,0,1,1],
]
#pin setup
GPIO.setmode(GPIO.BOARD)
for o in outs:
GPIO.setup(o,GPIO.OUT)
def wakeupMotor():
for o in outs:
GPIO.output(o,GPIO.HIGH)
def sleepMotor():
for o in outs:
GPIO.output(o,GPIO.LOW)
def setMatrizPins(pin,valor):
if (valor == 0):
GPIO.output(outs[pin],GPIO.LOW)
if (valor == 1):
GPIO.output(outs[pin],GPIO.HIGH)
def runForward():
i = 0
while (i < 4):
#print(matriz[i][0],matriz[i][1],matriz[i][2],matriz[i][3])
setMatrizPins(0,matriz[i][0])
setMatrizPins(1,matriz[i][1])
setMatrizPins(2,matriz[i][2])
setMatrizPins(3,matriz[i][3])
i = i + 1
time.sleep(timeValue)
def runBackwards():
i = 3
while (i >=0):
#print(matriz[i][0],matriz[i][1],matriz[i][2],matriz[i][3])
setMatrizPins(0,matriz[i][0])
setMatrizPins(1,matriz[i][1])
setMatrizPins(2,matriz[i][2])
setMatrizPins(3,matriz[i][3])
i = i - 1
time.sleep(timeValue)
#main
print('starting stepper')
for x in range(500):
runBackwards()
print(x)
sleepMotor()
GPIO.cleanup()
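#runForward() above is unused in this demo; with the same wiring it can be
#used in place of runBackwards() to spin the motor the other way:
#
#     wakeupMotor()
#     for x in range(500):
#         runForward()
#     sleepMotor()
#     GPIO.cleanup()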
|
Locottus/Python
|
machines/python/stepperLeft.py
|
Python
|
mit
| 1,515 | 0.031023 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import dateutil.parser
from models import F1ImportError, \
LectPool, \
codigoOrigen_to_O, \
O_to_codigoOrigen
IMP_ERRORS = {}
def register(cls_error):
name = cls_error.__name__
if name in IMP_ERRORS.keys():
return True
else:
IMP_ERRORS[name] = cls_error
class ImpError(object):
priority = None
@classmethod
def check(cls, O, e):
pass
def __init__(self, O, e):
self.O = O
self.e = e
def fix(self):
pass
def done(self):
pass
    def __gt__(self, other):
        return self.priority > other.priority
    def __lt__(self, other):
        return self.priority < other.priority
    def __ge__(self, other):
        return self.priority >= other.priority
    def __le__(self, other):
        return self.priority <= other.priority
class SmallDiffError(ImpError):
description = 'ERROR: Difference between abs(XML-DB) > 2.0'
priority = 1
exit = True
invoicing = False
max_diff = 2.0
@classmethod
def check(cls, O, e):
if e.error.tipus not in ['A', 'R']:
return False
diff = e.error.valor_xml-e.error.valor_db
if abs(diff) < cls.max_diff:
return True
return False
def fix(self):
e = self.e
O = self.O
filename = os.path.join('/tmp', e.name)
e.F1.dump(filename)
e.update_xml_attribute('Lectura')
e.reload(update=True)
return e
register(SmallDiffError)
class UnionFenosa0measError(ImpError):
description = 'Union Fenosa NULL measurement'
priority = 2
exit = True
invoicing = False
@classmethod
def check(cls, O, e):
uf_id = O.ResPartner.search([('name', '=', 'UNIÓN FENOSA DISTRIBUCIÓN S.A.')])[0]
return e.polissa.distribuidora[0] == uf_id and e.error.valor_xml == 0
def fix(self):
e = self.e
O = self.O
exception_tag = "Union Fenosa fix"
if e.polissa.tarifa not in ['2.0A', '2.1']:
raise Exception('{exception_tag}: DHA and >15kW not handled'.format(**locals()))
if len(e.F1.root.Facturas.FacturaATR) > 1:
raise Exception('{exception_tag}: Factura with multiple FacturaATR'.format(**locals()))
TerminoEnergiaActiva = e.F1.root.Facturas.FacturaATR.EnergiaActiva.TerminoEnergiaActiva
consumption = None
# TODO: Check whethet there's any later invoice
        for TerminoEnergiaActiva_ in TerminoEnergiaActiva:
            if TerminoEnergiaActiva_.FechaDesde == e.error.data or TerminoEnergiaActiva_.FechaHasta == e.error.data:
                consumption = TerminoEnergiaActiva_.Periodo[0].ValorEnergiaActiva
                break
if not consumption:
raise Exception('{exception_tag}: Consumption not found'.format(**locals()))
if len(e.F1.root.Facturas.FacturaATR) > 1 or len(e.F1.root.Facturas.FacturaATR.Medidas) > 1 :
raise Exception('{exception_tag}: Factura with multiple FacturaATR or Medidas'.format(**locals()))
# Backup
filename = os.path.join('/tmp', e.name)
e.F1.dump(filename)
for idx_ap, Aparato in enumerate(e.F1.root.Facturas.FacturaATR.Medidas.Aparato):
if (Aparato.Tipo in ['CC', 'CA', 'P']) and (Aparato.CodigoDH == 1):
for idx_int, Integrador in enumerate(Aparato.Integrador):
if not (Integrador.Magnitud == 'AE' or Integrador.CodigoPeriodo == '10'):
continue
if not Integrador.ConsumoCalculado == consumption:
raise Exception('{exception_tag}: Integrador and factura doesn\'t match'.format(**locals()))
DesdeFechaHora = dateutil.parser.parse(
str(Integrador.LecturaDesde.FechaHora)).date().strftime('%Y-%m-%d')
HastaFechaHora = dateutil.parser.parse(
str(Integrador.LecturaHasta.FechaHora)).date().strftime('%Y-%m-%d')
if (DesdeFechaHora == e.error.data and
((Integrador.LecturaDesde.Lectura == 0) and (Integrador.LecturaHasta.Lectura == 0))):
Integrador.LecturaDesde.Lectura = e.error.valor_db
Integrador.LecturaHasta.Lectura = e.error.valor_db + consumption
e.reload(update=True)
fields_to_search = [('comptador.polissa', '=', e.polissa.id[0]),
('name', 'in', [DesdeFechaHora, HastaFechaHora]),
('lectura', '=', e.error.valor_db + consumption )]
lect_pool_ids = O.GiscedataLecturesLecturaPool.search(fields_to_search)
if not len(lect_pool_ids) > 0:
raise Exception('{exception_tag}: Failed updating lectura'.format(**locals()))
elif (HastaFechaHora == e.error.data and
((Integrador.LecturaDesde.Lectura == 0) and (Integrador.LecturaHasta.Lectura == 0))):
fields_to_search = [('comptador.polissa', '=', e.polissa.id[0]),
('name', '=',DesdeFechaHora)]
lect_pool_ids = O.GiscedataLecturesLecturaPool.search(fields_to_search)
if len(lect_pool_ids) != 1:
raise Exception('{exception_tag}: Failed updating lectura'.format(**locals()))
Integrador.LecturaDesde.Lectura = e.error.valor_db
Integrador.LecturaHasta.Lectura = e.error.valor_db + consumption
e.reload(update=True)
fields_to_search = [('comptador.polissa', '=', e.polissa.id[0]),
('name', 'in', [DesdeFechaHora, HastaFechaHora]),
('lectura', '=', e.error.valor_db + consumption )]
lect_pool_ids = O.GiscedataLecturesLecturaPool.search(fields_to_search)
if not len(lect_pool_ids) > 0:
raise Exception('{exception_tag}: Failed updating lectura'.format(**locals()))
lect_pool = LectPool(O, lect_pool_ids[0])
lect_pool.update_observacions('R. 0 Estimada a partir de consum F1 (ABr)')
return
raise Exception('{exception_tag}: Scenario not found'.format(**locals()))
register(UnionFenosa0measError)
class StartOfContractError(ImpError):
description = 'WARNING: ** Contract- First Measure **'
priority = 3
exit = True
invoicing = False
@classmethod
def check(cls, O, e):
return e.error.data == e.polissa.data_alta
register(StartOfContractError)
class EndOfContractError(ImpError):
description = 'WARNING: ** Contract- Last measure **'
priority = 4
exit = False
invoicing = True
@classmethod
def check(cls, O, e):
return e.error.data == e.polissa.data_baixa
register(EndOfContractError)
class StartOfMeterError(ImpError):
description = 'WARNING: ** Meter- First measure **'
priority = 5
exit = True
invoicing = False
@classmethod
def check(cls, O, e):
fields_to_search = [('polissa', '=', e.polissa.id), ('name', '=', e.error.comptador)]
comptador_ids = O.GiscedataLecturesComptador.search(fields_to_search, 0, 0, False, {'active_test': False})
if len(comptador_ids) == 0:
raise Exception('Comptador missing')
comptador_id = comptador_ids[0]
fields_to_search = [('comptador', '=', comptador_id)]
lect_pool_id = sorted(O.GiscedataLecturesLecturaPool.search(fields_to_search))[0]
        fields_to_read = ['name']
return e.error.data == O.GiscedataLecturesLecturaPool.read(lect_pool_id, fields_to_read)['name']
register(StartOfMeterError)
class EndOfMeterError(ImpError):
description = 'WARNING: ** Meter - Last measure **'
priority = 6
exit = True
invoicing = False
@classmethod
def check(cls, O, e):
fields_to_search = [('polissa', '=', e.polissa.id), ('name', '=', e.error.comptador)]
comptador_ids = O.GiscedataLecturesComptador.search(fields_to_search, 0, 0, False, {'active_test': False})
if len(comptador_ids) == 0:
raise Exception('Comptador missing')
comptador_id = comptador_ids[0]
fields_to_search = [('comptador', '=', comptador_id)]
lect_pool_id = sorted(O.GiscedataLecturesLecturaPool.search(fields_to_search), reverse=True)[0]
        fields_to_read = ['name']
return e.error.data == O.GiscedataLecturesLecturaPool.read(lect_pool_id, fields_to_read)['name']
register(EndOfMeterError)
class OldError(ImpError):
description = 'ERROR: XML entry timestamp < BDD entry timestamp'
priority = 7
exit = True
invoicing = False
@classmethod
def check(cls, O, e):
# Check F1_write_date <= DDBB_write_date
F1_write_date = dateutil.parser.parse(str(e.F1.root.Cabecera.FechaSolicitud)).replace(tzinfo=None)
DB_write_date = dateutil.parser.parse(e.error.lects_pool[e.error.periode].write_date)
return F1_write_date <= DB_write_date
def fix(self):
e = self.e
O = self.O
old_value = int(e.error.valor_db)
new_value = int(e.error.valor_xml)
old_origen = O_to_codigoOrigen[e.error.lects_pool[e.error.periode].origen]
new_origen = codigoOrigen_to_O[str(e.get_xml_attribute('Procedencia'))]
e.error.lects_pool[e.error.periode].update_lectura(new_value,
new_value,
origen=O_to_codigoOrigen[new_origen],
update_observacions=True,
observacions='XML ({})'.format(new_origen),
observacions_date=e.request_date)
e.reload(update=False)
def done(self):
e = self.e
old_value = int(e.error.valor_db)
new_value = int(e.error.valor_xml)
old_origen = O_to_codigoOrigen[e.error.lects_pool[e.error.periode].origen]
new_origen = codigoOrigen_to_O[str(e.get_xml_attribute('Procedencia'))]
e.error.lects_pool[e.error.periode].update_lectura(new_value,
old_value,
origen=old_origen,
update_observacions=False)
register(OldError)
class NewError(ImpError):
description = 'ERROR: XML entry timestamp > BDD entry timestamp'
priority = 8
exit = True
invoicing = True
UPDATE_ACTION = {
('Estimada', 'Estimada'): True,
('Autolectura', 'Autolectura'): True,
('Real', 'Real'): True,
('Estimada', 'Autolectura'): True,
('Autolectura', 'Estimada'): False,
('Estimada', 'Real'): True,
('Real', 'Estimada'): False,
('Autolectura', 'Real'): True,
('Real', 'Autolectura'): False
}
@classmethod
def check(cls, O, e):
# Check F1_write_date > DDBB_write_date
F1_write_date = dateutil.parser.parse(str(e.F1.root.Cabecera.FechaSolicitud)).replace(tzinfo=None)
DB_write_date = dateutil.parser.parse(e.error.lects_pool[e.error.periode].write_date)
return F1_write_date > DB_write_date
def get_new_origen(self):
return codigoOrigen_to_O[str(self.e.get_xml_attribute('Procedencia'))]
def get_old_origen(self):
return self.e.error.lects_pool[self.e.error.periode].origen
def get_action(self):
e = self.e
O = self.O
new_origen = self.get_new_origen()
old_origen = self.get_old_origen()
origen_groups = {
'Telemesura': 'Real',
'Telemesura corregida': 'Real',
'Telemedida': 'Real',
'Telemedida corregida': 'Real',
'TPL': 'Real',
'TPL corregida': 'Real',
'Visual': 'Real',
'Visual corregida': 'Real',
'Estimada': 'Estimada',
'Autolectura': 'Autolectura',
'Sense Lectura': 'Estimada',
'Sin Lectura': 'Estimada'
}
new_origen_group = origen_groups[new_origen]
old_origen_group = origen_groups[old_origen]
return (old_origen_group, new_origen_group)
def fix(self):
e = self.e
O = self.O
new_origen = self.get_new_origen()
new_value = e.error.valor_xml
old_origen = self.get_old_origen()
old_value = e.error.valor_db
action_id = self.get_action()
if action_id not in self.UPDATE_ACTION.keys():
            raise Exception('Scenario not handled {}'.format(action_id))
if self.UPDATE_ACTION[action_id]:
e.error.lects_pool[e.error.periode].update_lectura(old_value,
new_value,
origen=O_to_codigoOrigen[new_origen],
update_observacions=True,
observacions='BBDD ({})'.format(old_origen),
observacions_date= e.request_date)
else:
e.error.lects_pool[e.error.periode].update_lectura(old_value,
new_value,
origen=O_to_codigoOrigen[new_origen],
update_observacions=False)
e.reload(update=False)
return action_id
def done(self):
e = self.e
O = self.O
action_id = self.get_action()
if not self.UPDATE_ACTION[action_id]:
new_origen = self.get_new_origen()
new_value = e.error.valor_xml
old_origen = self.get_old_origen()
old_value = e.error.valor_db
e.error.lects_pool[e.error.periode].update_lectura(new_value,
old_value,
origen=O_to_codigoOrigen[old_origen],
update_observacions=True,
observacions='XML ({})'.format(new_origen),
observacions_date= e.request_date)
register(NewError)
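# A minimal sketch of how the IMP_ERRORS registry might be driven; `O` (an
# ERP client proxy) and `import_errors` (an iterable of F1ImportError-like
# records) are assumed to exist and are not defined in this module.
#
#     handlers = sorted(IMP_ERRORS.values(), key=lambda cls: cls.priority)
#     for e in import_errors:
#         for handler_cls in handlers:
#             if handler_cls.check(O, e):
#                 handler = handler_cls(O, e)
#                 handler.fix()
#                 handler.done()
#                 break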
|
Som-Energia/invoice-janitor
|
invoicing/f1fixing/import_error/errors.py
|
Python
|
agpl-3.0
| 15,081 | 0.006366 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Andreas Büsching <crunchy@bitkipper.net>
#
# a generic dispatcher implementation
#
# Copyright (C) 2006, 2007, 2009, 2010
# Andreas Büsching <crunchy@bitkipper.net>
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version
# 2.1 as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""generic implementation of external dispatchers, integratable into
several notifiers."""
# required for dispatcher use
MIN_TIMER = 100
__dispatchers = {}
__dispatchers[True] = []
__dispatchers[False] = []
def dispatcher_add(method, min_timeout=True):
"""The notifier supports external dispatcher functions that will be
called within each scheduler step. This functionality may be useful
for applications having an own event mechanism that needs to be
triggered as often as possible. This method registers a new
dispatcher function. To ensure that the notifier loop does not
suspend to long in the sleep state during the poll a minimal timer
MIN_TIMER is set to guarantee that the dispatcher functions are
called at least every MIN_TIMER milliseconds."""
global __dispatchers, MIN_TIMER
__dispatchers[min_timeout].append(method)
if __dispatchers[True]:
return MIN_TIMER
else:
return None
def dispatcher_remove(method):
"""Removes an external dispatcher function from the list"""
global __dispatchers, MIN_TIMER
for val in (True, False):
if method in __dispatchers[val]:
__dispatchers[val].remove(method)
break
if __dispatchers[True]:
return MIN_TIMER
else:
return None
def dispatcher_run():
"""Invokes all registered dispatcher functions"""
global __dispatchers
for val in (True, False):
# there is no need to copy an empty dict
if not __dispatchers[val]:
continue
for disp in __dispatchers[val][:]:
if not disp():
dispatcher_remove(disp)
if __dispatchers[True]:
return MIN_TIMER
else:
return None
def dispatcher_count():
    """Returns the number of registered dispatcher functions"""
    global __dispatchers
    return len(__dispatchers[True]) + len(__dispatchers[False])
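# A minimal usage sketch: a dispatcher stays registered for as long as it
# returns a true value, and dispatcher_run() drops it once it returns
# something false. The heartbeat function below is illustrative.
if __name__ == '__main__':
    state = {'calls': 0}

    def heartbeat():
        state['calls'] += 1
        print('dispatcher call #%d' % state['calls'])
        return state['calls'] < 3  # unregister after the third call

    dispatcher_add(heartbeat)
    while dispatcher_count():
        dispatcher_run()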
|
crunchy-github/python-notifier
|
notifier/dispatch.py
|
Python
|
lgpl-2.1
| 2,535 | 0.015792 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-01-20 19:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('subjects', '0012_auto_20170112_1408'),
]
operations = [
migrations.AlterField(
model_name='subject',
name='tags',
field=models.ManyToManyField(blank=True, null=True, to='subjects.Tag', verbose_name='tags'),
),
]
|
amadeusproject/amadeuslms
|
subjects/migrations/0013_auto_20170120_1610.py
|
Python
|
gpl-2.0
| 503 | 0.001988 |
# -*- encoding: utf-8 -*-
"""Test class for Template CLI
:Requirement: Template
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: CLI
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from fauxfactory import gen_string
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.factory import (
make_location,
make_org,
make_os,
make_template,
make_user,
)
from robottelo.cli.template import Template
from robottelo.cli.user import User
from robottelo.decorators import run_only_on, skip_if_bug_open, tier1, tier2
from robottelo.test import CLITestCase
class TemplateTestCase(CLITestCase):
"""Test class for Config Template CLI."""
@run_only_on('sat')
@tier1
def test_positive_create_with_name(self):
"""Check if Template can be created
:id: 77deaae8-447b-47cc-8af3-8b17476c905f
:expectedresults: Template is created
:CaseImportance: Critical
"""
name = gen_string('alpha')
template = make_template({'name': name})
self.assertEqual(template['name'], name)
@run_only_on('sat')
@tier1
def test_positive_update_name(self):
"""Check if Template can be updated
:id: 99bdab7b-1279-4349-a655-4294395ecbe1
:expectedresults: Template is updated
:CaseImportance: Critical
"""
template = make_template()
updated_name = gen_string('alpha')
Template.update({
'id': template['id'],
'name': updated_name,
})
template = Template.info({'id': template['id']})
self.assertEqual(updated_name, template['name'])
@tier1
def test_positive_update_with_manager_role(self):
"""Create template providing the initial name, then update its name
with manager user role.
:id: 28c4357a-93cb-4b01-a445-5db50435bcc0
:expectedresults: Provisioning Template is created, and its name can
be updated.
:CaseImportance: Critical
:BZ: 1277308
"""
new_name = gen_string('alpha')
username = gen_string('alpha')
password = gen_string('alpha')
org = make_org()
loc = make_location()
template = make_template({
'organization-ids': org['id'], 'location-ids': loc['id']})
# Create user with Manager role
user = make_user({
'login': username,
'password': password,
'admin': False,
'organization-ids': org['id'],
'location-ids': loc['id'],
})
User.add_role({'id': user['id'], 'role': "Manager"})
# Update template name with that user
Template.with_user(username=username, password=password).update({
'id': template['id'], 'name': new_name})
template = Template.info({'id': template['id']})
self.assertEqual(new_name, template['name'])
@run_only_on('sat')
@tier1
def test_positive_create_with_loc(self):
"""Check if Template with Location can be created
:id: 263aba0e-4f54-4227-af97-f4bc8f5c0788
:expectedresults: Template is created and new Location has been
assigned
:CaseImportance: Critical
"""
new_loc = make_location()
new_template = make_template({'location-ids': new_loc['id']})
self.assertIn(new_loc['name'], new_template['locations'])
@run_only_on('sat')
@tier1
def test_positive_create_locked(self):
"""Check that locked Template can be created
:id: ff10e369-85c6-45f3-9cda-7e1c17a6632d
:expectedresults: The locked template is created successfully
:CaseImportance: Critical
"""
new_template = make_template({
'locked': 'true',
'name': gen_string('alpha'),
})
self.assertEqual(new_template['locked'], 'yes')
@run_only_on('sat')
@tier1
def test_positive_create_with_org(self):
"""Check if Template with Organization can be created
:id: 5de5ca76-1a39-46ac-8dd4-5d41b4b49076
:expectedresults: Template is created and new Organization has been
assigned
:CaseImportance: Critical
"""
new_org = make_org()
new_template = make_template({
'name': gen_string('alpha'),
'organization-ids': new_org['id'],
})
self.assertIn(new_org['name'], new_template['organizations'])
@run_only_on('sat')
@tier2
def test_positive_add_os_by_id(self):
"""Check if operating system can be added to a template
:id: d9f481b3-9757-4208-b451-baf4792d4d70
:expectedresults: Operating system is added to the template
:CaseLevel: Integration
"""
new_template = make_template()
new_os = make_os()
Template.add_operatingsystem({
'id': new_template['id'],
'operatingsystem-id': new_os['id'],
})
new_template = Template.info({'id': new_template['id']})
os_string = '{0} {1}.{2}'.format(
new_os['name'], new_os['major-version'], new_os['minor-version'])
self.assertIn(os_string, new_template['operating-systems'])
@run_only_on('sat')
@skip_if_bug_open('bugzilla', 1395229)
@tier2
def test_positive_remove_os_by_id(self):
"""Check if operating system can be removed from a template
:id: b5362565-6dce-4770-81e1-4fe3ec6f6cee
:expectedresults: Operating system is removed from template
:CaseLevel: Integration
"""
template = make_template()
new_os = make_os()
Template.add_operatingsystem({
'id': template['id'],
'operatingsystem-id': new_os['id'],
})
template = Template.info({'id': template['id']})
os_string = '{0} {1}.{2}'.format(
new_os['name'], new_os['major-version'], new_os['minor-version']
)
self.assertIn(os_string, template['operating-systems'])
Template.remove_operatingsystem({
'id': template['id'],
'operatingsystem-id': new_os['id']
})
template = Template.info({'id': template['id']})
self.assertNotIn(os_string, template['operating-systems'])
@run_only_on('sat')
@tier1
def test_positive_create_with_content(self):
"""Check if Template can be created with specific content
:id: 0fcfc46d-5e97-4451-936a-e8684acac275
:expectedresults: Template is created with specific content
:CaseImportance: Critical
"""
content = gen_string('alpha')
name = gen_string('alpha')
template = make_template({
'content': content,
'name': name,
})
self.assertEqual(template['name'], name)
template_content = Template.dump({'id': template['id']})
self.assertIn(content, template_content[0])
@run_only_on('sat')
@tier1
def test_positive_delete_by_id(self):
"""Check if Template can be deleted
:id: 8e5245ee-13dd-44d4-8111-d4382cacf005
:expectedresults: Template is deleted
:CaseImportance: Critical
"""
template = make_template()
Template.delete({'id': template['id']})
with self.assertRaises(CLIReturnCodeError):
Template.info({'id': template['id']})
@run_only_on('sat')
@tier2
def test_positive_clone(self):
"""Assure ability to clone a provisioning template
:id: 27d69c1e-0d83-4b99-8a3c-4f1bdec3d261
:expectedresults: The template is cloned successfully
:CaseLevel: Integration
"""
cloned_template_name = gen_string('alpha')
template = make_template()
result = Template.clone({
'id': template['id'],
'new-name': cloned_template_name,
})
new_template = Template.info({'id': result[0]['id']})
self.assertEqual(new_template['name'], cloned_template_name)
|
ares/robottelo
|
tests/foreman/cli/test_template.py
|
Python
|
gpl-3.0
| 8,063 | 0 |
# -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : contact@netzob.org |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports |
#+---------------------------------------------------------------------------+
import logging
from netzob.Common.Type.TypeConvertor import TypeConvertor
#+---------------------------------------------------------------------------+
#| Related third party imports |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Local application imports |
#+---------------------------------------------------------------------------+
class Memory():
"""Memory:
    Definition of a memory, used to store variable values in a persistent and independent way.
"""
def __init__(self):
"""Constructor of Memory:
"""
# create logger with the given configuration
self.log = logging.getLogger('netzob.Common.MMSTD.Dictionary.Memory.py')
self.memory = dict()
self.temporaryMemory = dict()
self.memory_acces_cb = None
def setMemoryAccess_cb(self, cb):
"""Set the callback to execute after a memory access"""
self.memory_acces_cb = cb
def duplicate(self):
"""Duplicates in a new memory"""
duplicatedMemory = Memory()
for k in self.memory.keys():
duplicatedMemory.memory[k] = self.memory[k]
duplicatedMemory.createMemory()
return duplicatedMemory
#+---------------------------------------------------------------------------+
#| Functions on memories |
#+---------------------------------------------------------------------------+
def createMemory(self):
"""createMemory:
Reinit the temporary memory and copy all values from the real memory in it.
"""
self.temporaryMemory = dict()
for key in self.memory.keys():
self.temporaryMemory[key] = self.memory[key]
def persistMemory(self):
"""persistMemory:
Copy all values from the temporary memory into the real memory.
"""
self.memory = dict()
for key in self.temporaryMemory.keys():
self.memory[key] = self.temporaryMemory[key]
def cleanMemory(self):
"""cleanMemory:
Remove all variables and values from real and temporary memories.
"""
        # self.memory = dict() # TODO: implement this change in all calling functions.
self.temporaryMemory = dict()
def recallMemory(self):
"""recallMemory:
Return all values store in the temporary memory.
@return: the value of all variables in the temporary memory.
"""
return self.temporaryMemory
def printMemory(self):
"""printMemory:
Debug functions which print all values in temporary memory.
"""
self.log.debug("Memory map:")
for _id in self.temporaryMemory.keys():
self.log.debug("> " + str(_id) + " = " + str(self.temporaryMemory.get(_id)))
#+---------------------------------------------------------------------------+
#| Functions on temporary memory elements |
#+---------------------------------------------------------------------------+
def hasMemorized(self, variable):
"""hasMemorized:
Check if a variable is in the temporary memory.
@param variable: the given variable we search in memory.
@return: True if the variable has been found in the memory.
"""
return variable.getID() in self.temporaryMemory.keys()
def restore(self, variable):
"""restore:
Copy back the value of a variable from the real memory in the temporary memory.
@param variable: the given variable, the value of which we want to restore.
"""
if variable.getID() in self.memory.keys():
self.temporaryMemory[variable.getID()] = self.memory[variable.getID()]
if self.memory_acces_cb is not None:
value = variable.getCurrentValue()
if value is not None:
value = TypeConvertor.bin2strhex(value)
self.memory_acces_cb("W", variable, value)
def memorize(self, variable):
"""memorize:
Save the current value of a variable in memory.
@param variable: the given variable, the value of which we want to save.
"""
if variable.getCurrentValue() is not None:
self.temporaryMemory[variable.getID()] = variable.getCurrentValue()
if self.memory_acces_cb is not None:
value = variable.getCurrentValue()
if value is not None:
value = TypeConvertor.bin2strhex(value)
self.memory_acces_cb("W", variable, value)
def forget(self, variable):
"""forget:
Remove a variable and its value from the temporary memory.
"""
if self.hasMemorized(variable):
            del self.temporaryMemory[variable.getID()]
if self.memory_acces_cb is not None:
self.memory_acces_cb("D", variable, None)
def recall(self, variable):
"""recall:
Return the value of one variable store in the temporary memory.
@param variable: the variable, the value of which we are searching.
@return: the value of the given variable in the temporary memory.
"""
if self.hasMemorized(variable):
if self.memory_acces_cb is not None:
value = self.temporaryMemory[variable.getID()]
if value is not None:
value = TypeConvertor.bin2strhex(value)
self.memory_acces_cb("R", variable, value)
return self.temporaryMemory[variable.getID()]
else:
return None
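# A minimal sketch of the memorize/persist/recall life cycle; FakeVariable is
# a hypothetical stand-in that only implements the two accessors Memory
# actually uses (getID and getCurrentValue).
if __name__ == "__main__":
    class FakeVariable(object):
        def __init__(self, varId, value):
            self.varId = varId
            self.value = value

        def getID(self):
            return self.varId

        def getCurrentValue(self):
            return self.value

    memory = Memory()
    var = FakeVariable("var-1", "1010")
    memory.memorize(var)       # value stored in the temporary memory
    memory.persistMemory()     # temporary memory copied into the real one
    memory.createMemory()      # temporary memory rebuilt from the real one
    print(memory.recall(var))  # -> "1010"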
|
nagyistoce/netzob
|
src/netzob/Common/MMSTD/Dictionary/Memory.py
|
Python
|
gpl-3.0
| 8,119 | 0.006285 |
"""Welcome cog
Sends welcome DMs to users that join the server.
"""
import os
import logging
import discord
from discord.ext import commands
from __main__ import send_cmd_help # pylint: disable=no-name-in-module
from cogs.utils.dataIO import dataIO
# Requires checks utility from:
# https://github.com/Rapptz/RoboDanny/tree/master/cogs/utils
from cogs.utils import checks
#Global variables
DEFAULT_MESSAGE = "Welcome to the server! Hope you enjoy your stay!"
DEFAULT_TITLE = "Welcome!"
LOGGER = None
SAVE_FOLDER = "data/lui-cogs/welcome/" #Path to save folder.
SAVE_FILE = "settings.json"
def checkFolder():
"""Used to create the data folder at first startup"""
if not os.path.exists(SAVE_FOLDER):
print("Creating " + SAVE_FOLDER + " folder...")
os.makedirs(SAVE_FOLDER)
def checkFiles():
"""Used to initialize an empty database at first startup"""
theFile = SAVE_FOLDER + SAVE_FILE
if not dataIO.is_valid_json(theFile):
print("Creating default welcome settings.json...")
dataIO.save_json(theFile, {})
class Welcome: # pylint: disable=too-many-instance-attributes
"""Send a welcome DM on server join."""
def loadSettings(self):
"""Loads settings from the JSON file"""
self.settings = dataIO.load_json(SAVE_FOLDER+SAVE_FILE)
def saveSettings(self):
"""Loads settings from the JSON file"""
dataIO.save_json(SAVE_FOLDER+SAVE_FILE, self.settings)
#Class constructor
def __init__(self, bot):
self.bot = bot
#The JSON keys for the settings:
self.keyWelcomeDMEnabled = "welcomeDMEnabled"
self.keyWelcomeLogEnabled = "welcomeLogEnabled"
self.keyWelcomeLogChannel = "welcomeLogChannel"
self.keyWelcomeTitle = "welcomeTitle"
self.keyWelcomeMessage = "welcomeMessage"
self.keyWelcomeImage = "welcomeImage"
self.keyLeaveLogEnabled = "leaveLogEnabled"
self.keyLeaveLogChannel = "leaveLogChannel"
checkFolder()
checkFiles()
self.loadSettings()
#The async function that is triggered on new member join.
async def sendWelcomeMessage(self, newUser, test=False):
"""Sends the welcome message in DM."""
serverId = newUser.server.id
#Do not send DM if it is disabled!
if not self.settings[serverId][self.keyWelcomeDMEnabled]:
return
try:
welcomeEmbed = discord.Embed(title=self.settings[serverId][self.keyWelcomeTitle])
welcomeEmbed.description = self.settings[serverId][self.keyWelcomeMessage]
welcomeEmbed.colour = discord.Colour.red()
if self.keyWelcomeImage in self.settings[serverId].keys() and \
self.settings[serverId][self.keyWelcomeImage]:
imageUrl = self.settings[serverId][self.keyWelcomeImage]
welcomeEmbed.set_image(url=imageUrl.replace(" ", "%20"))
await self.bot.send_message(newUser, embed=welcomeEmbed)
except (discord.Forbidden, discord.HTTPException) as errorMsg:
LOGGER.error("Could not send message, the user may have"
"turned off DM's from this server."
" Also, make sure the server has a title "
"and message set!", exc_info=True)
LOGGER.error(errorMsg)
if self.settings[serverId][self.keyWelcomeLogEnabled] and not test:
channel = self.bot.get_channel(self.settings[serverId][self.keyWelcomeLogChannel])
await self.bot.send_message(channel,
":bangbang: ``Server Welcome:`` User "
"{0.name}#{0.discriminator} ({0.id}) has"
" joined. Could not send DM!".format(
newUser))
await self.bot.send_message(channel, errorMsg)
else:
if self.settings[serverId][self.keyWelcomeLogEnabled] and not test:
channel = self.bot.get_channel(self.settings[serverId][self.keyWelcomeLogChannel])
await self.bot.send_message(channel,
":o: ``Server Welcome:`` User {0.name}#"
"{0.discriminator} ({0.id}) has joined. "
"DM sent.".format(newUser))
LOGGER.info("User %s#%s (%s) has joined. DM sent.",
newUser.name,
newUser.discriminator,
newUser.id)
async def logServerLeave(self, leaveUser):
"""Logs the server leave to a channel, if enabled."""
serverId = leaveUser.server.id
if self.settings[serverId][self.keyLeaveLogEnabled]:
channel = self.bot.get_channel(self.settings[serverId][self.keyLeaveLogChannel])
await self.bot.send_message(channel,
":x: ``Server Leave :`` User {0.name}#"
"{0.discriminator} ({0.id}) has left the "
"server.".format(leaveUser))
LOGGER.info("User %s#%s (%s) has left the server.",
leaveUser.name,
leaveUser.discriminator,
leaveUser.id)
####################
# MESSAGE COMMANDS #
####################
#[p]welcome
@commands.group(name="welcome", pass_context=True, no_pm=False)
@checks.serverowner() #Only allow server owner to execute the following command.
async def _welcome(self, ctx):
"""Server welcome message settings."""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
#[p]welcome setmessage
@_welcome.command(pass_context=True, no_pm=False)
@checks.serverowner() #Only allow server owner to execute the following command.
async def setmessage(self, ctx):
"""Interactively configure the contents of the welcome DM."""
await self.bot.say("What would you like the welcome DM message to be?")
message = await self.bot.wait_for_message(timeout=60,
author=ctx.message.author,
channel=ctx.message.channel)
if message is None:
await self.bot.say("No response received, not setting anything!")
return
if len(message.content) > 2048:
await self.bot.say("Your message is too long!")
return
try:
self.loadSettings()
if ctx.message.author.server.id in self.settings:
self.settings[ctx.message.author.server.id] \
[self.keyWelcomeMessage] = message.content
else:
self.settings[ctx.message.author.server.id] = {}
self.settings[ctx.message.author.server.id] \
[self.keyWelcomeMessage] = message.content
self.saveSettings()
except Exception as errorMsg: # pylint: disable=broad-except
await self.bot.say("Could not save settings! Check the console for "
"details.")
print(errorMsg)
else:
await self.bot.say("Message set to:")
await self.bot.say("```" + message.content + "```")
LOGGER.info("Message changed by %s#%s (%s)",
ctx.message.author.name,
ctx.message.author.discriminator,
ctx.message.author.id)
LOGGER.info(message.content)
#[p]welcome toggledm
@_welcome.command(pass_context=True, no_pm=False)
@checks.serverowner() #Only allow server owner to execute the following command.
async def toggledm(self, ctx):
"""Toggle sending a welcome DM."""
self.loadSettings()
try:
if self.settings[ctx.message.author.server.id][self.keyWelcomeDMEnabled]:
self.settings[ctx.message.author.server.id][self.keyWelcomeDMEnabled] = False
isSet = False
else:
self.settings[ctx.message.author.server.id][self.keyWelcomeDMEnabled] = True
isSet = True
except KeyError:
self.settings[ctx.message.author.server.id][self.keyWelcomeDMEnabled] = True
isSet = True
self.saveSettings()
if isSet:
await self.bot.say(":white_check_mark: Server Welcome - DM: Enabled.")
LOGGER.info("Message toggle ENABLED by %s#%s (%s)",
ctx.message.author.name,
ctx.message.author.discriminator,
ctx.message.author.id)
else:
await self.bot.say(":negative_squared_cross_mark: Server Welcome - DM: "
"Disabled.")
LOGGER.info("Message toggle DISABLED by %s#%s (%s)",
ctx.message.author.name,
ctx.message.author.discriminator,
ctx.message.author.id)
#[p]welcome togglelog
@_welcome.command(pass_context=True, no_pm=False, name="togglelog")
@checks.serverowner() #Only allow server owner to execute the following command.
async def toggleLog(self, ctx):
"""Toggle sending logs to a channel."""
self.loadSettings()
#If no channel is set, send error.
if not self.settings[ctx.message.author.server.id][self.keyWelcomeLogChannel] \
or not self.settings[ctx.message.author.server.id][self.keyLeaveLogChannel]:
await self.bot.say(":negative_squared_cross_mark: Please set a log channel first!")
return
try:
if self.settings[ctx.message.author.server.id][self.keyWelcomeLogEnabled]:
self.settings[ctx.message.author.server.id][self.keyWelcomeLogEnabled] = False
self.settings[ctx.message.author.server.id][self.keyLeaveLogEnabled] = False
isSet = False
else:
self.settings[ctx.message.author.server.id][self.keyWelcomeLogEnabled] = True
self.settings[ctx.message.author.server.id][self.keyLeaveLogEnabled] = True
isSet = True
except KeyError:
self.settings[ctx.message.author.server.id][self.keyWelcomeLogEnabled] = True
self.settings[ctx.message.author.server.id][self.keyLeaveLogEnabled] = True
isSet = True
self.saveSettings()
if isSet:
await self.bot.say(":white_check_mark: Server Welcome/Leave - Logging: "
"Enabled.")
LOGGER.info("Welcome channel logging ENABLED by %s#%s (%s)",
ctx.message.author.name,
ctx.message.author.discriminator,
ctx.message.author.id)
else:
await self.bot.say(":negative_squared_cross_mark: Server Welcome/Leave "
"- Logging: Disabled.")
LOGGER.info("Welcome channel logging DISABLED by %s#%s (%s)",
ctx.message.author.name,
ctx.message.author.discriminator,
ctx.message.author.id)
#[p]welcome setlog
@_welcome.command(pass_context=True, no_pm=True, name="setlog")
@checks.serverowner() #Only allow server owner to execute the following command.
async def setLog(self, ctx):
"""Enables, and sets current channel as log channel."""
self.loadSettings()
serverId = ctx.message.author.server.id
try:
self.settings[serverId][self.keyWelcomeLogChannel] = ctx.message.channel.id
self.settings[serverId][self.keyWelcomeLogEnabled] = True
self.settings[serverId][self.keyLeaveLogChannel] = ctx.message.channel.id
self.settings[serverId][self.keyLeaveLogEnabled] = True
except KeyError as errorMsg: #Typically a KeyError
await self.bot.say(":negative_squared_cross_mark: Please set default "
"settings first!")
print(errorMsg)
else:
self.saveSettings()
await self.bot.say(":white_check_mark: Server Welcome/Leave - Logging: "
"Enabled, and will be logged to this channel only.")
LOGGER.info("Welcome channel changed by %s#%s (%s)",
ctx.message.author.name,
ctx.message.author.discriminator,
ctx.message.author.id)
LOGGER.info("Welcome channel set to #%s (%s)",
ctx.message.channel.name,
ctx.message.channel.id)
#[p]welcome default
@_welcome.command(pass_context=True, no_pm=False)
@checks.serverowner() #Only allow server owner to execute the following command.
async def default(self, ctx):
"""RUN FIRST: Set defaults, and enables welcome DM. Will ask for confirmation."""
await self.bot.say("Are you sure you want to revert to default settings? "
"Type \"yes\", otherwise type something else.")
message = await self.bot.wait_for_message(timeout=60,
author=ctx.message.author,
channel=ctx.message.channel)
if message is None:
await self.bot.say(":no_entry: No response received, aborting.")
return
if str.lower(message.content) == "yes":
try:
self.loadSettings()
serverId = ctx.message.author.server.id
self.settings[serverId] = {}
self.settings[serverId][self.keyWelcomeMessage] = DEFAULT_MESSAGE
self.settings[serverId][self.keyWelcomeTitle] = DEFAULT_TITLE
self.settings[serverId][self.keyWelcomeImage] = None
self.settings[serverId][self.keyWelcomeDMEnabled] = True
self.settings[serverId][self.keyWelcomeLogEnabled] = False
self.settings[serverId][self.keyWelcomeLogChannel] = None
self.settings[serverId][self.keyLeaveLogEnabled] = False
self.settings[serverId][self.keyLeaveLogChannel] = None
self.saveSettings()
except Exception as errorMsg: # pylint: disable=broad-except
await self.bot.say(":no_entry: Could not set default settings! "
"Please check the server logs.")
print(errorMsg)
else:
await self.bot.say(":white_check_mark: Default settings applied.")
LOGGER.info("Welcome cog set to its defaults by %s#%s (%s)",
ctx.message.author.name,
ctx.message.author.discriminator,
ctx.message.author.id)
else:
await self.bot.say(":negative_squared_cross_mark: Not setting any "
"default settings.")
#[p]welcome settitle
@_welcome.command(pass_context=True, no_pm=False, name="settitle")
@checks.serverowner() #Only allow server owner to execute the following command.
async def setTitle(self, ctx):
"""Interactively configure the title for the welcome DM."""
await self.bot.say("What would you like the welcome DM message to be?")
title = await self.bot.wait_for_message(timeout=60,
author=ctx.message.author,
channel=ctx.message.channel)
if title is None:
await self.bot.say("No response received, not setting anything!")
return
if len(title.content) > 256:
await self.bot.say("The title is too long!")
return
try:
self.loadSettings()
serverId = ctx.message.author.server.id
if serverId in self.settings:
self.settings[serverId][self.keyWelcomeTitle] = title.content
else:
self.settings[serverId] = {}
self.settings[serverId][self.keyWelcomeTitle] = title.content
self.saveSettings()
except Exception as errorMsg: # pylint: disable=broad-except
await self.bot.say("Could not save settings! Please check server logs!")
print(errorMsg)
else:
await self.bot.say("Title set to:")
await self.bot.say("```" + title.content + "```")
LOGGER.info("Title changed by %s#%s (%s)",
ctx.message.author.name,
ctx.message.author.discriminator,
ctx.message.author)
LOGGER.info(title.content)
#[p]welcome setimage
@_welcome.group(name="setimage", pass_context=True, no_pm=True)
async def setImage(self, ctx, imageUrl: str = None):
"""Sets an image in the embed with a URL. Empty URL results in no image."""
if imageUrl == "":
imageUrl = None
try:
self.loadSettings()
serverId = ctx.message.author.server.id
if serverId in self.settings:
self.settings[serverId][self.keyWelcomeImage] = imageUrl
else:
self.settings[serverId] = {}
self.settings[serverId][self.keyWelcomeImage] = imageUrl
self.saveSettings()
except Exception as errorMsg: # pylint: disable=broad-except
await self.bot.say("Could not save settings! Please check server logs!")
print(errorMsg)
else:
await self.bot.say("Image set to `{}`. Be sure to test it!".format(imageUrl))
LOGGER.info("Image changed by %s#%s (%s)",
ctx.message.author.name,
ctx.message.author.discriminator,
ctx.message.id)
LOGGER.info("Image set to %s",
imageUrl)
#[p]welcome test
@_welcome.command(pass_context=True, no_pm=False)
@checks.serverowner() #Only allow server owner to execute the following command.
async def test(self, ctx):
"""Test the welcome DM by sending a DM to you."""
await self.sendWelcomeMessage(ctx.message.author, test=True)
await self.bot.say("If this server has been configured, you should have received a DM.")
def setup(bot):
"""Add the cog to the bot."""
global LOGGER # pylint: disable=global-statement
checkFolder() #Make sure the data folder exists!
checkFiles() #Make sure we have settings!
customCog = Welcome(bot)
LOGGER = logging.getLogger("red.Welcome")
if LOGGER.level == 0:
# Prevents the LOGGER from being loaded again in case of module reload.
LOGGER.setLevel(logging.INFO)
handler = logging.FileHandler(filename=SAVE_FOLDER+"info.log",
encoding="utf-8",
mode="a")
handler.setFormatter(logging.Formatter("%(asctime)s %(message)s",
datefmt="[%d/%m/%Y %H:%M:%S]"))
LOGGER.addHandler(handler)
bot.add_listener(customCog.sendWelcomeMessage, 'on_member_join')
bot.add_listener(customCog.logServerLeave, 'on_member_remove')
bot.add_cog(customCog)
|
Injabie3/lui-cogs
|
welcome/welcome.py
|
Python
|
gpl-3.0
| 20,055 | 0.005335 |
import pytest
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf.models import AbstractNode, NodeLog
from osf.utils import permissions
from osf.utils.sanitize import strip_html
from osf_tests.factories import (
NodeFactory,
ProjectFactory,
OSFGroupFactory,
RegistrationFactory,
AuthUserFactory,
PrivateLinkFactory,
)
from tests.base import fake
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.mark.django_db
class TestNodeChildrenList:
@pytest.fixture()
def private_project(self, user):
private_project = ProjectFactory()
private_project.add_contributor(
user,
permissions=permissions.WRITE
)
private_project.save()
return private_project
@pytest.fixture()
def component(self, user, private_project):
return NodeFactory(parent=private_project, creator=user)
@pytest.fixture()
def pointer(self):
return ProjectFactory()
@pytest.fixture()
def private_project_url(self, private_project):
return '/{}nodes/{}/children/'.format(API_BASE, private_project._id)
@pytest.fixture()
def public_project(self, user):
return ProjectFactory(is_public=True, creator=user)
@pytest.fixture()
def public_component(self, user, public_project):
return NodeFactory(parent=public_project, creator=user, is_public=True)
@pytest.fixture()
def public_project_url(self, user, public_project):
return '/{}nodes/{}/children/'.format(API_BASE, public_project._id)
@pytest.fixture()
def view_only_link(self, private_project):
view_only_link = PrivateLinkFactory(name='node_view_only_link')
view_only_link.nodes.add(private_project)
view_only_link.save()
return view_only_link
def test_return_public_node_children_list(
self, app, public_component,
public_project_url):
# test_return_public_node_children_list_logged_out
res = app.get(public_project_url)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == public_component._id
# test_return_public_node_children_list_logged_in
non_contrib = AuthUserFactory()
res = app.get(public_project_url, auth=non_contrib.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == public_component._id
def test_return_private_node_children_list(
self, app, user, component, private_project, private_project_url):
# test_return_private_node_children_list_logged_out
res = app.get(private_project_url, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_return_private_node_children_list_logged_in_non_contributor
non_contrib = AuthUserFactory()
res = app.get(
private_project_url,
auth=non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
# test_return_private_node_children_list_logged_in_contributor
res = app.get(private_project_url, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == component._id
# test_return_private_node_children_osf_group_member_admin
group_mem = AuthUserFactory()
group = OSFGroupFactory(creator=group_mem)
private_project.add_osf_group(group, permissions.ADMIN)
res = app.get(private_project_url, auth=group_mem.auth)
assert res.status_code == 200
# Can view node children that you have implict admin permissions
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == component._id
def test_node_children_list_does_not_include_pointers(
self, app, user, component, private_project_url):
res = app.get(private_project_url, auth=user.auth)
assert len(res.json['data']) == 1
def test_node_children_list_does_not_include_unauthorized_projects(
self, app, user, component, private_project, private_project_url):
NodeFactory(parent=private_project)
res = app.get(private_project_url, auth=user.auth)
assert len(res.json['data']) == 1
def test_node_children_list_does_not_include_deleted(
self, app, user, public_project, public_component,
component, public_project_url):
child_project = NodeFactory(parent=public_project, creator=user)
child_project.save()
res = app.get(public_project_url, auth=user.auth)
assert res.status_code == 200
ids = [node['id'] for node in res.json['data']]
assert child_project._id in ids
assert 2 == len(ids)
child_project.is_deleted = True
child_project.save()
res = app.get(public_project_url, auth=user.auth)
assert res.status_code == 200
ids = [node['id'] for node in res.json['data']]
assert child_project._id not in ids
assert 1 == len(ids)
def test_node_children_list_does_not_include_node_links(
self, app, user, public_project, public_component,
public_project_url):
pointed_to = ProjectFactory(is_public=True)
public_project.add_pointer(
pointed_to,
auth=Auth(public_project.creator)
)
res = app.get(public_project_url, auth=user.auth)
ids = [node['id'] for node in res.json['data']]
assert public_component._id in ids # sanity check
assert pointed_to._id not in ids
# Regression test for https://openscience.atlassian.net/browse/EMB-593
# Duplicates returned in child count
def test_node_children_related_counts_duplicate_query_results(self, app, user, public_project,
private_project, public_project_url):
user_2 = AuthUserFactory()
# Adding a child component
child = NodeFactory(parent=public_project, creator=user, is_public=True, category='software')
child.add_contributor(user_2, permissions.WRITE, save=True)
# Adding a grandchild
NodeFactory(parent=child, creator=user, is_public=True)
# Adding a node link
public_project.add_pointer(
private_project,
auth=Auth(public_project.creator)
)
# Assert NodeChildrenList returns one result
res = app.get(public_project_url, auth=user.auth)
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == child._id
project_url = '/{}nodes/{}/?related_counts=children'.format(API_BASE, public_project._id)
res = app.get(project_url, auth=user.auth)
assert res.status_code == 200
# Verifying related_counts match direct children count (grandchildren not included, pointers not included)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
def test_node_children_related_counts(self, app, user, public_project):
parent = ProjectFactory(creator=user, is_public=False)
user_2 = AuthUserFactory()
parent.add_contributor(user_2, permissions.ADMIN)
child = NodeFactory(parent=parent, creator=user_2, is_public=False, category='software')
NodeFactory(parent=child, creator=user_2, is_public=False)
# child has one component. `user` can view due to implict admin perms
component_url = '/{}nodes/{}/children/'.format(API_BASE, child._id, auth=user.auth)
res = app.get(component_url, auth=user.auth)
assert len(res.json['data']) == 1
project_url = '/{}nodes/{}/?related_counts=children'.format(API_BASE, child._id)
res = app.get(project_url, auth=user.auth)
assert res.status_code == 200
# Nodes with implicit admin perms are also included in the count
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
def test_child_counts_permissions(self, app, user, public_project):
NodeFactory(parent=public_project, creator=user)
url = '/{}nodes/{}/?related_counts=children'.format(API_BASE, public_project._id)
user_two = AuthUserFactory()
# Unauthorized
res = app.get(url)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0
# Logged in noncontrib
res = app.get(url, auth=user_two.auth)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0
# Logged in contrib
res = app.get(url, auth=user.auth)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
def test_private_node_children_with_view_only_link(self, user, app, private_project,
component, view_only_link, private_project_url):
# get node related_counts with vol before vol is attached to components
node_url = '/{}nodes/{}/?related_counts=children&view_only={}'.format(API_BASE,
private_project._id, view_only_link.key)
res = app.get(node_url)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0
# view only link is not attached to components
view_only_link_url = '{}?view_only={}'.format(private_project_url, view_only_link.key)
res = app.get(view_only_link_url)
ids = [node['id'] for node in res.json['data']]
assert res.status_code == 200
assert len(ids) == 0
assert component._id not in ids
# view only link is attached to components
view_only_link.nodes.add(component)
res = app.get(view_only_link_url)
ids = [node['id'] for node in res.json['data']]
assert res.status_code == 200
assert component._id in ids
assert 'contributors' in res.json['data'][0]['relationships']
assert 'implicit_contributors' in res.json['data'][0]['relationships']
assert 'bibliographic_contributors' in res.json['data'][0]['relationships']
# get node related_counts with vol once vol is attached to components
res = app.get(node_url)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
# make private vol anonymous
view_only_link.anonymous = True
view_only_link.save()
res = app.get(view_only_link_url)
assert 'contributors' not in res.json['data'][0]['relationships']
assert 'implicit_contributors' not in res.json['data'][0]['relationships']
assert 'bibliographic_contributors' not in res.json['data'][0]['relationships']
# delete vol
view_only_link.is_deleted = True
view_only_link.save()
res = app.get(view_only_link_url, expect_errors=True)
assert res.status_code == 401
@pytest.mark.django_db
class TestNodeChildrenListFiltering:
def test_node_child_filtering(self, app, user):
project = ProjectFactory(creator=user)
title_one, title_two = fake.bs(), fake.bs()
component = NodeFactory(title=title_one, parent=project)
component_two = NodeFactory(title=title_two, parent=project)
url = '/{}nodes/{}/children/?filter[title]={}'.format(
API_BASE,
project._id,
title_one
)
res = app.get(url, auth=user.auth)
ids = [node['id'] for node in res.json['data']]
assert component._id in ids
assert component_two._id not in ids
@pytest.mark.django_db
class TestNodeChildCreate:
@pytest.fixture()
def project(self, user):
return ProjectFactory(creator=user, is_public=True)
@pytest.fixture()
def url(self, project):
return '/{}nodes/{}/children/'.format(API_BASE, project._id)
@pytest.fixture()
def child(self):
return {
'data': {
'type': 'nodes',
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project'
}
}
}
def test_creates_child(self, app, user, project, child, url):
# test_creates_child_logged_out_user
res = app.post_json_api(url, child, expect_errors=True)
assert res.status_code == 401
project.reload()
assert len(project.nodes) == 0
# test_creates_child_logged_in_read_contributor
read_contrib = AuthUserFactory()
project.add_contributor(
read_contrib,
permissions=permissions.READ,
auth=Auth(user), save=True
)
res = app.post_json_api(
url, child, auth=read_contrib.auth,
expect_errors=True
)
assert res.status_code == 403
project.reload()
assert len(project.nodes) == 0
# test_creates_child_logged_in_non_contributor
non_contrib = AuthUserFactory()
res = app.post_json_api(
url, child, auth=non_contrib.auth,
expect_errors=True
)
assert res.status_code == 403
project.reload()
assert len(project.nodes) == 0
# test_creates_child_group_member_read
group_mem = AuthUserFactory()
group = OSFGroupFactory(creator=group_mem)
project.add_osf_group(group, permissions.READ)
res = app.post_json_api(
url, child, auth=group_mem.auth,
expect_errors=True
)
assert res.status_code == 403
project.update_osf_group(group, permissions.WRITE)
res = app.post_json_api(
url, child, auth=group_mem.auth,
expect_errors=True
)
assert res.status_code == 201
# test_creates_child_no_type
child = {
'data': {
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}
}
res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/type'
# test_creates_child_incorrect_type
child = {
'data': {
'type': 'Wrong type.',
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}
}
res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
assert res.status_code == 409
assert res.json['errors'][0]['detail'] == 'This resource has a type of "nodes", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.'
# test_creates_child_properties_not_nested
child = {
'data': {
'attributes': {
'title': 'child',
'description': 'this is a child project'
},
'category': 'project'
}
}
res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/type'
assert res.json['errors'][1]['detail'] == 'This field is required.'
assert res.json['errors'][1]['source']['pointer'] == '/data/attributes/category'
def test_creates_child_logged_in_write_contributor(
self, app, user, project, child, url):
write_contrib = AuthUserFactory()
project.add_contributor(
write_contrib,
permissions=permissions.WRITE,
auth=Auth(user),
save=True)
res = app.post_json_api(url, child, auth=write_contrib.auth)
assert res.status_code == 201
assert res.json['data']['attributes']['title'] == child['data']['attributes']['title']
assert res.json['data']['attributes']['description'] == child['data']['attributes']['description']
assert res.json['data']['attributes']['category'] == child['data']['attributes']['category']
project.reload()
child_id = res.json['data']['id']
assert child_id == project.nodes[0]._id
assert AbstractNode.load(child_id).logs.latest(
).action == NodeLog.PROJECT_CREATED
def test_creates_child_logged_in_owner(
self, app, user, project, child, url):
res = app.post_json_api(url, child, auth=user.auth)
assert res.status_code == 201
assert res.json['data']['attributes']['title'] == child['data']['attributes']['title']
assert res.json['data']['attributes']['description'] == child['data']['attributes']['description']
assert res.json['data']['attributes']['category'] == child['data']['attributes']['category']
project.reload()
assert res.json['data']['id'] == project.nodes[0]._id
assert project.nodes[0].logs.latest().action == NodeLog.PROJECT_CREATED
def test_creates_child_creates_child_and_sanitizes_html_logged_in_owner(
self, app, user, project, url):
title = '<em>Reasonable</em> <strong>Project</strong>'
description = 'An <script>alert("even reasonabler")</script> child'
res = app.post_json_api(url, {
'data': {
'type': 'nodes',
'attributes': {
'title': title,
'description': description,
'category': 'project',
'public': True
}
}
}, auth=user.auth)
child_id = res.json['data']['id']
assert res.status_code == 201
url = '/{}nodes/{}/'.format(API_BASE, child_id)
res = app.get(url, auth=user.auth)
assert res.json['data']['attributes']['title'] == strip_html(title)
assert res.json['data']['attributes']['description'] == strip_html(
description)
assert res.json['data']['attributes']['category'] == 'project'
project.reload()
child_id = res.json['data']['id']
assert child_id == project.nodes[0]._id
assert AbstractNode.load(child_id).logs.latest(
).action == NodeLog.PROJECT_CREATED
def test_cannot_create_child_on_a_registration(self, app, user, project):
registration = RegistrationFactory(project=project, creator=user)
url = '/{}nodes/{}/children/'.format(API_BASE, registration._id)
res = app.post_json_api(url, {
'data': {
'type': 'nodes',
'attributes': {
'title': fake.catch_phrase(),
'description': fake.bs(),
'category': 'project',
'public': True,
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 404
@pytest.mark.django_db
class TestNodeChildrenBulkCreate:
@pytest.fixture()
def project(self, user):
return ProjectFactory(creator=user, is_public=True)
@pytest.fixture()
def url(self, project):
return '/{}nodes/{}/children/'.format(API_BASE, project._id)
@pytest.fixture()
def child_one(self):
return {
'type': 'nodes',
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project'
}
}
@pytest.fixture()
def child_two(self):
return {
'type': 'nodes',
'attributes': {
'title': 'second child',
'description': 'this is my hypothesis',
'category': 'hypothesis'
}
}
def test_bulk_children_create_blank_request(self, app, user, url):
res = app.post_json_api(
url, auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 400
def test_bulk_creates_children_limits(self, app, user, child_one, url):
res = app.post_json_api(
url, {'data': [child_one] * 101},
auth=user.auth, expect_errors=True, bulk=True
)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.'
assert res.json['errors'][0]['source']['pointer'] == '/data'
def test_bulk_creates_children_auth_errors(
self, app, user, project, child_one, child_two, url):
# test_bulk_creates_children_logged_out_user
res = app.post_json_api(
url,
{'data': [child_one, child_two]},
expect_errors=True, bulk=True
)
assert res.status_code == 401
project.reload()
assert len(project.nodes) == 0
# test_bulk_creates_children_logged_in_read_contributor
read_contrib = AuthUserFactory()
project.add_contributor(
read_contrib,
permissions=permissions.READ,
auth=Auth(user),
save=True)
res = app.post_json_api(
url,
{'data': [child_one, child_two]},
auth=read_contrib.auth,
expect_errors=True, bulk=True)
assert res.status_code == 403
project.reload()
assert len(project.nodes) == 0
# test_bulk_creates_children_logged_in_non_contributor
non_contrib = AuthUserFactory()
res = app.post_json_api(
url,
{'data': [child_one, child_two]},
auth=non_contrib.auth,
expect_errors=True, bulk=True)
assert res.status_code == 403
project.reload()
assert len(project.nodes) == 0
def test_bulk_creates_children_logged_in_owner(
self, app, user, project, child_one, child_two, url):
res = app.post_json_api(
url,
{'data': [child_one, child_two]},
auth=user.auth, bulk=True)
assert res.status_code == 201
assert res.json['data'][0]['attributes']['title'] == child_one['attributes']['title']
assert res.json['data'][0]['attributes']['description'] == child_one['attributes']['description']
assert res.json['data'][0]['attributes']['category'] == child_one['attributes']['category']
assert res.json['data'][1]['attributes']['title'] == child_two['attributes']['title']
assert res.json['data'][1]['attributes']['description'] == child_two['attributes']['description']
assert res.json['data'][1]['attributes']['category'] == child_two['attributes']['category']
project.reload()
nodes = project.nodes
assert res.json['data'][0]['id'] == nodes[0]._id
assert res.json['data'][1]['id'] == nodes[1]._id
assert nodes[0].logs.latest().action == NodeLog.PROJECT_CREATED
assert nodes[1].logs.latest().action == NodeLog.PROJECT_CREATED
def test_bulk_creates_children_child_logged_in_write_contributor(
self, app, user, project, child_one, child_two, url):
write_contrib = AuthUserFactory()
project.add_contributor(
write_contrib,
permissions=permissions.WRITE,
auth=Auth(user),
save=True)
res = app.post_json_api(
url,
{'data': [child_one, child_two]},
auth=write_contrib.auth, bulk=True)
assert res.status_code == 201
assert res.json['data'][0]['attributes']['title'] == child_one['attributes']['title']
assert res.json['data'][0]['attributes']['description'] == child_one['attributes']['description']
assert res.json['data'][0]['attributes']['category'] == child_one['attributes']['category']
assert res.json['data'][1]['attributes']['title'] == child_two['attributes']['title']
assert res.json['data'][1]['attributes']['description'] == child_two['attributes']['description']
assert res.json['data'][1]['attributes']['category'] == child_two['attributes']['category']
project.reload()
child_id = res.json['data'][0]['id']
child_two_id = res.json['data'][1]['id']
nodes = project.nodes
assert child_id == nodes[0]._id
assert child_two_id == nodes[1]._id
assert AbstractNode.load(child_id).logs.latest(
).action == NodeLog.PROJECT_CREATED
assert nodes[1].logs.latest().action == NodeLog.PROJECT_CREATED
def test_bulk_creates_children_and_sanitizes_html_logged_in_owner(
self, app, user, project, url):
title = '<em>Reasoning</em> <strong>Aboot Projects</strong>'
description = 'A <script>alert("super reasonable")</script> child'
res = app.post_json_api(url, {
'data': [{
'type': 'nodes',
'attributes': {
'title': title,
'description': description,
'category': 'project',
'public': True
}
}]
}, auth=user.auth, bulk=True)
child_id = res.json['data'][0]['id']
assert res.status_code == 201
url = '/{}nodes/{}/'.format(API_BASE, child_id)
res = app.get(url, auth=user.auth)
assert res.json['data']['attributes']['title'] == strip_html(title)
assert res.json['data']['attributes']['description'] == strip_html(
description)
assert res.json['data']['attributes']['category'] == 'project'
project.reload()
child_id = res.json['data']['id']
assert child_id == project.nodes[0]._id
assert AbstractNode.load(child_id).logs.latest(
).action == NodeLog.PROJECT_CREATED
def test_cannot_bulk_create_children_on_a_registration(
self, app, user, project, child_two):
registration = RegistrationFactory(project=project, creator=user)
url = '/{}nodes/{}/children/'.format(API_BASE, registration._id)
res = app.post_json_api(url, {
'data': [child_two, {
'type': 'nodes',
'attributes': {
'title': fake.catch_phrase(),
'description': fake.bs(),
'category': 'project',
'public': True,
}
}]
}, auth=user.auth, expect_errors=True, bulk=True)
assert res.status_code == 404
project.reload()
assert len(project.nodes) == 0
def test_bulk_creates_children_payload_errors(
self, app, user, project, child_two, url):
# def test_bulk_creates_children_no_type(self, app, user, project,
# child_two, url):
child = {
'data': [child_two, {
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}]
}
res = app.post_json_api(
url, child, auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/1/type'
project.reload()
assert len(project.nodes) == 0
# def test_bulk_creates_children_incorrect_type(self, app, user, project,
# child_two, url):
child = {
'data': [child_two, {
'type': 'Wrong type.',
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}]
}
res = app.post_json_api(
url, child, auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 409
assert res.json['errors'][0]['detail'] == 'This resource has a type of "nodes", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.'
project.reload()
assert len(project.nodes) == 0
# def test_bulk_creates_children_properties_not_nested(self, app, user,
# project, child_two, url):
child = {
'data': [child_two, {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}]
}
res = app.post_json_api(
url, child, auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/1/type'
assert res.json['errors'][1]['detail'] == 'This field is required.'
assert res.json['errors'][1]['source']['pointer'] == '/data/1/attributes/title'
assert res.json['errors'][2]['detail'] == 'This field is required.'
assert res.json['errors'][2]['source']['pointer'] == '/data/1/attributes/category'
project.reload()
assert len(project.nodes) == 0
|
Johnetordoff/osf.io
|
api_tests/nodes/views/test_node_children_list.py
|
Python
|
apache-2.0
| 30,028 | 0.001599 |
"""FunctionInterval module: contains the FunctionInterval class"""
__all__ = ['FunctionInterval', 'EventInterval', 'AcceptInterval', 'IgnoreInterval', 'ParentInterval', 'WrtParentInterval', 'PosInterval', 'HprInterval', 'ScaleInterval', 'PosHprInterval', 'HprScaleInterval', 'PosHprScaleInterval', 'Func', 'Wait']
from panda3d.core import *
from panda3d.direct import *
from direct.showbase.MessengerGlobal import *
from direct.directnotify.DirectNotifyGlobal import directNotify
import Interval
#############################################################
### ###
### See examples of function intervals in IntervalTest.py ###
### ###
#############################################################
class FunctionInterval(Interval.Interval):
# Name counter
functionIntervalNum = 1
# Keep a list of function intervals currently in memory for
# Control-C-Control-V redefining. These are just weakrefs so they
# should not cause any leaks.
if __debug__:
import weakref
FunctionIntervals = weakref.WeakKeyDictionary()
    @classmethod
    def replaceMethod(cls, oldFunction, newFunction):
        import types
        count = 0
        for ival in cls.FunctionIntervals:
            # print 'testing: ', ival.function, oldFunction
            # Note: you can only replace methods currently
            if type(ival.function) == types.MethodType:
                if (ival.function.im_func == oldFunction):
                    # print 'found: ', ival.function, oldFunction
                    ival.function = types.MethodType(newFunction,
                                                     ival.function.im_self,
                                                     ival.function.im_class)
                    count += 1
        return count
# create FunctionInterval DirectNotify category
notify = directNotify.newCategory('FunctionInterval')
# Class methods
def __init__(self, function, **kw):
"""__init__(function, name = None, openEnded = 1, extraArgs = [])
"""
name = kw.pop('name', None)
openEnded = kw.pop('openEnded', 1)
extraArgs = kw.pop('extraArgs', [])
# Record instance variables
self.function = function
# Create a unique name for the interval if necessary
if name is None:
name = self.makeUniqueName(function)
assert isinstance(name, str)
# Record any arguments
self.extraArgs = extraArgs
self.kw = kw
# Initialize superclass
# Set openEnded true if privInitialize after end time cause interval
# function to be called. If false, privInitialize calls have no effect
# Event, Accept, Ignore intervals default to openEnded = 0
# Parent, Pos, Hpr, etc intervals default to openEnded = 1
Interval.Interval.__init__(self, name, duration = 0.0, openEnded = openEnded)
# For rebinding, let's remember this function interval on the class
if __debug__:
self.FunctionIntervals[self] = 1
@staticmethod
def makeUniqueName(func, suffix = ''):
name = 'Func-%s-%d' % (getattr(func, '__name__', str(func)), FunctionInterval.functionIntervalNum)
FunctionInterval.functionIntervalNum += 1
if suffix:
name = '%s-%s' % (name, str(suffix))
return name
def privInstant(self):
# Evaluate the function
self.function(*self.extraArgs, **self.kw)
# Print debug information
self.notify.debug(
'updateFunc() - %s: executing Function' % self.name)
### FunctionInterval subclass for throwing events ###
class EventInterval(FunctionInterval):
# Initialization
def __init__(self, event, sentArgs=[]):
"""__init__(event, sentArgs)
"""
def sendFunc(event = event, sentArgs = sentArgs):
messenger.send(event, sentArgs)
# Create function interval
FunctionInterval.__init__(self, sendFunc, name = event)
### FunctionInterval subclass for accepting hooks ###
class AcceptInterval(FunctionInterval):
# Initialization
def __init__(self, dirObj, event, function, name = None):
"""__init__(dirObj, event, function, name)
"""
def acceptFunc(dirObj = dirObj, event = event, function = function):
dirObj.accept(event, function)
# Determine name
if (name == None):
name = 'Accept-' + event
# Create function interval
FunctionInterval.__init__(self, acceptFunc, name = name)
### FunctionInterval subclass for ignoring events ###
class IgnoreInterval(FunctionInterval):
# Initialization
def __init__(self, dirObj, event, name = None):
"""__init__(dirObj, event, name)
"""
def ignoreFunc(dirObj = dirObj, event = event):
dirObj.ignore(event)
# Determine name
if (name == None):
name = 'Ignore-' + event
# Create function interval
FunctionInterval.__init__(self, ignoreFunc, name = name)
### Function Interval subclass for adjusting scene graph hierarchy ###
class ParentInterval(FunctionInterval):
# ParentInterval counter
parentIntervalNum = 1
# Initialization
def __init__(self, nodePath, parent, name = None):
"""__init__(nodePath, parent, name)
"""
def reparentFunc(nodePath = nodePath, parent = parent):
nodePath.reparentTo(parent)
# Determine name
if (name == None):
name = 'ParentInterval-%d' % ParentInterval.parentIntervalNum
ParentInterval.parentIntervalNum += 1
# Create function interval
FunctionInterval.__init__(self, reparentFunc, name = name)
### Function Interval subclass for adjusting scene graph hierarchy ###
class WrtParentInterval(FunctionInterval):
# WrtParentInterval counter
wrtParentIntervalNum = 1
# Initialization
def __init__(self, nodePath, parent, name = None):
"""__init__(nodePath, parent, name)
"""
def wrtReparentFunc(nodePath = nodePath, parent = parent):
nodePath.wrtReparentTo(parent)
# Determine name
if (name == None):
name = ('WrtParentInterval-%d' %
WrtParentInterval.wrtParentIntervalNum)
WrtParentInterval.wrtParentIntervalNum += 1
# Create function interval
FunctionInterval.__init__(self, wrtReparentFunc, name = name)
### Function Interval subclasses for instantaneous pose changes ###
class PosInterval(FunctionInterval):
# PosInterval counter
posIntervalNum = 1
# Initialization
def __init__(self, nodePath, pos, duration = 0.0,
name = None, other = None):
"""__init__(nodePath, pos, duration, name)
"""
# Create function
def posFunc(np = nodePath, pos = pos, other = other):
if other:
np.setPos(other, pos)
else:
np.setPos(pos)
# Determine name
if (name == None):
name = 'PosInterval-%d' % PosInterval.posIntervalNum
PosInterval.posIntervalNum += 1
# Create function interval
FunctionInterval.__init__(self, posFunc, name = name)
class HprInterval(FunctionInterval):
# HprInterval counter
hprIntervalNum = 1
# Initialization
def __init__(self, nodePath, hpr, duration = 0.0,
name = None, other = None):
"""__init__(nodePath, hpr, duration, name)
"""
# Create function
def hprFunc(np = nodePath, hpr = hpr, other = other):
if other:
np.setHpr(other, hpr)
else:
np.setHpr(hpr)
# Determine name
if (name == None):
name = 'HprInterval-%d' % HprInterval.hprIntervalNum
HprInterval.hprIntervalNum += 1
# Create function interval
FunctionInterval.__init__(self, hprFunc, name = name)
class ScaleInterval(FunctionInterval):
# ScaleInterval counter
scaleIntervalNum = 1
# Initialization
def __init__(self, nodePath, scale, duration = 0.0,
name = None, other = None):
"""__init__(nodePath, scale, duration, name)
"""
# Create function
def scaleFunc(np = nodePath, scale = scale, other = other):
if other:
np.setScale(other, scale)
else:
np.setScale(scale)
# Determine name
if (name == None):
name = 'ScaleInterval-%d' % ScaleInterval.scaleIntervalNum
ScaleInterval.scaleIntervalNum += 1
# Create function interval
FunctionInterval.__init__(self, scaleFunc, name = name)
class PosHprInterval(FunctionInterval):
# PosHprInterval counter
posHprIntervalNum = 1
# Initialization
def __init__(self, nodePath, pos, hpr, duration = 0.0,
name = None, other = None):
"""__init__(nodePath, pos, hpr, duration, name)
"""
# Create function
def posHprFunc(np = nodePath, pos = pos, hpr = hpr, other = other):
if other:
np.setPosHpr(other, pos, hpr)
else:
np.setPosHpr(pos, hpr)
# Determine name
if (name == None):
name = 'PosHprInterval-%d' % PosHprInterval.posHprIntervalNum
PosHprInterval.posHprIntervalNum += 1
# Create function interval
FunctionInterval.__init__(self, posHprFunc, name = name)
class HprScaleInterval(FunctionInterval):
# HprScaleInterval counter
hprScaleIntervalNum = 1
# Initialization
def __init__(self, nodePath, hpr, scale, duration = 0.0,
name = None, other = None):
"""__init__(nodePath, hpr, scale, duration, other, name)
"""
# Create function
def hprScaleFunc(np=nodePath, hpr=hpr, scale=scale,
other = other):
if other:
np.setHprScale(other, hpr, scale)
else:
np.setHprScale(hpr, scale)
# Determine name
if (name == None):
name = ('HprScale-%d' %
HprScaleInterval.hprScaleIntervalNum)
HprScaleInterval.hprScaleIntervalNum += 1
# Create function interval
FunctionInterval.__init__(self, hprScaleFunc, name = name)
class PosHprScaleInterval(FunctionInterval):
# PosHprScaleInterval counter
posHprScaleIntervalNum = 1
# Initialization
def __init__(self, nodePath, pos, hpr, scale, duration = 0.0,
name = None, other = None):
"""__init__(nodePath, pos, hpr, scale, duration, other, name)
"""
# Create function
def posHprScaleFunc(np=nodePath, pos=pos, hpr=hpr, scale=scale,
other = other):
if other:
np.setPosHprScale(other, pos, hpr, scale)
else:
np.setPosHprScale(pos, hpr, scale)
# Determine name
if (name == None):
name = ('PosHprScale-%d' %
PosHprScaleInterval.posHprScaleIntervalNum)
PosHprScaleInterval.posHprScaleIntervalNum += 1
# Create function interval
FunctionInterval.__init__(self, posHprScaleFunc, name = name)
class Func(FunctionInterval):
def __init__(self, *args, **kw):
function = args[0]
assert hasattr(function, '__call__')
extraArgs = args[1:]
kw['extraArgs'] = extraArgs
FunctionInterval.__init__(self, function, **kw)
class Wait(WaitInterval):
def __init__(self, duration):
WaitInterval.__init__(self, duration)
"""
SAMPLE CODE
from IntervalGlobal import *
i1 = Func(base.transitions.fadeOut)
i2 = Func(base.transitions.fadeIn)
def caughtIt():
print 'Caught here-is-an-event'
class DummyAcceptor(DirectObject):
pass
da = DummyAcceptor()
i3 = Func(da.accept, 'here-is-an-event', caughtIt)
i4 = Func(messenger.send, 'here-is-an-event')
i5 = Func(da.ignore, 'here-is-an-event')
# Using a function
def printDone():
print 'done'
i6 = Func(printDone)
# Create track
t1 = Sequence([
# Fade out
(0.0, i1),
# Fade in
(2.0, i2),
# Accept event
(4.0, i3),
# Throw it,
(5.0, i4),
# Ignore event
(6.0, i5),
# Throw event again and see if ignore worked
(7.0, i4),
# Print done
(8.0, i6)], name = 'demo')
# Play track
t1.play()
### Specifying interval start times during track construction ###
# Interval start time can be specified relative to three different points:
# PREVIOUS_END
# PREVIOUS_START
# TRACK_START
startTime = 0.0
def printStart():
global startTime
startTime = globalClock.getFrameTime()
print 'Start'
def printPreviousStart():
global startTime
currTime = globalClock.getFrameTime()
    print 'PREVIOUS_START %0.2f' % (currTime - startTime)
def printPreviousEnd():
global startTime
currTime = globalClock.getFrameTime()
print 'PREVIOUS_END %0.2f' % (currTime - startTime)
def printTrackStart():
global startTime
currTime = globalClock.getFrameTime()
print 'TRACK_START %0.2f' % (currTime - startTime)
i1 = Func(printStart)
# Just to take time
i2 = LerpPosInterval(camera, 2.0, Point3(0, 10, 5))
# This will be relative to end of camera move
i3 = FunctionInterval(printPreviousEnd)
# Just to take time
i4 = LerpPosInterval(camera, 2.0, Point3(0, 0, 5))
# This will be relative to the start of the camera move
i5 = FunctionInterval(printPreviousStart)
# This will be relative to track start
i6 = FunctionInterval(printTrackStart)
# Create the track, if you don't specify offset type in tuple it defaults to
# relative to TRACK_START (first entry below)
t2 = Track([(0.0, i1), # i1 start at t = 0, duration = 0.0
(1.0, i2, TRACK_START), # i2 start at t = 1, duration = 2.0
(2.0, i3, PREVIOUS_END), # i3 start at t = 5, duration = 0.0
(1.0, i4, PREVIOUS_END), # i4 start at t = 6, duration = 2.0
(3.0, i5, PREVIOUS_START), # i5 start at t = 9, duration = 0.0
(10.0, i6, TRACK_START)], # i6 start at t = 10, duration = 0.0
name = 'startTimeDemo')
t2.play()
smiley = loader.loadModel('models/misc/smiley')
from direct.actor import Actor
donald = Actor.Actor()
donald.loadModel("phase_6/models/char/donald-wheel-1000")
donald.loadAnims({"steer":"phase_6/models/char/donald-wheel-wheel"})
donald.reparentTo(render)
seq = Sequence(Func(donald.setPos, 0, 0, 0),
donald.actorInterval('steer', duration=1.0),
donald.posInterval(1, Point3(0, 0, 1)),
Parallel(donald.actorInterval('steer', duration=1.0),
donald.posInterval(1, Point3(0, 0, 0)),
),
Wait(1.0),
Func(base.toggleWireframe),
Wait(1.0),
Parallel(donald.actorInterval('steer', duration=1.0),
donald.posInterval(1, Point3(0, 0, -1)),
Sequence(donald.hprInterval(1, Vec3(180, 0, 0)),
donald.hprInterval(1, Vec3(0, 0, 0)),
),
),
Func(base.toggleWireframe),
Func(messenger.send, 'hello'),
)
"""
|
hj3938/panda3d
|
direct/src/interval/FunctionInterval.py
|
Python
|
bsd-3-clause
| 15,597 | 0.011092 |
from kivy.config import Config
from kivy.config import ConfigParser
import pentai.base.logger as log
import os
def config_instance():
return _config
def create_config_instance(ini_file, user_path):
global _config
ini_path = os.path.join(user_path, ini_file)
    if ini_file not in os.listdir(user_path):
log.info("Writing initial ini file %s" % ini_path)
import shutil
shutil.copy(ini_file, ini_path)
else:
log.info("Loading ini file from %s" % ini_path)
_config = ConfigParser()
_config.read(ini_path)
log.info("Updating ini file from %s" % ini_file)
_config.update_config(ini_file)
# Don't need to write it back until something is changed.
return _config
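# Illustrative usage sketch (an assumption, not part of the original module;
# the ini file name, user path, and section/key names below are hypothetical):
#
#     cfg = create_config_instance('pentai.ini', '/home/user/.pentai')
#     theme = config_instance().get('display', 'theme')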
|
cropleyb/pentai
|
pentai/gui/config.py
|
Python
|
mit
| 737 | 0.004071 |
#!/usr/bin/env python3
#
# Copyright (C) 2013 - Tony Chyi <tonychee1989@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
from PyQt5 import QtWidgets, QtCore, QtGui
import json
import urllib.request
import sys
class Timer(QtCore.QThread):
"""Run QTimer in another thread."""
trigger = QtCore.pyqtSignal(int, dict)
    def __init__(self, parent=None):
        QtCore.QThread.__init__(self, parent)
        self.thread_no = 1  # default; setup() may override
        self.interval = 0
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.tc)
def setup(self, thread_no=1, interval=0):
self.thread_no = thread_no
self.interval = interval
def run(self):
self.timer.start(self.interval)
@QtCore.pyqtSlot()
def tc(self):
try:
val = self.getValue()
self.trigger.emit(self.thread_no, val)
except TypeError as err:
print('\033[31;1mERR:\033[0m', err)
def getValue(self):
"""This is used for get json from specified address."""
url = "https://data.btcchina.com/data/ticker"
try:
p_conn = urllib.request.urlopen(url)
b = p_conn.read()
p_conn.close()
jso = json.loads(b.decode("utf8"))
return jso["ticker"]
        except Exception:
            # Network or JSON errors: report no data for this tick.
            return None
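    # For reference, the ticker payload shape this code assumes, inferred from
    # the keys read in Window.setLabel below (the exact schema is an
    # assumption about the exchange API):
    #
    #     {"ticker": {"high": "...", "low": "...", "buy": "...",
    #                 "sell": "...", "last": "..."}}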
class Window(QtWidgets.QWidget):
def __init__(self):
self.TITLE = "BtcChina实时报价"
self.valPrev = 0
QtWidgets.QWidget.__init__(self)
self.setWindowTitle(self.TITLE)
self.move(100, 200)
self.setMinimumSize(500, 500)
self.setMaximumSize(500, 500)
# Get ready for widget
self.label1 = QtWidgets.QLabel("Loading...")
self.label1.setStyleSheet("font-size:50px")
self.label2 = QtWidgets.QLabel("Loading...")
self.label2.setStyleSheet("font-size:12px")
self.label2.setMaximumHeight(60)
self.label2.setMinimumHeight(60)
self.graph = Graphs()
# Set Layout
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(self.label1)
hbox.addStretch(1)
hbox.addWidget(self.label2)
vbox = QtWidgets.QVBoxLayout()
vbox.addLayout(hbox)
vbox.addWidget(self.graph)
self.setLayout(vbox)
# Start Get Data
timer = Timer(self)
timer.trigger.connect(self.setLabel)
timer.setup(interval=10000)
timer.start()
@QtCore.pyqtSlot(int, dict)
def setLabel(self, thread_no, val):
try:
self.label1.setText("¥{0}".format(val["last"]))
self.label2.setText("High:\t¥{0}\nLow:\t¥{1}\nBuy:\t¥{2}\nSell:\t¥{3}".format(val["high"], val["low"], val["buy"], val["sell"]))
self.graph.setPeak(val["high"], val["low"])
self.graph.addPoint(val["last"])
if float(val["last"]) > self.valPrev:
self.label1.setStyleSheet("font-size:50px;color:red") # WOW! Bull market!
elif float(val["last"]) < self.valPrev:
self.label1.setStyleSheet("font-size:50px;color:green") # Damn bear market!
self.setWindowTitle("¥{0}|{1}".format(val["last"], self.TITLE))
self.valPrev = float(val["last"])
        except Exception:
            # Ignore malformed or missing ticker data; keep the previous display.
            pass
class Graphs(QtWidgets.QWidget):
"""A costomized controller, to show graph on the window."""
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.flagFirst = True
self.setMinimumSize(300, 300)
self.recentData = [] # To draw lines, a list is needed
self.max_ = 10000
self.min_ = 0
self.valuePrev = self.height()
self.mousePosit = QtCore.QPoint(0, 0)
self.label1 = QtWidgets.QLabel("10k", self)
self.label1.move(0, self.height() * 0.03)
self.label2 = QtWidgets.QLabel("0", self)
self.label2.move(0, self.height() * 0.83)
self.setStep(10)
def paintEvent(self, event):
painter = QtGui.QPainter()
painter.begin(self)
self.drawGird(event, painter)
self.drawFrame(event, painter)
self.drawMouse(event, painter)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
self.draw(event, painter)
painter.end()
def draw(self, event, painter):
"""Draw data line on widget."""
pen = QtGui.QPen(QtGui.QColor(0, 0, 0), 1, QtCore.Qt.SolidLine)
painter.setPen(pen)
xPrev = self.width() * 0.10
xCur = self.width() * 0.10
for value in self.recentData:
xCur += self.step
painter.drawLine(xPrev, self.valuePrev, xCur, value)
self.valuePrev = value
xPrev = xCur
def drawFrame(self, event, painter):
"""Draw the border of chart."""
painter.setPen(QtGui.QColor(0, 0, 0))
painter.drawRect(self.width() * 0.10, self.height() * 0.05, self.width() * 0.90, self.height() * 0.95)
def drawGird(self, event, painter):
"""Draw gird on chart"""
painter.setPen(QtGui.QColor(192, 192, 192))
for v in range(2, 100):
painter.drawLine(self.width() * 0.05 * v, self.height() * 0.05, self.width() * 0.05 * v, self.height())
for h in range(1, 100):
painter.drawLine(self.width() * 0.10, self.height() * 0.05 * h, self.width(), self.height() * 0.05 * h)
def drawMouse(self, event, painter):
if self.mousePosit in QtCore.QRect(self.width() * 0.1, self.height() * 0.05, self.width() * 0.9, self.height() * 0.95):
painter.setPen(QtGui.QColor(255, 0, 255))
painter.drawLine(self.mousePosit.x(), self.height() * 0.05, self.mousePosit.x(), self.height())
painter.drawLine(self.width() * 0.10, self.mousePosit.y(), self.width(), self.mousePosit.y())
price = float((1 - (self.mousePosit.y() - self.height() * 0.05) / (self.height() * 0.95)) * (self.max_ - self.min_) + self.min_)
painter.setPen(QtGui.QColor(0, 0, 255))
painter.drawText(QtCore.QPoint(self.width() * 0.1, self.mousePosit.y()), format(price, '.2f'))
def addPoint(self, value):
"""Append a data to data list, for drawing lines."""
value = float(value)
valueCur = int((1.0 - (value - self.min_) / (self.max_ - self.min_)) * self.height() * 0.95 + self.height() * 0.05)
self.recentData.append(valueCur)
if len(self.recentData) >= self.posit:
            del self.recentData[0]  # drop the oldest point so the chart appears to scroll
self.update()
def setPeak(self, max_, min_):
"""Set the max/min value of the chart."""
self.max_ = float(max_)
self.min_ = float(min_)
self.label1.setText(max_)
self.label1.adjustSize()
self.label2.setText(min_)
self.label2.adjustSize()
self.update()
def setStep(self, step):
"""Set the length of X to a line."""
step = int(step)
self.step = step
self.posit = len(range(int(self.width() * 0.10), int(self.width() * 0.75), step))
def mouseMoveEvent(self, event):
self.mousePosit = event.pos()
self.update()
def main():
app = QtWidgets.QApplication(sys.argv)
win = Window()
win.show()
app.exec_()
if __name__ == "__main__":
main()
|
tonychee7000/BtcChinaRT
|
btc.py
|
Python
|
gpl-3.0
| 7,964 | 0.002392 |
from json import dumps # pragma: no cover
from sqlalchemy.orm import class_mapper # pragma: no cover
from app.models import User, Group # pragma: no cover
def serialize(obj, columns):
    # return the requested column values of a model instance as a dict
    return dict((c, getattr(obj, c)) for c in columns)
def queryAllToJson(model, conditions):
    # serialize every row of `model` matching `conditions` into a JSON array
    columns = [c.key for c in class_mapper(model).columns]
serialized_objs = [
serialize(obj,columns)
for obj in model.query.filter_by(**conditions)
]
return dumps(serialized_objs)
def objectToJson(obj):
columns = [c.key for c in class_mapper(obj.__class__).columns]
serialized_obj = serialize(obj, columns)
return dumps(serialized_obj)
def getUserId(username):
user = User.query.filter_by(username=username).first()
if user is None:
raise Exception('username %s not found in database' % username)
else:
return user.id
def getGroupId(groupname):
group = Group.query.filter_by(groupname=groupname).first()
if group is None:
raise Exception('groupname %s not found in database' % groupname)
else:
return group.id
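# Illustrative usage sketch (an assumption, not part of the original module;
# the filter field value and id are hypothetical):
#
#     json_users = queryAllToJson(User, {'username': 'alice'})
#     json_group = objectToJson(Group.query.get(1))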
|
omarayad1/cantkeepup
|
app/core/helpers.py
|
Python
|
mit
| 1,103 | 0.029918 |
class ocho:
    """Read a sentence from the user and print it back."""
    def __init__(self):
        self.cadena = ''
    def getString(self):
        self.cadena = raw_input("Your desires are orders to me: ")
    def printString(self):
        print "Here's your sentence: {cadena}".format(cadena=self.cadena)
prompter = ocho()  # avoid shadowing the built-in oct()
prompter.getString()
prompter.printString()
|
dcabalas/UNI
|
SN/Python/ocho.py
|
Python
|
gpl-3.0
| 301 | 0.013289 |
from django.urls import reverse
from oppia.test import OppiaTestCase
class CompletionRatesViewTest(OppiaTestCase):
fixtures = ['tests/test_user.json',
'tests/test_oppia.json',
'tests/test_quiz.json',
'tests/test_permissions.json',
'tests/test_cohort.json',
'tests/test_course_permissions.json',
'tests/test_usercoursesummary.json']
def setUp(self):
super(CompletionRatesViewTest, self).setUp()
self.allowed_users = [self.admin_user, self.staff_user]
self.disallowed_users = [self.teacher_user, self.normal_user]
def test_view_completion_rates(self):
template = 'reports/completion_rates.html'
url = reverse('reports:completion_rates')
for allowed_user in self.allowed_users:
self.client.force_login(user=allowed_user)
response = self.client.get(url)
self.assertTemplateUsed(response, template)
self.assertEqual(response.status_code, 200)
for disallowed_user in self.disallowed_users:
self.client.force_login(user=disallowed_user)
response = self.client.get(url)
self.assertRedirects(response,
'/admin/login/?next=' + url,
302,
200)
|
DigitalCampus/django-oppia
|
tests/reports/views/test_completion_rates.py
|
Python
|
gpl-3.0
| 1,385 | 0 |
import logging
log = logging.getLogger(__name__)
try:
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.layers.normalization import BatchNormalization
    from keras.layers.advanced_activations import PReLU, LeakyReLU
    from keras.optimizers import Adagrad, Adadelta, RMSprop, Adam
    from keras.utils import to_categorical
    import_keras = True
except Exception:
    import_keras = False
    log.info('could not import keras. Neural networks will not be used')
def keras_create_model(params, problem_type):
# creates a neural net model with params definition
log.info('creating NN structure')
model = Sequential()
for l in range(int(params['number_layers'])):
if l == 0:
model.add(Dense(units=params['units'], input_dim=params['input_dim']))
else:
model.add(Dense(units=params['units']))
model.add(Activation(params['activation']))
if params['batch_normalization']:
model.add(BatchNormalization())
model.add(Dropout(params['dropout']))
model.add(Dense(params['output_dim']))
if problem_type == 'classification':
model.add(Activation('sigmoid'))
keras_compile_model(model, params, problem_type)
return model
def keras_compile_model(model, params, problem_type):
# compile the model (usefull to reset weights also)
log.info('compiling NN model')
if params['optimizer'] == 'Adagrad':
optimizer = Adagrad(lr=params['learning_rate'])
elif params['optimizer'] == 'Adadelta':
optimizer = Adadelta(lr=params['learning_rate'])
elif params['optimizer'] == 'Adam':
optimizer = Adam(lr=params['learning_rate'])
else:
optimizer = RMSprop(lr=params['learning_rate'])
if problem_type == 'regression':
loss = 'mse'
elif params['output_dim'] == 2:
loss = 'binary_crossentropy'
else:
loss = 'categorical_crossentropy'
model.compile(loss=loss, optimizer=optimizer)
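# Minimal usage sketch, guarded so it only runs when this file is executed
# directly and keras imported successfully. The parameter values below are
# assumptions chosen to illustrate the keys these helpers expect; they are
# not defaults taken from the original project.
if __name__ == '__main__' and import_keras:
    example_params = {
        'number_layers': 2,
        'units': 64,
        'input_dim': 20,
        'output_dim': 2,
        'activation': 'relu',
        'batch_normalization': True,
        'dropout': 0.3,
        'optimizer': 'Adam',
        'learning_rate': 0.001,
    }
    model = keras_create_model(example_params, 'classification')
    model.summary()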
|
pierre-chaville/automlk
|
automlk/utils/keras_wrapper.py
|
Python
|
mit
| 2,073 | 0.00193 |
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: route53
:platform: Unix
:synopsis: Module contains a useful Route53 class.
.. version:: @VERSION@
.. author:: Kevin Glisson (kglisson@netflix.com), Patrick Kelley (patrick@netflix.com) @monkeysecurity
"""
import os
import re
import boto
import boto.route53.record
from security_monkey import app
class Route53Service(object):
"""
    Provides helper functions for manipulating Route53 records.
"""
def __init__(self, **kwargs):
super(Route53Service, self).__init__(**kwargs)
self.conn = boto.connect_route53()
try:
self.hostname = os.environ['EC2_PUBLIC_HOSTNAME']
except KeyError:
app.logger.warn("We cannot register a domain on non ec2 instances")
def register(self, fqdn, exclusive=False, ttl=60, type='CNAME', regions=None):
fqdn = fqdn.replace('_', '-')
fqdn = re.sub(r'[^\w\-\.]', '', fqdn)
app.logger.debug('route53: register fqdn: {}, hostname: {}'.format(fqdn, self.hostname))
zone_id = self._get_zone_id(fqdn)
if exclusive:
app.logger.debug('route53: making fqdn: {} exclusive'.format(fqdn))
rrsets = self.conn.get_all_rrsets(zone_id, type, name=fqdn)
for rrset in rrsets:
if rrset.name == fqdn + '.':
app.logger.debug('found fqdn to delete: {}'.format(rrset))
for rr in rrset.resource_records:
changes = boto.route53.record.ResourceRecordSets(self.conn, zone_id)
changes.add_change("DELETE", fqdn, type, ttl).add_value(rr)
changes.commit()
changes = boto.route53.record.ResourceRecordSets(self.conn, zone_id)
changes.add_change("CREATE", fqdn, type, ttl).add_value(self.hostname)
changes.commit()
def unregister(self, fqdn, ttl=60, type='CNAME'):
# Unregister this fqdn
fqdn = fqdn.replace('_', '-')
fqdn = re.sub(r'[^\w\-\.]', '', fqdn)
app.logger.debug('route53: unregister fqdn: {}, hostname: {}'.format(fqdn, self.hostname))
zone_id = self._get_zone_id(fqdn)
changes = boto.route53.record.ResourceRecordSets(self.conn, zone_id)
changes.add_change("DELETE", fqdn, type, ttl).add_value(self.hostname)
changes.commit()
def _get_zone_id(self, domain):
if domain[-1] != '.':
domain += '.'
result = self.conn.get_all_hosted_zones()
hosted_zones = result['ListHostedZonesResponse']['HostedZones']
while domain != '.':
for zone in hosted_zones:
app.logger.debug("{} {}".format(zone['Name'], domain))
if zone['Name'] == domain:
return zone['Id'].replace('/hostedzone/', '')
else:
domain = domain[domain.find('.') + 1:]
        raise ZoneIDNotFound(domain)
class ZoneIDNotFound(Exception):
    """Raised when no Route53 hosted zone matches the given domain."""
    pass
|
Dklotz-Circle/security_monkey
|
security_monkey/common/route53.py
|
Python
|
apache-2.0
| 3,538 | 0.001696 |
from __future__ import absolute_import, unicode_literals
import json
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from wagtail.wagtailadmin.widgets import AdminChooser
class AdminSnippetChooser(AdminChooser):
target_content_type = None
def __init__(self, content_type=None, **kwargs):
if 'snippet_type_name' in kwargs:
snippet_type_name = kwargs.pop('snippet_type_name')
self.choose_one_text = _('Choose %s') % snippet_type_name
self.choose_another_text = _('Choose another %s') % snippet_type_name
super(AdminSnippetChooser, self).__init__(**kwargs)
if content_type is not None:
self.target_content_type = content_type
def render_html(self, name, value, attrs):
original_field_html = super(AdminSnippetChooser, self).render_html(name, value, attrs)
model_class = self.target_content_type.model_class()
instance = self.get_instance(model_class, value)
return render_to_string("wagtailsnippets/widgets/snippet_chooser.html", {
'widget': self,
'original_field_html': original_field_html,
'attrs': attrs,
'value': value,
'item': instance,
})
def render_js_init(self, id_, name, value):
content_type = self.target_content_type
return "createSnippetChooser({id}, {content_type});".format(
id=json.dumps(id_),
content_type=json.dumps('{app}/{model}'.format(
app=content_type.app_label,
model=content_type.model)))
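# Illustrative usage sketch (an assumption, not from the original module;
# `Advert` stands in for any registered snippet model):
#
#     from django.contrib.contenttypes.models import ContentType
#     chooser = AdminSnippetChooser(
#         content_type=ContentType.objects.get_for_model(Advert),
#         snippet_type_name='advert')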
|
chimeno/wagtail
|
wagtail/wagtailsnippets/widgets.py
|
Python
|
bsd-3-clause
| 1,643 | 0.001826 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#--------------------------------------------------------------------------------------------------
# Program Name: holy_orders
# Program Description: Update program for the Abbot Cantus API server.
#
# Filename: holy_orders/current.py
# Purpose: Functions to determine which resources to update.
#
# Copyright (C) 2015, 2016 Christopher Antila
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#--------------------------------------------------------------------------------------------------
'''
Functions to determine which resources to update.
'''
import datetime
import logging
import tornado.log
import iso8601
# settings
LOG_LEVEL = logging.DEBUG
# script-level "globals"
_log = tornado.log.app_log
def _now_wrapper():
'''
A wrapper function for datetime.datetime.utcnow() that can be mocked for automated tests.
'''
return datetime.datetime.now(datetime.timezone.utc)
def get_last_updated(updates_db, rtype):
'''
Get a :class:`datetime` of the most recent update for a resource type.
    :param updates_db: A :class:`Connection` to the database that holds the update history.
:type updates_db: :class:`sqlite3.Connection`
:param str rtype: The resource type to check.
:returns: The time of the most recent update for the resource type.
:rtype: :class:`datetime.datetime`
If the database's most recent update is recorded as ``'never'``, meaning the resource type was
never updated, the :class:`datetime` returned corresponds to Unix time ``0``.
'''
last_update = updates_db.cursor().execute('SELECT updated FROM rtypes WHERE name=?', (rtype,))
last_update = last_update.fetchone()[0]
    if last_update == 'never':
        # Return an aware datetime so arithmetic with _now_wrapper() works.
        return datetime.datetime.fromtimestamp(0.0, datetime.timezone.utc)
else:
return iso8601.parse_date(last_update)
def should_update(rtype, config, updates_db):
'''
Check whether HolyOrders "should update" resources of a particular type.
:param str rtype: The resource type to check.
:param config: Dictionary of the configuration file that has our data.
:type config: :class:`configparser.ConfigParser`
    :param updates_db: A :class:`Connection` to the database that holds the update history.
:type updates_db: :class:`sqlite3.Connection`
:returns: Whether the resource type should be updated.
:rtype: bool
'''
last_update = get_last_updated(updates_db, rtype)
if last_update.year < 1990:
_log.info('should_update({0}) -> True (first update)'.format(rtype))
return True
late_update_delta = _now_wrapper() - last_update
update_freq_delta = config['update_frequency'][rtype]
if update_freq_delta.endswith('d'):
update_freq_delta = datetime.timedelta(days=int(update_freq_delta[:-1]))
else:
update_freq_delta = datetime.timedelta(hours=int(update_freq_delta[:-1]))
if late_update_delta >= update_freq_delta:
_log.info('should_update({0}) -> True'.format(rtype))
return True
else:
_log.info('should_update({0}) -> False'.format(rtype))
return False
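# For reference, a sketch of the configuration shape should_update() assumes
# (resource names and values here are hypothetical): an entry ending in 'd'
# is read as days, anything else as hours, e.g.
#
#     config['update_frequency'] = {'chant': '1d', 'feast': '12h'}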
def calculate_chant_updates(updates_db):
'''
Determine which dates should be requested for updates of "chant" resources.
    :param updates_db: A :class:`Connection` to the database that holds the update history.
:type updates_db: :class:`sqlite3.Connection`
    :returns: The dates that require an update. These are formatted as YYYYMMDD, so they may be used
directly in Drupal URLs.
:rtype: list of str
If no updates are required, the function returns an empty list. To ensure no updates are missed,
this function always includes one additional day than required. For example, if the most recent
update was earlier today, then this function requests updates for both today and yesterday.
However, also note that "days ago" is determined in 24-hour periods, rather than the "yesterday"
style of thinking that humans use. The actual dates requested aren't especially important---it's
enough to know that this function errs on the side of requesting more days than required.
'''
post = []
last_update = get_last_updated(updates_db, 'chant')
delta = _now_wrapper() - last_update
if delta.total_seconds() >= 0:
days_to_request = delta.days + 2
one_day = datetime.timedelta(days=1)
cursor = _now_wrapper()
for _ in range(days_to_request):
post.append(cursor.strftime('%Y%m%d'))
cursor -= one_day
_log.info('Requesting chant updates for {}'.format(post))
return post
def update_db(updates_db, rtype, time):
'''
Revise the updates database to show a new "last updated" time for a resource type.
    :param updates_db: A :class:`Connection` to the database that holds the update history.
:type updates_db: :class:`sqlite3.Connection`
:param str rtype: The resource type that was updated.
:param time: The time at which the resource type is current.
:type time: :class:`datetime.datetime`
While it's tempting to think that the ``time`` argument should correspond to the moment this
function is called, that's not true---especially for resource types that take considerable time
to update (chants). Therefore the :class:`datetime` given to this function should correspond
to the moment just before data are requested from Drupal.
'''
time = time.isoformat()
updates_db.cursor().execute('UPDATE rtypes SET updated=? WHERE name=?;', (time, rtype))
updates_db.commit()
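# Illustrative sketch of the database schema these functions expect, inferred
# from the queries above (an assumption, not part of the original module):
#
#     import sqlite3
#     db = sqlite3.connect(':memory:')
#     db.execute('CREATE TABLE rtypes (name TEXT PRIMARY KEY, updated TEXT)')
#     db.execute("INSERT INTO rtypes VALUES ('chant', 'never')")
#     db.commit()
#     assert get_last_updated(db, 'chant').year == 1970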
|
CANTUS-Project/abbot
|
holy_orders/current.py
|
Python
|
gpl-3.0
| 6,147 | 0.003579 |
from __future__ import print_function, division, absolute_import
import difflib
import locale
import os
import pprint
import six
import sys
import tempfile
try:
import unittest2 as unittest
except ImportError:
import unittest
# just log py.warnings (and pygtk warnings in particular)
import logging
try:
# 2.7+
logging.captureWarnings(True)
except AttributeError:
pass
from mock import Mock, MagicMock, NonCallableMock, patch, mock_open
from contextlib import contextmanager
from . import stubs
import subscription_manager.injection as inj
import subscription_manager.managercli
from rhsmlib.services import config
# use instead of the normal pid file based ActionLock
from threading import RLock
if six.PY2:
OPEN_FUNCTION = '__builtin__.open'
else:
OPEN_FUNCTION = 'builtins.open'
@contextmanager
def open_mock(content=None, **kwargs):
content_out = six.StringIO()
m = mock_open(read_data=content)
with patch(OPEN_FUNCTION, m, create=True, **kwargs) as mo:
stream = six.StringIO(content)
rv = mo.return_value
rv.write = lambda x: content_out.write(x)
rv.content_out = lambda: content_out.getvalue()
rv.__iter__ = lambda x: iter(stream.readlines())
yield rv
@contextmanager
def open_mock_many(file_content_map=None, **kwargs):
"""
Mock out access to one or many files opened using the builtin "open".
:param file_content_map: A dictionary of path : file_contents
:type file_content_map: dict[str,str]
:param kwargs:
:return:
"""
file_content_map = file_content_map or {}
for key, value in file_content_map.items():
file_content_map[key] = (mock_open(read_data=value), value, six.StringIO())
def get_file(path, *args, **kwargs):
"""
The side effect that will allow us to "open" the right "file".
Not for use outside open_mock_many.
:param path: The path which is passed in to the built
:param args:
:param kwargs:
:return:
"""
try:
rv, file_contents, content_out = file_content_map[path]
except KeyError:
if six.PY2:
raise IOError(2, 'No such file or directory')
else:
raise OSError(2, 'No such file or directory')
rv = rv.return_value
rv.write = lambda x: content_out.write(x)
rv.content_out = lambda: content_out.getvalue()
return rv
with patch(OPEN_FUNCTION, **kwargs) as mo:
mo.side_effect = get_file
yield mo
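# Illustrative usage sketch for the helper above (an assumption; the path and
# contents are hypothetical):
#
#     with open_mock_many({'/etc/example.conf': 'key=value'}):
#         with open('/etc/example.conf') as f:
#             assert f.read() == 'key=value'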
@contextmanager
def temp_file(content, *args, **kwargs):
try:
kwargs['delete'] = False
kwargs.setdefault('prefix', 'sub-man-test')
fh = tempfile.NamedTemporaryFile(mode='w+', *args, **kwargs)
fh.write(content)
fh.close()
yield fh.name
finally:
os.unlink(fh.name)
@contextmanager
def locale_context(new_locale, category=None):
old_category = category or locale.LC_CTYPE
old_locale = locale.getlocale(old_category)
category = category or locale.LC_ALL
locale.setlocale(category, new_locale)
try:
yield
finally:
locale.setlocale(category, old_locale)
class FakeLogger(object):
def __init__(self):
self.expected_msg = ""
self.msg = None
self.logged_exception = None
def debug(self, buf, *args, **kwargs):
self.msg = buf
def error(self, buf, *args, **kwargs):
self.msg = buf
def exception(self, e, *args, **kwargs):
self.logged_exception = e
def set_expected_msg(self, msg):
self.expected_msg = msg
def info(self, buf, *args, **kwargs):
self.msg = buf
def warning(self, buf, *args, **kwargs):
self.msg = buf
class FakeException(Exception):
def __init__(self, msg=None):
self.msg = msg
def __str__(self):
return repr(self.msg)
class Matcher(object):
@staticmethod
def set_eq(first, second):
"""Useful for dealing with sets that have been cast to or instantiated as lists."""
return set(first) == set(second)
def __init__(self, compare, some_obj):
self.compare = compare
self.some_obj = some_obj
def __eq__(self, other):
return self.compare(self.some_obj, other)
class SubManFixture(unittest.TestCase):
    """
    Can be extended by any subscription manager test case to make
    sure nothing on the actual system is read/touched, and appropriate
    mocks/stubs are in place.
    """
    def set_facts(self):
        """Override if you need to set facts for a test."""
        return {"mock.facts": "true"}
def setUp(self):
# No matter what, stop all patching (even if we have a failure in setUp itself)
self.addCleanup(patch.stopall)
# Never attempt to use the actual managercli.cfg which points to a
# real file in etc.
self.mock_cfg_parser = stubs.StubConfig()
original_conf = subscription_manager.managercli.conf
def unstub_conf():
subscription_manager.managercli.conf = original_conf
# Mock makes it damn near impossible to mock a module attribute (which we shouldn't be using
# in the first place because it's terrible) so we monkey-patch it ourselves.
# TODO Fix this idiocy by not reading the damn config on module import
subscription_manager.managercli.conf = config.Config(self.mock_cfg_parser)
self.addCleanup(unstub_conf)
facts_host_patcher = patch('rhsmlib.dbus.facts.FactsClient', auto_spec=True)
self.mock_facts_host = facts_host_patcher.start()
self.mock_facts_host.return_value.GetFacts.return_value = self.set_facts()
# By default mock that we are registered. Individual test cases
# can override if they are testing disconnected scenario.
id_mock = NonCallableMock(name='FixtureIdentityMock')
id_mock.exists_and_valid = Mock(return_value=True)
id_mock.uuid = 'fixture_identity_mock_uuid'
id_mock.name = 'fixture_identity_mock_name'
id_mock.cert_dir_path = "/not/a/real/path/to/pki/consumer/"
id_mock.keypath.return_value = "/not/a/real/key/path"
id_mock.certpath.return_value = "/not/a/real/cert/path"
# Don't really care about date ranges here:
self.mock_calc = NonCallableMock()
self.mock_calc.calculate.return_value = None
# Avoid trying to read real /etc/yum.repos.d/redhat.repo
self.mock_repofile_path_exists_patcher = patch('subscription_manager.repolib.YumRepoFile.path_exists')
mock_repofile_path_exists = self.mock_repofile_path_exists_patcher.start()
mock_repofile_path_exists.return_value = True
inj.provide(inj.IDENTITY, id_mock)
inj.provide(inj.PRODUCT_DATE_RANGE_CALCULATOR, self.mock_calc)
inj.provide(inj.ENTITLEMENT_STATUS_CACHE, stubs.StubEntitlementStatusCache())
inj.provide(inj.POOL_STATUS_CACHE, stubs.StubPoolStatusCache())
inj.provide(inj.PROD_STATUS_CACHE, stubs.StubProductStatusCache())
inj.provide(inj.CONTENT_ACCESS_MODE_CACHE, stubs.StubContentAccessModeCache())
inj.provide(inj.SUPPORTED_RESOURCES_CACHE, stubs.StubSupportedResourcesCache())
inj.provide(inj.SYSPURPOSE_VALID_FIELDS_CACHE, stubs.StubSyspurposeValidFieldsCache())
inj.provide(inj.CURRENT_OWNER_CACHE, stubs.StubCurrentOwnerCache)
inj.provide(inj.OVERRIDE_STATUS_CACHE, stubs.StubOverrideStatusCache())
inj.provide(inj.RELEASE_STATUS_CACHE, stubs.StubReleaseStatusCache())
inj.provide(inj.AVAILABLE_ENTITLEMENT_CACHE, stubs.StubAvailableEntitlementsCache())
inj.provide(inj.PROFILE_MANAGER, stubs.StubProfileManager())
# By default set up an empty stub entitlement and product dir.
# Tests need to modify or create their own but nothing should hit
# the system.
self.ent_dir = stubs.StubEntitlementDirectory()
inj.provide(inj.ENT_DIR, self.ent_dir)
self.prod_dir = stubs.StubProductDirectory()
inj.provide(inj.PROD_DIR, self.prod_dir)
# Installed products manager needs PROD_DIR injected first
inj.provide(inj.INSTALLED_PRODUCTS_MANAGER, stubs.StubInstalledProductsManager())
self.stub_cp_provider = stubs.StubCPProvider()
self._release_versions = []
self.stub_cp_provider.content_connection.get_versions = self._get_release_versions
inj.provide(inj.CP_PROVIDER, self.stub_cp_provider)
inj.provide(inj.CERT_SORTER, stubs.StubCertSorter())
# setup and mock the plugin_manager
plugin_manager_mock = MagicMock(name='FixturePluginManagerMock')
plugin_manager_mock.runiter.return_value = iter([])
inj.provide(inj.PLUGIN_MANAGER, plugin_manager_mock)
inj.provide(inj.DBUS_IFACE, Mock(name='FixtureDbusIfaceMock'))
pooltype_cache = Mock()
inj.provide(inj.POOLTYPE_CACHE, pooltype_cache)
# don't use file based locks for tests
inj.provide(inj.ACTION_LOCK, RLock)
self.stub_facts = stubs.StubFacts()
inj.provide(inj.FACTS, self.stub_facts)
content_access_cache_mock = MagicMock(name='ContentAccessCacheMock')
inj.provide(inj.CONTENT_ACCESS_CACHE, content_access_cache_mock)
self.dbus_patcher = patch('subscription_manager.managercli.CliCommand._request_validity_check')
self.dbus_patcher.start()
# No tests should be trying to connect to any configure or test server
# so really, everything needs this mock. May need to be in __init__, or
# better, all test classes need to use SubManFixture
self.is_valid_server_patcher = patch("subscription_manager.managercli.is_valid_server_info")
is_valid_server_mock = self.is_valid_server_patcher.start()
is_valid_server_mock.return_value = True
# No tests should be trying to test the proxy connection
# so really, everything needs this mock. May need to be in __init__, or
# better, all test classes need to use SubManFixture
self.test_proxy_connection_patcher = patch("subscription_manager.managercli.CliCommand.test_proxy_connection")
test_proxy_connection_mock = self.test_proxy_connection_patcher.start()
test_proxy_connection_mock.return_value = True
self.syncedstore_patcher = patch('subscription_manager.syspurposelib.SyncedStore')
syncedstore_mock = self.syncedstore_patcher.start()
set_up_mock_sp_store(syncedstore_mock)
self.files_to_cleanup = []
def tearDown(self):
if not hasattr(self, 'files_to_cleanup'):
return
for f in self.files_to_cleanup:
# Assuming these are tempfile.NamedTemporaryFile, created with
# the write_tempfile() method in this class.
f.close()
def write_tempfile(self, contents):
"""
Write out a tempfile and append it to the list of those to be
cleaned up in tearDown.
"""
fid = tempfile.NamedTemporaryFile(mode='w+', suffix='.tmp')
fid.write(contents)
fid.seek(0)
self.files_to_cleanup.append(fid)
return fid
def set_consumer_auth_cp(self, consumer_auth_cp):
cp_provider = inj.require(inj.CP_PROVIDER)
cp_provider.consumer_auth_cp = consumer_auth_cp
def get_consumer_cp(self):
cp_provider = inj.require(inj.CP_PROVIDER)
consumer_cp = cp_provider.get_consumer_auth_cp()
return consumer_cp
# The ContentConnection used for reading release versions from
# the cdn. The injected one uses this.
def _get_release_versions(self, listing_path):
return self._release_versions
# For changing injection consumer id to one that fails "is_valid"
def _inject_mock_valid_consumer(self, uuid=None):
"""For changing injected consumer identity to one that passes is_valid()
Returns the injected identity if it need to be examined.
"""
identity = NonCallableMock(name='ValidIdentityMock')
identity.uuid = uuid or "VALIDCONSUMERUUID"
identity.is_valid = Mock(return_value=True)
identity.cert_dir_path = "/not/a/real/path/to/pki/consumer/"
inj.provide(inj.IDENTITY, identity)
return identity
def _inject_mock_invalid_consumer(self, uuid=None):
"""For chaining injected consumer identity to one that fails is_valid()
Returns the injected identity if it need to be examined.
"""
invalid_identity = NonCallableMock(name='InvalidIdentityMock')
invalid_identity.is_valid = Mock(return_value=False)
invalid_identity.uuid = uuid or "INVALIDCONSUMERUUID"
invalid_identity.cert_dir_path = "/not/a/real/path/to/pki/consumer/"
inj.provide(inj.IDENTITY, invalid_identity)
return invalid_identity
# use our naming convention here to make it clear
    # this is our extension. Note that python 2.7 adds an
    # assertMultiLineEqual that assertEqual of strings uses
    # automatically
def assert_string_equals(self, expected_str, actual_str, msg=None):
if expected_str != actual_str:
expected_lines = expected_str.splitlines(True)
actual_lines = actual_str.splitlines(True)
delta = difflib.unified_diff(expected_lines, actual_lines, "expected", "actual")
message = ''.join(delta)
if msg:
message += " : " + msg
self.fail("Multi-line strings are unequal:\n" + message)
def assert_equal_dict(self, expected_dict, actual_dict):
mismatches = []
missing_keys = []
extra = []
for key in expected_dict:
if key not in actual_dict:
missing_keys.append(key)
continue
if expected_dict[key] != actual_dict[key]:
mismatches.append((key, expected_dict[key], actual_dict[key]))
for key in actual_dict:
if key not in expected_dict:
extra.append(key)
message = ""
if missing_keys or extra:
message += "Keys in only one dict: \n"
if missing_keys:
for key in missing_keys:
message += "actual_dict: %s\n" % key
if extra:
for key in extra:
message += "expected_dict: %s\n" % key
if mismatches:
message += "Unequal values: \n"
for info in mismatches:
message += "%s: %s != %s\n" % info
# pprint the dicts
message += "\n"
message += "expected_dict:\n"
message += pprint.pformat(expected_dict)
message += "\n"
message += "actual_dict:\n"
message += pprint.pformat(actual_dict)
if mismatches or missing_keys or extra:
self.fail(message)
def assert_items_equals(self, a, b):
"""Assert that two lists contain the same items regardless of order."""
if sorted(a, key=lambda item: str(item)) != sorted(b, key=lambda item: str(item)):
self.fail("%s != %s" % (a, b))
return True
class Capture(object):
class Tee(object):
def __init__(self, stream, silent):
self.buf = six.StringIO()
self.stream = stream
self.silent = silent
def write(self, data):
self.buf.write(data)
if not self.silent:
self.stream.write(data)
def flush(self):
pass
def getvalue(self):
return self.buf.getvalue()
def isatty(self):
return False
def __init__(self, silent=False):
self.silent = silent
def __enter__(self):
self.buffs = (self.Tee(sys.stdout, self.silent), self.Tee(sys.stderr, self.silent))
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout, sys.stderr = self.buffs
return self
@property
def out(self):
return self.buffs[0].getvalue()
@property
def err(self):
return self.buffs[1].getvalue()
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout = self.stdout
sys.stderr = self.stderr
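# Illustrative use of Capture (a sketch using only names defined above):
#     with Capture(silent=True) as cap:
#         print("hello")
#     assert cap.out == "hello\n"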
def set_up_mock_sp_store(mock_sp_store):
"""
Sets up the mock syspurpose store with methods that are mock versions of the real deal.
Allows us to test in the absence of the syspurpose module.
This documents the essential expected behaviour of the methods subman relies upon
from the syspurpose codebase.
:return:
"""
contents = {}
mock_sp_store_contents = contents
def set(item, value):
contents[item] = value
def read(path, raise_on_error=False):
return mock_sp_store
def unset(item):
contents[item] = None
def add(item, value):
current = contents.get(item, [])
if value not in current:
current.append(value)
contents[item] = current
def remove(item, value):
current = contents.get(item)
if current is not None and isinstance(current, list) and value in current:
current.remove(value)
def get_local_contents():
return contents
def get_cached_contents():
return contents
    def update_local(data):
        # mutate in place so the alias returned by get_local_contents stays valid
        contents.clear()
        contents.update(data)
mock_sp_store.return_value.set = Mock(side_effect=set)
mock_sp_store.return_value.read = Mock(side_effect=read)
mock_sp_store.return_value.unset = Mock(side_effect=unset)
mock_sp_store.return_value.add = Mock(side_effect=add)
mock_sp_store.return_value.remove = Mock(side_effect=remove)
mock_sp_store.return_value.local_contents = mock_sp_store_contents
mock_sp_store.return_value.get_local_contents = Mock(side_effect=get_local_contents)
mock_sp_store.return_value.update_local = Mock(side_effect=update_local)
mock_sp_store.return_value.get_cached_contents = Mock(side_effect=get_cached_contents)
return mock_sp_store, mock_sp_store_contents
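# Illustrative sketch of the mock store above (MagicMock import assumed, as in
# this module):
#     store, contents = set_up_mock_sp_store(MagicMock())
#     store.return_value.set('role', 'server')
#     assert contents['role'] == 'server'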
|
Lorquas/subscription-manager
|
test/fixture.py
|
Python
|
gpl-2.0
| 18,129 | 0.001655 |
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorforce.core.parameters import Parameter
class Random(Parameter):
"""
Random hyperparameter (specification key: `random`).
Args:
distribution ("normal" | "uniform"): Distribution type for random hyperparameter value
(<span style="color:#C00000"><b>required</b></span>).
kwargs: Additional arguments dependent on distribution type.<br>
Normal distribution:
<ul>
<li><b>mean</b> (<i>float</i>) – Mean
(<span style="color:#00C000"><b>default</b></span>: 0.0).</li>
<li><b>stddev</b> (<i>float > 0.0</i>) – Standard deviation
(<span style="color:#00C000"><b>default</b></span>: 1.0).</li>
</ul>
Uniform distribution:
<ul>
<li><b>minval</b> (<i>int / float</i>) – Lower bound
(<span style="color:#00C000"><b>default</b></span>: 0 / 0.0).</li>
<li><b>maxval</b> (<i>float > minval</i>) – Upper bound
(<span style="color:#00C000"><b>default</b></span>: 1.0 for float,
<span style="color:#C00000"><b>required</b></span> for int).</li>
</ul>
name (string): <span style="color:#0000C0"><b>internal use</b></span>.
dtype (type): <span style="color:#0000C0"><b>internal use</b></span>.
shape (iter[int > 0]): <span style="color:#0000C0"><b>internal use</b></span>.
min_value (dtype-compatible value): <span style="color:#0000C0"><b>internal use</b></span>.
max_value (dtype-compatible value): <span style="color:#0000C0"><b>internal use</b></span>.
"""
def __init__(
self, *, distribution, name=None, dtype=None, shape=(), min_value=None, max_value=None,
**kwargs
):
assert dtype in ('int', 'float')
assert distribution in ('normal', 'uniform')
self.distribution = distribution
self.kwargs = kwargs
super().__init__(
name=name, dtype=dtype, shape=shape, min_value=min_value, max_value=max_value
)
def min_value(self):
if self.distribution == 'uniform':
return self.spec.py_type()(self.kwargs.get('minval', 0))
else:
return super().min_value()
def max_value(self):
if self.distribution == 'uniform':
return self.spec.py_type()(self.kwargs.get('maxval', 1.0))
else:
return super().max_value()
def final_value(self):
if self.distribution == 'normal':
return self.spec.py_type()(self.kwargs.get('mean', 0.0))
elif self.distribution == 'uniform':
return self.spec.py_type()(
(self.kwargs.get('maxval', 1.0) + self.kwargs.get('minval', 0.0)) / 2.0
)
else:
return super().final_value()
def parameter_value(self, *, step):
if self.distribution == 'normal':
parameter = tf.random.normal(
shape=self.spec.shape, dtype=self.spec.tf_type(), mean=self.kwargs.get('mean', 0.0),
stddev=self.kwargs.get('stddev', 1.0)
)
elif self.distribution == 'uniform':
parameter = tf.random.uniform(
shape=self.spec.shape, dtype=self.spec.tf_type(),
minval=self.kwargs.get('minval', 0), maxval=self.kwargs.get('maxval')
)
return parameter
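# Illustrative spec (a sketch; the agent/config plumbing around it is assumed):
#     dict(type='random', distribution='uniform', minval=0.0, maxval=0.1)
# would draw a fresh uniform float in [0.0, 0.1) on each step.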
|
reinforceio/tensorforce
|
tensorforce/core/parameters/random.py
|
Python
|
apache-2.0
| 4,125 | 0.002182 |
from distutils.core import setup
setup(
name = 'ml_easy_peer_grade',
packages = ['ml_easy_peer_grade'],
version = '0.18',
scripts=['bin/ml_easy_peer_grade'],
description = 'Ez peer grade your project members, exclusive to privileged Bilkent students',
author = 'Cuklahan Dorum',
author_email = 'badass@alumni.bilkent.edu.tr',
url = 'https://github.com/cagdass/ml-easy-peer-grade',
download_url = 'https://github.com/cagdass/ml-easy-peer-grade/tarball/0.1',
keywords = ['testing'],
classifiers = [],
)
|
cagdass/ml-easy-peer-grade
|
setup.py
|
Python
|
gpl-3.0
| 525 | 0.04381 |
import logging
from ...util import none_or
from ..errors import MalformedResponse
from .collection import Collection
logger = logging.getLogger("mw.api.collections.revisions")
class Revisions(Collection):
"""
    A collection of revisions indexed by title, page_id and user_text.
    Note that revisions of deleted pages are queryable via
:class:`mw.api.DeletedRevs`.
"""
PROPERTIES = {'ids', 'flags', 'timestamp', 'user', 'userid', 'size',
'sha1', 'contentmodel', 'comment', 'parsedcomment',
'content', 'tags', 'flagged'}
DIFF_TO = {'prev', 'next', 'cur'}
# This is *not* the right way to do this, but it should work for all queries.
MAX_REVISIONS = 50
def get(self, rev_id, **kwargs):
"""
        Get a single revision based on its ID. Throws a :py:class:`KeyError`
if the rev_id cannot be found.
:Parameters:
rev_id : int
Revision ID
``**kwargs``
Passed to :py:meth:`query`
:Returns:
A single rev dict
"""
rev_id = int(rev_id)
revs = list(self.query(revids={rev_id}, **kwargs))
if len(revs) < 1:
raise KeyError(rev_id)
else:
return revs[0]
def query(self, *args, limit=None, **kwargs):
"""
Get revision information.
See `<https://www.mediawiki.org/wiki/API:Properties#revisions_.2F_rv>`_
:Parameters:
properties : set(str)
Which properties to get for each revision:
* ids - The ID of the revision
* flags - Revision flags (minor)
* timestamp - The timestamp of the revision
* user - User that made the revision
* userid - User id of revision creator
* size - Length (bytes) of the revision
* sha1 - SHA-1 (base 16) of the revision
* contentmodel - Content model id
* comment - Comment by the user for revision
* parsedcomment - Parsed comment by the user for the revision
* content - Text of the revision
* tags - Tags for the revision
limit : int
Limit how many revisions will be returned
No more than 500 (5000 for bots) allowed
start_id : int
From which revision id to start enumeration (enum)
end_id : int
Stop revision enumeration on this revid
start : :class:`mw.Timestamp`
From which revision timestamp to start enumeration (enum)
end : :class:`mw.Timestamp`
Enumerate up to this timestamp
direction : str
"newer" or "older"
user : str
Only include revisions made by user_text
excludeuser : bool
Exclude revisions made by user
tag : str
Only list revisions tagged with this tag
expandtemplates : bool
                Expand templates in revision content (requires "content" property)
            generatexml : bool
                Generate XML parse tree for revision content (requires "content" property)
            parse : bool
                Parse revision content (requires "content" property)
section : int
Only retrieve the content of this section number
token : set(str)
Which tokens to obtain for each revision
* rollback - See `<https://www.mediawiki.org/wiki/API:Edit_-_Rollback#Token>`_
rvcontinue : str
When more results are available, use this to continue
diffto : int
Revision ID to diff each revision to. Use "prev", "next" and
"cur" for the previous, next and current revision respectively
difftotext : str
Text to diff each revision to. Only diffs a limited number of
revisions. Overrides diffto. If section is set, only that
section will be diffed against this text
contentformat : str
Serialization format used for difftotext and expected for output of content
* text/x-wiki
* text/javascript
* text/css
* text/plain
* application/json
:Returns:
An iterator of rev dicts returned from the API.
"""
revisions_yielded = 0
done = False
while not done:
if limit == None:
kwargs['limit'] = self.MAX_REVISIONS
else:
kwargs['limit'] = min(limit - revisions_yielded, self.MAX_REVISIONS)
rev_docs, rvcontinue = self._query(*args, **kwargs)
for doc in rev_docs:
yield doc
revisions_yielded += 1
if limit != None and revisions_yielded >= limit:
done = True
break
if rvcontinue != None and len(rev_docs) > 0:
kwargs['rvcontinue'] = rvcontinue
else:
done = True
def _query(self, revids=None, titles=None, pageids=None, properties=None,
limit=None, start_id=None, end_id=None, start=None,
end=None, direction=None, user=None, excludeuser=None,
tag=None, expandtemplates=None, generatexml=None,
parse=None, section=None, token=None, rvcontinue=None,
diffto=None, difftotext=None, contentformat=None):
params = {
'action': "query",
'prop': "revisions"
}
params['revids'] = self._items(revids, type=int)
params['titles'] = self._items(titles)
params['pageids'] = self._items(pageids, type=int)
params['rvprop'] = self._items(properties, levels=self.PROPERTIES)
if revids == None: # Can't have a limit unless revids is none
params['rvlimit'] = none_or(limit, int)
params['rvstartid'] = none_or(start_id, int)
params['rvendid'] = none_or(end_id, int)
params['rvstart'] = self._check_timestamp(start)
params['rvend'] = self._check_timestamp(end)
params['rvdir'] = self._check_direction(direction)
params['rvuser'] = none_or(user, str)
params['rvexcludeuser'] = none_or(excludeuser, int)
params['rvtag'] = none_or(tag, str)
params['rvexpandtemplates'] = none_or(expandtemplates, bool)
params['rvgeneratexml'] = none_or(generatexml, bool)
params['rvparse'] = none_or(parse, bool)
params['rvsection'] = none_or(section, int)
params['rvtoken'] = none_or(token, str)
params['rvcontinue'] = none_or(rvcontinue, int)
params['rvdiffto'] = self._check_diffto(diffto)
params['rvdifftotext'] = none_or(difftotext, str)
params['rvcontentformat'] = none_or(contentformat, str)
doc = self.session.get(params)
try:
if 'query-continue' in doc:
rvcontinue = doc['query-continue']['revisions']['rvcontinue']
else:
rvcontinue = None
pages = doc['query'].get('pages', {}).values()
rev_docs = []
for page_doc in pages:
if 'missing' in page_doc or 'revisions' not in page_doc: continue
page_rev_docs = page_doc['revisions']
del page_doc['revisions']
for rev_doc in page_rev_docs:
rev_doc['page'] = page_doc
rev_docs.extend(page_rev_docs)
return rev_docs, rvcontinue
except KeyError as e:
raise MalformedResponse(str(e), doc)
def _check_diffto(self, diffto):
if diffto == None or diffto in self.DIFF_TO:
return diffto
else:
return int(diffto)
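# Illustrative usage (a sketch; assumes an api.Session that exposes this
# collection, e.g. as `session.revisions`):
#     for rev in session.revisions.query(titles={'Main Page'},
#                                        properties={'ids', 'timestamp'},
#                                        limit=5):
#         print(rev['revid'], rev['timestamp'])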
|
makoshark/Mediawiki-Utilities
|
mw/api/collections/revisions.py
|
Python
|
mit
| 8,519 | 0.006926 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestAllMaskBits(vtk.test.Testing.vtkTest):
def testAllMaskBits(self):
# This script calculates the luminance of an image
renWin = vtk.vtkRenderWindow()
# Image pipeline
image1 = vtk.vtkTIFFReader()
image1.SetFileName(VTK_DATA_ROOT + "/Data/beach.tif")
# "beach.tif" image contains ORIENTATION tag which is
# ORIENTATION_TOPLEFT (row 0 top, col 0 lhs) type. The TIFF
# reader parses this tag and sets the internal TIFF image
# orientation accordingly. To overwrite this orientation with a vtk
# convention of ORIENTATION_BOTLEFT (row 0 bottom, col 0 lhs ), invoke
# SetOrientationType method with parameter value of 4.
image1.SetOrientationType(4)
shrink = vtk.vtkImageShrink3D()
shrink.SetInputConnection(image1.GetOutputPort())
shrink.SetShrinkFactors(2, 2, 1)
operators = ["ByPass", "And", "Nand", "Xor", "Or", "Nor"]
operator = dict()
mapper = dict()
actor = dict()
imager = dict()
for idx, op in enumerate(operators):
if op != "ByPass":
operator.update({idx: vtk.vtkImageMaskBits()})
operator[idx].SetInputConnection(shrink.GetOutputPort())
eval('operator[' + str(idx) + '].SetOperationTo' + op + '()')
operator[idx].SetMasks(255, 255, 0)
mapper.update({idx: vtk.vtkImageMapper()})
if op != "ByPass":
mapper[idx].SetInputConnection(operator[idx].GetOutputPort())
else:
mapper[idx].SetInputConnection(shrink.GetOutputPort())
mapper[idx].SetColorWindow(255)
mapper[idx].SetColorLevel(127.5)
actor.update({idx: vtk.vtkActor2D()})
actor[idx].SetMapper(mapper[idx])
imager.update({idx: vtk.vtkRenderer()})
imager[idx].AddActor2D(actor[idx])
renWin.AddRenderer(imager[idx])
column = 0
row = 0
deltaX = 1.0 / 3.0
deltaY = 1.0 / 2.0
for idx in range(len(operators)):
imager[idx].SetViewport(column * deltaX, row * deltaY, (column + 1) * deltaX, (row + 1) * deltaY)
column += 1
if column > 2:
column = 0
row += 1
renWin.SetSize(384, 256)
# render and interact with data
iRen = vtk.vtkRenderWindowInteractor()
iRen.SetRenderWindow(renWin);
renWin.Render()
img_file = "TestAllMaskBits.png"
vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestAllMaskBits, 'test')])
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Imaging/Core/Testing/Python/TestAllMaskBits.py
|
Python
|
gpl-3.0
| 3,568 | 0.002522 |
from __future__ import print_function
dictData = [
{'forename':'Marc','surname':'Mine','age':35, 'tags':('family','work'),
'job':{'name':'Baker','category':'Business'},
'hobbies':[{'name':'swimming','period':7},
{'name':'reading','period':1}]},
{'forename':'Mike','surname':'Meyer','age':14, 'tags':('work',),
'job':{'name':'Banker','category':'Business'},
'hobbies':[{'name':'swimming','period':14}]},
{'forename':'Marc','surname':'Andrew','age':78, 'tags':('hobby','work'),
'job':{'name':'Police','category':'Government'},
'hobbies':[{'name':'swimming','period':7},
{'name':'music','period':1},
{'name':'reading','period':2}]},
{'forename':'Marc','surname':'Muscels','age':35, 'tags':('family','hobby'),
'job':{'name':'Teacher','category':'Social'},
'hobbies':[{'name':'math','period':30},
{'name':'ski','period':365}]},
{'forename':'Andy','surname':'Young','age':11, 'tags':('family','work'),
'job':{'name':'President','category':'Government'},
'hobbies':[{'name':'swimming','period':2},
{'name':'fitness','period':3},
{'name':'reading','period':4},
{'name':'rc-cars','period':14}]},
{'forename':'Andy','surname':'Andy','age':51, 'tags':('family',),
'job':{'name':'Killer','category':'Business'},
'hobbies':[{'name':'counterstrike','period':1}]}
]
class AutoSet(object):
def __init__(self, **kwargs):
for name in kwargs:
self.__setattr__(name, kwargs[name])
class Person(AutoSet):
forename = ''
surname = ''
tags = ()
age = 0
job = None
hobbies = []
def __str__(self):
return '<Person surname={0} forename={1} age={2} tags={3}>'.format(self.forename, self.surname,
self.age, self.tags)
class Job(AutoSet):
name = ''
category = ''
class Hobby(AutoSet):
name = ''
period = 0
objectData = [
Person(forename='Marc',surname='Mine',age=35, tags=('family','work'),
job=Job(name='Baker',category='Business'),
hobbies=[Hobby(name='swimming',period=7),
Hobby(name='reading',period=1)]
),
    Person(forename='Mike',surname='Meyer',age=14, tags=('work',),
job=Job(name='Banker',category='Business'),
hobbies=[Hobby(name='swimming',period=14)]
),
Person(forename='Marc',surname='Andrew',age=78, tags=('hobby','work'),
job=Job(name='Police',category='Government'),
hobbies=[Hobby(name='swimming',period=7),
Hobby(name='music',period=1),
Hobby(name='reading',period=2),]
),
Person(forename='Marc',surname='Muscels',age=35, tags=('family','hobby'),
job=Job(name='Teacher',category='Social'),
hobbies=[Hobby(name='math',period=30),
Hobby(name='ski',period=365)]
),
Person(forename='Andy',surname='Young',age=11, tags=('family','work'),
job=Job(name='President',category='Government'),
hobbies=[Hobby(name='swimming',period=2),
Hobby(name='fitness',period=3),
Hobby(name='reading',period=4),
Hobby(name='rc-cars',period=14)]
),
Person(forename='Andy',surname='Andy',age=51, tags=('family',),
job=Job(name='Killer',category='Business'),
hobbies=[Hobby(name='counterstrike',period=1)]
)
]
if __name__ == '__main__':
from sqliter.query import select, and_, or_, c, test
dictData = objectData
matchTest = {'name':'Olaf','weight':67.3,'age':33,'married':True}
def dev_null(self, *args):
pass
printOut = True
if printOut:
print_func = print
else:
print_func = dev_null
print_func('-------- SELECT FROM dictData')
for name in select().from_(dictData):
print_func(name)
print_func('-------- SELECT FROM dictData WHERE forename = Marc')
for name in select().from_(dictData).where(forename='Marc'):
print_func(name)
print_func('-------- SELECT FROM dictData WHERE surname IN ("Meyer","Muscels")')
for name in select().from_(dictData).where(c('surname').in_('Meyer','Muscels')):
print_func(name)
print_func('-------- SELECT FROM dictData WHERE age > 32')
for name in select().from_(dictData).where(c('age') > 32):
print_func(name)
print_func('-------- SELECT FROM dictData WHERE forname=surname')
for name in select().from_(dictData).where(c('forename') == c('surname')):
print_func(name)
print_func('-------- SELECT FROM dictData WHERE forname=surname OR age < 20')
for name in select().from_(dictData).where(or_(c('forename') == c('surname'),c('age') < 20)):
print_func(name)
print_func('-------- SELECT FROM dictData WHERE forname="Marc" AND surname == "Andrew"')
for name in select().from_(dictData).where(and_(c('forename') == 'Marc',c('surname') == 'Andrew')):
print_func(name)
print_func('-------- SELECT FROM dictData WHERE "family" IN tags')
for name in select().from_(dictData).where(c('"family"').in_(c('tags'))):
print_func(name)
print_func('-------- SELECT FROM dictData WHERE job.name == "President"')
for name in select().from_(dictData).where(c('job.name') == 'President'):
print_func(name)
print_func('-------- SELECT FROM dictData WHERE job.category == "Business"')
for name in select().from_(dictData).where(c('job.category') == 'Business'):
print_func(name)
print_func('-------- SELECT FROM dictData WHERE job.category IN ("Business", "Social")')
for name in select().from_(dictData).where(c('job.category').in_('Business','Social')):
print_func(name)
print_func('-------- SELECT FROM dictData WHERE hobbies[*].name == "reading"')
for name in select().from_(dictData).where(c('hobbies[*].name') == "reading"):
print_func(name)
print_func('-------- SELECT FROM dictData WHERE hobbies[*].period < 7')
for name in select().from_(dictData).where(c('hobbies[*].period') < 7):
print_func(name)
print_func('-------- SELECT FROM dictData WHERE asdkuh < 7')
for name in select().from_(dictData).where(c('asdkuh') < 7):
print_func(name)
print_func('-------- SELECT FROM dictData WHERE hobbies[*].period < 7.first()')
print_func(select().from_(dictData).where(c('hobbies[*].period') < 7).first())
print_func('-------- SELECT FROM dictData WHERE hobbies[*].period < 7.last()')
print_func(select().from_(dictData).where(c('hobbies[*].period') < 7).last())
print_func('-------- TEST IF age > 22')
print_func(test(c('age') > 22).match(matchTest))
print_func('-------- TEST IF age < 22')
print_func(test(c('age') < 22).match(matchTest))
print_func('-------- TEST IF name == "Olaf"')
print_func(test(c('name') == 'Olaf').match(matchTest))
print_func('-------- TEST IF name != "Olaf"')
print_func(test(c('name') != 'Olaf').match(matchTest))
print_func('-------- TEST IF name == "Olafs"')
print_func(test(c('name') == 'Olafs').match(matchTest))
print_func('-------- TEST IF name LIKE "Ol%"')
print_func(test(c('name').like('Ol%')).match(matchTest))
print_func('-------- TEST IF name LIKE "%laf"')
print_func(test(c('name').like('%laf')).match(matchTest))
print_func('-------- TEST IF name LIKE "O%f"')
print_func(test(c('name').like('O%f')).match(matchTest))
print_func('-------- TEST IF name LIKE "olaf"')
print_func(test(c('name').like('olaf')).match(matchTest))
print_func('-------- SELECT * FROM dictData WHERE job.category IN ("Business", "Social")')
for name in select('*').from_(dictData).where(c('job.category').in_('Business','Social')):
print_func(name)
print_func('-------- SELECT forname FROM dictData WHERE job.category IN ("Business", "Social")')
for name in select('forename').from_(dictData).where(c('job.category').in_('Business','Social')):
print_func(name)
print_func('-------- SELECT forname,tags FROM dictData')
for name in select('forename','tags').from_(dictData):
print_func(name)
print_func('-------- SELECT age FROM dictData WHERE forname="Marc" AND (surname == "Andrew" OR age < size).collect_fieldnames')
print_func(select('age').from_(dictData).where(and_(c('forename') == 'Marc',or_(c('surname') == 'Andrew'),c('age') < c('size'))).collect_fieldnames())
print_func('-------- SELECT age FROM dictData WHERE forname="Marc" AND (surname == "Andrew" OR age < size).where_fieldnames')
print_func(select('age').from_(dictData).where(and_(c('forename') == 'Marc',or_(c('surname') == 'Andrew'),c('age') < c('size'))).where_fieldnames())
print_func('-------- SELECT age FROM dictData WHERE forname="Marc" AND (surname == "Andrew" OR age < size) GROUP BY tags[0],tags[1]).collect_fieldnames')
print_func(select('age').from_(dictData).where(
and_(c('forename') == 'Marc',
or_(c('surname') == 'Andrew'),c('age') < c('size')
)).group_by('tags[0]','tags[1]').collect_fieldnames())
|
mtils/sqliter
|
examples/testdata.py
|
Python
|
mit
| 9,753 | 0.022147 |
# Copyright (C) 2009, Hyves (Startphone Ltd.)
#
# This module is part of the Concurrence Framework and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
from concurrence.timer import Timeout
from concurrence.database.mysql import ProxyProtocol, PacketReader, PACKET_READ_RESULT, CLIENT_STATES, SERVER_STATES
class Proxy(object):
#errors
EOF_READ = -1
EOF_WRITE = -2
#direction
CLIENT_TO_SERVER = 1
SERVER_TO_CLIENT = 2
def __init__(self, clientStream, serverStream, buffer, initState):
self.clientStream = clientStream
self.serverStream = serverStream
self.readStream = self.clientStream
self.writeStream = self.serverStream
self.direction = self.CLIENT_TO_SERVER
self.protocol = ProxyProtocol(initState)
self.reader = PacketReader()
self.buffer = buffer
self.remaining = 0
def close(self):
self.clientStream = None
self.serverStream = None
self.readStream = None
self.writeStream = None
self.protocol = None
self.reader = None
self.buffer = None
def reset(self, state):
self.protocol.reset(state)
def readFromStream(self):
#read some data from stream into buffer
if self.remaining:
#some leftover partially read packet from previous read, put it in front of buffer
self.buffer.limit = self.buffer.position + self.remaining
self.buffer.compact()
else:
#normal clear, position = 0, limit = capacity
self.buffer.clear()
#read data from socket
return self.readStream.read(self.buffer, Timeout.current())
def writeToStream(self):
#forward data to receiving socket
self.buffer.flip()
while self.buffer.remaining:
if not self.writeStream.write(self.buffer, Timeout.current()):
return False
return True
def next(self, readResult, newState, prevState):
return 0
def cycle(self, readProtocol):
if not self.readFromStream():
return self.EOF_READ
#inspect data read according to protocol
n = 0
self.buffer.flip()
while True:
readResult, newState, prevState = readProtocol(self.reader, self.buffer)
#make note of any remaining data (half read packets),
# we use buffer.compact to put remainder in front next time around
self.remaining = self.buffer.remaining
#take action depending on state transition
n = self.next(readResult, newState, prevState)
if n != 0:
break
if not (readResult & PACKET_READ_RESULT.MORE):
break
if n == 0:
#write data trough to write stream
if not self.writeToStream():
return self.EOF_WRITE
return n
def run(self):
while True:
state = self.protocol.state
if state in SERVER_STATES:
self.direction = self.SERVER_TO_CLIENT
self.readStream = self.serverStream
self.writeStream = self.clientStream
n = self.cycle(self.protocol.readServer)
elif state in CLIENT_STATES:
self.direction = self.CLIENT_TO_SERVER
self.readStream = self.clientStream
self.writeStream = self.serverStream
n = self.cycle(self.protocol.readClient)
else:
assert False, "Unknown state %s" % state
if n < 0:
return n
|
concurrence/concurrence
|
lib/concurrence/database/mysql/proxy.py
|
Python
|
bsd-3-clause
| 3,790 | 0.007916 |
"""
Setup/build script for MasterChess
For usage info, see readme.md
"""
import os, sys, subprocess
from distutils.dir_util import copy_tree
from setuptools import setup
from MasterChessGUI import __description__, __copyright__, __version__
def get_folder(path):
if isinstance(path, list):
return [get_folder(i) for i in path]
else:
return (path, [os.path.join(path, i) for i in os.listdir(path) if i[:1] != "." and os.path.isfile(os.path.join(path, i))])
DATA_FILES = [get_folder("resources")]
DATA_FILES_MAC = ["QuickLook.py"]
DATA_MODULE_PACKAGES = ["MasterChess"]
PY2EXE_BUNDLE = False
options = {
"name": "MasterChess",
"version": __version__,
"description": __description__,
"author": "Jake Hartz",
"author_email": "jhartz@outlook.com",
"license": "GPL",
"url": "http://jhartz.github.io/masterchess/"
}
if sys.platform == "darwin" and "py2app" in sys.argv:
options.update({
"setup_requires": ["py2app"],
"app": ["MasterChessGUI.py"],
"data_files": DATA_FILES + DATA_FILES_MAC,
"options": {
"py2app": {
"argv_emulation": True,
"iconfile": "resources/Chess.icns",
"plist": {
"CFBundleIdentifier": "com.github.jhartz.masterchess",
"CFBundleGetInfoString": __description__,
"NSHumanReadableCopyright": __copyright__,
"UTExportedTypeDeclarations": [
{
"UTTypeIdentifier": "com.github.jhartz.masterchess.mcdb",
"UTTypeDescription": "MasterChess database",
#"UTTypeIconFile": "Chess.icns",
"UTTypeConformsTo": [
"public.data"
],
"UTTypeTagSpecification": {
"public.filename-extension": "mcdb"
}
}
],
"CFBundleDocumentTypes": [
{
#"CFBundleTypeExtensions": [
# "mcdb"
#],
"CFBundleTypeIconFile": "Chess.icns",
#"CFBundleTypeName": "MasterChess database",
"CFBundleTypeName": "MasterChess database",
"LSItemContentTypes": [
"com.github.jhartz.masterchess.mcdb"
],
"CFBundleTypeRole": "Editor",
"LSHandlerRank": "Owner"
}
]
}
}
}
})
elif sys.platform == "win32" and "py2exe" in sys.argv:
import py2exe
options.update({
"setup_requires": ["py2exe"],
"data_files": DATA_FILES,
"windows": [
{
"script": "MasterChessGUI.py",
"icon_resources": [(1, "resources/Chess.ico")],
"other_resources": [(u"VERSIONTAG", 1, "MasterChess " + __version__)] # TODO: Test this!!
}
]
})
if PY2EXE_BUNDLE:
options.update({
"options": {
"py2exe": {
"bundle_files": 1
}
},
"zipfile": None
})
else:
options.update({
"scripts": ["MasterChessGUI.py"],
"packages": DATA_MODULE_PACKAGES,
"data_files": DATA_FILES,
"install_requires": ["wx"]
})
setup(**options)
if sys.platform == "darwin" and "py2app" in sys.argv:
# If we have a compiled MC-QuickLook or MC-Spotlight, include that
if os.path.isdir(os.path.join("dist", "MasterChess.app", "Contents")):
# QuickLook
loc = os.path.join("Mac components", "MC-QuickLook", "Build", "Release", "MC-QuickLook.qlgenerator")
if not os.path.exists(loc):
# Try debug version
loc = os.path.join("Mac components", "MC-QuickLook", "Build", "Debug", "MC-QuickLook.qlgenerator")
if os.path.exists(loc):
print ""
print "Copying MC-QuickLook to app bundle"
copy_tree(loc, os.path.join("dist", "MasterChess.app", "Contents", "Library", "QuickLook", os.path.basename(loc)))
print "Reloading quicklookd"
try:
subprocess.call(["qlmanage", "-r"])
subprocess.call(["qlmanage", "-r", "cache"])
except OSError:
print "Error calling qlmanage (manually call `qlmanage -r` and `qlmanage -r cache` to reload quicklookd)"
# Spotlight
loc = os.path.join("Mac components", "MC-Spotlight", "Build", "Release", "MC-Spotlight.mdimporter")
if not os.path.exists(loc):
# Try debug version
loc = os.path.join("Mac components", "MC-Spotlight", "Build", "Debug", "MC-Spotlight.mdimporter")
if os.path.exists(loc):
print ""
print "Copying MC-Spotlight to app bundle"
copy_tree(loc, os.path.join("dist", "MasterChess.app", "Contents", "Library", "Spotlight", os.path.basename(loc)))
|
jhartz/masterchess
|
setup.py
|
Python
|
gpl-3.0
| 5,318 | 0.003197 |
# -*- coding: utf-8 -*-
# © 2011 Raphaël Valyi, Renato Lima, Guewen Baconnier, Sodexis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, models, fields
class ExceptionRule(models.Model):
_inherit = 'exception.rule'
rule_group = fields.Selection(
selection_add=[('sale', 'Sale')],
)
model = fields.Selection(
selection_add=[
('sale.order', 'Sale order'),
('sale.order.line', 'Sale order line'),
])
class SaleOrder(models.Model):
_inherit = ['sale.order', 'base.exception']
_name = 'sale.order'
_order = 'main_exception_id asc, date_order desc, name desc'
rule_group = fields.Selection(
selection_add=[('sale', 'Sale')],
default='sale',
)
@api.model
def test_all_draft_orders(self):
order_set = self.search([('state', '=', 'draft')])
order_set.test_exceptions()
return True
@api.constrains('ignore_exception', 'order_line', 'state')
def sale_check_exception(self):
orders = self.filtered(lambda s: s.state == 'sale')
if orders:
orders._check_exception()
@api.onchange('order_line')
def onchange_ignore_exception(self):
if self.state == 'sale':
self.ignore_exception = False
@api.multi
def action_confirm(self):
if self.detect_exceptions():
return self._popup_exceptions()
else:
return super(SaleOrder, self).action_confirm()
@api.multi
def action_draft(self):
res = super(SaleOrder, self).action_draft()
orders = self.filtered(lambda s: s.ignore_exception)
orders.write({
'ignore_exception': False,
})
return res
def _sale_get_lines(self):
self.ensure_one()
return self.order_line
@api.model
def _get_popup_action(self):
action = self.env.ref('sale_exception.action_sale_exception_confirm')
return action
|
kittiu/sale-workflow
|
sale_exception/models/sale.py
|
Python
|
agpl-3.0
| 2,008 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Everything related to Ubuntu Unity integration (quicklist..)
See the MPRIS plugin for sound menu integration.
"""
import gi
from quodlibet import _
from quodlibet.util import gi_require_versions
is_unity = True
try:
gi.require_version("Dbusmenu", "0.4")
from gi.repository import Dbusmenu
except (ValueError, ImportError):
is_unity = False
try:
gi_require_versions("Unity", ["7.0", "6.0", "5.0"])
from gi.repository import Unity
except (ValueError, ImportError):
is_unity = False
def init(desktop_id, player):
"""Set up unity integration.
* desktop_id: e.g. 'quodlibet.desktop'
* player: BasePlayer()
http://developer.ubuntu.com/api/devel/ubuntu-12.04/c/Unity-5.0.html
http://developer.ubuntu.com/api/devel/ubuntu-13.10/c/Unity-7.0.html
"""
if not is_unity:
return
launcher = Unity.LauncherEntry.get_for_desktop_id(desktop_id)
main = Dbusmenu.Menuitem()
play_pause = Dbusmenu.Menuitem()
play_pause.property_set(Dbusmenu.MENUITEM_PROP_LABEL,
_("Play/Pause"))
play_pause.property_set_bool(Dbusmenu.MENUITEM_PROP_VISIBLE, True)
main.child_append(play_pause)
def play_pause_cb(item, timestamp):
player.playpause()
play_pause.connect("item-activated", play_pause_cb)
next_ = Dbusmenu.Menuitem()
next_.property_set(Dbusmenu.MENUITEM_PROP_LABEL, _("Next"))
next_.property_set_bool(Dbusmenu.MENUITEM_PROP_VISIBLE, True)
main.child_append(next_)
def next_cb(item, timestamp):
player.next()
next_.connect("item-activated", next_cb)
prev = Dbusmenu.Menuitem()
prev.property_set(Dbusmenu.MENUITEM_PROP_LABEL, _("Previous"))
prev.property_set_bool(Dbusmenu.MENUITEM_PROP_VISIBLE, True)
main.child_append(prev)
def prev_cb(item, timestamp):
player.previous()
prev.connect("item-activated", prev_cb)
launcher.set_property("quicklist", main)
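# Illustrative call (a sketch; `player` stands in for a BasePlayer instance):
#     init("quodlibet.desktop", player)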
|
elbeardmorez/quodlibet
|
quodlibet/quodlibet/qltk/unity.py
|
Python
|
gpl-2.0
| 2,257 | 0 |
#Problem J4: Wait Time
inputarray = []
for i in range(int(input())):
    inputarray.append(raw_input().split(" "))
#friendarray rows: [number, total wait, time of last message]
friendarray = []
ctime = 0
for i in range(len(inputarray)):
    command = inputarray[i][0].lower()
    if command == "c":
        ctime += int(inputarray[i][1])
    if command == "r":
        friendlist = [friendarray[j][0] for j in range(len(friendarray))]
        if inputarray[i][1] not in friendlist:
            friendarray.append([inputarray[i][1], 0, ctime])
        else:
            location = friendlist.index(inputarray[i][1])
            number, total, sent = friendarray[location]
            friendarray[location] = [number, total + (ctime - sent), ctime]
|
jacksarick/My-Code
|
Events/CCC/2015/J4.py
|
Python
|
mit
| 601 | 0.021631 |
# pytgasu - Automating creation of Telegram sticker packs
# Copyright (C) 2017 Lemon Lam <almk@rmntn.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ..constants import *
__all__ = ['parse']
def parse(def_file):
"""
Parse specified sticker set definition file.
:param def_file: A Path-like object to .ssd file
:return: A tuple of set_title, set_short_name, [(image_fullpath, emojis)] representing the set
None on error
"""
import re
_sticker_line_pattern = re.compile(REGEX_MATCHING_EMOJI)
try:
with open(def_file, encoding='utf-8', errors='strict') as f:
lines = [l.rstrip() for l in f] # strip line breaks
except ValueError:
print(ERROR_DEFFILE_ENCODING % def_file)
else:
set_title = lines[0]
set_short_name = lines[1]
stickers = list() # there may be a 120 stickers per set hard limit, idk
for sticker_line in lines[2:]:
if not _sticker_line_pattern.fullmatch(sticker_line):
print(ERROR_INCORRECT_STICKER_LINE % sticker_line)
continue
image_filename, emoji_seq = sticker_line.split('/')
image_path = def_file.with_name(image_filename)
if not _validate_image(image_path):
continue
if not emoji_seq:
emoji_seq = DEFAULT_EMOJI
stickers.append((image_path, emoji_seq))
if not stickers:
print(ERROR_NO_STICKER_IN_SET)
return None
return set_title, set_short_name, stickers
def _validate_image(image_path):
"""
Check file existence, image is correct PNG,
dimension is 512x? or ?x512 and file size < 512KB
:param image_path: The image for a sticker
:return: Boolean if all limits on stickers met
"""
from PIL.Image import open as pil_open
try:
with pil_open(image_path) as image:
criteria = [
max(image.size) == 512,
image.format == 'PNG',
image_path.stat().st_size < 512 * 1000
]
            return all(criteria)
except IOError:
print(ERROR_INVAILD_STICKER_IMAGE % image_path.name)
return False # not a picture or just 404
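# Illustrative usage (the .ssd path is hypothetical):
#     from pathlib import Path
#     parsed = parse(Path('sets/my_set.ssd'))
#     if parsed:
#         set_title, set_short_name, stickers = parsed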
|
alemonmk/pytgasu
|
pytgasu/upload/defparse.py
|
Python
|
gpl-3.0
| 2,873 | 0.001044 |
import pandas as pd
from pandas import DataFrame
df = pd.read_csv('sp500_ohlc.csv', index_col = 'Date', parse_dates=True)
# note: columns can be accessed as attributes (df.High) as well as df['High']
df['H-L'] = df.High - df.Low
print df.head()
df['100MA'] = pd.rolling_mean(df['Close'], 100)
# must do a slice, since there will be no value for 100ma until 100 points
print df[200:210]
df['Difference'] = df['Close'].diff()
print df.head()
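# Note: pd.rolling_mean() was deprecated and later removed; in modern pandas the
# equivalent call (illustrative) is df['Close'].rolling(window=100).mean().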
|
PythonProgramming/Pandas-Basics-with-2.7
|
pandas 5 - Column Operations (Basic mathematics, moving averages).py
|
Python
|
mit
| 416 | 0.009615 |
import weakref
from sys import getrefcount
import string
import talkshowConfig
style = talkshowConfig.config().parser.style
from talkshowLogger import logger
debug = logger.debug
info = logger.info
warn = logger.warn
#?? pyglet.options['audio'] = ('directsound', 'openal', 'silent')
from pyglet.gl import *
from pyglet.media import *
from rect import *
from animated_property import AnimatedProperty
class Visible(object):
instanceCount = 0
def __init__(self, p, name, x=0, y=0, w=10, h=10):
self.x = x
self.y = y
self.w = w
self.h = h
self.name = name
self.__parent__ = None
self.setParent(p)
self.__class__.instanceCount += 1
def __del__(self):
self.__class__.instanceCount -= 1
def getParent(self):
if self.__parent__ == None: return None
return self.__parent__()
def setParent(self, newParent):
if self.__parent__ != None:
if self.__parent__() == newParent:
return
else:
if newParent == None:
return
if newParent != None:
newParent.__addChild__(self)
if self.__parent__ != None:
self.__parent__().__removeChild__(self)
if newParent != None:
self.__parent__ = weakref.ref(newParent)
else:
self.__parent__ = None
parent = property(getParent, setParent)
def contains(self, x, y):
if x >= self.x and y >= self.y and x < self.x + self.w and y < self.y + self.h:
return True
else:
return False
def _getPosition(self): return (self.x, self.y)
def _setPosition(self, value): self.x, self.y = value
position = property(_getPosition, _setPosition)
def _getExtent(self): return (self.w, self.h)
def _setExtent(self, value): self.w, self.h = value
extent = property(_getExtent, _setExtent)
def draw(self):
pass
def animate(self, propname, startvalue, endvalue, when, duration = 0, flags = 0):
if propname != "color":
AnimatedProperty.animate(self, propname, startvalue, endvalue, when, duration, flags)
else:
self._color_fade_value1 = splitColorChannels(startvalue)
self._color_fade_value2 = splitColorChannels(endvalue)
self._color_fade = 0.0
AnimatedProperty.animate(self, "_color_fade", 0.0, 1.0, when, duration, flags)
def splitColorChannels(c):
return (
string.atoi(c[1:3], 16) / 255.0,
string.atoi(c[3:5], 16) / 255.0,
string.atoi(c[5:7], 16) / 255.0
)
def mergeColorChannels(r,g,b):
return "#%2.2X%2.2X%2.2X" % (r*255,g*255,b*255)
class ColoredVisible(Visible):
instanceCount = 0
def __init__(self, p, name, x=0, y=0, w=0, h=0, color="#00ff00", opacity=1.0):
Visible.__init__(self, p, name, x, y, w, h)
self.color = color
self.opacity = opacity
def _setColor(self, c):
self.r, self.g, self.b = splitColorChannels(c)
def _getColor(self):
return mergeColorChannels(self.r, self.g, self.b)
color = property(_getColor, _setColor)
def _setCOLORFADE(self, cf):
self._COLORFADE= cf
r1, g1, b1 = self._color_fade_value1
r2, g2, b2 = self._color_fade_value2
self.r = r1 + (r2 - r1) * cf
self.g = g1 + (g2 - g1) * cf
self.b = b1 + (b2 - b1) * cf
def _getCOLORFADE(self):
return self._COLORFADE
_color_fade = property(_getCOLORFADE, _setCOLORFADE)
class Rect(ColoredVisible):
instanceCount = 0
def __init__(self, p, name, x=0, y=0, w=0, h=0, color="#00ff00", opacity=1.0):
ColoredVisible.__init__(self, p, name, x, y, w, h, color, opacity)
def draw(self):
#glColor3f(self.r,self.g,self.b)
pyglet.graphics.draw_indexed(4, pyglet.gl.GL_TRIANGLES,
[0, 1, 2, 0, 2, 3],
('v2f', (float(self.x), float(self.y),
float(self.x+self.w), float(self.y),
float(self.x+self.w), float(self.y+self.h),
float(self.x), float(self.y+self.h))),
('c4f', (self.r, self.g, self.b, self.opacity)*4)
)
class Screen(ColoredVisible):
def __init__(self, name, device = "", w = 800, h = 600, color=style.page.background_color):
try:
fullscreen = bool(talkshowConfig.fullScreen)
except:
fullscreen = False
self.window = pyglet.window.Window(caption=name, fullscreen=fullscreen, resizable=True)
if not fullscreen:
self.window.set_size(w, h)
##?? self.window.push_handlers(pyglet.window.event.WindowEventLogger())
ColoredVisible.__init__(self, None, name, 0, 0, self.w, self.h, color, opacity=1.0)
self.__children__ = []
self.event_handler = None
@self.window.event
def on_resize(width, height):
self.extent = width, height
glViewport(0, 0, width, height)
glMatrixMode(gl.GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(0, width, 0, height);
glScalef(1, -1, 1);
glTranslatef(0, -height, 0);
glMatrixMode(gl.GL_MODELVIEW)
h = self.getHandlerMethod("onResize")
if h: h(width, height)
self.window.on_resize = on_resize
@self.window.event
def on_draw():
self.window.clear()
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_BLEND)
for x in self.__children__:
x.draw()
glDisable(GL_BLEND)
buttonLUT = {
pyglet.window.mouse.LEFT: "left",
pyglet.window.mouse.MIDDLE: "middle",
pyglet.window.mouse.RIGHT: "right",
}
@self.window.event
def on_mouse_motion(x, y, dx, dy):
y = self.h - y
h = self.getHandlerMethod("onMouseMove")
if h: h(x, y)
@self.window.event
def on_mouse_press(x, y, button, modifiers):
y = self.h - y
h = self.getHandlerMethod("onMouseButtonDown")
if h: h(buttonLUT[button], x, y)
@self.window.event
def on_mouse_release(x, y, button, modifiers):
y = self.h - y
h = self.getHandlerMethod("onMouseButtonUp")
if h: h(buttonLUT[button], x, y)
@self.window.event
def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
# we use the same handler as for mouse move
y = self.h - y
h = self.getHandlerMethod("onMouseMove")
if h: h(x, y)
#@self.window.event
#def on_mouse_scroll(x, y, scroll_x, scroll_y):
def getHandlerMethod(self, name):
if self.event_handler != None:
if hasattr(self.event_handler, name):
return getattr(self.event_handler, name)
return None
def __addChild__(self, c):
if not c in self.__children__:
self.__children__.append(c)
def __removeChild__(self, c):
self.__children__.remove(c)
def __iter__(self):
return self.__children__.__iter__()
def __len__(self):
return len(self.__children__)
def getW(self): return self.window.width
def setW(self, w):
if w != self.window.width:
self.window.width = w
w = property(getW, setW)
def getH(self): return self.window.height
def setH(self, h):
if h != self.window.height:
self.window.height = h
h = property(getH, setH)
class Image(ColoredVisible):
def __init__(self, p, name, path, x=0, y=0, w=None, h=None, color="#ffffff", opacity=1.0):
debug(path)
if path:
image = pyglet.image.load(path)
self.sprite = pyglet.sprite.Sprite(image)
if w == None: w = self.sprite.width
if h == None: h = self.sprite.height
ColoredVisible.__init__(self, p, name, x, y, w, h, color, opacity)
def _colorComponentGetter(i):
def getter(self):
self.sprite.color[i]/255.0
return getter
def _colorComponentSetter(i):
def setter(self, x):
components = list(self.sprite.color)
components[i] = int(x * 255)
self.sprite.color = components
return setter
r = property(_colorComponentGetter(0), _colorComponentSetter(0))
g = property(_colorComponentGetter(1), _colorComponentSetter(1))
b = property(_colorComponentGetter(2), _colorComponentSetter(2))
def _setOpacity(self, x): self.sprite.opacity = int(x*255.0)
def _getOpacity(self): return self.sprite.opacity/255.0
opacity = property(_getOpacity, _setOpacity)
def draw(self):
glMatrixMode(gl.GL_MODELVIEW)
glPushMatrix()
glTranslatef(self.x, self.y+self.h, 0);
glScalef(float(self.w) / float(self.sprite.width), -float(self.h) / float(self.sprite.height), 1);
self.sprite.draw()
glPopMatrix()
class Text(ColoredVisible):
def __init__(self, p, name, x=0, y=0, h=0, color="#00ff00", opacity=1.0, text=None, font=None):
self.label = pyglet.text.Label(
text if text != None else name,
font_name=font if font != None else "Helvetica",
font_size=h,
anchor_y = 'center',
x=0, y=0)
ColoredVisible.__init__(self, p, name, x, y, self.label.content_width, h, color, opacity)
def _colorComponentGetter(i):
def getter(self):
self.label.color[i]/255.0
return getter
def _colorComponentSetter(i):
def setter(self, x):
components = list(self.label.color)
components[i] = int(x * 255)
self.label.color = components
return setter
r = property(_colorComponentGetter(0), _colorComponentSetter(0))
g = property(_colorComponentGetter(1), _colorComponentSetter(1))
b = property(_colorComponentGetter(2), _colorComponentSetter(2))
opacity = property(_colorComponentGetter(3), _colorComponentSetter(3))
def _setText(self, t): self.label.text = t
def _getText(self): return self.label.text
text = property(_getText, _setText)
def _setFont(self, x): self.label.font_name = x
def _getFont(self): return self.label.font_name
font = property(_getFont, _setFont)
def draw(self):
glMatrixMode(gl.GL_MODELVIEW)
glPushMatrix()
glTranslatef(self.x, self.y + self.h, 0);
glScalef(float(self.w) / float(self.label.content_width), -float(self.h) / float(self.label.font_size), 1);
self.label.draw()
glPopMatrix()
class ClippingContainer(Visible):
instanceCount = 0
def __init__(self, p, name, x=0, y=0, w=10, h=10, ox=0, oy=0, clip=True):
Visible.__init__(self, p, name, x, y, w, h)
self.ox = ox
self.oy = oy
self.clip = clip
def _getOffset(self): return (self.ox, self.oy)
def _setOffset(self, value): self.ox, self.oy = value
offset = property(_getOffset, _setOffset)
def draw(self):
if self.clip:
self.drawClipped()
else:
self.drawUnclipped()
def drawUnclipped(self):
pass
def drawClipped(self):
# get screen coordinates of lower left corner
x = self.x
y = self.y + self.h
model_view_matrix = (GLdouble * 16)()
projection_matrix = (GLdouble * 16)()
viewport = (GLint * 4)()
glGetDoublev(GL_MODELVIEW_MATRIX, model_view_matrix)
glGetDoublev(GL_PROJECTION_MATRIX, projection_matrix)
glGetIntegerv(GL_VIEWPORT, viewport)
s_x, s_y, s_z = GLdouble(), GLdouble(), GLdouble()
gluProject(x, y, 0.0, model_view_matrix, projection_matrix, viewport, s_x, s_y, s_z)
scissor_was_enabled = glIsEnabled(GL_SCISSOR_TEST)
old_scissor = (GLint*4)();
r = ((int(s_x.value), int(s_y.value)), self.extent)
if scissor_was_enabled:
glGetIntegerv(GL_SCISSOR_BOX, old_scissor);
osr = (old_scissor[0:2], old_scissor[2:4])
r = clip_rect(r, osr)
glScissor(*[int(x) for x in flatten_rect(r)])
glEnable(GL_SCISSOR_TEST)
self.drawUnclipped()
if not scissor_was_enabled:
glDisable(GL_SCISSOR_TEST)
else:
glScissor(old_scissor[0], old_scissor[1], old_scissor[2], old_scissor[3])
class Group(ClippingContainer):
instanceCount = 0
def __init__(self, p, name, x=0, y=0, w=10, h=10, ox=0, oy=0, clipChildren=True):
self._W, self._H = w, h
ClippingContainer.__init__(self, p, name, x, y, w, h*2 if hasattr(self,'fg') else h, ox, oy, clipChildren)
self.__children__ = []
def __addChild__(self, c):
if not c in self.__children__:
self.__children__.append(c)
def __removeChild__(self, c):
self.__children__.remove(c)
def __iter__(self):
return self.__children__.__iter__()
def __len__(self):
return len(self.__children__)
def drawUnclipped(self):
glMatrixMode(gl.GL_MODELVIEW)
glPushMatrix()
glTranslatef(self.x - self.ox, self.y - self.oy, 0);
for x in self:
x.draw()
glPopMatrix()
## W TODO: this isn't nice stuff ...
def _getW(self): return self._W
def _setW(self, value):
if self._W == value: return
self._W = value
self.doLayout(self._W, self._H)
w = property(_getW, _setW)
## H
def _getH(self): return self._H
    def _setH(self, value):
        if self._H == value: return
        self._H = value
        self.doLayout(self._W, self._H)
h = property(_getH, _setH)
## EXTENT
def _getExtent(self): return (self._W, self._H)
def _setExtent(self, value):
if (self._W, self._H) == value: return
self._W, self._H = value
self.doLayout(self._W, self._H)
extent = property(_getExtent, _setExtent)
def doLayout(self, w, h):
pass
class Viewport(ClippingContainer):
instanceCount = 0
def __init__(self, p, name, x=0, y=0, w=10, h=10, ox=0, oy=0, world = None):
ClippingContainer.__init__(self, p, name, x, y, w, h, ox, oy, True)
self.world = world
def drawUnclipped(self):
if not self.world: return
glMatrixMode(gl.GL_MODELVIEW)
glPushMatrix()
glTranslatef(self.x - self.ox, self.y - self.oy, 0);
self.world.draw()
glPopMatrix()
## TODO: refactor properties speed, t, progress, duration into common base class
class Video(Image):
def __init__(self, p, name, path, x=0, y=0, w=None, h=None, color="#ffffff", opacity=1.0):
self.player = Player()
self.source = load(path)
self.player.queue(self.source)
image = self.player.texture
self.sprite = pyglet.sprite.Sprite(image)
Image.__init__(self, p, name, None, x, y, w, h, color, opacity)
def __del__(self):
self.speed=0
def getT(self): return self.player.time
def setT(self, x):
playing = self.player.playing
self.player.seek(x)
if playing: self.player.play()
t = property(getT, setT)
def getDuration(self):
return self.source.duration
duration = property(getDuration, None)
def getProgress(self): return self.player.time/self.source.duration
def setProgress(self, p):
playing = self.player.playing
self.player.seek(p*self.source.duration)
if playing: self.player.play()
progress = property(getProgress, setProgress)
def getSpeed(self):
if self.player.playing:
return self.player.pitch
else:
return 0.0
def setSpeed(self, s):
if s == 0.0:
if self.player.playing:
self.player.pause()
elif not self.player.playing:
self.player.pitch = s
self.player.play()
speed = property(getSpeed, setSpeed)
class Sound(object):
_globalVolume = 1.0
_allSounds = []
@staticmethod
def setGlobalVolume(v):
Sound._globalVolume = v
for w in Sound._allSounds:
s = w()
if s != None:
s._setAbsoluteVolume(v * s._volume)
def __init__(self, device, path):
self.player = Player()
self.source = load(path)
self.player.queue(self.source)
self.id = len(self._allSounds)
self._allSounds.append(weakref.ref(self))
self.volume = 1.0
def __del__(self):
self.speed=0
del self._allSounds[self.id]
def getT(self): return self.player.time
def setT(self, x): self.player.seek(x)
t = property(getT, setT)
def getDuration(self):
return self.source.duration
duration = property(getDuration, None)
def getProgress(self): return self.player.time/self.source.duration
def setProgress(self, p): self.player.seek(p*self.source.duration)
progress = property(getProgress, setProgress)
def getSpeed(self):
if self.player.playing:
return self.player.pitch
else:
return 0.0
def setSpeed(self, s):
if s == 0.0:
if self.player.playing:
self.player.pause()
elif not self.player.playing:
self.player.pitch = s
self.player.play()
speed = property(getSpeed, setSpeed)
def _setAbsoluteVolume(self, v):
self.player.volume = v
def setVolume(self, v):
self._volume = v
self._setAbsoluteVolume(self._volume * self._globalVolume)
def getVolume(self):
return self._volume
volume = property(getVolume, setVolume)
##
# Regression Tests
##
import unittest
from test import test_support
class TestVisuals(unittest.TestCase):
# Only use setUp() and tearDown() if necessary
def setUp(self):
pass
def tearDown(self):
pass
def test_Group(self):
v = Visible(None, "test1")
assert(Visible.instanceCount == 1)
assert(v.parent == None)
assert(getrefcount(v) == 2)
del v
assert(Visible.instanceCount == 0)
v = Visible(None, "test1")
assert(Visible.instanceCount == 1)
assert(v.parent == None)
assert(getrefcount(v) == 2)
g = Group(None, "group")
assert(Group.instanceCount == 1)
assert(Visible.instanceCount == 1)
assert(g.parent == None)
assert(len(g)==0)
v.parent = g
assert(getrefcount(v) == 3)
assert(g.parent == None)
assert(v.parent == g)
assert(len(g)==1)
for x in g: assert(x==v)
del v
for x in g: assert(getrefcount(x) == 3)
assert(Visible.instanceCount == 1)
for x in g: assert(x.parent==g)
for x in g: x.parent = None
del x
assert(len(g)==0)
assert(Visible.instanceCount == 0)
g.ox=10
g.oy=20
assert(g.offset==(10,20))
g.offset = (20,40)
assert(g.ox==20)
assert(g.oy==40)
def test_color_properties(self):
r = Rect(None, "Rect")
r.color = "#ff7f00"
assert(r.r==1.0)
assert(r.g==0x7f/255.0)
assert(r.b==0)
r.b = 10/255.0
assert(r.color == "#FF7F0A")
def test_basic_properties(self):
r = Rect(None, "Rect")
r.x=10
r.y=20
assert(r.position==(10,20))
r.position = (20,40)
assert(r.x==20)
assert(r.y==40)
r.w=10
r.h=20
assert(r.extent==(10,20))
r.extent = (20,40)
assert(r.w==20)
assert(r.h==40)
assert(r.contains(20,40)==True)
assert(r.contains(19,40)==False)
assert(r.contains(20,39)==False)
assert(r.contains(20+20,40+40)==False)
assert(r.contains(20+19,40+39)==True)
def test_main():
test_support.run_unittest(
TestVisuals,
#... list other tests ...
)
if __name__ == "__main__":
# run some tests on Node hierarchy
test_main()
|
regular/talkshow
|
wrappers.py
|
Python
|
gpl-3.0
| 20,806 | 0.01341 |
from __future__ import division
import json
import urllib
from flask import request
from flask import render_template
from flask import abort
import jinja2
import rigor.config
import rigorwebapp.plugin
import rigorwebapp.utils
from rigorwebapp.utils import debug_detail, debug_main, debug_error
import rigorwebapp.auth
kPluginName = 'percept_search_page'
AuthClient = rigorwebapp.auth.DefaultAuthClient()
class PerceptSearchPagePlugin(rigorwebapp.plugin.BasePlugin):
def __init__(self, backend, rigor_config):
self.rigor_config = rigor_config
self.backend = backend
try:
self.thumbnail_size_max = int(rigor_config.get("webapp","thumbnail_size_max"))
except rigor.config.NoValueError:
self.thumbnail_size_max = 128
try:
self.results_per_page = int(rigor_config.get("webapp","percept_search_page_results_per_page"))
except rigor.config.NoValueError:
self.results_per_page = 30
def add_routes(self, app, backend, plugin_instances):
@app.route('/db/<db_name>/perceptsearch')
@AuthClient.check_access_and_inject_user(self.rigor_config)
def percept_search_page(db_name, username=None):
if not db_name in backend.db_names():
abort(404)
# clean up search params
search_params = request.args.to_dict()
search_params['page'] = max(1, int(search_params.get('page', 1))) # human-readable page number starts at 1, not 0
for int_param in ['random_nth', 'random_out_of']:
if int_param in search_params:
search_params[int_param] = int(search_params[int_param])
param_whitelist = """
page
device_id
collection_id
hash
annotation_domain
annotation_model
annotation_property
percept_property
locator
random_nth
random_out_of
""".strip().split()
for key in list(search_params.keys()):
if not key in param_whitelist:
del search_params[key]
if search_params[key] == '':
del search_params[key]
search_results, total_count = backend.search_percepts(db_name=db_name, query=search_params, per_page=self.results_per_page, load_paths='tags')
page_state = {
'current_view': kPluginName,
'username': username,
kPluginName: dict(
db_name=db_name,
db_names=backend.db_names(),
search_results=search_results,
total_count=total_count,
per_page=self.results_per_page,
					num_pages=max(1, -(-total_count // self.results_per_page)),  # ceil(total_count / per_page)
search_params=search_params,
),
}
template_slots = rigorwebapp.plugin.TemplateSlots()
rigorwebapp.plugin.augment_request(plugin_instances, page_state, template_slots)
return render_template('standard_template.html', page_state=page_state, template_slots=template_slots)
def augment_page_state(self, page_state):
pass
def augment_template_slots(self, page_state, template_slots):
# add to navbar on all pages
# first, figure out db_name
try:
db_name = page_state[page_state['current_view']]['db_name']
except KeyError:
try:
db_name = self.rigor_config.get("webapp", "initial_db")
except rigor.config.NoValueError:
db_name='?'
navbar_url = '/db/{}/perceptsearch'.format(db_name)
template_slots.append('navbar_link', '<a href="{}">Percept Search</a>'.format(navbar_url))
# if this isn't our own page, stop here
if page_state['current_view'] != kPluginName:
return
template_slots.append('js_tail_path', '/static/plugins/percept_search_page/js/index.js')
template_slots.append('css_path', '/static/plugins/percept_search_page/css/index.css')
# build next/prev links for pagination navigation
prev_link = None
next_link = None
page = page_state[kPluginName]['search_params']['page']
prev_params = page_state[kPluginName]['search_params'].copy()
prev_params['page'] -= 1
next_params = page_state[kPluginName]['search_params'].copy()
next_params['page'] += 1
if prev_params['page'] >= 1:
prev_link = 'perceptsearch?' + urllib.urlencode(prev_params)
if next_params['page'] <= page_state[kPluginName]['num_pages']:
next_link = 'perceptsearch?' + urllib.urlencode(next_params)
template_slots.append('main_panel_pager_bar', dict(
prev_link=prev_link,
next_link=next_link,
num_results=page_state[kPluginName]['total_count'],
page_num=page,
num_pages=page_state[kPluginName]['num_pages']
))
thumb_grid_template = """
{% for percept in search_results %}
<div class="searchResult">
						<a href="/db/{{db_name}}/percept/{{percept.id}}">
							{% if percept.x_size and percept.y_size %}
<img class="searchResultImg" src="{{percept.img_url+'?max_size='}}{{thumbnail_size_max}}" width="{{thumbsize(percept.x_size, percept.y_size)[0]}}" height="{{thumbsize(percept.x_size, percept.y_size)[1]}}" />
{% else %}
<div class="missingImage" style="height:{{thumbnail_size_max}}px; width:{{thumbnail_size_max}}px; display: block;"></div>
{% endif %}
</a>
<div class="searchResultCaption">
{% for tag in percept.tags %}
<div class="tag"
style="background: hsl({{tag_to_hue(tag)}}, 25%, 50%)"
>
{{tag}}
</div>
{% endfor %}
</div>
</div>
{% endfor %}
"""
thumb_grid_template_context = dict(
thumbsize = lambda width,height,maxsize=self.thumbnail_size_max: self.backend.percept_image_scaled_size((width,height),int(maxsize)),
tag_to_hue = rigorwebapp.utils.hash_string_to_hue,
thumbnail_size_max = self.thumbnail_size_max,
**page_state[kPluginName]
)
template_slots.append('main_panel', jinja2.Template(thumb_grid_template).render(thumb_grid_template_context))
search_form_template = """
<div class="sidebarTitle">
Search
</div>
<form id="perceptSearchForm">
<div class="searchFormRow">
<div class="searchFormRowLabel">
Database
</div>
<select class="searchFormSelect" id="perceptSearchFormDbSelect">
{% for this_db_name in db_names %}
{% if this_db_name == db_name %}
<option value={{this_db_name}} selected>{{this_db_name}}</option>
{% else %}
<option value={{this_db_name}}>{{this_db_name}}</option>
{% endif %}
{% endfor %}
</select>
</div>
{% for facet in facets %}
<div class="searchFormRow">
<div class="searchFormRowLabel">{{facet.caption}}</div>
{% if facet.get('help_text') %}
<div class="searchFormRowHelp">{{facet.help_text}}</div>
{% endif %}
<input style="width:100%" type="text" id="{{facet.dom_id}}" value="{{facet.value}}"/>
</div>
{% endfor %}
<div class="searchFormRow">
<div class="searchFormRowLabel">Random subset</div>
<span class="searchFormRowHelp">The </span>
<input style="width:15%" type="text" id="perceptSearchFormRandomNth" value="{{random_nth_value}}"/>
<span class="searchFormRowHelp">th percept out of each</span>
<input style="width:15%" type="text" id="perceptSearchFormRandomOutOf" value="{{random_out_of_value}}"/>
</div>
<div class="searchFormButtonRow">
<input class="button" type="submit" value="Search"/>
</div>
</form>
"""
search_params = page_state[kPluginName]['search_params']
search_form_template_context = page_state[kPluginName].copy()
search_form_template_context['random_nth_value'] = search_params.get('random_nth', '')
search_form_template_context['random_out_of_value'] = search_params.get('random_out_of', '')
search_form_template_context['facets'] = [
dict(
dom_id='perceptSearchFormLocator',
value=search_params.get('locator', ''),
caption='Locator',
help_text='Use "*" as a wildcard.',
),
dict(
dom_id='perceptSearchFormCollectionId',
value=search_params.get('collection_id', ''),
caption='Collection ID',
),
dict(
dom_id='perceptSearchFormDeviceId',
value=search_params.get('device_id', ''),
caption='Device ID',
),
dict(
dom_id='perceptSearchFormHash',
value=search_params.get('hash', ''),
caption='Percept hash',
),
dict(
dom_id='perceptSearchFormAnnotationDomain',
value=search_params.get('annotation_domain', ''),
caption='Annotation domain',
),
dict(
dom_id='perceptSearchFormAnnotationModel',
value=search_params.get('annotation_model', ''),
caption='Annotation model',
help_text='Use "*" as a wildcard.',
),
dict(
dom_id='perceptSearchFormAnnotationProperty',
value=search_params.get('annotation_property', ''),
caption='Annotation property and/or value',
help_text='Format like "property", "=value", or "property=value". Combine using "AND" or "OR", but not both: "a=aaa OR b=bbb".',
),
dict(
dom_id='perceptSearchFormPerceptProperty',
value=search_params.get('percept_property', ''),
caption='Percept property and/or value',
help_text='Format like "property", "=value", or "property=value". Combine using "AND" or "OR", but not both: "a=aaa OR b=bbb".',
),
]
template_slots.append('sidebar_top', jinja2.Template(search_form_template).render(search_form_template_context))
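# Example wiring (a sketch; the Flask app setup around it is an assumption,
# not shown in this module):
#
#   plugin = PerceptSearchPagePlugin(backend, rigor_config)
#   plugin.add_routes(app, backend, plugin_instances=[plugin])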
|
blindsightcorp/rigor-webapp
|
plugins/percept_search_page/__init__.py
|
Python
|
bsd-2-clause
| 9,018 | 0.028055 |
#!/usr/bin/env python
from os import path
from collections import defaultdict
import math
root = path.dirname(path.dirname(path.dirname(__file__)))
result_dir = path.join(root, 'results')
def get_file_name(test):
test = '%s_result' % test
return path.join(result_dir, test)
def mean(l):
return float(sum(l))/len(l) if len(l) > 0 else float('nan')
def std_dev(l):
m = mean(l)
return math.sqrt(sum((x - m) ** 2 for x in l) / len(l))
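# Quick sanity check (illustrative values, not benchmark data):
#   mean([2, 4, 6]) == 4.0
#   std_dev([2, 4, 6]) == math.sqrt(8 / 3.0)  # population standard deviation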
def run_timing_overhead_ana():
test_name = 'timing_overhead'
file_name = get_file_name(test_name)
datas = []
with open(file_name) as f:
for l in f:
datas.append(int(l))
    datas = datas[:10000]
print "%s mean: %f" % (test_name, mean(datas))
print "%s std dev: %f" % (test_name, std_dev(datas))
def run_loop_overhead_ana():
test_name = 'loop_overhead'
file_name = get_file_name(test_name)
datas = []
with open(file_name) as f:
for l in f:
datas.append(float(l.split(' ')[0]))
    datas = datas[:10000]
print "%s mean: %f" % (test_name, mean(datas))
print "%s std dev: %f" % (test_name, std_dev(datas))
def run_proc_call_overhead_ana():
test_name = 'proc_call_overhead'
file_name = get_file_name(test_name)
datas = []
with open(file_name) as f:
for l in f:
if l.startswith('-'):
datas.append([])
continue
datas[-1].append(int(l.split(' ')[0]) * 1.0 / 10)
print "%s result:" % test_name
for i, data in enumerate(datas):
m = mean(data)
std = std_dev(data)
print "%f\t%f" % (m, std)
#print "%s %d mean: %f" % (test_name, i, mean(data))
#print "%s %d std dev: %f" % (test_name, i, std_dev(data))
def run_process_context_switch_ana():
test_name = 'process_context_switch'
file_name = get_file_name(test_name)
datas = []
with open(file_name) as f:
for l in f:
try:
datas.append(int(l.split(' ')[1]))
except:
pass
    datas = datas[:100]
print "%s mean: %f" % (test_name, mean(datas))
print "%s std dev: %f" % (test_name, std_dev(datas))
def run_thread_context_switch_ana():
test_name = 'thread_context_switch'
file_name = get_file_name(test_name)
datas = []
with open(file_name) as f:
for l in f:
datas.append(int(l.split(' ')[1]))
    datas = datas[:100]
print "%s mean: %f" % (test_name, mean(datas))
print "%s std dev: %f" % (test_name, std_dev(datas))
def run_mem_acc_ana():
test_name = 'mem_acc'
filename = get_file_name(test_name)
datas = defaultdict(lambda: defaultdict(list))
with open(filename) as f:
for l in f:
ll = l.split(' ')
step = int(ll[7])
offset = int(ll[1])
cycle = float(ll[3])
datas[step][offset].append(cycle)
results = {}
offsets = set()
for step, v in sorted(datas.items()):
result = []
for offset, cycles in sorted(v.items()):
offsets.add(offset)
m = mean(cycles)
result.append(m)
        results[step] = result
print "mem access time result"
fl = "step/offset\t%s" % "\t".join(str(i) for i in sorted(offsets))
print fl
for step, means in sorted(results.items()):
line = "\t".join(str(i) for i in means)
line = "%s\t%s" % (str(step), line)
print line
if __name__ == '__main__':
run_timing_overhead_ana()
run_loop_overhead_ana()
run_proc_call_overhead_ana()
run_process_context_switch_ana()
run_thread_context_switch_ana()
run_mem_acc_ana()
|
sheimi/os-benchmark
|
script/analysis/analysis.py
|
Python
|
gpl-3.0
| 3,736 | 0.005621 |
"""Support for the Airly air_quality service."""
from homeassistant.components.air_quality import (
ATTR_AQI,
ATTR_PM_2_5,
ATTR_PM_10,
AirQualityEntity,
)
from homeassistant.const import CONF_NAME
from .const import (
ATTR_API_ADVICE,
ATTR_API_CAQI,
ATTR_API_CAQI_DESCRIPTION,
ATTR_API_CAQI_LEVEL,
ATTR_API_PM10,
ATTR_API_PM10_LIMIT,
ATTR_API_PM10_PERCENT,
ATTR_API_PM25,
ATTR_API_PM25_LIMIT,
ATTR_API_PM25_PERCENT,
DOMAIN,
)
ATTRIBUTION = "Data provided by Airly"
LABEL_ADVICE = "advice"
LABEL_AQI_DESCRIPTION = f"{ATTR_AQI}_description"
LABEL_AQI_LEVEL = f"{ATTR_AQI}_level"
LABEL_PM_2_5_LIMIT = f"{ATTR_PM_2_5}_limit"
LABEL_PM_2_5_PERCENT = f"{ATTR_PM_2_5}_percent_of_limit"
LABEL_PM_10_LIMIT = f"{ATTR_PM_10}_limit"
LABEL_PM_10_PERCENT = f"{ATTR_PM_10}_percent_of_limit"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Airly air_quality entity based on a config entry."""
name = config_entry.data[CONF_NAME]
coordinator = hass.data[DOMAIN][config_entry.entry_id]
async_add_entities(
[AirlyAirQuality(coordinator, name, config_entry.unique_id)], False
)
def round_state(func):
"""Round state."""
def _decorator(self):
res = func(self)
if isinstance(res, float):
return round(res)
return res
return _decorator
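# Illustration (hypothetical values, not from the Airly API): a wrapped
# getter returning 3.6 is reported as 4; non-float values such as None
# pass through unchanged.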
class AirlyAirQuality(AirQualityEntity):
"""Define an Airly air quality."""
def __init__(self, coordinator, name, unique_id):
"""Initialize."""
self.coordinator = coordinator
self._name = name
self._unique_id = unique_id
self._icon = "mdi:blur"
@property
def name(self):
"""Return the name."""
return self._name
@property
def should_poll(self):
"""Return the polling requirement of the entity."""
return False
@property
def icon(self):
"""Return the icon."""
return self._icon
@property
@round_state
def air_quality_index(self):
"""Return the air quality index."""
return self.coordinator.data[ATTR_API_CAQI]
@property
@round_state
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
return self.coordinator.data[ATTR_API_PM25]
@property
@round_state
def particulate_matter_10(self):
"""Return the particulate matter 10 level."""
return self.coordinator.data[ATTR_API_PM10]
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self._unique_id
@property
def available(self):
"""Return True if entity is available."""
return self.coordinator.last_update_success
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
LABEL_AQI_DESCRIPTION: self.coordinator.data[ATTR_API_CAQI_DESCRIPTION],
LABEL_ADVICE: self.coordinator.data[ATTR_API_ADVICE],
LABEL_AQI_LEVEL: self.coordinator.data[ATTR_API_CAQI_LEVEL],
LABEL_PM_2_5_LIMIT: self.coordinator.data[ATTR_API_PM25_LIMIT],
LABEL_PM_2_5_PERCENT: round(self.coordinator.data[ATTR_API_PM25_PERCENT]),
LABEL_PM_10_LIMIT: self.coordinator.data[ATTR_API_PM10_LIMIT],
LABEL_PM_10_PERCENT: round(self.coordinator.data[ATTR_API_PM10_PERCENT]),
}
async def async_added_to_hass(self):
"""Connect to dispatcher listening for entity data notifications."""
self.async_on_remove(
self.coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self):
"""Update Airly entity."""
await self.coordinator.async_request_refresh()
|
pschmitt/home-assistant
|
homeassistant/components/airly/air_quality.py
|
Python
|
apache-2.0
| 3,907 | 0.000768 |
# -*- coding: utf-8 -*-
from openerp import models, fields, api
class CalendarEvent(models.Model):
_inherit = 'calendar.event'
meeting_reason_id = fields.Many2one(
'calendar.event.meeting.reason',
string="Meeting reason",
ondelete="restrict")
class CalendarEventMeetingReason(models.Model):
_name = 'calendar.event.meeting.reason'
_description = 'Calendar Event Meeting Reason'
name = fields.Char('Reason', required=False, translate=True)
|
sandrafig/addons
|
calendar_event_meeting_reason/models/calendar_event.py
|
Python
|
agpl-3.0
| 488 | 0.004098 |
"""SCons.Tool.mwcc
Tool-specific initialization for the Metrowerks CodeWarrior compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mwcc.py 2014/08/24 12:12:31 garyo"
import os
import os.path
import SCons.Util
def set_vars(env):
"""Set MWCW_VERSION, MWCW_VERSIONS, and some codewarrior environment vars
MWCW_VERSIONS is set to a list of objects representing installed versions
MWCW_VERSION is set to the version object that will be used for building.
MWCW_VERSION can be set to a string during Environment
construction to influence which version is chosen, otherwise
the latest one from MWCW_VERSIONS is used.
Returns true if at least one version is found, false otherwise
"""
desired = env.get('MWCW_VERSION', '')
# return right away if the variables are already set
if isinstance(desired, MWVersion):
return 1
elif desired is None:
return 0
versions = find_versions()
version = None
if desired:
for v in versions:
if str(v) == desired:
version = v
elif versions:
version = versions[-1]
env['MWCW_VERSIONS'] = versions
env['MWCW_VERSION'] = version
if version is None:
return 0
env.PrependENVPath('PATH', version.clpath)
env.PrependENVPath('PATH', version.dllpath)
ENV = env['ENV']
ENV['CWFolder'] = version.path
ENV['LM_LICENSE_FILE'] = version.license
plus = lambda x: '+%s' % x
ENV['MWCIncludes'] = os.pathsep.join(map(plus, version.includes))
ENV['MWLibraries'] = os.pathsep.join(map(plus, version.libs))
return 1
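# Typical use from an SConstruct (a sketch; '9.4' is a placeholder for one
# of the installed versions reported in MWCW_VERSIONS):
#
#   env = Environment(tools=['mwcc'], MWCW_VERSION='9.4')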
def find_versions():
"""Return a list of MWVersion objects representing installed versions"""
versions = []
### This function finds CodeWarrior by reading from the registry on
### Windows. Some other method needs to be implemented for other
### platforms, maybe something that calls env.WhereIs('mwcc')
if SCons.Util.can_read_reg:
try:
HLM = SCons.Util.HKEY_LOCAL_MACHINE
product = 'SOFTWARE\\Metrowerks\\CodeWarrior\\Product Versions'
product_key = SCons.Util.RegOpenKeyEx(HLM, product)
i = 0
while True:
name = product + '\\' + SCons.Util.RegEnumKey(product_key, i)
name_key = SCons.Util.RegOpenKeyEx(HLM, name)
try:
version = SCons.Util.RegQueryValueEx(name_key, 'VERSION')
path = SCons.Util.RegQueryValueEx(name_key, 'PATH')
mwv = MWVersion(version[0], path[0], 'Win32-X86')
versions.append(mwv)
except SCons.Util.RegError:
pass
i = i + 1
except SCons.Util.RegError:
pass
return versions
class MWVersion(object):
def __init__(self, version, path, platform):
self.version = version
self.path = path
self.platform = platform
self.clpath = os.path.join(path, 'Other Metrowerks Tools',
'Command Line Tools')
self.dllpath = os.path.join(path, 'Bin')
# The Metrowerks tools don't store any configuration data so they
# are totally dumb when it comes to locating standard headers,
# libraries, and other files, expecting all the information
# to be handed to them in environment variables. The members set
# below control what information scons injects into the environment
### The paths below give a normal build environment in CodeWarrior for
### Windows, other versions of CodeWarrior might need different paths.
msl = os.path.join(path, 'MSL')
support = os.path.join(path, '%s Support' % platform)
self.license = os.path.join(path, 'license.dat')
self.includes = [msl, support]
self.libs = [msl, support]
def __str__(self):
return self.version
CSuffixes = ['.c', '.C']
CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++']
def generate(env):
"""Add Builders and construction variables for the mwcc to an Environment."""
import SCons.Defaults
import SCons.Tool
set_vars(env)
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in CSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCAction)
for suffix in CXXSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CXXAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)
env['CCCOMFLAGS'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -nolink -o $TARGET $SOURCES'
env['CC'] = 'mwcc'
env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CCCOMFLAGS'
env['CXX'] = 'mwcc'
env['CXXCOM'] = '$CXX $CXXFLAGS $CCCOMFLAGS'
env['SHCC'] = '$CC'
env['SHCCFLAGS'] = '$CCFLAGS'
env['SHCFLAGS'] = '$CFLAGS'
env['SHCCCOM'] = '$SHCC $SHCFLAGS $SHCCFLAGS $CCCOMFLAGS'
env['SHCXX'] = '$CXX'
env['SHCXXFLAGS'] = '$CXXFLAGS'
env['SHCXXCOM'] = '$SHCXX $SHCXXFLAGS $CCCOMFLAGS'
env['CFILESUFFIX'] = '.c'
env['CXXFILESUFFIX'] = '.cpp'
env['CPPDEFPREFIX'] = '-D'
env['CPPDEFSUFFIX'] = ''
env['INCPREFIX'] = '-I'
env['INCSUFFIX'] = ''
#env['PCH'] = ?
#env['PCHSTOP'] = ?
def exists(env):
return set_vars(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
engineer0x47/SCONS
|
engine/SCons/Tool/mwcc.py
|
Python
|
mit
| 6,841 | 0.003947 |
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import libtcodpy as libtcod
import tokenizer
import match
import textfield
import textview
import command
SCREEN_WIDTH = 80
SCREEN_HEIGHT = 35
FONT_FLAGS = libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_TCOD
FONT_FILE = 'fonts/dejavu12x12_gs_tc.png'
LIMIT_FPS = 25
TITLE = 'Neko (lost kitty)'
text_field = textfield.TextField(0, SCREEN_HEIGHT - 1, SCREEN_WIDTH, 1)
text_view = textview.TextView(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT - 1)
builtins = {'me': 123} # $tags: stub, example, todo
def eval_str(s, globals):
try:
r = eval(s, globals)
return str(r)
except Exception as e:
return str(e)
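# e.g. eval_str("me * 2", builtins) returns "246"; a failing expression
# returns the exception text instead of raising.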
def on_command(sender, args):
if args.key.vk != libtcod.KEY_ENTER:
return
s = text_field.get_text()
if match.starts_with(';', s):
r = eval_str(s[1:], builtins)
        text_view.lines.append(r)  # eval_str already returns a string
else:
tokens = tokenizer.tokenize(s)
cmd = command.parse(tokens)
text_view.lines.append(str(tokens))
text_view.lines.append(str(cmd))
text_field.add_handler(on_command)
libtcod.sys_set_fps(LIMIT_FPS)
libtcod.console_set_custom_font(FONT_FILE, FONT_FLAGS)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, TITLE, False)
while not libtcod.console_is_window_closed():
libtcod.console_set_default_foreground(0, libtcod.white)
libtcod.console_clear(0)
text_field.render(0)
text_view.render(0)
libtcod.console_flush()
key = libtcod.console_check_for_keypress(libtcod.KEY_PRESSED)
text_field.update(key)
|
basp/neko
|
spikes/main.py
|
Python
|
mit
| 2,241 | 0.002231 |
from django.contrib import admin
from treebeard.admin import TreeAdmin
from treebeard.forms import movenodeform_factory
from oscar.core.loading import get_model
AttributeOption = get_model('catalogue', 'AttributeOption')
AttributeOptionGroup = get_model('catalogue', 'AttributeOptionGroup')
Category = get_model('catalogue', 'Category')
Option = get_model('catalogue', 'Option')
Product = get_model('catalogue', 'Product')
ProductAttribute = get_model('catalogue', 'ProductAttribute')
ProductAttributeValue = get_model('catalogue', 'ProductAttributeValue')
ProductCategory = get_model('catalogue', 'ProductCategory')
ProductClass = get_model('catalogue', 'ProductClass')
ProductImage = get_model('catalogue', 'ProductImage')
ProductRecommendation = get_model('catalogue', 'ProductRecommendation')
class AttributeInline(admin.TabularInline):
model = ProductAttributeValue
class ProductRecommendationInline(admin.TabularInline):
model = ProductRecommendation
fk_name = 'primary'
raw_id_fields = ['primary', 'recommendation']
class CategoryInline(admin.TabularInline):
model = ProductCategory
extra = 1
class ProductAttributeInline(admin.TabularInline):
model = ProductAttribute
extra = 2
class ProductClassAdmin(admin.ModelAdmin):
list_display = ('name', 'requires_shipping', 'track_stock')
inlines = [ProductAttributeInline]
class ProductAdmin(admin.ModelAdmin):
date_hierarchy = 'date_created'
list_display = ('get_title', 'upc', 'get_product_class', 'structure',
'attribute_summary', 'date_created')
list_filter = ['structure', 'is_discountable']
raw_id_fields = ['parent']
inlines = [AttributeInline, CategoryInline, ProductRecommendationInline]
prepopulated_fields = {"slug": ("title",)}
search_fields = ['upc', 'title']
def get_queryset(self, request):
qs = super(ProductAdmin, self).get_queryset(request)
return (
qs
.select_related('product_class', 'parent')
.prefetch_related(
'attribute_values',
'attribute_values__attribute'))
class ProductAttributeAdmin(admin.ModelAdmin):
list_display = ('name', 'code', 'product_class', 'type')
prepopulated_fields = {"code": ("name", )}
class OptionAdmin(admin.ModelAdmin):
pass
class ProductAttributeValueAdmin(admin.ModelAdmin):
list_display = ('product', 'attribute', 'value')
class AttributeOptionInline(admin.TabularInline):
model = AttributeOption
class AttributeOptionGroupAdmin(admin.ModelAdmin):
list_display = ('name', 'option_summary')
inlines = [AttributeOptionInline, ]
class CategoryAdmin(TreeAdmin):
form = movenodeform_factory(Category)
admin.site.register(ProductClass, ProductClassAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(ProductAttribute, ProductAttributeAdmin)
admin.site.register(ProductAttributeValue, ProductAttributeValueAdmin)
admin.site.register(AttributeOptionGroup, AttributeOptionGroupAdmin)
admin.site.register(Option, OptionAdmin)
admin.site.register(ProductImage)
admin.site.register(Category, CategoryAdmin)
admin.site.register(ProductCategory)
|
itbabu/django-oscar
|
src/oscar/apps/catalogue/admin.py
|
Python
|
bsd-3-clause
| 3,190 | 0 |
# regression tree
# input is a dataframe of features
# the corresponding y values (called labels here) are the scores for each document
import pandas as pd
import numpy as np
from multiprocessing import Pool
from itertools import repeat
import scipy
import scipy.optimize
node_id = 0
def get_splitting_points(args):
# given a list
# return a list of possible splitting values
attribute, col = args
attribute.sort()
possible_split = []
for i in range(len(attribute)-1):
if attribute[i] != attribute[i+1]:
possible_split.append(np.mean((attribute[i],attribute[i+1])))
return possible_split, col
# create a dictionary, key is the attribute number, value is whole list of possible splits for that column
def find_best_split_parallel(args):
best_ls = 1000000
best_split = None
best_children = None
split_point, data, label = args
key,possible_split = split_point
for split in possible_split:
children = split_children(data, label, key, split)
#weighted average of left and right ls
ls = len(children[1])*least_square(children[1])/len(label) + len(children[3])*least_square(children[3])/len(label)
if ls < best_ls:
best_ls = ls
best_split = (key, split)
best_children = children
return best_ls, best_split, best_children
def find_best_split(data, label, split_points):
# split_points is a dictionary of possible splitting values
# return the best split
best_ls = 1000000
best_split = None
best_children = None
pool = Pool()
for ls, split, children in pool.map(find_best_split_parallel, zip(split_points.items(), repeat(data), repeat(label))):
if ls < best_ls:
best_ls = ls
best_split = split
best_children = children
pool.close()
return best_split, best_children # return a tuple(attribute, value)
def split_children(data, label, key, split):
left_index = [index for index in xrange(len(data.iloc[:,key])) if data.iloc[index,key] < split]
right_index = [index for index in xrange(len(data.iloc[:,key])) if data.iloc[index,key] >= split]
left_data = data.iloc[left_index,:]
right_data = data.iloc[right_index,:]
left_label = [label[i] for i in left_index]
right_label =[label[i] for i in right_index]
return left_data, left_label, right_data, right_label
def least_square(label):
    # (sum of labels)^2 normalised by the number of labels in the partition;
    # empty partitions contribute nothing.
    if not len(label):
        return 0
    return (np.sum(label) ** 2) / len(label)
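# e.g. least_square([1, 2, 3]) == (1 + 2 + 3) ** 2 / 3 == 12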
def create_leaf(label):
global node_id
node_id += 1
    leaf = {'splitting_feature': None,
'left': None,
'right':None,
'is_leaf':True,
'index':node_id}
leaf['value'] = round(np.mean(label),3)
return leaf
def find_splits_parallel(args):
var_space, label, col = args
# var_space = data.iloc[:,col].tolist()
return scipy.optimize.fminbound(error_function, min(var_space), max(var_space), args = (col, var_space, label), full_output = 1)
# return,
# if not min_error or error < min_error:
# min_error = error
# split_var = col
# min_split = split
def create_tree(data, all_pos_split, label, max_depth, ideal_ls, current_depth = 0):
remaining_features = all_pos_split
#stopping conditions
if sum([len(v)!= 0 for v in remaining_features.values()]) == 0:
# If there are no remaining features to consider, make current node a leaf node
return create_leaf(label)
# #Additional stopping condition (limit tree depth)
elif current_depth > max_depth:
return create_leaf(label)
#######
    min_error = None
    split_var = None
    min_split = None
    var_spaces = [data.iloc[:, col].tolist() for col in xrange(data.shape[1])]
    cols = range(data.shape[1])
    pool = Pool()
    # Pair each fminbound result with the column index it was computed for.
    results = pool.map(find_splits_parallel, zip(var_spaces, repeat(label), cols))
    pool.close()
    for col, (split, error, ierr, numf) in zip(cols, results):
        if min_error is None or error < min_error:
            min_error = error
            split_var = col
            min_split = split
splitting_feature = (split_var, min_split)
children = split_children(data, label, split_var, min_split)
left_data, left_label, right_data, right_label = children
if len(left_label) == 0 or len(right_label) == 0:
return create_leaf(label)
left_least_square = least_square(left_label)
# Create a leaf node if the split is "perfect"
if left_least_square < ideal_ls:
return create_leaf(left_label)
if least_square(right_label) < ideal_ls:
return create_leaf(right_label)
# recurse on children
left_tree = create_tree(left_data, remaining_features, left_label, max_depth, ideal_ls, current_depth +1)
right_tree = create_tree(right_data, remaining_features, right_label, max_depth, ideal_ls, current_depth +1)
return {'is_leaf' : False,
'value' : None,
'splitting_feature': splitting_feature,
'left' : left_tree,
'right' : right_tree,
'index' : None}
def error_function(split_point, split_var, data, label):
data1 = []
data2 = []
for i in xrange(len(data)):
temp_dat = data[i]
if temp_dat <= split_point:
data1.append(label[i])
else:
data2.append(label[i])
return least_square(data1) + least_square(data2)
def make_prediction(tree, x, annotate = False):
if tree['is_leaf']:
if annotate:
print "At leaf, predicting %s" % tree['value']
return tree['value']
else:
# the splitting value of x.
split_feature_value = x[tree['splitting_feature'][0]]
if annotate:
print "Split on %s = %s" % (tree['splitting_feature'], split_feature_value)
if split_feature_value < tree['splitting_feature'][1]:
return make_prediction(tree['left'], x, annotate)
else:
return make_prediction(tree['right'], x, annotate)
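# In other words, prediction walks from the root, going left while the
# feature value is below the node's split threshold and right otherwise,
# until it returns the mean label stored at a leaf.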
class RegressionTree:
def __init__(self, training_data, labels, max_depth=5, ideal_ls=100):
self.training_data = training_data
self.labels = labels
self.max_depth = max_depth
self.ideal_ls = ideal_ls
self.tree = None
def fit(self):
global node_id
node_id = 0
all_pos_split = {}
pool = Pool()
splitting_data = [self.training_data.iloc[:,col].tolist() for col in xrange(self.training_data.shape[1])]
cols = [col for col in xrange(self.training_data.shape[1])]
for dat, col in pool.map(get_splitting_points, zip(splitting_data, cols)):
all_pos_split[col] = dat
pool.close()
self.tree = create_tree(self.training_data, all_pos_split, self.labels, self.max_depth, self.ideal_ls)
def predict(self, test):
prediction = np.array([make_prediction(self.tree, x) for x in test])
return prediction
if __name__ == '__main__':
#read in data, label
data = pd.read_excel("mlr06.xls")
test = [[478, 184, 40, 74, 11, 31], [1000,10000,10000,10000,10000,1000,100000]]
label = data['X7']
del data['X7']
model = RegressionTree(data, label)
model.fit()
print model.predict(test)
|
lezzago/LambdaMart
|
RegressionTree.py
|
Python
|
mit
| 6,591 | 0.038082 |
# -*- coding: utf-8 -*-
#
# Monary documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 9 13:39:38 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import monary
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.doctest',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Monary'
copyright = u'2014, David J. C. Beach'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = monary.__version__
# The full version, including alpha/beta/rc tags.
release = monary.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Monarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Monary.tex', u'Monary Documentation',
u'David J. C. Beach', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'monary', u'Monary Documentation',
[u'David J. C. Beach'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Monary', u'Monary Documentation',
u'David J. C. Beach', 'Monary', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
aherlihy/Monary
|
doc/conf.py
|
Python
|
apache-2.0
| 8,410 | 0.006183 |
import logging
from flask_babel import lazy_gettext
from .jsontools import dict_to_json
from .widgets import ChartWidget, DirectChartWidget
from ..baseviews import BaseModelView, expose
from ..models.group import DirectProcessData, GroupByProcessData
from ..security.decorators import has_access
from ..urltools import get_filter_args
from ..widgets import SearchWidget
log = logging.getLogger(__name__)
class BaseChartView(BaseModelView):
"""
This is the base class for all chart views.
    Use DirectByChartView or GroupByChartView instead, overriding their
    properties and, where needed, their base classes
    (BaseView, BaseModelView, BaseChartView) to customise your charts
"""
chart_template = "appbuilder/general/charts/chart.html"
""" The chart template, override to implement your own """
chart_widget = ChartWidget
""" Chart widget override to implement your own """
search_widget = SearchWidget
""" Search widget override to implement your own """
chart_title = "Chart"
""" A title to be displayed on the chart """
title = "Title"
group_by_label = lazy_gettext("Group by")
""" The label that is displayed for the chart selection """
default_view = "chart"
chart_type = "PieChart"
""" The chart type PieChart, ColumnChart, LineChart """
chart_3d = "true"
""" Will display in 3D? """
width = 400
""" The width """
height = "400px"
group_bys = {}
""" New for 0.6.4, on test, don't use yet """
def __init__(self, **kwargs):
self._init_titles()
super(BaseChartView, self).__init__(**kwargs)
def _init_titles(self):
self.title = self.chart_title
def _get_chart_widget(self, filters=None, widgets=None, **args):
raise NotImplementedError
def _get_view_widget(self, **kwargs):
"""
:return:
Returns a widget
"""
return self._get_chart_widget(**kwargs).get("chart")
class GroupByChartView(BaseChartView):
definitions = []
"""
These charts can display multiple series,
based on columns or methods defined on models.
You can display multiple charts on the same view.
    This data can be grouped and aggregated as you like.
:label: (optional) String label to display on chart selection.
:group: String with the column name or method from model.
:formatter: (optional) function that formats the output of 'group' key
:series: A list of tuples with the aggregation function and the column name
to apply the aggregation
::
[{
'label': 'String',
'group': '<COLNAME>'|'<FUNCNAME>'
'formatter: <FUNC>
'series': [(<AGGR FUNC>, <COLNAME>|'<FUNCNAME>'),...]
}
]
example::
class CountryGroupByChartView(GroupByChartView):
datamodel = SQLAInterface(CountryStats)
chart_title = 'Statistics'
definitions = [
{
'label': 'Country Stat',
'group': 'country',
'series': [(aggregate_avg, 'unemployed_perc'),
(aggregate_avg, 'population'),
(aggregate_avg, 'college_perc')
]
}
]
"""
chart_type = "ColumnChart"
chart_template = "appbuilder/general/charts/jsonchart.html"
chart_widget = DirectChartWidget
ProcessClass = GroupByProcessData
def __init__(self, **kwargs):
super(GroupByChartView, self).__init__(**kwargs)
for definition in self.definitions:
col = definition.get("group")
# Setup labels
try:
self.label_columns[col] = (
definition.get("label") or self.label_columns[col]
)
except Exception:
self.label_columns[col] = self._prettify_column(col)
if not definition.get("label"):
definition["label"] = self.label_columns[col]
# Setup Series
for serie in definition["series"]:
if isinstance(serie, tuple):
if hasattr(serie[0], "_label"):
key = serie[0].__name__ + serie[1]
self.label_columns[key] = (
serie[0]._label + " " + self._prettify_column(serie[1])
)
else:
self.label_columns[serie] = self._prettify_column(serie)
def get_group_by_class(self, definition):
"""
        instantiates the processing class (Direct or Grouped) and returns it.
"""
group_by = definition["group"]
series = definition["series"]
if "formatter" in definition:
formatter = {group_by: definition["formatter"]}
else:
formatter = {}
return self.ProcessClass([group_by], series, formatter)
def _get_chart_widget(
self,
filters=None,
order_column="",
order_direction="",
widgets=None,
direct=None,
height=None,
definition="",
**args
):
height = height or self.height
widgets = widgets or dict()
joined_filters = filters.get_joined_filters(self._base_filters)
# check if order_column may be database ordered
if not self.datamodel.get_order_columns_list([order_column]):
order_column = ""
order_direction = ""
count, lst = self.datamodel.query(
filters=joined_filters,
order_column=order_column,
order_direction=order_direction,
)
if not definition:
definition = self.definitions[0]
group = self.get_group_by_class(definition)
value_columns = group.to_json(
group.apply(lst, sort=order_column == ""), self.label_columns
)
widgets["chart"] = self.chart_widget(
route_base=self.route_base,
chart_title=self.chart_title,
chart_type=self.chart_type,
chart_3d=self.chart_3d,
height=height,
value_columns=value_columns,
modelview_name=self.__class__.__name__,
**args
)
return widgets
@expose("/chart/<group_by>")
@expose("/chart/")
@has_access
def chart(self, group_by=0):
group_by = int(group_by)
form = self.search_form.refresh()
get_filter_args(self._filters)
widgets = self._get_chart_widget(
filters=self._filters,
definition=self.definitions[group_by],
order_column=self.definitions[group_by]["group"],
order_direction="asc",
)
widgets = self._get_search_widget(form=form, widgets=widgets)
self.update_redirect()
return self.render_template(
self.chart_template,
route_base=self.route_base,
title=self.chart_title,
label_columns=self.label_columns,
definitions=self.definitions,
group_by_label=self.group_by_label,
height=self.height,
widgets=widgets,
appbuilder=self.appbuilder,
)
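# Registering such a view (a sketch; CountryGroupByChartView comes from the
# docstring example above, and ``appbuilder`` is assumed to be a configured
# Flask-AppBuilder instance):
#
#   appbuilder.add_view(CountryGroupByChartView, "Country Stats",
#                       category="Statistics")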
class DirectByChartView(GroupByChartView):
"""
Use this class to display charts with multiple series,
based on columns or methods defined on models.
You can display multiple charts on the same view.
Default routing point is '/chart'
Setup definitions property to configure the chart
:label: (optional) String label to display on chart selection.
:group: String with the column name or method from model.
:formatter: (optional) function that formats the output of 'group' key
:series: A list of tuples with the aggregation function and the column name
to apply the aggregation
The **definitions** property respects the following grammar::
definitions = [
{
'label': 'label for chart definition',
'group': '<COLNAME>'|'<MODEL FUNCNAME>',
'formatter': <FUNC FORMATTER FOR GROUP COL>,
'series': ['<COLNAME>'|'<MODEL FUNCNAME>',...]
}, ...
]
example::
class CountryDirectChartView(DirectByChartView):
datamodel = SQLAInterface(CountryStats)
chart_title = 'Direct Data Example'
definitions = [
{
'label': 'Unemployment',
'group': 'stat_date',
'series': ['unemployed_perc',
'college_perc']
}
]
"""
ProcessClass = DirectProcessData
# -------------------------------------------------------
# DEPRECATED SECTION
# -------------------------------------------------------
class BaseSimpleGroupByChartView(BaseChartView): # pragma: no cover
group_by_columns = []
""" A list of columns to be possibly grouped by, this list must be filled """
def __init__(self, **kwargs):
if not self.group_by_columns:
raise Exception(
"Base Chart View property <group_by_columns> must not be empty"
)
else:
super(BaseSimpleGroupByChartView, self).__init__(**kwargs)
def _get_chart_widget(
self,
filters=None,
order_column="",
order_direction="",
widgets=None,
group_by=None,
height=None,
**args
):
height = height or self.height
widgets = widgets or dict()
group_by = group_by or self.group_by_columns[0]
joined_filters = filters.get_joined_filters(self._base_filters)
value_columns = self.datamodel.query_simple_group(
group_by, filters=joined_filters
)
widgets["chart"] = self.chart_widget(
route_base=self.route_base,
chart_title=self.chart_title,
chart_type=self.chart_type,
chart_3d=self.chart_3d,
height=height,
value_columns=value_columns,
modelview_name=self.__class__.__name__,
**args
)
return widgets
class BaseSimpleDirectChartView(BaseChartView): # pragma: no cover
direct_columns = []
"""
Make chart using the column on the dict
chart_columns = {'chart label 1':('X column','Y1 Column','Y2 Column, ...),
'chart label 2': ('X Column','Y1 Column',...),...}
"""
def __init__(self, **kwargs):
if not self.direct_columns:
raise Exception(
"Base Chart View property <direct_columns> must not be empty"
)
else:
super(BaseSimpleDirectChartView, self).__init__(**kwargs)
def get_group_by_columns(self):
"""
returns the keys from direct_columns
Used in template, so that user can choose from options
"""
return list(self.direct_columns.keys())
def _get_chart_widget(
self,
filters=None,
order_column="",
order_direction="",
widgets=None,
direct=None,
height=None,
**args
):
height = height or self.height
widgets = widgets or dict()
joined_filters = filters.get_joined_filters(self._base_filters)
count, lst = self.datamodel.query(
filters=joined_filters,
order_column=order_column,
order_direction=order_direction,
)
value_columns = self.datamodel.get_values(lst, list(direct))
value_columns = dict_to_json(
direct[0], direct[1:], self.label_columns, value_columns
)
widgets["chart"] = self.chart_widget(
route_base=self.route_base,
chart_title=self.chart_title,
chart_type=self.chart_type,
chart_3d=self.chart_3d,
height=height,
value_columns=value_columns,
modelview_name=self.__class__.__name__,
**args
)
return widgets
class ChartView(BaseSimpleGroupByChartView): # pragma: no cover
"""
**DEPRECATED**
Provides a simple (and hopefully nice) way to draw charts on your application.
This will show Google Charts based on group by of your tables.
"""
@expose("/chart/<group_by>")
@expose("/chart/")
@has_access
def chart(self, group_by=""):
form = self.search_form.refresh()
get_filter_args(self._filters)
group_by = group_by or self.group_by_columns[0]
widgets = self._get_chart_widget(filters=self._filters, group_by=group_by)
widgets = self._get_search_widget(form=form, widgets=widgets)
return self.render_template(
self.chart_template,
route_base=self.route_base,
title=self.chart_title,
label_columns=self.label_columns,
group_by_columns=self.group_by_columns,
group_by_label=self.group_by_label,
height=self.height,
widgets=widgets,
appbuilder=self.appbuilder,
)
class TimeChartView(BaseSimpleGroupByChartView): # pragma: no cover
"""
**DEPRECATED**
Provides a simple way to draw some time charts on your application.
This will show Google Charts based on count and group
by month and year for your tables.
"""
chart_template = "appbuilder/general/charts/chart_time.html"
chart_type = "ColumnChart"
def _get_chart_widget(
self,
filters=None,
order_column="",
order_direction="",
widgets=None,
group_by=None,
period=None,
height=None,
**args
):
height = height or self.height
widgets = widgets or dict()
group_by = group_by or self.group_by_columns[0]
joined_filters = filters.get_joined_filters(self._base_filters)
if period == "month" or not period:
value_columns = self.datamodel.query_month_group(
group_by, filters=joined_filters
)
elif period == "year":
value_columns = self.datamodel.query_year_group(
group_by, filters=joined_filters
)
widgets["chart"] = self.chart_widget(
route_base=self.route_base,
chart_title=self.chart_title,
chart_type=self.chart_type,
chart_3d=self.chart_3d,
height=height,
value_columns=value_columns,
modelview_name=self.__class__.__name__,
**args
)
return widgets
@expose("/chart/<group_by>/<period>")
@expose("/chart/")
@has_access
def chart(self, group_by="", period=""):
form = self.search_form.refresh()
get_filter_args(self._filters)
group_by = group_by or self.group_by_columns[0]
widgets = self._get_chart_widget(
filters=self._filters, group_by=group_by, period=period, height=self.height
)
widgets = self._get_search_widget(form=form, widgets=widgets)
return self.render_template(
self.chart_template,
route_base=self.route_base,
title=self.chart_title,
label_columns=self.label_columns,
group_by_columns=self.group_by_columns,
group_by_label=self.group_by_label,
widgets=widgets,
appbuilder=self.appbuilder,
)
class DirectChartView(BaseSimpleDirectChartView): # pragma: no cover
"""
**DEPRECATED**
This class is responsible for displaying a Google chart with
direct model values. Chart widget uses json.
No group by is processed, example::
class StatsChartView(DirectChartView):
datamodel = SQLAInterface(Stats)
chart_title = lazy_gettext('Statistics')
direct_columns = {'Some Stats': ('X_col_1', 'stat_col_1', 'stat_col_2'),
'Other Stats': ('X_col2', 'stat_col_3')}
"""
chart_type = "ColumnChart"
chart_widget = DirectChartWidget
@expose("/chart/<group_by>")
@expose("/chart/")
@has_access
def chart(self, group_by=""):
form = self.search_form.refresh()
get_filter_args(self._filters)
direct_key = group_by or list(self.direct_columns.keys())[0]
direct = self.direct_columns.get(direct_key)
if self.base_order:
order_column, order_direction = self.base_order
else:
order_column, order_direction = "", ""
widgets = self._get_chart_widget(
filters=self._filters,
order_column=order_column,
order_direction=order_direction,
direct=direct,
)
widgets = self._get_search_widget(form=form, widgets=widgets)
return self.render_template(
self.chart_template,
route_base=self.route_base,
title=self.chart_title,
label_columns=self.label_columns,
group_by_columns=self.get_group_by_columns(),
group_by_label=self.group_by_label,
height=self.height,
widgets=widgets,
appbuilder=self.appbuilder,
)
|
dpgaspar/Flask-AppBuilder
|
flask_appbuilder/charts/views.py
|
Python
|
bsd-3-clause
| 17,665 | 0.000566 |
#!/usr/bin/env python
'''
Pymodbus Asynchronous Client Examples
--------------------------------------------------------------------------
The following is an example of how to use the asynchronous modbus
client implementation from pymodbus.
'''
#---------------------------------------------------------------------------#
# import needed libraries
#---------------------------------------------------------------------------#
from twisted.internet import reactor, protocol
from pymodbus.constants import Defaults
#---------------------------------------------------------------------------#
# choose the requested modbus protocol
#---------------------------------------------------------------------------#
from pymodbus.client.async import ModbusClientProtocol
#from pymodbus.client.async import ModbusUdpClientProtocol
#---------------------------------------------------------------------------#
# configure the client logging
#---------------------------------------------------------------------------#
import logging
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)
#---------------------------------------------------------------------------#
# helper method to test deferred callbacks
#---------------------------------------------------------------------------#
def dassert(deferred, callback):
def _assertor(value): assert(value)
deferred.addCallback(lambda r: _assertor(callback(r)))
deferred.addErrback(lambda _: _assertor(False))
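# For example, dassert(rq, lambda r: r.function_code < 0x80) asserts, once
# the deferred fires, that the response is not a Modbus exception response.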
#---------------------------------------------------------------------------#
# specify slave to query
#---------------------------------------------------------------------------#
# The slave to query is specified in an optional parameter for each
# individual request. This can be done by specifying the `unit` parameter
# which defaults to `0x00`
#---------------------------------------------------------------------------#
def exampleRequests(client):
rr = client.read_coils(1, 1, unit=0x02)
#---------------------------------------------------------------------------#
# example requests
#---------------------------------------------------------------------------#
# simply call the methods that you would like to use. An example session
# is displayed below along with some assert checks. Note that unlike the
# synchronous version of the client, the asynchronous version returns
# deferreds, which can be thought of as handles to results that will be
# delivered later. We handle each result with the deferred assert
# helper (dassert).
#---------------------------------------------------------------------------#
def beginAsynchronousTest(client):
rq = client.write_coil(1, True)
rr = client.read_coils(1,1)
dassert(rq, lambda r: r.function_code < 0x80) # test that we are not an error
dassert(rr, lambda r: r.bits[0] == True) # test the expected value
rq = client.write_coils(1, [True]*8)
rr = client.read_coils(1,8)
dassert(rq, lambda r: r.function_code < 0x80) # test that we are not an error
dassert(rr, lambda r: r.bits == [True]*8) # test the expected value
rq = client.write_coils(1, [False]*8)
rr = client.read_discrete_inputs(1,8)
dassert(rq, lambda r: r.function_code < 0x80) # test that we are not an error
    dassert(rr, lambda r: r.bits == [True]*8)     # discrete inputs keep the reference server's initial (truthy) values
rq = client.write_register(1, 10)
rr = client.read_holding_registers(1,1)
dassert(rq, lambda r: r.function_code < 0x80) # test that we are not an error
dassert(rr, lambda r: r.registers[0] == 10) # test the expected value
rq = client.write_registers(1, [10]*8)
rr = client.read_input_registers(1,8)
dassert(rq, lambda r: r.function_code < 0x80) # test that we are not an error
    dassert(rr, lambda r: r.registers == [17]*8)  # input registers keep the reference server's initial value (17)
arguments = {
'read_address': 1,
'read_count': 8,
'write_address': 1,
'write_registers': [20]*8,
}
rq = client.readwrite_registers(**arguments)
rr = client.read_input_registers(1,8)
dassert(rq, lambda r: r.registers == [20]*8) # test the expected value
    dassert(rr, lambda r: r.registers == [17]*8)  # input registers keep the reference server's initial value (17)
#-----------------------------------------------------------------------#
# close the client at some time later
#-----------------------------------------------------------------------#
reactor.callLater(1, client.transport.loseConnection)
reactor.callLater(2, reactor.stop)
#---------------------------------------------------------------------------#
# extra requests
#---------------------------------------------------------------------------#
# If you are performing a request that is not available in the client
# mixin, you have to perform the request like this instead::
#
# from pymodbus.diag_message import ClearCountersRequest
# from pymodbus.diag_message import ClearCountersResponse
#
# request = ClearCountersRequest()
# response = client.execute(request)
# if isinstance(response, ClearCountersResponse):
# ... do something with the response
#
#---------------------------------------------------------------------------#
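#
# A hedged sketch (following this file's deferred style, with the request
# classes imported as above) of handling such a custom request asynchronously:
#
# def _handle(response):
#     if isinstance(response, ClearCountersResponse):
#         log.debug("counters cleared")
# client.execute(ClearCountersRequest()).addCallback(_handle)
#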
#---------------------------------------------------------------------------#
# choose the client you want
#---------------------------------------------------------------------------#
# make sure to start an implementation to hit against. For this
# you can use an existing device, the reference implementation in the tools
# directory, or start a pymodbus server.
#---------------------------------------------------------------------------#
defer = protocol.ClientCreator(reactor, ModbusClientProtocol
).connectTCP("localhost", Defaults.Port)
defer.addCallback(beginAsynchronousTest)
reactor.run()
|
mjfarmer/scada_py
|
pymodbus/examples/common/asynchronous-client.py
|
Python
|
gpl-3.0
| 5,916 | 0.011663 |
import pytest
from fastapi.testclient import TestClient
from ...utils import needs_py310
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": False,
"schema": {
"title": "Q",
"type": "array",
"items": {"type": "string"},
},
"name": "q",
"in": "query",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
@pytest.fixture(name="client")
def get_client():
from docs_src.query_params_str_validations.tutorial011_py310 import app
client = TestClient(app)
return client
@needs_py310
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
@needs_py310
def test_multi_query_values(client: TestClient):
url = "/items/?q=foo&q=bar"
response = client.get(url)
assert response.status_code == 200, response.text
assert response.json() == {"q": ["foo", "bar"]}
@needs_py310
def test_query_no_values(client: TestClient):
url = "/items/"
response = client.get(url)
assert response.status_code == 200, response.text
assert response.json() == {"q": None}
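# A hedged extra check (not part of the tutorial's test-suite): repeated query
# keys can also be sent through TestClient's `params` argument instead of a
# hand-built URL.
@needs_py310
def test_multi_query_values_params(client: TestClient):
    response = client.get("/items/", params={"q": ["baz", "qux"]})
    assert response.status_code == 200, response.text
    assert response.json() == {"q": ["baz", "qux"]}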
|
tiangolo/fastapi
|
tests/test_tutorial/test_query_params_str_validations/test_tutorial011_py310.py
|
Python
|
mit
| 3,293 | 0.000607 |
from __future__ import division
"""
vaunix.py
part of the CsPyController package for AQuA experiment control by Martin Lichtman
Handles sending commands to Vaunix Lab Brick signal generators through the
vendor's VNX_fmsynth DLL.
created = 2015.07.09
modified >= 2015.07.09
"""
__author__ = 'Martin Lichtman'
import logging
logger = logging.getLogger(__name__)
from atom.api import Bool, Str, Member, Int
from instrument_property import Prop, IntProp, ListProp, FloatProp
from cs_instruments import Instrument
from cs_errors import PauseError
from ctypes import *
class Vaunix(Prop):
isInitialized = Bool(False)
ID = Int()
va = Member()
model = Str()
serial = Int()
frequency = Member()
power = Member()
pulsewidth = Member()
pulserep = Member()
pulseenable = Bool()
startfreq = Member()
endfreq = Member()
sweeptime = Member()
sweepmode = Bool()
sweeptype = Bool()
sweepenable = Bool()
sweepdir = Bool()
internalref = Bool()
useexternalmod = Bool()
rfonoff = Bool()
maxPower = Int()
minPower = Int()
minFreq = Int()
maxFreq = Int()
def __init__(self, name, experiment, description=''):
super(Vaunix, self).__init__(name, experiment, description)
self.frequency = FloatProp('Frequency', experiment, 'Frequency (MHz)', '0')
self.power = FloatProp('Power', experiment, 'Power (dBm)', '0')
self.pulsewidth = FloatProp('PulseWidth', experiment, 'Pulse Width (us)', '0')
self.pulserep = FloatProp('PulseRep', experiment, 'Pulse Rep Time (us)', '0')
self.startfreq = FloatProp('StartFreq', experiment, 'Start Frequency (MHz)', '0')
self.endfreq = FloatProp('EndFreq', experiment, 'End Frequency (MHz)', '0')
self.sweeptime = IntProp('SweepTime', experiment, 'Sweep Time (ms)', '0')
self.properties += ['ID', 'model', 'serial', 'frequency','power','pulsewidth','pulserep','pulseenable','startfreq','endfreq','sweeptime',
'sweepmode', 'sweeptype', 'sweepdir', 'sweepenable', 'internalref', 'useexternalmod', 'rfonoff', 'maxPower']
def initialize(self,va):
self.va = va
errcode = self.va.fnLMS_InitDevice(self.ID)
if (errcode !=0):
            # if the device fails to initialize it may not have been closed
            # previously; try closing it and then initializing again
            errcodereset = self.va.fnLMS_CloseDevice(self.ID)
            if (errcodereset != 0):
logger.error("Failed to initialize Vaunix device {}. Error code {}.".format(self.ID,errcode))
raise PauseError
errcode = self.va.fnLMS_InitDevice(self.ID)
if (errcode != 0):
logger.error("Failed to initialize Vaunix device {}. Error code {}.".format(self.ID,errcode))
raise PauseError
self.maxPower = int(self.va.fnLMS_GetMaxPwr(self.ID)/4)
self.minPower = int(self.va.fnLMS_GetMinPwr(self.ID)/4)
self.minFreq = int(self.va.fnLMS_GetMinFreq(self.ID))
self.maxFreq = int(self.va.fnLMS_GetMaxFreq(self.ID))
return
    def freq_unit(self, val):
        # the VNX DLL expects frequency in units of 10 Hz; val is given in MHz
        return int(val * 100000)
    def power_unit(self, value):
        # the VNX DLL expects power as attenuation below max power in 0.25 dB steps
        return int((self.maxPower - value) * 4)
def power_sanity_check(self,value):
if (value < self.minPower or value > self.maxPower):
logger.error("Vaunix device {} power ({} dBm) outside min/max range: {} dBm, {} dBm.".format(self.ID,value,self.minPower,self.maxPower))
raise PauseError
return
def freq_sanity_check(self,value):
if (value < self.minFreq or value > self.maxFreq):
logger.error("Vaunix device {} frequency ({} x10 Hz) outside min/max range: {} x10 Hz, {} x10 Hz.".format(self.ID,value,self.minFreq,self.maxFreq))
raise PauseError
return
def update(self):
if (self.rfonoff):
self.freq_sanity_check(self.freq_unit(self.frequency.value))
self.va.fnLMS_SetFrequency(self.ID, self.freq_unit(self.frequency.value))
self.power_sanity_check(self.power.value)
self.va.fnLMS_SetPowerLevel(self.ID, self.power_unit(self.power.value))
if (self.sweepenable):
self.freq_sanity_check(self.freq_unit(self.startfreq.value))
self.va.fnLMS_SetStartFrequency(self.ID, self.freq_unit(self.startfreq.value))
self.freq_sanity_check(self.freq_unit(self.endfreq.value))
self.va.fnLMS_SetEndFrequency(self.ID, self.freq_unit(self.endfreq.value))
self.va.fnLMS_SetSweepTime(self.ID, self.sweeptime.value)
self.va.fnLMS_SetSweepDirection(self.ID, self.sweepdir)
self.va.fnLMS_SetSweepMode(self.ID, self.sweepmode) #True: Repeat Sweep, False: Sweep Once
self.va.fnLMS_SetSweepType(self.ID, self.sweeptype) #True: Bidirectional Sweep, False: Unidirectional Sweep
self.va.fnLMS_StartSweep(self.ID, self.sweepenable)
self.va.fnLMS_SetFastPulsedOutput(self.ID, c_float(self.pulsewidth.value*1e-6), c_float(self.pulserep.value*1e-6), self.pulseenable)
self.va.fnLMS_SetUseExternalPulseMod(self.ID, self.useexternalmod)
self.va.fnLMS_SetUseInternalRef(self.ID, self.internalref) #True: internal ref, False: external ref
self.va.fnLMS_SaveSettings(self.ID)
self.va.fnLMS_SetRFOn(self.ID, self.rfonoff)
self.getparams()
return
def getparams(self):
logger.info("Parameters for Vaunix # {}".format(self.ID))
logger.info("Frequency: {} MHz".format(
self.va.fnLMS_GetFrequency(self.ID)/100000))
logger.info("Power Level: {} dBm".format(
self.va.fnLMS_GetPowerLevel(self.ID)/4))
class Vaunixs(Instrument):
version = '2015.11.19'
motors = Member()
isInitialized = Bool(False)
va = Member()
testMode = Bool(False) #Test mode: Set to False for actual use.
def __init__(self, name, experiment, description=''):
super(Vaunixs, self).__init__(name, experiment, description)
self.motors = ListProp('motors', experiment, 'A list of individual Vaunix signal generators', listElementType=Vaunix,
listElementName='Vaunix')
self.properties += ['version', 'motors']
num = self.initialize()
self.motors.length = num
self.motors.refreshGUI()
#Initialize: loads and initializes DLL
def initialize(self):
num = 0
if self.enable:
CDLL_file = "./vaunix/VNX_fmsynth.dll"
self.va = CDLL(CDLL_file)
if (self.testMode):
logger.warning("Warning: Vaunix in test mode. Set testMode=False in vaunix.py to turn off test mode.")
self.va.fnLMS_SetTestMode(self.testMode) #Test mode... this needs to be set False for actual run. Do not remove this command (default setting is True).
self.isInitialized = True
num = self.detect_generators()
return num
def preExperiment(self, hdf5):
if self.enable:
if (not self.isInitialized):
self.initialize()
for i in self.motors:
                # initialize each Vaunix generator through the shared DLL handle
i.initialize(self.va)
self.isInitialized = True
def preIteration(self, iterationresults, hdf5):
"""
Every iteration, send the motors updated positions.
"""
if self.enable:
msg = ''
try:
for i in self.motors:
i.update()
except Exception as e:
logger.error('Problem updating Vaunix:\n{}\n{}\n'.format(msg, e))
self.isInitialized = False
raise PauseError
def postMeasurement(self, measurementresults, iterationresults, hdf5):
return
def postIteration(self, iterationresults, hdf5):
return
def postExperiment(self, hdf5):
return
def finalize(self,hdf5):
return
#detect_generators: Calls DLL function to check for number of generators and their IDs.
def detect_generators(self):
if (not self.isInitialized): #test if DLL is already loaded. If not, load it.
self.initialize()
num=self.va.fnLMS_GetNumDevices() #ask DLL for the number of connected devices
logger.debug("Number of vaunix devices detected: {}".format(num))
while (num>len(self.motors)): #if num connected devices > number in array, add elements.
self.motors.add()
while (num<len(self.motors)): #if <, subtract elements.
self.motors.pop(self.motors.length-1)
self.motors.length -= 1
devinfotype = c_uint*num
devinfo = devinfotype()
self.va.fnLMS_GetDevInfo(addressof(devinfo)) #get device IDs
for mn, i in enumerate(self.motors):
i.ID = int(devinfo[mn]) #copy device IDs to ID variable
modnumtype = c_char*100
modnum = modnumtype()
self.va.fnLMS_GetModelNameA(i.ID,addressof(modnum)) #get device model names
i.model = modnum.value
serial = c_int()
serial = self.va.fnLMS_GetSerialNumber(i.ID) #get device serial numbers
i.serial = serial
return num
|
QuantumQuadrate/CsPyController
|
python/vaunix.py
|
Python
|
lgpl-3.0
| 9,955 | 0.012858 |
# -*- coding: utf-8 -*-
import os
import re
import select
import socket
import struct
import time
from module.plugins.internal.Hoster import Hoster
from module.plugins.internal.misc import exists, fsjoin
class XDCC(Hoster):
__name__ = "XDCC"
__type__ = "hoster"
__version__ = "0.42"
__status__ = "testing"
__pattern__ = r'xdcc://(?P<SERVER>.*?)/#?(?P<CHAN>.*?)/(?P<BOT>.*?)/#?(?P<PACK>\d+)/?'
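    # A hypothetical URL matching the pattern above:
    #   xdcc://irc.example.net:6667/#channel/SomeBot/#42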
__config__ = [("nick", "str", "Nickname", "pyload" ),
("ident", "str", "Ident", "pyloadident" ),
("realname", "str", "Realname", "pyloadreal" ),
("ctcp_version", "str","CTCP version string", "pyLoad! IRC Interface")]
__description__ = """Download from IRC XDCC bot"""
__license__ = "GPLv3"
__authors__ = [("jeix", "jeix@hasnomail.com" ),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
def setup(self):
self.timeout = 30
self.multiDL = False
def process(self, pyfile):
#: Change request type
self.req = self.pyload.requestFactory.getRequest(self.classname, type="XDCC")
for _i in xrange(0, 3):
try:
nmn = self.do_download(pyfile.url)
self.log_info("Download of %s finished." % nmn)
return
except socket.error, e:
if hasattr(e, "errno") and e.errno is not None:
err_no = e.errno
                    if err_no in (10054, 10061):  # WSAECONNRESET / WSAECONNREFUSED (Windows)
self.log_warning("Server blocked our ip, retry in 5 min")
self.wait(300)
continue
else:
self.log_error(_("Failed due to socket errors. Code: %s") % err_no)
self.fail(_("Failed due to socket errors. Code: %s") % err_no)
else:
err_msg = e.args[0]
self.log_error(_("Failed due to socket errors: '%s'") % err_msg)
self.fail(_("Failed due to socket errors: '%s'") % err_msg)
self.log_error(_("Server blocked our ip, retry again later manually"))
self.fail(_("Server blocked our ip, retry again later manually"))
def do_download(self, url):
self.pyfile.setStatus("waiting")
server, chan, bot, pack = re.match(self.__pattern__, url).groups()
nick = self.config.get('nick')
ident = self.config.get('ident')
realname = self.config.get('realname')
ctcp_version = self.config.get('ctcp_version')
temp = server.split(':')
ln = len(temp)
if ln == 2:
host, port = temp
elif ln == 1:
host, port = temp[0], 6667
else:
self.fail(_("Invalid hostname for IRC Server: %s") % server)
#######################
#: CONNECT TO IRC AND IDLE FOR REAL LINK
dl_time = time.time()
sock = socket.socket()
self.log_info(_("Connecting to: %s:%s") % (host, port))
sock.connect((host, int(port)))
if nick == "pyload":
nick = "pyload-%d" % (time.time() % 1000) #: last 3 digits
sock.send("NICK %s\r\n" % nick)
sock.send("USER %s %s bla :%s\r\n" % (ident, host, realname))
self.log_info(_("Connect success."))
self.wait(5) # Wait for logon to complete
sock.send("JOIN #%s\r\n" % chan)
sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
#: IRC recv loop
readbuffer = ""
retry = None
m = None
while m is None:
if retry:
if time.time() > retry:
retry = None
dl_time = time.time()
sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
else:
if (dl_time + self.timeout) < time.time(): #@TODO: add in config
sock.send("QUIT :byebye\r\n")
sock.close()
self.log_error(_("XDCC Bot did not answer"))
self.fail(_("XDCC Bot did not answer"))
fdset = select.select([sock], [], [], 0)
if sock not in fdset[0]:
continue
readbuffer += sock.recv(1024)
lines = readbuffer.split("\n")
readbuffer = lines.pop()
for line in lines:
# if self.pyload.debug:
# self.log_debug("*> " + decode(line))
line = line.rstrip()
first = line.split()
if first[0] == "PING":
sock.send("PONG %s\r\n" % first[1])
if first[0] == "ERROR":
self.fail(_("IRC-Error: %s") % line)
msg = line.split(None, 3)
if len(msg) != 4:
continue
msg = {'origin': msg[0][1:],
'action': msg[1],
'target': msg[2],
'text' : msg[3][1:]}
if msg['target'][0:len(nick)] == nick and msg['action'] == "PRIVMSG":
if msg['text'] == "\x01VERSION\x01":
self.log_debug(_("Sending CTCP VERSION"))
sock.send("NOTICE %s :%s\r\n" % (msg['origin'], ctcp_version))
elif msg['text'] == "\x01TIME\x01":
self.log_debug(_("Sending CTCP TIME"))
sock.send("NOTICE %s :%d\r\n" % (msg['origin'], time.time()))
elif msg['text'] == "\x01LAG\x01":
pass #: don't know how to answer
if msg['origin'][0:len(bot)] != bot\
or msg['target'][0:len(nick)] != nick\
or msg['action'] not in ("PRIVMSG", "NOTICE"):
continue
self.log_debug(_("PrivMsg: <%s> - %s" % (msg['origin'], msg['text'])))
if "You already requested that pack" in msg['text']:
retry = time.time() + 300
elif "you must be on a known channel to request a pack" in msg['text']:
self.log_error(_("Invalid channel"))
self.fail(_("Invalid channel"))
m = re.match('\x01DCC SEND (?P<NAME>.*?) (?P<IP>\d+) (?P<PORT>\d+)(?: (?P<SIZE>\d+))?\x01', msg['text'])
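                # A hypothetical message matching the regex above:
                #   \x01DCC SEND some.file.tar 3232235777 40000 123456\x01
                # where 3232235777 is 192.168.1.1 packed into an unsigned
                # 32-bit integer, decoded below via struct.pack/inet_ntoa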
#: Get connection data
ip = socket.inet_ntoa(struct.pack('!I', int(m.group('IP'))))
port = int(m.group('PORT'))
file_name = m.group('NAME')
if m.group('SIZE'):
self.req.filesize = long(m.group('SIZE'))
self.pyfile.name = file_name
dl_folder = fsjoin(self.pyload.config.get('general', 'download_folder'),
self.pyfile.package().folder if self.pyload.config.get("general",
"folder_per_package") else "")
dl_file = fsjoin(dl_folder, file_name)
if not exists(dl_folder):
os.makedirs(dl_folder)
self.set_permissions(dl_folder)
self.log_info(_("Downloading %s from %s:%d") % (file_name, ip, port))
self.pyfile.setStatus("downloading")
newname = self.req.download(ip, port, dl_file, sock, self.pyfile.setProgress)
if newname and newname != dl_file:
self.log_info(_("%(name)s saved as %(newname)s") % {'name': self.pyfile.name, 'newname': newname})
dl_file = newname
#: kill IRC socket
#: sock.send("QUIT :byebye\r\n")
sock.close()
self.last_download = dl_file
return self.last_download
|
kaarl/pyload
|
module/plugins/hoster/XDCC.py
|
Python
|
gpl-3.0
| 7,856 | 0.006237 |
import os
import argparse
import tensorflow as tf
import numpy as np
import sys
sys.path.append('../')
from reader import flickr8k_raw_data
def make_example(image_feature, caption_feature, id):
# The object we return
ex = tf.train.SequenceExample()
# A non-sequential feature of our example
sequence_length = len(caption_feature)
for f in image_feature:
ex.context.feature["image_feature"].float_list.value.append(float(f))
ex.context.feature["id"].bytes_list.value.append(id)
fl_tokens = ex.feature_lists.feature_list["caption_feature"]
for token in caption_feature:
fl_tokens.feature.add().int64_list.value.append(token)
return ex
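# A minimal hedged sketch (hypothetical values) of calling make_example: the
# image feature and id become context features, while each caption token id
# becomes one frame of the "caption_feature" feature_list.
#
#   ex = make_example([0.1, 0.2], [3, 7, 5], "img_001.jpg")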
def arguments():
parser = argparse.ArgumentParser()
parser.add_argument('cnn_feats_path', help='a numpy.mmap expected')
parser.add_argument(
'caption_tokens_dir',
help='Directory containing train, test and dev captions.')
args = parser.parse_args()
return args
def _strip_name(paths):
return [(os.path.basename(p), i) for i, p in enumerate(paths)]
def main():
args = arguments()
# read the mmap file containing CNN features
feats_fname = os.path.splitext(os.path.basename(args.cnn_feats_path))[0]
img_name_list_path = os.path.join(
os.path.dirname(args.cnn_feats_path),
'{}_list.txt'.format(
'_'.join(feats_fname.split('_')[:-3])))
feats_shape = tuple([int(i) for i in feats_fname.split('_')[-1].split('X')])
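    # A hedged example of the naming convention assumed here (hypothetical
    # name): 'cnn_train_fc7_4096X1000.npy' gives img_name_list_path
    # 'cnn_list.txt' and feats_shape (4096, 1000).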
feats_mmap = np.memmap(args.cnn_feats_path, mode='r', # read-only
shape=feats_shape, dtype=np.float32)
img_to_idx = {}
with open(img_name_list_path, 'r') as fp:
img_to_idx = dict(_strip_name(fp.read().split('\n')))
# load all the captions
train_caps, test_caps, dev_caps, vocab = flickr8k_raw_data(
args.caption_tokens_dir)
rand_idx = np.arange(0, len(train_caps['names']))
rng = np.random.RandomState(seed=1234)
rng.shuffle(rand_idx)
# dump the captions generated for debugging purpose
with open(os.path.join(args.caption_tokens_dir, 'dump.txt'), 'w') as fp:
from pprint import pformat
fp.write("\n###### vocab######\n")
fp.write(pformat(vocab))
fp.write("\n###### train ######\n")
rand_train_caps = {
'names': [train_caps['names'][i] for i in rand_idx],
'word_to_ids': [train_caps['word_to_ids'][i] for i in rand_idx],
}
fp.write(pformat([(n, w) for n, w in zip(
rand_train_caps['names'], rand_train_caps['word_to_ids'])]))
fp.write("\n###### test ######\n")
fp.write(pformat([(n, w) for n, w in zip(
test_caps['names'], test_caps['word_to_ids'])]))
fp.write("\n###### dev ######\n")
fp.write(pformat([(n, w) for n, w in zip(
dev_caps['names'], dev_caps['word_to_ids'])]))
# process train imgs and write to a record file
train_tfrecord_name = os.path.join(
args.caption_tokens_dir, '{}.train.tfrecord'.format(
'_'.join(feats_fname.split('_')[:-3])))
train_writer = tf.python_io.TFRecordWriter(train_tfrecord_name)
# for i, (img_name, cap_ids) in enumerate(
# zip(train_caps['names'], train_caps['word_to_ids'])):
for i, (idx) in enumerate(rand_idx):
img_name = train_caps['names'][idx].split('#')[0]
cap_ids = train_caps['word_to_ids'][idx]
img_feat = feats_mmap[img_to_idx[img_name], :]
train_writer.write(
make_example(img_feat, cap_ids, img_name).SerializeToString())
if i % 100 == 0:
print "train records written {}/{}".format(
i, len(train_caps['names']))
train_writer.close()
# process test imgs and write to a record file
test_tfrecord_name = os.path.join(
args.caption_tokens_dir, '{}.test.tfrecord'.format(
'_'.join(feats_fname.split('_')[:-3])))
test_writer = tf.python_io.TFRecordWriter(test_tfrecord_name)
for i, (img_name, cap_ids) in enumerate(
zip(test_caps['names'], test_caps['word_to_ids'])):
img_name = img_name.split('#')[0]
img_feat = feats_mmap[img_to_idx[img_name], :]
test_writer.write(
make_example(img_feat, cap_ids, img_name).SerializeToString())
if i % 100 == 0:
print "test records written {}/{}".format(
i, len(test_caps['names']))
test_writer.close()
# process dev imgs and write to a record file
dev_tfrecord_name = os.path.join(
args.caption_tokens_dir, '{}.dev.tfrecord'.format(
'_'.join(feats_fname.split('_')[:-3])))
dev_writer = tf.python_io.TFRecordWriter(dev_tfrecord_name)
for i, (img_name, cap_ids) in enumerate(
zip(dev_caps['names'], dev_caps['word_to_ids'])):
img_name = img_name.split('#')[0]
img_feat = feats_mmap[img_to_idx[img_name], :]
dev_writer.write(
make_example(img_feat, cap_ids, img_name).SerializeToString())
if i % 100 == 0:
print "dev records written {}/{}".format(
i, len(dev_caps['names']))
dev_writer.close()
print "Wrote to %s" % train_tfrecord_name
print "Wrote to %s" % dev_tfrecord_name
print "Wrote to %s" % test_tfrecord_name
if __name__ == '__main__':
main()
|
chintak/image-captioning
|
scripts/tfrecord_writer.py
|
Python
|
mit
| 5,331 | 0.002626 |