| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
# -*- mode: python; coding: utf-8; -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
class KeyValueSerializer(object):
def __init__(self, event, inline=()):
self.event = event
self.inline = inline
def format_string(self):
return ' '.join(
'{}=%s'.format(k)
if k not in self.inline
else '{}="{}"'.format(k, str(v).encode('unicode_escape').decode())
for k, v in self.event.items()
)
def arguments(self):
return [v for k, v in self.event.items(omit=self.inline)]
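# --- Hedged usage sketch (editor's addition, not part of the upstream module) ---
# Shows the wire format produced by format_string() using a plain dict in place
# of the tlogger event object. arguments() is not exercised here because it
# relies on the event's items(omit=...) API, which a plain dict does not offer.
if __name__ == '__main__':
    serializer = KeyValueSerializer({'action': 'login', 'user': 'alice'}, inline=('user',))
    print(serializer.format_string())  # e.g. action=%s user="alice"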
|
oxyum/python-tlogger
|
tlogger/serializers.py
|
Python
|
mit
| 668 | 0 |
import shopify
from test.test_helper import TestCase
class EventTest(TestCase):
def test_prefix_uses_resource(self):
prefix = shopify.Event._prefix(options={"resource": "orders", "resource_id": 42})
self.assertEqual("https://this-is-my-test-show.myshopify.com/admin/api/unstable/orders/42", prefix)
def test_prefix_doesnt_need_resource(self):
prefix = shopify.Event._prefix()
self.assertEqual("https://this-is-my-test-show.myshopify.com/admin/api/unstable", prefix)
|
Shopify/shopify_python_api
|
test/event_test.py
|
Python
|
mit
| 509 | 0.005894 |
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.chat.TTSCWhiteListTerminal
from otp.speedchat.SCTerminal import SCTerminal
from otp.otpbase.OTPLocalizer import SpeedChatStaticText
SCStaticTextMsgEvent = 'SCStaticTextMsg'
class TTSCWhiteListTerminal(SCTerminal):
def __init__(self, textId, parentMenu = None):
SCTerminal.__init__(self)
self.parentClass = parentMenu
self.textId = textId
self.text = SpeedChatStaticText[self.textId]
print 'SpeedText %s %s' % (self.textId, self.text)
def handleSelect(self):
SCTerminal.handleSelect(self)
if not self.parentClass.whisperAvatarId:
base.localAvatar.chatMgr.fsm.request('whiteListOpenChat')
else:
base.localAvatar.chatMgr.fsm.request('whiteListAvatarChat', [self.parentClass.whisperAvatarId])
|
DedMemez/ODS-August-2017
|
chat/TTSCWhiteListTerminal.py
|
Python
|
apache-2.0
| 874 | 0.006865 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_rebel_brigadier_general_sullustan_male.iff"
result.attribute_template_id = 9
result.stfName("npc_name","sullustan_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/mobile/shared_dressed_rebel_brigadier_general_sullustan_male.py
|
Python
|
mit
| 475 | 0.046316 |
"""
9012 : 괄호
URL : https://www.acmicpc.net/problem/9012
Input :
6
(())())
(((()())()
(()())((()))
((()()(()))(((())))()
()()()()(()()())()
(()((())()(
Output :
NO
NO
YES
NO
YES
NO
"""
N = int(input())
for _ in range(N):
ps = input()
    if ps[-1] == '(':
        print("NO")
    else:
        count = 0
        for c in ps:
            if c == '(':
                count += 1
            elif c == ')':
                count -= 1
            if count < 0:
                break
        if count == 0:
            print("YES")
        else:
            print("NO")
|
0x1306e6d/Baekjoon
|
baekjoon/9012.py
|
Python
|
gpl-2.0
| 694 | 0 |
import configparser
parser = configparser.ConfigParser()
parser.add_section('bug_tracker')
parser.set('bug_tracker', 'url', 'http://localhost:8080/bugs')
parser.set('bug_tracker', 'username', 'dhellmann')
parser.set('bug_tracker', 'password', 'secret')
for section in parser.sections():
print(section)
for name, value in parser.items(section):
print(' {} = {!r}'.format(name, value))
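# --- Hedged follow-up sketch (editor's addition) ---
# The populated parser can be persisted with write(); the filename below is
# illustrative only.
with open('bug_tracker.ini', 'w') as configfile:
    parser.write(configfile)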
|
jasonwee/asus-rt-n14uhp-mrtg
|
src/lesson_application_building_blocks/configparser_populate.py
|
Python
|
apache-2.0
| 408 | 0 |
import unittest
from .. import views
class TestViews(unittest.TestCase):
def setUp(self):
pass
def test_nothing(self):
views
|
meltmedia/bouncer
|
bouncer/tests/test_views.py
|
Python
|
mit
| 153 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from intelmq.lib import utils
from intelmq.lib.bot import Bot
from intelmq.lib.message import Event
class MalwareGroupIPsParserBot(Bot):
def process(self):
report = self.receive_message()
if not report:
self.acknowledge_message()
return
        if not report.contains("raw"):
            self.acknowledge_message()
            return
raw_report = utils.base64_decode(report.value("raw"))
raw_report = raw_report.split("<tbody>")[1]
raw_report = raw_report.split("</tbody>")[0]
raw_report_splitted = raw_report.split("<tr>")
for row in raw_report_splitted:
row = row.strip()
if row == "":
continue
row_splitted = row.split("<td>")
ip = row_splitted[1].split('">')[1].split("<")[0].strip()
time_source = row_splitted[6].replace("</td></tr>", "").strip()
time_source = time_source + " 00:00:00 UTC"
event = Event(report)
event.add('time.source', time_source, sanitize=True)
event.add('classification.type', u'malware')
event.add('source.ip', ip, sanitize=True)
event.add('raw', row, sanitize=True)
self.send_message(event)
self.acknowledge_message()
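# --- Hedged illustration (editor's addition, not part of the upstream bot) ---
# The same <td>-splitting used in process(), applied to one made-up table row,
# to show which columns the parser expects (IP link in cell 1, date in cell 6).
def _example_extract(row):
    row_splitted = row.split("<td>")
    ip = row_splitted[1].split('">')[1].split("<")[0].strip()
    date = row_splitted[6].replace("</td></tr>", "").strip()
    return ip, date
# _example_extract('<td><a href="#">198.51.100.7</a></td><td></td><td></td>'
#                  '<td></td><td></td><td>2015-01-01</td></tr>')
# -> ('198.51.100.7', '2015-01-01')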
if __name__ == "__main__":
bot = MalwareGroupIPsParserBot(sys.argv[1])
bot.start()
|
sch3m4/intelmq
|
intelmq/bots/parsers/malwaregroup/parser_ips.py
|
Python
|
agpl-3.0
| 1,460 | 0.000685 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import time
from openerp.report import report_sxw
from openerp import pooler
class doctor_disability(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(doctor_disability, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'select_type': self.select_type,
'select_age': self.select_age,
'select_diseases': self.select_diseases,
'select_diseases_type': self.select_diseases_type,
'return_street_home': self.return_street_home,
'return_number_phone': self.return_number_phone,
'return_sex': self.return_sex
})
def return_street_home(self, country, state, city):
street = ""
if country:
street += country.title() + " - "
if state:
street += state.title() + " - "
if city:
street += city.title() + " - "
return street[:len(street) -2]
def return_number_phone(self, phone, mobile):
return_phone = ""
if phone:
return_phone += phone + " - "
if mobile:
return_phone += mobile + " - "
return return_phone[:len(return_phone)-2]
def return_sex(self, sex):
if sex == 'm':
return "Masculino"
return "Femenino"
def select_type(self, tipo_usuario):
if tipo_usuario:
tipo = self.pool.get('doctor.tipousuario.regimen').browse(self.cr, self.uid, tipo_usuario).name
else:
tipo= None
return tipo
def select_age(self, age):
context = {}
context.update({'lang' : self.pool.get('res.users').browse(self.cr, self.uid, self.uid, context=context).lang})
attentions = self.pool.get('doctor.attentions')
age_unit = dict(attentions.fields_get(self.cr, self.uid, 'age_unit',context=context).get('age_unit').get('selection')).get(
str(age))
return age_unit
def select_diseases(self, status):
if status== 'presumptive':
return "Impresión Diagnóstica"
if status== 'confirm':
return "Confirmado"
if status== 'recurrent':
return "Recurrente"
return ""
def select_diseases_type(self, diseases_type):
if diseases_type== 'main':
return "Principal"
if diseases_type== 'related':
return "Relacionado"
return ""
report_sxw.report_sxw('report.doctor_disability_half', 'doctor.attentions',
'addons/l10n_co_doctor/report/doctor_disability_half.rml',
parser=doctor_disability, header=False)
|
hivam/l10n_co_doctor
|
report/doctor_disability_half.py
|
Python
|
agpl-3.0
| 3,267 | 0.027565 |
#!/usr/bin/env python
from __future__ import print_function
import os
import json
import configparser
import codecs
import sys
from contextlib import contextmanager
from datetime import datetime
from sqlalchemy import and_, create_engine, Column, Integer, String, DateTime, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, aliased
from sqlalchemy.exc import IntegrityError
from sqlalchemy_utils import database_exists, create_database
from datatables import ColumnDT, DataTables
MS_WD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CONFIG_FILE = os.path.join(MS_WD, "api_config.ini")
if os.path.join(MS_WD, 'libs') not in sys.path:
sys.path.append(os.path.join(MS_WD, 'libs'))
import common
Base = declarative_base()
Session = sessionmaker()
class Task(Base):
__tablename__ = "Tasks"
task_id = Column(Integer, primary_key=True)
task_status = Column(String)
sample_id = Column(String, unique=False)
timestamp = Column(DateTime)
def __repr__(self):
return '<Task("{0}","{1}","{2}","{3}")>'.format(
self.task_id, self.task_status, self.sample_id, self.timestamp
)
def to_dict(self):
return {attr.name: getattr(self, attr.name) for attr in self.__table__.columns}
def to_json(self):
return json.dumps(self.to_dict())
class Database(object):
'''
This class enables CRUD operations with the database that holds the task definitions.
Note that in the configuration file, the database type (parameter: db_type) needs to be
a SQLAlchemy dialect: sqlite, mysql, postgresql, oracle, or mssql. The driver can optionally be
specified as well, i.e., 'postgresql+psycopg2' (see http://docs.sqlalchemy.org/en/latest/core/engines.html).
'''
DEFAULTCONF = {
'db_type': 'sqlite',
'host_string': 'localhost',
'db_name': 'task_db',
'username': 'multiscanner',
'password': 'CHANGEME'
}
def __init__(self, config=None, configfile=CONFIG_FILE, regenconfig=False):
self.db_connection_string = None
self.db_engine = None
# Configuration parsing
config_parser = configparser.SafeConfigParser()
config_parser.optionxform = str
# (re)generate conf file if necessary
if regenconfig or not os.path.isfile(configfile):
self._rewrite_config(config_parser, configfile, config)
# now read in and parse the conf file
config_parser.read(configfile)
# If we didn't regen the config file in the above check, it's possible
# that the file is missing our DB settings...
if not config_parser.has_section(self.__class__.__name__):
self._rewrite_config(config_parser, configfile, config)
config_parser.read(configfile)
# If configuration was specified, use what was stored in the config file
# as a base and then override specific settings as contained in the user's
# config. This allows the user to specify ONLY the config settings they want to
# override
config_from_file = dict(config_parser.items(self.__class__.__name__))
if config:
for key_ in config:
config_from_file[key_] = config[key_]
self.config = config_from_file
def _rewrite_config(self, config_parser, configfile, usr_override_config):
"""
Regenerates the Database-specific part of the API config file
"""
if os.path.isfile(configfile):
# Read in the old config
config_parser.read(configfile)
if not config_parser.has_section(self.__class__.__name__):
config_parser.add_section(self.__class__.__name__)
if not usr_override_config:
usr_override_config = self.DEFAULTCONF
# Update config
for key_ in usr_override_config:
config_parser.set(self.__class__.__name__, key_, str(usr_override_config[key_]))
with codecs.open(configfile, 'w', 'utf-8') as conffile:
config_parser.write(conffile)
def init_db(self):
"""
Initializes the database connection based on the configuration parameters
"""
db_type = self.config['db_type']
db_name = self.config['db_name']
if db_type == 'sqlite':
# we can ignore host, username, password, etc
sql_lite_db_path = os.path.join(MS_WD, db_name)
self.db_connection_string = 'sqlite:///{}'.format(sql_lite_db_path)
else:
username = self.config['username']
password = self.config['password']
host_string = self.config['host_string']
self.db_connection_string = '{}://{}:{}@{}/{}'.format(db_type, username, password, host_string, db_name)
self.db_engine = create_engine(self.db_connection_string)
# If db not present AND type is not SQLite, create the DB
if not self.config['db_type'] == 'sqlite':
if not database_exists(self.db_engine.url):
create_database(self.db_engine.url)
Base.metadata.bind = self.db_engine
Base.metadata.create_all()
# Bind the global Session to our DB engine
global Session
Session.configure(bind=self.db_engine)
@contextmanager
def db_session_scope(self):
"""
Taken from http://docs.sqlalchemy.org/en/latest/orm/session_basics.html.
Provides a transactional scope around a series of operations.
"""
ses = Session()
try:
yield ses
ses.commit()
except:
ses.rollback()
raise
finally:
ses.close()
def add_task(self, task_id=None, task_status='Pending', sample_id=None, timestamp=None):
with self.db_session_scope() as ses:
task = Task(
task_id=task_id,
task_status=task_status,
sample_id=sample_id,
timestamp=timestamp,
)
try:
ses.add(task)
# Need to explicitly commit here in order to update the ID in the DAO
ses.commit()
except IntegrityError as e:
print('PRIMARY KEY must be unique! %s' % e)
return -1
created_task_id = task.task_id
return created_task_id
def update_task(self, task_id, task_status, timestamp=None):
with self.db_session_scope() as ses:
task = ses.query(Task).get(task_id)
if task:
task.task_status = task_status
if timestamp:
task.timestamp = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f')
return task.to_dict()
def get_task(self, task_id):
with self.db_session_scope() as ses:
task = ses.query(Task).get(task_id)
if task:
# unbind Task from Session
ses.expunge(task)
return task
def get_all_tasks(self):
with self.db_session_scope() as ses:
rs = ses.query(Task).all()
# TODO: For testing, do not use in production
task_list = []
for task in rs:
ses.expunge(task)
task_list.append(task.to_dict())
return task_list
def search(self, params, id_list=None, search_by_value=False, return_all=False):
'''Search according to Datatables-supplied parameters.
Returns results in format expected by Datatables.
'''
with self.db_session_scope() as ses:
fields = [Task.task_id, Task.sample_id, Task.task_status, Task.timestamp]
columns = [ColumnDT(f) for f in fields]
if return_all:
# History page
if id_list is None:
# Return all tasks
query = ses.query(*fields)
else:
# Query all tasks for samples with given IDs
query = ses.query(*fields).filter(Task.sample_id.in_(id_list))
else:
# Analyses page
task_alias = aliased(Task)
sample_subq = (ses.query(task_alias.sample_id,
func.max(task_alias.timestamp).label('ts_max'))
.group_by(task_alias.sample_id)
.subquery()
.alias('sample_subq'))
# Query for most recent task per sample
query = (ses.query(*fields)
.join(sample_subq,
and_(Task.sample_id == sample_subq.c.sample_id,
Task.timestamp == sample_subq.c.ts_max)))
if id_list is not None:
# Query for most recent task per sample, only for samples with given IDs
query = query.filter(Task.sample_id.in_(id_list))
if not search_by_value:
# Don't limit search by search term or it won't return anything
# (search term already handled by Elasticsearch)
del params['search[value]']
rowTable = DataTables(params, query, columns)
output = rowTable.output_result()
ses.expunge_all()
return output
def delete_task(self, task_id):
with self.db_session_scope() as ses:
task = ses.query(Task).get(task_id)
if task:
ses.delete(task)
return True
else:
return False
def exists(self, sample_id):
'''Checks if any tasks exist in the database with the given sample_id.
Returns:
Task id of the most recent task with the given sample_id if one
exists in task database, otherwise None.
'''
with self.db_session_scope() as ses:
task = ses.query(Task).filter(Task.sample_id == sample_id)
# Query for most recent task with given sample_id
task_alias = aliased(Task)
sample_subq = (ses.query(task_alias.sample_id,
func.max(task_alias.timestamp).label('ts_max'))
.group_by(task_alias.sample_id)
.subquery()
.alias('sample_subq'))
query = (ses.query(Task)
.join(sample_subq,
and_(Task.sample_id == sample_subq.c.sample_id,
Task.timestamp == sample_subq.c.ts_max)))
task = query.filter(Task.sample_id == sample_id).first()
if task:
return task.task_id
else:
return None
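# --- Hedged usage sketch (editor's addition, not part of the upstream module) ---
# Ties together the flow described in the Database docstring: build the
# connection from config, create the schema, then add and read back a task.
# The sqlite database name and sample_id below are illustrative only.
if __name__ == '__main__':
    db = Database(config={'db_type': 'sqlite', 'db_name': 'example_task_db'})
    db.init_db()
    example_id = db.add_task(sample_id='d41d8cd98f00b204e9800998ecf8427e')
    print(db.get_task(example_id))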
|
awest1339/multiscanner
|
storage/sql_driver.py
|
Python
|
mpl-2.0
| 11,174 | 0.002148 |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import neighbor
class neighbors(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-neighbor-attribute/neighbors. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container describes IS neighbors.
"""
__slots__ = ("_path_helper", "_extmethods", "__neighbor")
_yang_name = "neighbors"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__neighbor = YANGDynClass(
base=YANGListType(
False,
neighbor.neighbor,
yang_name="neighbor",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="neighbor",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"isis-neighbor-attribute",
"neighbors",
]
def _get_neighbor(self):
"""
Getter method for neighbor, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor (list)
YANG Description: This list defines ISIS extended reachability neighbor
attributes.
"""
return self.__neighbor
def _set_neighbor(self, v, load=False):
"""
Setter method for neighbor, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_neighbor is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_neighbor() directly.
YANG Description: This list defines ISIS extended reachability neighbor
attributes.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
False,
neighbor.neighbor,
yang_name="neighbor",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="neighbor",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """neighbor must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType(False,neighbor.neighbor, yang_name="neighbor", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='False', extensions=None), is_container='list', yang_name="neighbor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
}
)
self.__neighbor = t
if hasattr(self, "_set"):
self._set()
def _unset_neighbor(self):
self.__neighbor = YANGDynClass(
base=YANGListType(
False,
neighbor.neighbor,
yang_name="neighbor",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="neighbor",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
neighbor = __builtin__.property(_get_neighbor)
_pyangbind_elements = OrderedDict([("neighbor", neighbor)])
from . import neighbor
class neighbors(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-neighbor-attribute/neighbors. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container describes IS neighbors.
"""
__slots__ = ("_path_helper", "_extmethods", "__neighbor")
_yang_name = "neighbors"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__neighbor = YANGDynClass(
base=YANGListType(
False,
neighbor.neighbor,
yang_name="neighbor",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="neighbor",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"isis-neighbor-attribute",
"neighbors",
]
def _get_neighbor(self):
"""
Getter method for neighbor, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor (list)
YANG Description: This list defines ISIS extended reachability neighbor
attributes.
"""
return self.__neighbor
def _set_neighbor(self, v, load=False):
"""
Setter method for neighbor, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_neighbor is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_neighbor() directly.
YANG Description: This list defines ISIS extended reachability neighbor
attributes.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
False,
neighbor.neighbor,
yang_name="neighbor",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="neighbor",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """neighbor must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType(False,neighbor.neighbor, yang_name="neighbor", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='False', extensions=None), is_container='list', yang_name="neighbor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
}
)
self.__neighbor = t
if hasattr(self, "_set"):
self._set()
def _unset_neighbor(self):
self.__neighbor = YANGDynClass(
base=YANGListType(
False,
neighbor.neighbor,
yang_name="neighbor",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="neighbor",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
neighbor = __builtin__.property(_get_neighbor)
_pyangbind_elements = OrderedDict([("neighbor", neighbor)])
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/__init__.py
|
Python
|
apache-2.0
| 14,460 | 0.001037 |
"""
Support for RESTful binary sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.rest/
"""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.sensor.rest import RestData
from homeassistant.const import CONF_VALUE_TEMPLATE
from homeassistant.helpers import template
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'REST Binary Sensor'
DEFAULT_METHOD = 'GET'
# pylint: disable=unused-variable
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup REST binary sensors."""
resource = config.get('resource', None)
method = config.get('method', DEFAULT_METHOD)
payload = config.get('payload', None)
verify_ssl = config.get('verify_ssl', True)
rest = RestData(method, resource, payload, verify_ssl)
rest.update()
if rest.data is None:
_LOGGER.error('Unable to fetch Rest data')
return False
add_devices([RestBinarySensor(
hass, rest, config.get('name', DEFAULT_NAME),
config.get(CONF_VALUE_TEMPLATE))])
# pylint: disable=too-many-arguments
class RestBinarySensor(BinarySensorDevice):
"""A REST binary sensor."""
def __init__(self, hass, rest, name, value_template):
"""Initialize a REST binary sensor."""
self._hass = hass
self.rest = rest
self._name = name
self._state = False
self._value_template = value_template
self.update()
@property
def name(self):
"""Return the name of the binary sensor."""
return self._name
@property
def is_on(self):
"""Return true if the binary sensor is on."""
if self.rest.data is None:
return False
if self._value_template is not None:
self.rest.data = template.render_with_possible_json_value(
self._hass, self._value_template, self.rest.data, False)
return bool(int(self.rest.data))
def update(self):
"""Get the latest data from REST API and updates the state."""
self.rest.update()
|
aoakeson/home-assistant
|
homeassistant/components/binary_sensor/rest.py
|
Python
|
mit
| 2,159 | 0 |
# Copyright 2013 Allen Institute
# This file is part of dipde
# dipde is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dipde is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dipde. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import dipde
import io
import os
import sys
here = os.path.abspath(os.path.dirname(__file__))
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
#long_description = read('README.md')
def prepend_find_packages(*roots):
''' Recursively traverse nested packages under the root directories
'''
packages = []
for root in roots:
packages += [root]
packages += [root + '.' + s for s in find_packages(root)]
return packages
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--junitxml=result.xml']
self.test_args_cov = self.test_args + ['--cov=dipde', '--cov-report=term', '--cov-report=html','--cov-config=.coveragerc']
self.test_suite = True
def run_tests(self):
import pytest
try:
errcode = pytest.main(self.test_args_cov)
except:
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(
name='dipde',
version=dipde.__version__,
url='https://github.com/AllenBrainAtlas/DiPDE',
author='Nicholas Cain',
tests_require=['pytest'],
install_requires=[],
cmdclass={'test': PyTest},
author_email='nicholasc@alleninstitute.org',
description='Numerical solver for coupled population density equations',
long_description='',
packages=prepend_find_packages('dipde'),
include_package_data=True,
package_data={'':['*.md', '*.txt', '*.cfg']},
platforms='any',
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: Apache Software License :: 2.0',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
],
extras_require={
'testing': ['pytest'],
}
)
|
nicain/dipde_dev
|
setup.py
|
Python
|
gpl-3.0
| 2,931 | 0.005459 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Sequence,
Tuple,
Optional,
Iterator,
)
from google.cloud.compute_v1.types import compute
class ListPager:
"""A pager for iterating through ``list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.NotificationEndpointList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``List`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.NotificationEndpointList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.NotificationEndpointList],
request: compute.ListRegionNotificationEndpointsRequest,
response: compute.NotificationEndpointList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.ListRegionNotificationEndpointsRequest):
The initial request object.
response (google.cloud.compute_v1.types.NotificationEndpointList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.ListRegionNotificationEndpointsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[compute.NotificationEndpointList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[compute.NotificationEndpoint]:
for page in self.pages:
yield from page.items
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
|
googleapis/python-compute
|
google/cloud/compute_v1/services/region_notification_endpoints/pagers.py
|
Python
|
apache-2.0
| 3,203 | 0.000937 |
__author__ = 'Per'
|
Per-Starke/Visualizer
|
src/__init__.py
|
Python
|
apache-2.0
| 19 | 0 |
#!/usr/bin/env python
import TransferErrors as TE
import cPickle as pickle
with open('stuck.pkl','rb') as pklfile:
stuck = pickle.load(pklfile)
TE.makeBasicTable(stuck,TE.workdir+'html/table.html',TE.webdir+'table.html')
TE.makeCSV(stuck,TE.webdir+'data.csv')
for basis in [-6,-5,-4,-3,-1,1,2]:
TE.makeJson(stuck,TE.webdir+('stuck_%i'%basis).replace('-','m')+'.json',basis)
|
sidnarayanan/TransferErrors
|
bin/write.py
|
Python
|
mit
| 382 | 0.044503 |
"""
Certificate generation module.
"""
from OpenSSL import crypto
TYPE_RSA = crypto.TYPE_RSA
TYPE_DSA = crypto.TYPE_DSA
def createKeyPair(type, bits):
"""
Create a public/private key pair.
Arguments: type - Key type, must be one of TYPE_RSA and TYPE_DSA
bits - Number of bits to use in the key
Returns: The public/private key pair in a PKey object
"""
pkey = crypto.PKey()
pkey.generate_key(type, bits)
return pkey
def createCertRequest(pkey, digest="sha256", **name):
"""
Create a certificate request.
Arguments: pkey - The key to associate with the request
               digest - Digest method to use for signing, default is sha256
**name - The name of the subject of the request, possible
arguments are:
C - Country name
ST - State or province name
L - Locality name
O - Organization name
OU - Organizational unit name
CN - Common name
emailAddress - E-mail address
Returns: The certificate request in an X509Req object
"""
req = crypto.X509Req()
subj = req.get_subject()
for (key,value) in name.items():
setattr(subj, key, value)
req.set_pubkey(pkey)
req.sign(pkey, digest)
return req
def createCertificate(req, (issuerCert, issuerKey), serial, (notBefore, notAfter), digest="sha256"):
"""
Generate a certificate given a certificate request.
    Arguments: req        - Certificate request to use
issuerCert - The certificate of the issuer
issuerKey - The private key of the issuer
serial - Serial number for the certificate
notBefore - Timestamp (relative to now) when the certificate
starts being valid
notAfter - Timestamp (relative to now) when the certificate
stops being valid
               digest     - Digest method to use for signing, default is sha256
Returns: The signed certificate in an X509 object
"""
cert = crypto.X509()
cert.set_serial_number(serial)
cert.gmtime_adj_notBefore(notBefore)
cert.gmtime_adj_notAfter(notAfter)
cert.set_issuer(issuerCert.get_subject())
cert.set_subject(req.get_subject())
cert.set_pubkey(req.get_pubkey())
cert.sign(issuerKey, digest)
return cert
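# --- Hedged usage sketch (editor's addition, not part of the upstream example) ---
# Chains the three helpers to produce a self-signed CA certificate, in the style
# of the classic pyOpenSSL certgen example; subject name, serial number and
# one-year validity window are illustrative. The module uses Python 2 tuple
# unpacking in createCertificate's signature, so this sketch assumes Python 2.
if __name__ == '__main__':
    ca_key = createKeyPair(TYPE_RSA, 2048)
    ca_req = createCertRequest(ca_key, CN='Example CA')
    # Self-signed: the request itself supplies the issuer subject and key.
    ca_cert = createCertificate(ca_req, (ca_req, ca_key), 1, (0, 60 * 60 * 24 * 365))
    print(crypto.dump_certificate(crypto.FILETYPE_PEM, ca_cert))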
|
dhain/greennet
|
examples/certs/certgen.py
|
Python
|
mit
| 2,547 | 0.002356 |
from threading import Thread
import pickle
import time
from pymouse import PyMouse
# from evdev import InputDevice, ecodes, UInput
from evdev import UInput, ecodes
class History(Thread):
def __init__(self):
Thread.__init__(self)
self.n = 0
self.st = True # Stop
self.sl = False # Sleep
self.ui = UInput()
self.history = []
self.m = PyMouse()
self.flag = True
self.rec = False
def send_event(self):
i = 0
while i < len(self.history):
now = self.history[i]
if i < len(self.history)-1: after = self.history[i+1]
if self.st: break
if not self.sl:
self.m.move( now.get("mouse")[0], now.get("mouse")[1])
if now.get("event").type == ecodes.EV_KEY:
self.ui.write(ecodes.EV_KEY, now.get("event").code, now.get("event").value)
self.ui.syn()
if i < len(self.history):
time.sleep(float(after.get("event").sec - now.get("event").sec)+float(after.get("event").usec - now.get("event").usec)/1000000)
i += 1
    # overriding the run() method
def run(self):
while self.flag:
if not self.st:
if self.n == 0:
                    while not self.st:
                        self.send_event()
elif self.n > 0:
for i in range(self.n):
self.send_event()
self.st = True
# print self.history
print("\nEnd")
def exit(self):
self.stop()
self.flag = False
def play(self):
self.st = False
def stop(self):
print("Stop")
self.st = True
def sleep(self):
if self.sl:
print("Play")
else:
print("Pause")
        self.sl = not self.sl
def reset(self):
self.history = []
def append_event(self, event):
# if event.type == ecodes.EV_KEY:
self.history.append({"mouse": self.m.position(), 'event': event})
# print(self.m.position())
# if self.rec:
def set_n(self, n):
self.n = n
    def save(self):
        s = input("\nEnter a file name: ")
        pickle.dump(self.history, open(s, 'wb'), -1)
        print("Saved to %s" % s)
    def load(self):
        """Load a previously saved history."""
        s = input("\nEnter a file name: ")
        try:
            self.history = pickle.load(open(s, 'rb'))
            print("Loaded from %s" % s)
        except FileNotFoundError:
            print("File not found!")
if __name__ == '__main__':
h = History()
|
andredalton/bcc
|
2014/MAC0242/miniep6/history.py
|
Python
|
apache-2.0
| 2,750 | 0.005091 |
# -*- coding: utf-8 -*-
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import test_account_payment_transfer_reconcile_batch
|
acsone/bank-payment
|
account_payment_transfer_reconcile_batch/tests/__init__.py
|
Python
|
agpl-3.0
| 150 | 0 |
class PlotEnum():
    point = 'POINT'
    line = 'LINE'
    bar = 'BAR'
|
Castronova/EMIT
|
gui/controller/enums.py
|
Python
|
gpl-2.0
| 73 | 0.013699 |
# Copyright (c) 2008 testtools developers. See LICENSE for details.
"""Test results and related things."""
__metaclass__ = type
__all__ = [
'ExtendedToOriginalDecorator',
'MultiTestResult',
'TestResult',
'ThreadsafeForwardingResult',
]
import datetime
import sys
import unittest
from testtools.compat import all, _format_exc_info, str_is_unicode, _u
# From http://docs.python.org/library/datetime.html
_ZERO = datetime.timedelta(0)
# A UTC class.
class UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return _ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return _ZERO
utc = UTC()
class TestResult(unittest.TestResult):
"""Subclass of unittest.TestResult extending the protocol for flexability.
This test result supports an experimental protocol for providing additional
data to in test outcomes. All the outcome methods take an optional dict
'details'. If supplied any other detail parameters like 'err' or 'reason'
should not be provided. The details dict is a mapping from names to
MIME content objects (see testtools.content). This permits attaching
tracebacks, log files, or even large objects like databases that were
part of the test fixture. Until this API is accepted into upstream
Python it is considered experimental: it may be replaced at any point
by a newer version more in line with upstream Python. Compatibility would
be aimed for in this case, but may not be possible.
:ivar skip_reasons: A dict of skip-reasons -> list of tests. See addSkip.
"""
def __init__(self):
# startTestRun resets all attributes, and older clients don't know to
# call startTestRun, so it is called once here.
# Because subclasses may reasonably not expect this, we call the
# specific version we want to run.
TestResult.startTestRun(self)
def addExpectedFailure(self, test, err=None, details=None):
"""Called when a test has failed in an expected manner.
Like with addSuccess and addError, testStopped should still be called.
:param test: The test that has been skipped.
:param err: The exc_info of the error that was raised.
:return: None
"""
# This is the python 2.7 implementation
self.expectedFailures.append(
(test, self._err_details_to_string(test, err, details)))
def addError(self, test, err=None, details=None):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
:param details: Alternative way to supply details about the outcome.
see the class docstring for more information.
"""
self.errors.append((test,
self._err_details_to_string(test, err, details)))
def addFailure(self, test, err=None, details=None):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
:param details: Alternative way to supply details about the outcome.
see the class docstring for more information.
"""
self.failures.append((test,
self._err_details_to_string(test, err, details)))
def addSkip(self, test, reason=None, details=None):
"""Called when a test has been skipped rather than running.
Like with addSuccess and addError, testStopped should still be called.
This must be called by the TestCase. 'addError' and 'addFailure' will
not call addSkip, since they have no assumptions about the kind of
errors that a test can raise.
:param test: The test that has been skipped.
:param reason: The reason for the test being skipped. For instance,
u"pyGL is not available".
:param details: Alternative way to supply details about the outcome.
see the class docstring for more information.
:return: None
"""
if reason is None:
reason = details.get('reason')
if reason is None:
reason = 'No reason given'
else:
reason = ''.join(reason.iter_text())
skip_list = self.skip_reasons.setdefault(reason, [])
skip_list.append(test)
def addSuccess(self, test, details=None):
"""Called when a test succeeded."""
def addUnexpectedSuccess(self, test, details=None):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"""Has this result been successful so far?
If there have been any errors, failures or unexpected successes,
return False. Otherwise, return True.
Note: This differs from standard unittest in that we consider
unexpected successes to be equivalent to failures, rather than
successes.
"""
return not (self.errors or self.failures or self.unexpectedSuccesses)
if str_is_unicode:
# Python 3 and IronPython strings are unicode, use parent class method
_exc_info_to_unicode = unittest.TestResult._exc_info_to_string
else:
# For Python 2, need to decode components of traceback according to
# their source, so can't use traceback.format_exception
# Here follows a little deep magic to copy the existing method and
# replace the formatter with one that returns unicode instead
from types import FunctionType as __F, ModuleType as __M
__f = unittest.TestResult._exc_info_to_string.im_func
__g = dict(__f.func_globals)
__m = __M("__fake_traceback")
__m.format_exception = _format_exc_info
__g["traceback"] = __m
_exc_info_to_unicode = __F(__f.func_code, __g, "_exc_info_to_unicode")
del __F, __M, __f, __g, __m
def _err_details_to_string(self, test, err=None, details=None):
"""Convert an error in exc_info form or a contents dict to a string."""
if err is not None:
return self._exc_info_to_unicode(err, test)
return _details_to_str(details, special='traceback')
def _now(self):
"""Return the current 'test time'.
If the time() method has not been called, this is equivalent to
datetime.now(), otherwise its the last supplied datestamp given to the
time() method.
"""
if self.__now is None:
return datetime.datetime.now(utc)
else:
return self.__now
def startTestRun(self):
"""Called before a test run starts.
New in Python 2.7. The testtools version resets the result to a
pristine condition ready for use in another test run. Note that this
is different from Python 2.7's startTestRun, which does nothing.
"""
super(TestResult, self).__init__()
self.skip_reasons = {}
self.__now = None
# -- Start: As per python 2.7 --
self.expectedFailures = []
self.unexpectedSuccesses = []
# -- End: As per python 2.7 --
def stopTestRun(self):
"""Called after a test run completes
New in python 2.7
"""
def time(self, a_datetime):
"""Provide a timestamp to represent the current time.
This is useful when test activity is time delayed, or happening
concurrently and getting the system time between API calls will not
accurately represent the duration of tests (or the whole run).
Calling time() sets the datetime used by the TestResult object.
Time is permitted to go backwards when using this call.
:param a_datetime: A datetime.datetime object with TZ information or
None to reset the TestResult to gathering time from the system.
"""
self.__now = a_datetime
def done(self):
"""Called when the test runner is done.
deprecated in favour of stopTestRun.
"""
class MultiTestResult(TestResult):
"""A test result that dispatches to many test results."""
def __init__(self, *results):
TestResult.__init__(self)
self._results = list(map(ExtendedToOriginalDecorator, results))
def __repr__(self):
return '<%s (%s)>' % (
self.__class__.__name__, ', '.join(map(repr, self._results)))
def _dispatch(self, message, *args, **kwargs):
return tuple(
getattr(result, message)(*args, **kwargs)
for result in self._results)
def startTest(self, test):
return self._dispatch('startTest', test)
def stopTest(self, test):
return self._dispatch('stopTest', test)
def addError(self, test, error=None, details=None):
return self._dispatch('addError', test, error, details=details)
def addExpectedFailure(self, test, err=None, details=None):
return self._dispatch(
'addExpectedFailure', test, err, details=details)
def addFailure(self, test, err=None, details=None):
return self._dispatch('addFailure', test, err, details=details)
def addSkip(self, test, reason=None, details=None):
return self._dispatch('addSkip', test, reason, details=details)
def addSuccess(self, test, details=None):
return self._dispatch('addSuccess', test, details=details)
def addUnexpectedSuccess(self, test, details=None):
return self._dispatch('addUnexpectedSuccess', test, details=details)
def startTestRun(self):
return self._dispatch('startTestRun')
def stopTestRun(self):
return self._dispatch('stopTestRun')
def time(self, a_datetime):
return self._dispatch('time', a_datetime)
def done(self):
return self._dispatch('done')
def wasSuccessful(self):
"""Was this result successful?
Only returns True if every constituent result was successful.
"""
return all(self._dispatch('wasSuccessful'))
class TextTestResult(TestResult):
"""A TestResult which outputs activity to a text stream."""
def __init__(self, stream):
"""Construct a TextTestResult writing to stream."""
super(TextTestResult, self).__init__()
self.stream = stream
self.sep1 = '=' * 70 + '\n'
self.sep2 = '-' * 70 + '\n'
def _delta_to_float(self, a_timedelta):
return (a_timedelta.days * 86400.0 + a_timedelta.seconds +
a_timedelta.microseconds / 1000000.0)
def _show_list(self, label, error_list):
for test, output in error_list:
self.stream.write(self.sep1)
self.stream.write("%s: %s\n" % (label, test.id()))
self.stream.write(self.sep2)
self.stream.write(output)
def startTestRun(self):
super(TextTestResult, self).startTestRun()
self.__start = self._now()
self.stream.write("Tests running...\n")
def stopTestRun(self):
if self.testsRun != 1:
plural = 's'
else:
plural = ''
stop = self._now()
self._show_list('ERROR', self.errors)
self._show_list('FAIL', self.failures)
for test in self.unexpectedSuccesses:
self.stream.write(
"%sUNEXPECTED SUCCESS: %s\n%s" % (
self.sep1, test.id(), self.sep2))
self.stream.write("\nRan %d test%s in %.3fs\n" %
(self.testsRun, plural,
self._delta_to_float(stop - self.__start)))
if self.wasSuccessful():
self.stream.write("OK\n")
else:
self.stream.write("FAILED (")
details = []
details.append("failures=%d" % (
sum(map(len, (
self.failures, self.errors, self.unexpectedSuccesses)))))
self.stream.write(", ".join(details))
self.stream.write(")\n")
super(TextTestResult, self).stopTestRun()
class ThreadsafeForwardingResult(TestResult):
"""A TestResult which ensures the target does not receive mixed up calls.
This is used when receiving test results from multiple sources, and batches
up all the activity for a single test into a thread-safe batch where all
other ThreadsafeForwardingResult objects sharing the same semaphore will be
locked out.
Typical use of ThreadsafeForwardingResult involves creating one
ThreadsafeForwardingResult per thread in a ConcurrentTestSuite. These
forward to the TestResult that the ConcurrentTestSuite run method was
called with.
target.done() is called once for each ThreadsafeForwardingResult that
forwards to the same target. If the target's done() takes special action,
care should be taken to accommodate this.
"""
def __init__(self, target, semaphore):
"""Create a ThreadsafeForwardingResult forwarding to target.
:param target: A TestResult.
:param semaphore: A threading.Semaphore with limit 1.
"""
TestResult.__init__(self)
self.result = ExtendedToOriginalDecorator(target)
self.semaphore = semaphore
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.result)
def _add_result_with_semaphore(self, method, test, *args, **kwargs):
self.semaphore.acquire()
try:
self.result.time(self._test_start)
self.result.startTest(test)
self.result.time(self._now())
try:
method(test, *args, **kwargs)
finally:
self.result.stopTest(test)
finally:
self.semaphore.release()
def addError(self, test, err=None, details=None):
self._add_result_with_semaphore(self.result.addError,
test, err, details=details)
def addExpectedFailure(self, test, err=None, details=None):
self._add_result_with_semaphore(self.result.addExpectedFailure,
test, err, details=details)
def addFailure(self, test, err=None, details=None):
self._add_result_with_semaphore(self.result.addFailure,
test, err, details=details)
def addSkip(self, test, reason=None, details=None):
self._add_result_with_semaphore(self.result.addSkip,
test, reason, details=details)
def addSuccess(self, test, details=None):
self._add_result_with_semaphore(self.result.addSuccess,
test, details=details)
def addUnexpectedSuccess(self, test, details=None):
self._add_result_with_semaphore(self.result.addUnexpectedSuccess,
test, details=details)
def startTestRun(self):
self.semaphore.acquire()
try:
self.result.startTestRun()
finally:
self.semaphore.release()
def stopTestRun(self):
self.semaphore.acquire()
try:
self.result.stopTestRun()
finally:
self.semaphore.release()
def done(self):
self.semaphore.acquire()
try:
self.result.done()
finally:
self.semaphore.release()
def startTest(self, test):
self._test_start = self._now()
super(ThreadsafeForwardingResult, self).startTest(test)
def wasSuccessful(self):
return self.result.wasSuccessful()
class ExtendedToOriginalDecorator(object):
"""Permit new TestResult API code to degrade gracefully with old results.
This decorates an existing TestResult and converts missing outcomes
such as addSkip to older outcomes such as addSuccess. It also supports
the extended details protocol. In all cases the most recent protocol
is attempted first, and fallbacks only occur when the decorated result
does not support the newer style of calling.
"""
def __init__(self, decorated):
self.decorated = decorated
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.decorated)
def __getattr__(self, name):
return getattr(self.decorated, name)
def addError(self, test, err=None, details=None):
self._check_args(err, details)
if details is not None:
try:
return self.decorated.addError(test, details=details)
except TypeError:
# have to convert
err = self._details_to_exc_info(details)
return self.decorated.addError(test, err)
def addExpectedFailure(self, test, err=None, details=None):
self._check_args(err, details)
addExpectedFailure = getattr(
self.decorated, 'addExpectedFailure', None)
if addExpectedFailure is None:
return self.addSuccess(test)
if details is not None:
try:
return addExpectedFailure(test, details=details)
except TypeError:
# have to convert
err = self._details_to_exc_info(details)
return addExpectedFailure(test, err)
def addFailure(self, test, err=None, details=None):
self._check_args(err, details)
if details is not None:
try:
return self.decorated.addFailure(test, details=details)
except TypeError:
# have to convert
err = self._details_to_exc_info(details)
return self.decorated.addFailure(test, err)
def addSkip(self, test, reason=None, details=None):
self._check_args(reason, details)
addSkip = getattr(self.decorated, 'addSkip', None)
if addSkip is None:
return self.decorated.addSuccess(test)
if details is not None:
try:
return addSkip(test, details=details)
except TypeError:
# extract the reason if it's available
try:
reason = ''.join(details['reason'].iter_text())
except KeyError:
reason = _details_to_str(details)
return addSkip(test, reason)
def addUnexpectedSuccess(self, test, details=None):
outcome = getattr(self.decorated, 'addUnexpectedSuccess', None)
if outcome is None:
try:
test.fail("")
except test.failureException:
return self.addFailure(test, sys.exc_info())
if details is not None:
try:
return outcome(test, details=details)
except TypeError:
pass
return outcome(test)
def addSuccess(self, test, details=None):
if details is not None:
try:
return self.decorated.addSuccess(test, details=details)
except TypeError:
pass
return self.decorated.addSuccess(test)
def _check_args(self, err, details):
param_count = 0
if err is not None:
param_count += 1
if details is not None:
param_count += 1
if param_count != 1:
raise ValueError("Must pass only one of err '%s' and details '%s"
% (err, details))
def _details_to_exc_info(self, details):
"""Convert a details dict to an exc_info tuple."""
return (
_StringException,
_StringException(_details_to_str(details, special='traceback')),
None)
def done(self):
try:
return self.decorated.done()
except AttributeError:
return
def progress(self, offset, whence):
method = getattr(self.decorated, 'progress', None)
if method is None:
return
return method(offset, whence)
@property
def shouldStop(self):
return self.decorated.shouldStop
def startTest(self, test):
return self.decorated.startTest(test)
def startTestRun(self):
try:
return self.decorated.startTestRun()
except AttributeError:
return
def stop(self):
return self.decorated.stop()
def stopTest(self, test):
return self.decorated.stopTest(test)
def stopTestRun(self):
try:
return self.decorated.stopTestRun()
except AttributeError:
return
def tags(self, new_tags, gone_tags):
method = getattr(self.decorated, 'tags', None)
if method is None:
return
return method(new_tags, gone_tags)
def time(self, a_datetime):
method = getattr(self.decorated, 'time', None)
if method is None:
return
return method(a_datetime)
def wasSuccessful(self):
return self.decorated.wasSuccessful()
class _StringException(Exception):
"""An exception made from an arbitrary string."""
if not str_is_unicode:
def __init__(self, string):
if type(string) is not unicode:
raise TypeError("_StringException expects unicode, got %r" %
(string,))
Exception.__init__(self, string)
def __str__(self):
return self.args[0].encode("utf-8")
def __unicode__(self):
return self.args[0]
# For 3.0 and above the default __str__ is fine, so we don't define one.
def __hash__(self):
return id(self)
def __eq__(self, other):
try:
return self.args == other.args
except AttributeError:
return False
def _format_text_attachment(name, text):
if '\n' in text:
return "%s: {{{\n%s\n}}}\n" % (name, text)
return "%s: {{{%s}}}" % (name, text)
def _details_to_str(details, special=None):
"""Convert a details dict to a string.
:param details: A dictionary mapping short names to ``Content`` objects.
:param special: If specified, an attachment that should have special
attention drawn to it. The primary attachment. Normally it's the
traceback that caused the test to fail.
:return: A formatted string that can be included in text test results.
"""
empty_attachments = []
binary_attachments = []
text_attachments = []
special_content = None
# sorted is for testing, may want to remove that and use a dict
# subclass with defined order for items instead.
for key, content in sorted(details.items()):
if content.content_type.type != 'text':
binary_attachments.append((key, content.content_type))
continue
text = _u('').join(content.iter_text()).strip()
if not text:
empty_attachments.append(key)
continue
# We want the 'special' attachment to be at the bottom.
if key == special:
special_content = '%s\n' % (text,)
continue
text_attachments.append(_format_text_attachment(key, text))
if text_attachments and not text_attachments[-1].endswith('\n'):
text_attachments.append('')
if special_content:
text_attachments.append(special_content)
lines = []
if binary_attachments:
lines.append('Binary content:\n')
for name, content_type in binary_attachments:
lines.append(' %s (%s)\n' % (name, content_type))
if empty_attachments:
lines.append('Empty attachments:\n')
for name in empty_attachments:
lines.append(' %s\n' % (name,))
if (binary_attachments or empty_attachments) and text_attachments:
lines.append('\n')
lines.append('\n'.join(text_attachments))
return _u('').join(lines)
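def _example_details_to_str():
    # Illustrative sketch only (not part of the module): stand-in objects that
    # expose the minimal surface _details_to_str relies on (content_type.type
    # and iter_text()); real callers pass testtools.content.Content instances.
    class _StubContentType(object):
        type = 'text'
        def __str__(self):
            return 'text/plain'
    class _StubContent(object):
        def __init__(self, text):
            self.content_type = _StubContentType()
            self._text = text
        def iter_text(self):
            return iter([self._text])
    details = {
        'log': _StubContent(_u('line one\nline two')),
        'traceback': _StubContent(_u('Traceback (most recent call last): ...')),
    }
    # The 'special' attachment (here the traceback) is rendered last.
    return _details_to_str(details, special='traceback')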
|
lauria/Samba4
|
lib/testtools/testtools/testresult/real.py
|
Python
|
gpl-3.0
| 23,481 | 0.000596 |
from django.conf.urls import patterns, url
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import FacebookProvider
from . import views
urlpatterns = default_urlpatterns(FacebookProvider)
urlpatterns += patterns('',
url('^facebook/login/token/$', views.login_by_token,
name="facebook_login_by_token"),
url('^facebook/channel/$', views.channel, name='facebook_channel'),
)
|
tejesh95/Zubio.in
|
zubio/allauth/socialaccount/providers/facebook/urls.py
|
Python
|
mit
| 435 | 0.004598 |
import re
from decimal import Decimal
import sys
import struct
from rdbtools.parser import RdbCallback, RdbParser
ESCAPE = re.compile(ur'[\x00-\x1f\\"\b\f\n\r\t\u2028\u2029]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
u'\u2028': '\\u2028',
u'\u2029': '\\u2029',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
# The struct module in Python 2.4 would get frexp() out of range here
# when an endian is specified in the format string. Fixed in Python 2.5+
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def _encode_basestring(s):
"""Return a JSON representation of a Python string"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
return ESCAPE_DCT[match.group(0)]
return u'"' + ESCAPE.sub(replace, s) + u'"'
def _encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
try :
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
except:
pass
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
#return '\\u{0:04x}'.format(n)
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
def _encode(s, quote_numbers = True):
if quote_numbers:
qn = '"'
else:
qn = ''
if isinstance(s, int) or isinstance(s, long):
return qn + str(s) + qn
elif isinstance(s, float):
if s != s:
return "NaN"
elif s == PosInf:
return "Infinity"
elif s == NegInf:
return "-Infinity"
else:
return qn + str(s) + qn
else:
return _encode_basestring_ascii(s)
def encode_key(s):
return _encode(s, quote_numbers=True)
def encode_value(s):
return _encode(s, quote_numbers=False)
class JSONCallback(RdbCallback):
def __init__(self, out):
self._out = out
self._is_first_db = True
self._has_databases = False
self._is_first_key_in_db = True
self._elements_in_key = 0
self._element_index = 0
def start_rdb(self):
self._out.write('[')
def start_database(self, db_number):
if not self._is_first_db:
self._out.write('},')
self._out.write('{')
self._is_first_db = False
self._has_databases = True
self._is_first_key_in_db = True
def end_database(self, db_number):
pass
def end_rdb(self):
if self._has_databases:
self._out.write('}')
self._out.write(']')
def _start_key(self, key, length):
if not self._is_first_key_in_db:
self._out.write(',')
self._out.write('\r\n')
self._is_first_key_in_db = False
self._elements_in_key = length
self._element_index = 0
def _end_key(self, key):
pass
def _write_comma(self):
if self._element_index > 0 and self._element_index < self._elements_in_key :
self._out.write(',')
self._element_index = self._element_index + 1
def set(self, key, value, expiry, info):
self._start_key(key, 0)
self._out.write('%s:%s' % (encode_key(key), encode_value(value)))
def start_hash(self, key, length, expiry, info):
self._start_key(key, length)
self._out.write('%s:{' % encode_key(key))
def hset(self, key, field, value):
self._write_comma()
self._out.write('%s:%s' % (encode_key(field), encode_value(value)))
def end_hash(self, key):
self._end_key(key)
self._out.write('}')
def start_set(self, key, cardinality, expiry, info):
self._start_key(key, cardinality)
self._out.write('%s:[' % encode_key(key))
def sadd(self, key, member):
self._write_comma()
self._out.write('%s' % encode_value(member))
def end_set(self, key):
self._end_key(key)
self._out.write(']')
def start_list(self, key, length, expiry, info):
self._start_key(key, length)
self._out.write('%s:[' % encode_key(key))
def rpush(self, key, value) :
self._write_comma()
self._out.write('%s' % encode_value(value))
def end_list(self, key):
self._end_key(key)
self._out.write(']')
def start_sorted_set(self, key, length, expiry, info):
self._start_key(key, length)
self._out.write('%s:{' % encode_key(key))
def zadd(self, key, score, member):
self._write_comma()
self._out.write('%s:%s' % (encode_key(member), encode_value(score)))
def end_sorted_set(self, key):
self._end_key(key)
self._out.write('}')
class DiffCallback(RdbCallback):
    '''Prints the contents of an RDB file in a format that is unix-sort
    friendly, so that two RDB files can be diffed easily'''
def __init__(self, out):
self._out = out
self._index = 0
self._dbnum = 0
def start_rdb(self):
pass
def start_database(self, db_number):
self._dbnum = db_number
def end_database(self, db_number):
pass
def end_rdb(self):
pass
def set(self, key, value, expiry, info):
self._out.write('db=%d %s -> %s' % (self._dbnum, encode_key(key), encode_value(value)))
self.newline()
def start_hash(self, key, length, expiry, info):
pass
def hset(self, key, field, value):
self._out.write('db=%d %s . %s -> %s' % (self._dbnum, encode_key(key), encode_key(field), encode_value(value)))
self.newline()
def end_hash(self, key):
pass
def start_set(self, key, cardinality, expiry, info):
pass
def sadd(self, key, member):
self._out.write('db=%d %s { %s }' % (self._dbnum, encode_key(key), encode_value(member)))
self.newline()
def end_set(self, key):
pass
def start_list(self, key, length, expiry, info):
self._index = 0
def rpush(self, key, value) :
self._out.write('db=%d %s[%d] -> %s' % (self._dbnum, encode_key(key), self._index, encode_value(value)))
self.newline()
self._index = self._index + 1
def end_list(self, key):
pass
def start_sorted_set(self, key, length, expiry, info):
self._index = 0
def zadd(self, key, score, member):
self._out.write('db=%d %s[%d] -> {%s, score=%s}' % (self._dbnum, encode_key(key), self._index, encode_key(member), encode_value(score)))
self.newline()
self._index = self._index + 1
def end_sorted_set(self, key):
pass
def newline(self):
self._out.write('\r\n')
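# Minimal usage sketch (file names are illustrative; assumes the RdbParser
# imported above exposes a parse(filename) method, as documented by rdbtools).
if __name__ == '__main__':
    with open('dump.json', 'wb') as out:
        RdbParser(JSONCallback(out)).parse('dump.rdb')
    # DiffCallback instead writes one sortable line per key, so two dumps can
    # be compared with plain `sort` and `diff`.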
|
idning/redis-rdb-tools
|
rdbtools/callbacks.py
|
Python
|
mit
| 7,595 | 0.007637 |
#
# Copyright (C) 2012-2014 Red Hat, Inc.
#
# Licensed under the GNU Lesser General Public License Version 2.1
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
from __future__ import absolute_import
from . import base
import hawkey
import sys
import unittest
class TestQuery(base.TestCase):
def setUp(self):
self.sack = base.TestSack(repo_dir=self.repo_dir)
self.sack.load_system_repo()
def test_sanity(self):
q = hawkey.Query(self.sack)
q.filterm(name__eq="flying")
self.assertEqual(q.count(), 1)
def test_creation_empty_sack(self):
s = hawkey.Sack(make_cache_dir=True)
q = hawkey.Query(s)
def test_exception(self):
q = hawkey.Query(self.sack)
self.assertRaises(hawkey.ValueException, q.filter, flying__eq="name")
self.assertRaises(hawkey.ValueException, q.filter, flying="name")
def test_unicode(self):
q = hawkey.Query(self.sack)
q.filterm(name__eq=u"flying")
self.assertEqual(q.count(), 1)
q = hawkey.Query(self.sack)
q.filterm(name__eq=[u"flying", "penny"])
self.assertEqual(q.count(), 2)
def test_count(self):
q = hawkey.Query(self.sack).filter(name=["flying", "penny"])
self.assertIsNone(q.result)
self.assertEqual(len(q), 2)
self.assertIsNotNone(q.result)
self.assertEqual(len(q), q.count())
self.assertTrue(q)
q = hawkey.Query(self.sack).filter(name="naturalE")
self.assertFalse(q)
self.assertIsNotNone(q.result)
def test_kwargs_check(self):
q = hawkey.Query(self.sack)
self.assertRaises(hawkey.ValueException, q.filter,
name="flying", upgrades="maracas")
def test_kwargs(self):
q = hawkey.Query(self.sack)
# test combining several criteria
q.filterm(name__glob="*enny*", summary__substr="eyes")
self.assertEqual(q.count(), 1)
# test shortcutting for equality comparison type
q = hawkey.Query(self.sack)
q.filterm(name="flying")
self.assertEqual(q.count(), 1)
# test flags parsing
q = hawkey.Query(self.sack).filter(name="FLYING")
self.assertEqual(q.count(), 0)
q = hawkey.Query(self.sack).filter(hawkey.ICASE, name="FLYING")
self.assertEqual(q.count(), 1)
def test_in(self):
q = hawkey.Query(self.sack)
q.filterm(name__substr=["ool", "enny-li"])
self.assertEqual(q.count(), 2)
def test_in_set(self):
q = hawkey.Query(self.sack)
q.filterm(name__substr=set(["ool", "enny-li"]))
self.assertEqual(q.count(), 2)
def test_iteration(self):
q = hawkey.Query(self.sack)
q.filterm(name__substr=["penny"])
self.assertEqual(q.count(), 2)
self.assertNotEqual(q[0], q[1])
def test_clone(self):
q = hawkey.Query(self.sack)
q.filterm(name__substr=["penny"])
q_clone = hawkey.Query(query=q)
del q
self.assertEqual(q_clone.count(), 2)
self.assertNotEqual(q_clone[0], q_clone[1])
def test_clone_with_evaluation(self):
q = hawkey.Query(self.sack)
q.filterm(name__substr="penny")
q.run()
q_clone = hawkey.Query(query=q)
del q
self.assertTrue(q_clone.evaluated)
self.assertLength(q_clone.result, 2)
def test_immutability(self):
q = hawkey.Query(self.sack).filter(name="jay")
q2 = q.filter(evr="5.0-0")
self.assertEqual(q.count(), 2)
self.assertEqual(q2.count(), 1)
def test_copy_lazyness(self):
q = hawkey.Query(self.sack).filter(name="jay")
self.assertIsNone(q.result)
q2 = q.filter(evr="5.0-0")
self.assertIsNone(q.result)
def test_empty(self):
q = hawkey.Query(self.sack).filter(empty=True)
self.assertLength(q, 0)
q = hawkey.Query(self.sack)
self.assertRaises(hawkey.ValueException, q.filter, empty=False)
def test_epoch(self):
q = hawkey.Query(self.sack).filter(epoch__gt=4)
self.assertEqual(len(q), 1)
self.assertEqual(q[0].epoch, 6)
def test_version(self):
q = hawkey.Query(self.sack).filter(version__gte="5.0")
self.assertEqual(len(q), 3)
q = hawkey.Query(self.sack).filter(version__glob="1.2*")
self.assertLength(q, 2)
def test_package_in(self):
pkgs = list(hawkey.Query(self.sack).filter(name=["flying", "penny"]))
q = hawkey.Query(self.sack).filter(pkg=pkgs)
self.assertEqual(len(q), 2)
q2 = q.filter(version__gt="3")
self.assertEqual(len(q2), 1)
def test_nevra_match(self):
query = hawkey.Query(self.sack).filter(nevra__glob="*lib*64")
self.assertEqual(len(query), 1)
self.assertEqual(str(query[0]), 'penny-lib-4-1.x86_64')
def test_repeated(self):
q = hawkey.Query(self.sack).filter(name="jay")
q.filterm(latest_per_arch=True)
self.assertEqual(len(q), 1)
def test_latest(self):
q = hawkey.Query(self.sack).filter(name="pilchard")
q.filterm(latest_per_arch=True)
self.assertEqual(len(q), 2)
q.filterm(latest=True)
self.assertEqual(len(q), 1)
def test_reldep(self):
flying = base.by_name(self.sack, "flying")
requires = flying.requires
q = hawkey.Query(self.sack).filter(provides=requires[0])
self.assertEqual(len(q), 1)
self.assertEqual(str(q[0]), "penny-lib-4-1.x86_64")
self.assertRaises(hawkey.QueryException, q.filter,
provides__gt=requires[0])
def test_reldep_list(self):
self.sack.load_test_repo("updates", "updates.repo")
fool = base.by_name_repo(self.sack, "fool", "updates")
q = hawkey.Query(self.sack).filter(provides=fool.obsoletes)
self.assertEqual(str(q.run()[0]), "penny-4-1.noarch")
def test_disabled_repo(self):
self.sack.disable_repo(hawkey.SYSTEM_REPO_NAME)
q = hawkey.Query(self.sack).filter(name="jay")
self.assertLength(q.run(), 0)
self.sack.enable_repo(hawkey.SYSTEM_REPO_NAME)
q = hawkey.Query(self.sack).filter(name="jay")
self.assertLength(q.run(), 2)
def test_multiple_flags(self):
q = hawkey.Query(self.sack).filter(name__glob__not=["p*", "j*"])
self.assertItemsEqual(list(map(lambda p: p.name, q.run())),
['baby', 'dog', 'flying', 'fool', 'gun', 'tour'])
class TestQueryAllRepos(base.TestCase):
def setUp(self):
self.sack = base.TestSack(repo_dir=self.repo_dir)
self.sack.load_system_repo()
self.sack.load_test_repo("main", "main.repo")
self.sack.load_test_repo("updates", "updates.repo")
def test_requires(self):
reldep = hawkey.Reldep(self.sack, "semolina = 2")
q = hawkey.Query(self.sack).filter(requires=reldep)
self.assertItemsEqual(list(map(str, q.run())),
['walrus-2-5.noarch', 'walrus-2-6.noarch'])
reldep = hawkey.Reldep(self.sack, "semolina > 1.0")
q = hawkey.Query(self.sack).filter(requires=reldep)
self.assertItemsEqual(list(map(str, q.run())),
['walrus-2-5.noarch', 'walrus-2-6.noarch'])
def test_obsoletes(self):
reldep = hawkey.Reldep(self.sack, "penny < 4-0")
q = hawkey.Query(self.sack).filter(obsoletes=reldep)
self.assertItemsEqual(list(map(str, q.run())), ['fool-1-5.noarch'])
def test_downgradable(self):
query = hawkey.Query(self.sack).filter(downgradable=True)
self.assertEqual({str(pkg) for pkg in query},
{'baby-6:5.0-11.x86_64', 'jay-5.0-0.x86_64'})
class TestQueryUpdates(base.TestCase):
def setUp(self):
self.sack = base.TestSack(repo_dir=self.repo_dir)
self.sack.load_system_repo()
self.sack.load_test_repo("updates", "updates.repo")
def test_upgradable(self):
query = hawkey.Query(self.sack).filter(upgradable=True)
self.assertEqual({str(pkg) for pkg in query},
{'dog-1-1.x86_64', 'flying-2-9.noarch',
'fool-1-3.noarch', 'pilchard-1.2.3-1.i686',
'pilchard-1.2.3-1.x86_64'})
def test_updates_noarch(self):
q = hawkey.Query(self.sack)
q.filterm(name="flying", upgrades=1)
self.assertEqual(q.count(), 3)
def test_updates_arch(self):
q = hawkey.Query(self.sack)
pilchard = q.filter(name="dog", upgrades=True)
self.assertItemsEqual(list(map(str, pilchard.run())), ['dog-1-2.x86_64'])
def test_glob_arch(self):
q = hawkey.Query(self.sack)
pilchard = q.filter(name="pilchard", version="1.2.4", release="1",
arch__glob="*6*")
res = list(map(str, pilchard.run()))
self.assertItemsEqual(res, ["pilchard-1.2.4-1.x86_64",
"pilchard-1.2.4-1.i686"])
def test_obsoletes(self):
q = hawkey.Query(self.sack).filter(name="penny")
o = hawkey.Query(self.sack)
self.assertRaises(hawkey.QueryException, o.filter, obsoletes__gt=q)
self.assertRaises(hawkey.ValueException, o.filter, requires=q)
o = hawkey.Query(self.sack).filter(obsoletes=q)
self.assertLength(o, 1)
self.assertEqual(str(o[0]), 'fool-1-5.noarch')
def test_subquery_evaluated(self):
q = hawkey.Query(self.sack).filter(name="penny")
self.assertFalse(q.evaluated)
self.assertIsNone(q.result)
o = hawkey.Query(self.sack).filter(obsoletes=q)
self.assertTrue(q.evaluated)
self.assertIsInstance(q.result, list)
self.assertLength(o, 1)
class TestOddArch(base.TestCase):
def setUp(self):
self.sack = base.TestSack(repo_dir=self.repo_dir)
self.sack.load_test_repo("ppc", "ppc.repo")
def test_latest(self):
q = hawkey.Query(self.sack).filter(latest=True)
self.assertEqual(len(q), 1)
q = hawkey.Query(self.sack).filter(latest_per_arch=True)
self.assertEqual(len(q), 1)
class TestQuerySubclass(base.TestCase):
class CustomQuery(hawkey.Query):
pass
def test_instance(self):
sack = base.TestSack(repo_dir=self.repo_dir)
q = self.CustomQuery(sack)
self.assertIsInstance(q, self.CustomQuery)
q = q.filter(name="pepper")
self.assertIsInstance(q, self.CustomQuery)
|
yavor-atanasov/hawkey
|
tests/python/tests/test_query.py
|
Python
|
lgpl-2.1
| 11,221 | 0.000446 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variable functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.framework.python.ops import add_arg_scope as contrib_add_arg_scope
from tensorflow.contrib.framework.python.ops import gen_variable_ops
from tensorflow.contrib.util import loader
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import training_util
__all__ = ['add_model_variable',
'assert_global_step',
'assert_or_get_global_step',
'assign_from_checkpoint',
'assign_from_checkpoint_fn',
'assign_from_values',
'assign_from_values_fn',
'create_global_step',
'filter_variables',
'get_global_step',
'get_or_create_global_step',
'get_local_variables',
'get_model_variables',
'get_trainable_variables',
'get_unique_variable',
'get_variables_by_name',
'get_variables_by_suffix',
'get_variables_to_restore',
'get_variables',
'local_variable',
'model_variable',
'variable',
'VariableDeviceChooser',
'zero_initializer']
def zero_initializer(ref, use_locking=True, name="zero_initializer"):
"""Initialize 'ref' with all zeros, ref tensor should be uninitialized.
If already initialized, you will get ValueError. This op is intended to
save memory during initialization.
Args:
ref: ref of the tensor need to be zero initialized.
name: optional name for this operation.
Returns:
ref that initialized.
Raises:
ValueError: If ref tensor is initialized.
"""
loader.load_op_library(
resource_loader.get_path_to_datafile("_variable_ops.so"))
return gen_variable_ops.zero_initializer(ref, name=name)
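def _example_zero_initializer():
  # Usage sketch: zero-initialize a large variable without materializing a
  # zeros constant in the graph; the variable name below is illustrative and
  # the variable must still be uninitialized when the returned op runs.
  big = variable_scope.get_variable('zero_init_example', shape=[1000, 1000],
                                    dtype=dtypes.float32)
  return zero_initializer(big)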
def assert_global_step(global_step_tensor):
training_util.assert_global_step(global_step_tensor)
def assert_or_get_global_step(graph=None, global_step_tensor=None):
"""Verifies that a global step tensor is valid or gets one if None is given.
If `global_step_tensor` is not None, check that it is a valid global step
tensor (using `assert_global_step`). Otherwise find a global step tensor using
`get_global_step` and return it.
Args:
graph: The graph to find the global step tensor for.
global_step_tensor: The tensor to check for suitability as a global step.
If None is given (the default), find a global step tensor.
Returns:
A tensor suitable as a global step, or `None` if none was provided and none
was found.
"""
if global_step_tensor is None:
# Get the global step tensor the same way the supervisor would.
global_step_tensor = get_global_step(graph)
else:
assert_global_step(global_step_tensor)
return global_step_tensor
def get_global_step(graph=None):
return training_util.get_global_step(graph)
def create_global_step(graph=None):
"""Create global step tensor in graph.
Args:
graph: The graph in which to create the global step. If missing, use default
graph.
Returns:
Global step tensor.
Raises:
ValueError: if global step key is already defined.
"""
graph = ops.get_default_graph() if graph is None else graph
if get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
collections = [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]
return variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=collections)
def get_or_create_global_step(graph=None):
"""Returns and create (if necessary) the global step variable.
Args:
graph: The graph in which to create the global step. If missing, use default
graph.
Returns:
the tensor representing the global step variable.
"""
graph = ops.get_default_graph() if graph is None else graph
globalstep = get_global_step(graph)
if globalstep is None:
globalstep = create_global_step(graph)
return globalstep
def local_variable(initial_value, validate_shape=True, name=None):
"""Create variable and add it to `GraphKeys.LOCAL_VARIABLES` collection.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
Returns:
New variable.
"""
return variables.Variable(
initial_value, trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=validate_shape, name=name)
@contrib_add_arg_scope
def variable(name, shape=None, dtype=None, initializer=None,
regularizer=None, trainable=True, collections=None,
caching_device=None, device=None,
partitioner=None, custom_getter=None):
"""Gets an existing variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: A list of collection names to which the Variable will be added.
If None it would default to `tf.GraphKeys.GLOBAL_VARIABLES`.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device.
    device: Optional device to place the variable. It can be a string or a
      function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
custom_getter: Callable that allows overwriting the internal
get_variable method and has to have the same signature.
Returns:
The created or existing variable.
"""
collections = list(collections or [ops.GraphKeys.GLOBAL_VARIABLES])
# Remove duplicates
collections = set(collections)
getter = variable_scope.get_variable
if custom_getter is not None:
getter = custom_getter
with ops.device(device or ''):
return getter(name, shape=shape, dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner)
@contrib_add_arg_scope
def model_variable(name, shape=None, dtype=dtypes.float32, initializer=None,
regularizer=None, trainable=True, collections=None,
caching_device=None, device=None, partitioner=None,
custom_getter=None):
"""Gets an existing model variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: A list of collection names to which the Variable will be added.
Note that the variable is always also added to the
`GraphKeys.GLOBAL_VARIABLES` and `GraphKeys.MODEL_VARIABLES` collections.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device.
    device: Optional device to place the variable. It can be a string or a
      function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
custom_getter: Callable that allows overwriting the internal
get_variable method and has to have the same signature.
Returns:
The created or existing variable.
"""
collections = list(collections or [])
collections += [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.MODEL_VARIABLES]
var = variable(name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
trainable=trainable, collections=collections,
caching_device=caching_device, device=device,
partitioner=partitioner, custom_getter=custom_getter)
return var
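def _example_model_variable():
  # Usage sketch: model variables are added to both the GLOBAL_VARIABLES and
  # MODEL_VARIABLES collections, so they are returned by get_model_variables().
  # The variable name is illustrative.
  model_variable('model_var_example', shape=[3],
                 initializer=init_ops.zeros_initializer())
  return get_model_variables()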
def add_model_variable(var):
"""Adds a variable to the `GraphKeys.MODEL_VARIABLES` collection.
Args:
var: a variable.
"""
if var not in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES):
ops.add_to_collection(ops.GraphKeys.MODEL_VARIABLES, var)
def get_variables(scope=None, suffix=None,
collection=ops.GraphKeys.GLOBAL_VARIABLES):
"""Gets the list of variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return. Can be a
variable scope or a string.
suffix: an optional suffix for filtering the variables to return.
collection: in which collection search for. Defaults to
`GraphKeys.GLOBAL_VARIABLES`.
Returns:
a list of variables in collection with scope and suffix.
"""
if isinstance(scope, variable_scope.VariableScope):
scope = scope.name
if suffix is not None:
if ':' not in suffix:
suffix += ':'
scope = (scope or '') + '.*' + suffix
return ops.get_collection(collection, scope)
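def _example_get_variables():
  # Usage sketch: create a variable under a scope and look it up again by scope
  # and name suffix (the scope and variable names are illustrative).
  with variable_scope.variable_scope('get_variables_example'):
    variable('weights', shape=[3, 3, 1, 8])
  return get_variables(scope='get_variables_example', suffix='weights')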
def get_model_variables(scope=None, suffix=None):
"""Gets the list of model variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.MODEL_VARIABLES)
def get_local_variables(scope=None, suffix=None):
"""Gets the list of local variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.LOCAL_VARIABLES)
def get_trainable_variables(scope=None, suffix=None):
"""Gets the list of trainable variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in the trainable collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.TRAINABLE_VARIABLES)
def get_variables_to_restore(include=None, exclude=None):
"""Gets the list of the variables to restore.
Args:
    include: an optional list/tuple of scope strings for filtering which
      variables from the VARIABLES collection to include. If None, all
      variables are included.
    exclude: an optional list/tuple of scope strings for filtering which
      variables from the VARIABLES collection to exclude. If None, no
      variables are excluded.
Returns:
a list of variables to restore.
Raises:
TypeError: include or exclude is provided but is not a list or a tuple.
"""
if include is None:
# Include all variables.
vars_to_include = get_variables()
else:
if not isinstance(include, (list, tuple)):
raise TypeError('include is provided but is not a list or a tuple.')
vars_to_include = []
for scope in include:
vars_to_include += get_variables(scope)
vars_to_exclude = set()
if exclude is not None:
if not isinstance(exclude, (list, tuple)):
raise TypeError('exclude is provided but is not a list or a tuple.')
for scope in exclude:
vars_to_exclude |= set(get_variables(scope))
# Exclude the variables in vars_to_exclude
return [v for v in vars_to_include if v not in vars_to_exclude]
def get_variables_by_suffix(suffix, scope=None):
"""Gets the list of variables that end with the given suffix.
Args:
suffix: suffix for filtering the variables to return.
scope: an optional scope for filtering the variables to return.
Returns:
a copied list of variables with the given name and prefix.
"""
return get_variables(scope=scope, suffix=suffix)
def get_variables_by_name(given_name, scope=None):
"""Gets the list of variables that were given that name.
Args:
given_name: name given to the variable without any scope.
scope: an optional scope for filtering the variables to return.
Returns:
a copied list of variables with the given name and scope.
"""
suffix = '/' + given_name + ':|^' + given_name + ':'
return get_variables(scope=scope, suffix=suffix)
def get_unique_variable(var_op_name):
"""Gets the variable uniquely identified by that var_op_name.
Args:
var_op_name: the full name of the variable op, including the scope.
Returns:
a tensorflow variable.
Raises:
ValueError: if no variable uniquely identified by the name exists.
"""
candidates = get_variables(scope=var_op_name)
if not candidates:
    raise ValueError("Couldn't find variable %s" % var_op_name)
for candidate in candidates:
if candidate.op.name == var_op_name:
return candidate
  raise ValueError('Variable %s does not uniquely identify a variable'
                   % var_op_name)
def assign_from_values(var_names_to_values):
"""Creates an assignment operation from a given mapping.
This function provides a mechanism for performing assignment of variables
to values in a way that does not fill the graph with large assignment values.
Args:
var_names_to_values: A map from variable names to values.
Returns:
assign_op: An `Operation` that assigns each of the given variables to the
requested values.
feed_dict: The feed dictionary to use when evaluating `assign_op`.
Raises:
ValueError: if any of the given variable names were not found.
"""
feed_dict = {}
assign_ops = []
for var_name in var_names_to_values:
var_value = var_names_to_values[var_name]
var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, var_name)
if not var:
      raise ValueError("Variable %s wasn't found" % var_name)
elif len(var) > 1:
# tf.get_collection is just a filter on the prefix: find the exact match:
found = False
for v in var:
if v.op.name == var_name:
var = v
found = True
break
if not found:
        raise ValueError('Variable %s does not uniquely identify a variable'
                         % var_name)
else:
var = var[0]
# TODO(nsilberman): ensure placeholder and assign are on the same device.
# Assign a placeholder to the value that will be filled later.
placeholder_name = 'placeholder/' + var.op.name
placeholder_value = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name=placeholder_name)
assign_ops.append(var.assign(placeholder_value))
feed_dict[placeholder_value] = var_value.reshape(var.get_shape())
assign_op = control_flow_ops.group(*assign_ops)
return assign_op, feed_dict
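def _example_assign_from_values():
  # Usage sketch: push concrete numpy values into an existing variable without
  # embedding them in the graph as constants (the variable name is
  # illustrative). Run the result as session.run(assign_op, feed_dict).
  import numpy as np
  variable_scope.get_variable('assign_values_example', shape=[2, 2])
  return assign_from_values(
      {'assign_values_example': np.zeros((2, 2), dtype=np.float32)})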
def assign_from_values_fn(var_names_to_values):
"""Returns a function that assigns specific variables from the given values.
This function provides a mechanism for performing assignment of variables
to values in a way that does not fill the graph with large assignment values.
Args:
var_names_to_values: A map from variable names to values.
Returns:
A function that takes a single argument, a `tf.Session`, that applies the
assignment operation.
Raises:
ValueError: if any of the given variable names were not found.
"""
assign_op, feed_dict = assign_from_values(var_names_to_values)
def callback(session):
return session.run(assign_op, feed_dict)
return callback
# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
#
# TODO(nsilberman): add flag to load exponential moving averages instead
def assign_from_checkpoint(model_path, var_list):
"""Creates an operation to assign specific variables from a checkpoint.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of (possibly partitioned) `Variable` objects
or a dictionary mapping names in the checkpoint to the
corresponding variables or list of variables to initialize
from that checkpoint value. For partitioned Variables, the
name in the checkpoint must be the full variable, not the
name of the partitioned variable, eg. "my_var" rather than
"my_var/part_4". If empty, returns no_op(), {}.
Returns:
the restore_op and the feed_dict that need to be run to restore var_list.
Raises:
ValueError: If the checkpoint specified at `model_path` is missing one of
the variables in `var_list`.
"""
# Normalize var_list into a dictionary mapping names in the
# checkpoint to the list of variables to initialize from that
# checkpoint variable. Sliced (including partitioned) variables will
# end up under the same key.
grouped_vars = {}
if isinstance(var_list, (tuple, list)):
for var in var_list:
if var._save_slice_info:
ckpt_name = var._save_slice_info.full_name
else:
ckpt_name = var.op.name
if ckpt_name not in grouped_vars:
grouped_vars[ckpt_name] = []
grouped_vars[ckpt_name].append(var)
else:
for ckpt_name, value in var_list.iteritems():
if isinstance(value, (tuple, list)):
grouped_vars[ckpt_name] = value
else:
grouped_vars[ckpt_name] = [value]
# Read each checkpoint entry. Create a placeholder variable and
# add the (possibly sliced) data from the checkpoint to the feed_dict.
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
feed_dict = {}
assign_ops = []
for ckpt_name in grouped_vars:
if not reader.has_tensor(ckpt_name):
raise ValueError(
'Checkpoint is missing variable [%s]' % ckpt_name)
ckpt_value = reader.get_tensor(ckpt_name)
for var in grouped_vars[ckpt_name]:
placeholder_tensor = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name='placeholder/' + ckpt_name)
assign_ops.append(var.assign(placeholder_tensor))
if not var._save_slice_info:
if var.get_shape() != ckpt_value.shape:
raise ValueError(
'Total size of new array must be unchanged for %s '
'lh_shape: [%s], rh_shape: [%s]'
% (ckpt_name, str(ckpt_value.shape), str(var.get_shape())))
feed_dict[placeholder_tensor] = ckpt_value.reshape(ckpt_value.shape)
else:
slice_dims = zip(var._save_slice_info.var_offset,
var._save_slice_info.var_shape)
slice_dims = [(start, start + size) for (start, size) in slice_dims]
slice_dims = [slice(*x) for x in slice_dims]
slice_value = ckpt_value[slice_dims]
feed_dict[placeholder_tensor] = slice_value
assign_op = control_flow_ops.group(*assign_ops)
return assign_op, feed_dict
# pylint: enable=protected-access
def assign_from_checkpoint_fn(model_path, var_list, ignore_missing_vars=False,
reshape_variables=False):
"""Returns a function that assigns specific variables from a checkpoint.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of `Variable` objects or a dictionary mapping names in the
      checkpoint to the corresponding variables to initialize. If empty or None,
it would return no_op(), None.
ignore_missing_vars: Boolean, if True it would ignore variables missing in
the checkpoint with a warning instead of failing.
reshape_variables: Boolean, if True it would automatically reshape variables
      which are of a different shape than the ones stored in the checkpoint but
which have the same number of elements.
Returns:
A function that takes a single argument, a `tf.Session`, that applies the
assignment operation.
Raises:
ValueError: If the checkpoint specified at `model_path` is missing one of
the variables in `var_list`.
"""
if ignore_missing_vars:
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
if isinstance(var_list, dict):
var_dict = var_list
else:
var_dict = {var.op.name: var for var in var_list}
available_vars = {}
for var in var_dict:
if reader.has_tensor(var):
available_vars[var] = var_dict[var]
else:
logging.warning(
'Variable %s missing in checkpoint %s', var, model_path)
var_list = available_vars
saver = tf_saver.Saver(var_list, reshape=reshape_variables)
def callback(session):
saver.restore(session, model_path)
return callback
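def _example_assign_from_checkpoint_fn(checkpoint_path):
  # Usage sketch: build an init_fn that restores every model variable except
  # those under 'logits', skipping variables absent from the checkpoint. The
  # excluded scope is illustrative; checkpoint_path typically comes from
  # tf.train.latest_checkpoint(checkpoint_dir).
  variables_to_restore = get_variables_to_restore(exclude=['logits'])
  init_fn = assign_from_checkpoint_fn(checkpoint_path, variables_to_restore,
                                      ignore_missing_vars=True)
  return init_fn  # call as init_fn(session) once a tf.Session exists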
class VariableDeviceChooser(object):
"""Device chooser for variables.
When using a parameter server it will assign them in a round-robin fashion.
When not using a parameter server it allows GPU or CPU placement.
"""
def __init__(self,
num_tasks=0,
job_name='ps',
device_type='CPU',
device_index=0):
"""Initialize VariableDeviceChooser.
Usage:
To use with 2 parameter servers:
VariableDeviceChooser(2)
To use without parameter servers:
VariableDeviceChooser()
VariableDeviceChooser(device_type='GPU') # For GPU placement
Args:
num_tasks: number of tasks.
job_name: String, a name for the parameter server job.
device_type: Optional device type string (e.g. "CPU" or "GPU")
device_index: int. Optional device index. If left
unspecified, device represents 'any' device_index.
"""
self._job_name = job_name
self._device_type = device_type
self._device_index = device_index
self._num_tasks = num_tasks
self._next_task_id = 0
def __call__(self, op):
device_spec = tf_device.DeviceSpec(device_type=self._device_type,
device_index=self._device_index)
if self._num_tasks > 0:
task_id = self._next_task_id
self._next_task_id = (self._next_task_id + 1) % self._num_tasks
device_spec.job = self._job_name
device_spec.task = task_id
return device_spec.to_string()
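def _example_variable_device_chooser():
  # Usage sketch: spread variables over two parameter-server tasks in
  # round-robin fashion by passing the chooser as the `device` argument of the
  # `variable` helper above (variable names are illustrative).
  chooser = VariableDeviceChooser(num_tasks=2)
  w = variable('chooser_example_w', shape=[10], device=chooser)
  b = variable('chooser_example_b', shape=[10], device=chooser)
  return w, b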
def filter_variables(var_list, include_patterns=None, exclude_patterns=None,
reg_search=True):
"""Filter a list of variables using regular expressions.
First includes variables according to the list of include_patterns.
Afterwards, eliminates variables according to the list of exclude_patterns.
For example, one can obtain a list of variables with the weights of all
convolutional layers (depending on the network definition) by:
```python
variables = tf.contrib.framework.get_model_variables()
conv_weight_variables = tf.contrib.framework.filter_variables(
variables,
include_patterns=['Conv'],
exclude_patterns=['biases', 'Logits'])
```
Args:
var_list: list of variables.
include_patterns: list of regular expressions to include. Defaults to None,
which means all variables are selected according to the include rules.
A variable is included if it matches any of the include_patterns.
exclude_patterns: list of regular expressions to exclude. Defaults to None,
which means all variables are selected according to the exclude rules.
A variable is excluded if it matches any of the exclude_patterns.
reg_search: boolean. If True (default), performs re.search to find matches
(i.e. pattern can match any substring of the variable name). If False,
performs re.match (i.e. regexp should match from the beginning of the
variable name).
Returns:
filtered list of variables.
"""
if reg_search:
reg_exp_func = re.search
else:
reg_exp_func = re.match
# First include variables.
if include_patterns is None:
included_variables = list(var_list)
else:
included_variables = []
for var in var_list:
if any(reg_exp_func(ptrn, var.name) for ptrn in include_patterns):
included_variables.append(var)
# Afterwards, exclude variables.
if exclude_patterns is None:
filtered_variables = included_variables
else:
filtered_variables = []
for var in included_variables:
if not any(reg_exp_func(ptrn, var.name) for ptrn in exclude_patterns):
filtered_variables.append(var)
return filtered_variables
|
nikste/tensorflow
|
tensorflow/contrib/framework/python/ops/variables.py
|
Python
|
apache-2.0
| 27,377 | 0.005698 |
import numpy as np
class LinfinityRegression:
def __init__(self, iterations, learning_rate, regularization_strength):
self.iterations = iterations
self.learning_rate = learning_rate
self.regularization_strength = regularization_strength
@staticmethod
def soft_thresholding_operator(x, l):
"""
This method is used to update the weights when performing Gradient Descent.
Whenever the loss function is just the least square loss function, we can minimize by taking the derivative.
However, we cannot minimise the Lasso Loss function in the same weight because the function is not differentiable
at w = 0 (where w is any of the weight component)
:param x:
:param l:
:return:
"""
maxw = max(x)
for i in range(0, len(x)):
if np.abs(x[i]) > np.abs(maxw):
x[i] = l*np.sign(x[i])
elif np.abs(x[i]) < np.abs(maxw) :
x[i] = l*np.sign(maxw)
return x
@staticmethod
def mse_cost_function(predicted_output, actual_output):
"""
This method calculates the error and the MSE cost function given a predicted_value and the actual_value
:param predicted_output:
:param actual_output:
:return: Mean Square Error, Error.
"""
error = predicted_output - actual_output
mse_cost = np.sum(error ** 2) /(2*len(actual_output))
return mse_cost, error
def calculate_weights(self, training_records, output):
mse_costs = []
weights = np.random.rand(training_records.shape[1])
weights_table = [weights]
predicted_outputs = []
for i in range(self.iterations):
predicted_output = np.dot(training_records, weights)
predicted_outputs.append(predicted_output)
mse_cost, error = LinfinityRegression.mse_cost_function(predicted_output, output)
mse_costs.append(mse_cost)
slope = training_records.T.dot(error)/(len(output))
weights = LinfinityRegression.soft_thresholding_operator(weights - self.learning_rate*slope,
self.regularization_strength)
weights_table.append(weights.copy())
return weights_table, mse_costs, predicted_outputs
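if __name__ == '__main__':
    # Minimal usage sketch on synthetic data; the hyper-parameters are
    # illustrative, not tuned values.
    np.random.seed(0)
    X = np.hstack([np.ones((100, 1)), np.random.rand(100, 2)])  # bias + 2 features
    y = X.dot(np.array([1.0, 2.0, -3.0])) + 0.01 * np.random.randn(100)
    model = LinfinityRegression(iterations=500, learning_rate=0.1,
                                regularization_strength=0.05)
    weights_table, mse_costs, _ = model.calculate_weights(X, y)
    print(weights_table[-1])
    print(mse_costs[-1])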
|
aosingh/Regularization
|
LinfinityRegularization/LinfinityRegularizer.py
|
Python
|
mit
| 2,373 | 0.006321 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/loot/loot_schematic/shared_park_bench_schematic.iff"
result.attribute_template_id = -1
result.stfName("craft_item_ingredients_n","park_bench")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/loot/loot_schematic/shared_park_bench_schematic.py
|
Python
|
mit
| 479 | 0.045929 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from ctypes import sizeof, windll, addressof, create_unicode_buffer
from ctypes.wintypes import DWORD, HANDLE
PROCESS_TERMINATE = 0x0001
PROCESS_QUERY_INFORMATION = 0x0400
PROCESS_VM_READ = 0x0010
def get_pids(process_name):
BIG_ARRAY = DWORD * 4096
processes = BIG_ARRAY()
needed = DWORD()
pids = []
result = windll.psapi.EnumProcesses(processes,
sizeof(processes),
addressof(needed))
if not result:
return pids
num_results = needed.value / sizeof(DWORD)
for i in range(num_results):
pid = processes[i]
process = windll.kernel32.OpenProcess(PROCESS_QUERY_INFORMATION |
PROCESS_VM_READ,
0, pid)
if process:
module = HANDLE()
result = windll.psapi.EnumProcessModules(process,
addressof(module),
sizeof(module),
addressof(needed))
if result:
name = create_unicode_buffer(1024)
result = windll.psapi.GetModuleBaseNameW(process, module,
name, len(name))
# TODO: This might not be the best way to
# match a process name; maybe use a regexp instead.
if name.value.startswith(process_name):
pids.append(pid)
windll.kernel32.CloseHandle(module)
windll.kernel32.CloseHandle(process)
return pids
def kill_pid(pid):
process = windll.kernel32.OpenProcess(PROCESS_TERMINATE, 0, pid)
if process:
windll.kernel32.TerminateProcess(process, 0)
windll.kernel32.CloseHandle(process)
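if __name__ == '__main__':
    # Usage sketch (Windows only): terminate every running firefox.exe process.
    # The process name is illustrative.
    for pid in get_pids(u'firefox.exe'):
        kill_pid(pid)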
|
vladikoff/fxa-mochitest
|
tests/venv/lib/python2.7/site-packages/mozprocess/wpk.py
|
Python
|
mpl-2.0
| 2,115 | 0.000946 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# elementaryplus-installer.py elementary+ Installer/Configurator
#
# Copyright (C) 2015 Stefan Ric (cybre)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Some code taken from Evolve-SC (https://github.com/solus-project/evolve-sc)
from gi.repository import Gtk, Gdk, Gio, Notify
import sys
import os
from os import symlink
from os.path import expanduser
import shutil
import subprocess
import apt
if not (Gtk.get_major_version() == 3 and Gtk.get_minor_version() >= 14):
sys.exit("You need to have GTK 3.14 or newer to run this script")
appName = "elementary+ Configurator"
iconThemeName = "elementaryPlus"
fromPPA = False
if fromPPA is True:
whatToUse = "link"
else:
whatToUse = "copy"
scripts = os.getcwd() + "/scripts/"
schema = "/usr/share/glib-2.0/schemas/apps.elementaryPlusConfigurator.gschema.xml"
if os.path.isfile(schema) is False:
subprocess.call(["pkexec", scripts + "first_start.sh", scripts])
settings = Gio.Settings.new("apps.elementaryPlusConfigurator")
patchedSniqt = settings.get_boolean("sniqt-patched")
systemSettings = Gio.Settings.new("org.gnome.desktop.interface")
home = expanduser("~")
iconMegaList = [
# [
# "Name",
# "Sni-qt prefix",
# "Binary/Static File", [/path/to/file]
# "Description",
# "Icon",
# "Install Method", ["custom", "standard"]
# ],
[
"Core icon theme",
"",
"",
"The core elementary+ icon theme",
"preferences-desktop",
"custom"
],
[
"Bitcoin",
"bitcoin-qt",
"/usr/bin/bitcoin-qt",
"Bitcoin is a free open source peer-to-peer electronic cash system that is completely decentralized, without the need for a central server or trusted parties",
"bitcoin128",
"standard"
],
[
"flareGet",
"flareget",
"/usr/bin/flareget",
"FlareGet is a full featured, multi-threaded download manager and accelerator for Windows, Mac and Linux",
"flareget",
"standard"
],
[
"Google Music Manager",
"MusicManager",
"/opt/google/musicmanager/google-musicmanager",
"With Google Play Music for Chrome or Music Manager, you can add your personal music library to the cloud",
"google-musicmanager",
"standard"
],
[
"HP Linux Printing and Imaging",
"python2.7",
"/usr/bin/hp-systray",
"The HP Linux Printing and Imaging System provides full support for printing on most HP SFP inkjets and many LaserJets, and for scanning, sending faxes and for photo-card access on most HP MFP printers",
"HPmenu",
"standard"
],
[
"MEGAsync",
"megasync",
"/usr/bin/megasync",
"MEGAsync is a free online storage service",
"mega",
"standard"
],
[
"Mumble",
"mumble",
"/usr/bin/mumble",
"Mumble is a low-latency, high quality voice chat program for gaming",
"mumble",
"standard"
],
[
"OwnCloud",
"owncloud",
"/usr/bin/owncloud",
"An enterprise file sharing solution for online collaboration and storage",
"owncloud",
"standard"
],
[
"RescueTime",
"rescuetime",
"/usr/bin/rescuetime",
"A personal analytics service that shows you how you spend your time and provides tools to help you be more productive.",
"rescuetime",
"standard"
],
[
"ScreenCloud",
"screencloud",
"/usr/bin/screencloud",
"ScreenCloud is a Screenshot sharing tool",
"screencloud",
"standard"
],
[
"Seafile Client",
"seafile-applet",
"/usr/bin/seafile-applet",
"The Seafile desktop client",
"seafile",
"standard"
],
[
"Skype",
"skype",
"/usr/bin/skype",
"Stay in touch with your family and friends for free on Skype",
"skype",
"standard"
],
[
"Spotify",
"spotify",
["/opt/spotify/spotify-client/spotify", "/usr/bin/spotify"],
"Spotify is a digital music service that gives you access to millions of songs",
"spotify-client",
"custom"
],
[
"Teamviewer",
"teamviewer",
"/usr/bin/teamviewer",
"TeamViewer is a software package for remote control, desktop sharing, online meetings, web conferencing and file transfer between computers",
"teamviewer",
"custom"
],
[
"Telegram Desktop",
"telegram",
"/usr/bin/telegram",
"Telegram is a messaging app with a focus on speed and security, it's super fast, simple and free",
"telegram",
"custom"
],
[
"Tomahawk",
"tomahawk",
"/usr/bin/tomahawk",
"A new kind of music player that invites all your streams, downloads, cloud music storage, playlists, radio stations and friends to the same party. It's about time they all mingle",
"tomahawk",
"standard"
],
[
"WizNote",
"WizNote",
"/usr/bin/WizNote",
"Wiznote is a cloud based notes solution which helps personal and professional to take notes and collaborate with team members",
"wiznote",
"standard"
]
]
customCheckLocations = [
[
"telegram_desktop",
["%s/.local/share/TelegramDesktop/tdata/ticons/elementaryPlus.installed" % (home)]
]
]
installedComponents = []
availableComponents = []
iconTheme = Gtk.IconTheme
defaultIconTheme = iconTheme.get_default()
def checkIfInstalled(appName):
for customLocation in customCheckLocations:
if appName in customLocation:
for location in customLocation[1]:
if os.path.isfile(location):
installedComponents.append(appName)
if os.path.isdir("%s/.local/share/sni-qt/icons/" % (home) + codeName):
installedComponents.append(codeName)
for a in iconMegaList:
name = a[0]
codeName = a[0].lower().replace(" ", "_")
shortDesc = (a[3][:60] + '...') if len(a[3]) > 60 else a[3]
icon = ("package-x-generic") if iconTheme.has_icon(defaultIconTheme, a[4]) == False else a[4]
installMethod = a[5]
sniqtPrefix = a[1]
if isinstance(a[2], list):
for checkLocation in a[2]:
print checkLocation
if os.path.isfile(checkLocation):
enabled = True
break
else:
enabled = False
else:
enabled = (True) if os.path.isfile(a[2]) or codeName == "core_icon_theme" else False
checkIfInstalled(codeName)
availableComponents.append([name, codeName, shortDesc, icon, installMethod, sniqtPrefix, enabled])
availableComponents.sort(key=lambda x: x[6], reverse=True)
print "installed", installedComponents
class InstallerWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title=appName)
self.set_size_request(500, 500)
self.set_icon_name("preferences-desktop")
self.error = 0
self.hb = Gtk.HeaderBar()
self.hb.set_show_close_button(True)
self.hb.props.title = "elementary+"
self.hb.set_subtitle("Configurator")
self.set_titlebar(self.hb)
searchIcon = Gtk.Image.new_from_icon_name("edit-find-symbolic", Gtk.IconSize.LARGE_TOOLBAR)
searchButton = Gtk.ToggleButton()
searchButton.set_image(searchIcon)
searchButton.connect('clicked', self.search_handler)
self.searchButton = searchButton
self.hb.pack_start(searchButton)
Notify.init(appName)
self.add(self.build_ui())
style_provider = Gtk.CssProvider()
css = """
.search-bar {
border-width: 0;
}
"""
style_provider.load_from_data(css)
Gtk.StyleContext.add_provider_for_screen(
Gdk.Screen.get_default(),
style_provider,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
def notify(self, messageOne, messageTwo, icon):
try:
notification = Notify.Notification.new(messageOne, messageTwo, icon)
notification.set_urgency(1)
notification.show()
del notification
except:
pass
def build_ui(self):
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
self.searchBar = Gtk.SearchBar()
self.searchBar.get_style_context().add_class("primary-toolbar")
self.searchBar.set_halign(Gtk.Align.FILL)
self.searchBar.set_show_close_button(True)
entry = Gtk.SearchEntry()
entry.connect("search-changed", self.search_changed)
self.searchBar.add(entry)
self.searchBar.connect_entry(entry)
vbox.pack_start(self.searchBar, False, False, 0)
self.searchEntry = entry
self.connect("key-press-event", lambda x, y: self.searchBar.handle_event(y))
iconsPage = self.create_icons_page()
vbox.pack_start(iconsPage, True, True, 0)
return vbox
def create_icons_page(self):
scroller = Gtk.ScrolledWindow(None, None)
scroller.set_border_width(10)
scroller.set_shadow_type(Gtk.ShadowType.IN)
scroller.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
self.lbox = Gtk.ListBox()
self.lbox.set_selection_mode(Gtk.SelectionMode.NONE)
placeholder = Gtk.Label()
self.placeholder = placeholder
placeholder.set_use_markup(True)
placeholder.get_style_context().add_class("dim-label")
self.lbox.set_placeholder(placeholder)
placeholder.show_all()
scroller.add(self.lbox)
for i in range(len(availableComponents)):
for sublist in iconMegaList:
if sublist[0] == availableComponents[i][0]:
longDesc = sublist[3]
item = self.create_item(availableComponents[i][0], availableComponents[i][3], availableComponents[i][2], availableComponents[i][6])
componentSwitch = Gtk.Switch()
componentSwitch.set_name(availableComponents[i][0].lower())
componentSwitch.props.halign = Gtk.Align.END
componentSwitch.props.valign = Gtk.Align.CENTER
componentSwitch.connect("notify::active", self.callback, availableComponents[i][1], availableComponents[i][4], availableComponents[i][5])
if availableComponents[i][1] in installedComponents:
componentSwitch.set_active(True)
if availableComponents[i][1] == "core_icon_theme":
currentTheme = systemSettings.get_string("icon-theme")
if currentTheme == iconThemeName:
componentSwitch.set_active(True)
wrap = Gtk.HBox(0)
wrap.pack_start(item, True, True, 0)
wrap.pack_end(componentSwitch, False, False, 2)
if availableComponents[i][6] is False:
wrap.set_sensitive(False)
wrap.set_tooltip_text(longDesc)
self.lbox.add(wrap)
return scroller
def create_item(self, name, iconName, shortDesc, enabled):
grid = Gtk.Grid()
grid.set_border_width(16)
grid.set_row_spacing(4)
grid.set_column_spacing(16)
if enabled is True:
label = Gtk.Label("<big>%s</big>" % name)
else:
label = Gtk.Label("<big>%s (Not installed)</big>" % name)
label.set_use_markup(True)
label.set_alignment(0.0, 0.5)
icon = Gtk.Image.new_from_icon_name(iconName, Gtk.IconSize.DIALOG)
desc = Gtk.Label(shortDesc)
desc.get_style_context().add_class("dim-label")
desc.set_alignment(0.0, 0.5)
grid.attach(icon, 0, 0, 1, 2)
grid.attach(label, 1, 0, 1, 1)
grid.attach(desc, 1, 1, 1, 1)
return grid
def search_handler(self, w):
w.freeze_notify()
self.searchBar.set_search_mode(w.get_active())
w.thaw_notify()
def search_changed(self, w, data=None):
text = w.get_text().strip()
if text == "":
self.searchBar.set_search_mode(False)
        act = text != ""
self.searchButton.freeze_notify()
self.searchButton.set_active(act)
self.searchButton.thaw_notify()
self.searching(w)
def searching(self, entry, event=None):
text = entry.get_text().strip()
self.lbox.set_filter_func(self.filter, text)
res = False
for child in self.lbox.get_children():
if child.get_visible() and child.get_child_visible():
res = True
break
if not res:
self.placeholder.set_markup("<big>No results</big>")
def filter(self, row, text):
name = row.get_children()[0].get_children()[0].get_children()[1].get_text()
desc = row.get_children()[0].get_tooltip_text()
if text.lower() in name.lower() or text.lower() in desc.lower():
return True
else:
return False
def callback(self, widget, event, data, method, sniqtPrefix):
if widget.get_active() == 1:
if data == "core_icon_theme":
self.toggleTheme("install")
elif data not in installedComponents:
self.install(data, method, sniqtPrefix)
else:
if data == "core_icon_theme":
self.toggleTheme("remove")
elif data in installedComponents and self.error == 0:
self.remove(data, method, sniqtPrefix)
def install(self, appName, installMethod, sniqtPrefix):
patchedSniqt = settings.get_boolean("sniqt-patched")
if appName != "core_icon_theme" and appName != "telegram_desktop" and patchedSniqt is False and fromPPA is False:
print "Installing patched sni-qt"
self.notify('This may take a while', 'Please don\'t close the window', 'preferences-desktop')
if subprocess.call(['pkexec', scripts + "sni-qt.sh"]) == 0:
cache = apt.Cache()
version = cache["sni-qt"].candidate.version
if "0.2.7" in version:
print "Succesfully patched sni-qt"
settings.set_boolean("sniqt-patched", True)
else:
print "Failed to patch sni-qt"
else:
print "Unknown error"
out = 0
if installMethod == "standard":
out = self.installQtIndicatorIcons(appName, sniqtPrefix)
else:
out = subprocess.call(['python', scripts + "custom/" + appName + ".py", "--install", whatToUse, scripts])
print out
if out == 1:
self.error = 1
if appName != "spotify":
self.notify('elementary+ Configurator', 'Error while installing ' + appName.replace("_", " ").capitalize(), 'error')
if self.error == 0:
installedComponents.append(appName)
def remove(self, appName, installMethod, sniqtPrefix):
out = 0
if installMethod == "standard":
out = self.removeQtIndicatorIcons(sniqtPrefix)
else:
out = subprocess.call(['python', scripts + "custom/" + appName + ".py", "--remove", whatToUse, scripts])
if out == 1:
self.error = 1
self.notify('elementary+ Configurator', 'Error while removing ' + appName.replace("_", " ").capitalize(), 'error')
if self.error == 0:
installedComponents.remove(appName)
def installQtIndicatorIcons(self, appName, sniqtPrefix):
iconDir = scripts + "icons/" + appName + "/"
destDir = home + "/.local/share/sni-qt/icons/" + sniqtPrefix + "/"
if whatToUse == "copy":
if os.path.exists(destDir):
try:
shutil.rmtree(destDir)
except:
return False
copy = shutil.copytree(iconDir, destDir)
return copy
elif whatToUse == "link":
if not os.path.exists(destDir):
os.makedirs(destDir)
else:
try:
shutil.rmtree(destDir)
except:
return False
os.makedirs(destDir)
for icon in os.listdir(iconDir):
link = symlink(iconDir + icon, destDir + icon)
return link
else:
print "Invalid operation!"
def removeQtIndicatorIcons(self, sniqtPrefix):
destDir = home + "/.local/share/sni-qt/icons/" + sniqtPrefix
if os.path.exists(destDir):
try:
shutil.rmtree(destDir)
except:
return False
else:
return True
def toggleTheme(self, operation):
currentTheme = systemSettings.get_string("icon-theme")
previousIconTheme = settings.get_string("previous-icon-theme")
if os.path.isdir(home + "/.local/share/icons/elementaryPlus"):
shutil.rmtree(home + "/.local/share/icons/elementaryPlus")
if os.path.isdir("/usr/share/icons/elementaryPlus"):
print "/usr/share... exists"
if operation == "install":
if fromPPA is True:
if os.path.isdir(home + "/.icons/elementaryPlus"):
print "Remove from .icons"
shutil.rmtree(home + "/.icons/elementaryPlus")
if currentTheme != iconThemeName:
settings.set_string("previous-icon-theme", currentTheme)
systemSettings.set_string("icon-theme", iconThemeName)
else:
out = subprocess.call(['python', scripts + "custom/core_icon_theme.py", "--install", whatToUse, scripts])
if currentTheme != iconThemeName:
settings.set_string("previous-icon-theme", currentTheme)
systemSettings.set_string("icon-theme", iconThemeName)
else:
if fromPPA is True:
systemSettings.set_string("icon-theme", previousIconTheme)
else:
out = subprocess.call(['python', scripts + "custom/core_icon_theme.py", "--remove", whatToUse, scripts])
systemSettings.set_string("icon-theme", previousIconTheme)
else:
print "/usr/share... does not exist"
if operation == "install":
out = subprocess.call(['python', scripts + "custom/core_icon_theme.py", "--install", whatToUse, scripts])
settings.set_string("previous-icon-theme", currentTheme)
systemSettings.set_string("icon-theme", iconThemeName)
else:
out = subprocess.call(['python', scripts + "custom/core_icon_theme.py", "--remove", whatToUse, scripts])
systemSettings.set_string("icon-theme", previousIconTheme)
win = InstallerWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
|
mank319/elementaryPlus
|
scripts/elementaryplus-installer.py
|
Python
|
gpl-3.0
| 19,933 | 0.002057 |
# coding=utf-8
from ._commandbase import RadianceCommand
from ..datatype import RadiancePath
import os
class Epw2wea(RadianceCommand):
"""epw2wea transforms an EnergyPlus weather data (.epw) file into
the DAYSIM weather file format, for use with the RADIANCE gendaymtx
program.
Attributes:
epw_file: Filepath of the epw file that is to be converted into wea
format.
Usage:
from honeybee_plus.radiance.command.epw2wea import Epw2wea.
#create an epw2wea command.
epwWea = Epw2wea(epw_fileName='c:/ladybug/test.epw')
"""
_epw_file = RadiancePath('_epw_file',
descriptive_name='Epw weather data file',
relative_path=None, check_exists=False)
output_wea_file = RadiancePath('output_wea_file',
descriptive_name='Output wea file',
relative_path=None, check_exists=False)
def __init__(self, epw_file=None, output_wea_file=None):
RadianceCommand.__init__(self)
self.epw_file = epw_file
"""The path of the epw file that is to be converted to a wea file."""
self.output_wea_file = output_wea_file
"""The path of the output wea file. Note that this path will be created
if not specified by the user."""
@property
def epw_file(self):
return self._epw_file
@epw_file.setter
def epw_file(self, value):
"""The path of the epw file that is to be converted to a wea file."""
if value:
self._epw_file = value
if not self.output_wea_file._value:
self.output_wea_file = os.path.splitext(value)[0] + '.wea'
else:
self._epw_file = None
def to_rad_string(self, relative_path=False):
"""Return full radiance command as string"""
rad_string = "%s %s %s" % (
'"%s"' % os.path.join(self.radbin_path, 'epw2wea'),
self.epw_file.to_rad_string(),
self.output_wea_file.to_rad_string())
# self.check_input_files(rad_string)
return rad_string
@property
def input_files(self):
"""Return input files specified by user."""
return self.epw_file.normpath,
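# Illustrative only: with an empty radbin_path (binaries on PATH) and the epw file
# from the docstring above, to_rad_string() yields something along the lines of
#   "epw2wea" c:/ladybug/test.epw c:/ladybug/test.wea
# The exact prefix and path normalisation depend on RadianceCommand/RadiancePath.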
|
ladybug-analysis-tools/honeybee
|
honeybee_plus/radiance/command/epw2wea.py
|
Python
|
gpl-3.0
| 2,274 | 0 |
from Timeline.Server.Constants import TIMELINE_LOGGER
from twisted.internet.defer import Deferred, inlineCallbacks, returnValue
from twistar.dbobject import DBObject
from twistar.registry import Registry
from collections import deque
import logging, time, json
class Penguin(DBObject):
HASONE = ['avatar', 'currency', 'ninja']
HASMANY = ['assets', 'bans', 'careItems', 'coins', 'friends', 'ignores', 'requests', 'inventories', 'mails', 'memberships',
'musicTracks', 'puffles', 'stamps', 'stampCovers', 'igloos']
class Coin(DBObject):
pass
class Igloo(DBObject):
HASMANY = ['iglooFurnitures', 'iglooLikes']
@inlineCallbacks
def get_likes_count(self):
likes = yield Registry.getConfig().execute("SELECT COALESCE(SUM(likes), 0) FROM igloo_likes where "
"igloo_id = %s" % (self.id))
returnValue(likes[0][0])
@inlineCallbacks
def get_furnitures(self):
furnitures = yield self.iglooFurnitures.get()
returnValue(furnitures)
@inlineCallbacks
def get_furnitures_string(self):
furnitures = yield self.get_furnitures()
furn_data = map(lambda i: '|'.join(map(str, map(int, [i.furn_id, i.x, i.y, i.rotate, i.frame]))), furnitures)
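        # e.g. ','.join(furn_data) -> "101|3|5|1|0,102|7|2|3|0" (furn_id|x|y|rotate|frame; values illustrative)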
returnValue(','.join(furn_data))
@inlineCallbacks
def updateFurnitures(self, furnitures):
yield self.refresh()
yield IglooFurniture.deleteAll(where=['igloo_id = ?', self.id])
furn = [IglooFurniture(igloo_id=self.id, furn_id=x[0], x=x[1], y=x[2], rotate=x[3], frame=x[4])
for x in furnitures]
[(yield i.save()) for i in furn]
yield self.iglooFurnitures.set(furn)
class IglooFurniture(DBObject):
pass
class IglooLike(DBObject):
def get_time(self):
return int(time.mktime(self.time.timetuple()))
class Avatar(DBObject):
pass
class Currency(DBObject):
pass
class Ninja(DBObject):
pass
class Asset(DBObject):
def getPurchasedTimestamp(self):
return int(time.mktime(self.purchased.timetuple()))
class Ban(DBObject):
def banned(self):
        return self.hours() > 0
def hours(self):
expire = int(time.mktime(self.expire.timetuple()))
hours = (expire - time.time()) / (60 * 60.0) if expire > time.time() else 0
return hours
class CareItem(DBObject):
pass
class Friend(DBObject):
friend_id = -1
class Ignore(DBObject):
pass
class Request(DBObject):
pass
class Inventory(DBObject):
pass
class Mail(DBObject):
def get_sent_on(self):
return int(time.mktime(self.sent_on.timetuple()))
class Membership(DBObject):
pass
class MusicTrack(DBObject):
shared = False
def __len__(self):
return self.length
def __str__(self, withNotes = False):
if not withNotes:
return '|'.join(map(str, [self.id, self.name, int(self.shared), self.likes]))
return '%'.join(map(str, [self.id, self.name, int(self.shared), self.notes, self.hash, self.likes]))
def __int__(self):
return self.id
class Puffle(DBObject):
state = x = y = 0
def __str__(self):
# puffle id|type|sub_type|name|adoption|food|play|rest|clean|hat|x|y|is_walking
return '|'.join(map(str, [int(self.id), int(self.type), self.subtype if int(self.subtype) != 0 else '',
self.name, self.adopt(), int(self.food), int(self.play), int(self.rest),
int(self.clean), int(self.hat), int(self.x), int(self.y), int(self.walking)]))
def adopt(self):
return int(time.mktime(self.adopted.timetuple()))
def updatePuffleStats(self, engine):
care_history = json.loads(self.lastcare)
now = time.time()
        if care_history is None or len(care_history) < 1 or bool(int(self.backyard)) or self.walking:
            care_history = care_history or {}  # guard: json.loads may return None
            care_history['food'] = care_history['play'] = care_history['bath'] = now
self.lastcare = json.dumps(care_history)
self.save()
return # ULTIMATE PUFFLE <indefinite health and energy>
last_fed = care_history['food']
last_played = care_history['play']
last_bathed = care_history['bath']
food, play, clean = int(self.food), int(self.play), int(self.clean)
puffleCrumb = engine.puffleCrumbs[self.subtype]
max_food, max_play, max_clean = puffleCrumb.hunger, 100, puffleCrumb.health
self.rest = 100 # It's in the igloo all this time?
self.save()
        ''' It after all is a poor creature to be taken care of.
if not int(puffle.id) in self.penguin.engine.puffleCrumbs.defautPuffles:
return # They aren't to be taken care of
'''
'''
if remaining % < 10 : send a postcard blaming (hungry, dirty, or unhappy)
if remaining % < 2 : move puffle to pet store, delete puffle, send a postcard, sue 1000 coins as penalty
'''
fed_percent = food - 5 * ((now - last_fed)/86400) # delta_food = -5% per day
play_percent = play - 5 * ((now - last_played)/86400) # delta_play = -5% per day
clean_percent = clean - 10 * ((now - last_bathed)/86400) # delta_clean = -10% per day
total_percent = (fed_percent + play_percent + clean_percent) / 3.0
if fed_percent < 3 or total_percent < 6:
self.backyard = 1
self.food = 100
self.play = 100
self.clean = 100
self.save()
return
if fed_percent < 10:
pid = self.penguin_id
pname = self.name
def sendMail(mail):
if mail is not None:
sent = mail.sent_on
delta = (time.time() - sent)/3600/12
if delta < 1:
return
Mail(penguin_id=pid, from_user=0, type=110, description=str(pname)).save()
last_mail = Mail.find(where=['penguin_id = ? AND type = 110 AND description = ?', self.penguin_id, self.name], orderby='sent_on DESC', limit=1).addCallback(sendMail)
self.food = fed_percent
self.play = play_percent
self.clean = clean_percent
care_history['food'] = care_history['play'] = care_history['bath'] = now
self.lastcare = json.dumps(care_history)
self.save()
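        # Worked example of the decay above (illustrative numbers): a puffle with
        # food=100 last fed two days ago gives fed_percent = 100 - 5 * (172800 / 86400) = 90;
        # cleanliness decays twice as fast, so clean=100 two days unbathed drops to 80.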
class Stamp(DBObject):
def __int__(self):
return int(self.stamp)
class StampCover(DBObject):
pass
class EPFCom(DBObject):
TABLENAME = 'epfcoms'
def getTime(self):
return int(time.mktime(self.time.timetuple()))
def __str__(self):
return '|'.join(map(str, [self.message, self.getTime(), self.mascot]))
class PenguinDB(object):
"""
<Server.Penguin> will extend this to get db operations
Syntax:
def db_<FunctionName> (*a, **kwa): << must be deferred and mustreturn a defer
> recommended to use with inlineCallbacks
"""
def __init__(self):
self.logger = logging.getLogger(TIMELINE_LOGGER)
self.dbpenguin = None
@inlineCallbacks
def db_init(self):
if self.dbpenguin is None:
column, value = 'username', self.penguin.username
if not self.penguin.id is None:
column, value = 'ID', self.penguin.id
elif not self.penguin.swid is None:
column, value = 'swid', self.penguin.swid
self.dbpenguin = yield Penguin.find(where = ['%s = ?' % column, value], limit = 1)
if self.dbpenguin is None:
raise Exception("[TE201] Penguin not found with {1} - {0}".format(value, column))
returnValue(True)
@inlineCallbacks
def db_nicknameUpdate(self, nick):
p_nickname = self.dbpenguin.nickname
self.dbpenguin.nickname = nick
        done = yield self.dbpenguin.save()
if len(done.errors) > 0:
self.dbpenguin.nickname = p_nickname
for error in done.errors:
self.log('error', "[TE200] MySQL update nickname failed. Error :", error)
returnValue(False)
else:
returnValue(True)
@inlineCallbacks
def db_penguinExists(self, criteria = 'ID', value = None):
exists = yield Penguin.exists(["`%s` = ?" % criteria, value])
returnValue(exists)
@inlineCallbacks
def db_getPenguin(self, criteria, *values):
wh = [criteria] + list(values)
p = yield Penguin.find(where = wh, limit = 1)
returnValue(p)
@inlineCallbacks
def db_refresh(self):
yield self.dbpenguin.refresh()
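# Hypothetical sketch of the db_<FunctionName> convention documented above
# (the method name and relation are illustrative, not part of the real API):
#
#     @inlineCallbacks
#     def db_getCoins(self):
#         coins = yield self.dbpenguin.coins.get()
#         returnValue(coins)
#
# Every db_* method runs under inlineCallbacks and therefore returns a Deferred.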
|
Times-0/Timeline
|
Timeline/Database/DB.py
|
Python
|
gpl-3.0
| 8,784 | 0.008311 |
from abc import ABCMeta, abstractmethod
import json
import os
try:
import redis
except ImportError:
pass
from .config import Config
def get_cache(cache, config=None):
if isinstance(cache, str):
if cache == 'JsonCache':
return JsonCache()
elif cache == 'RedisCache':
if config is None:
config = Config()
return RedisCache(config)
else:
raise(Exception('Invalid string cache option specified.'))
else:
return cache
class Cache(object):
__metaclass__ = ABCMeta
@abstractmethod
def get(self, key):
pass
@abstractmethod
def set(self, key, value):
pass
@abstractmethod
def delete(self, key):
pass
class JsonCache(Cache):
def __init__(self, filename='cache.json', root_directory=None):
if root_directory is not None:
self.data_location = '{}/{}'.format(root_directory, filename)
else:
self.data_location = '{}/{}'.format(os.getcwd(), filename)
# if the file doesn't exist create a empty file with a json object
if not os.path.isfile(self.data_location):
with open(self.data_location, 'w+') as data_file:
data_file.write('{}')
def get(self, key):
with open(self.data_location) as data_file:
data = json.load(data_file)
if key in data:
value = data[key]
else:
value = None
return value
def set(self, key, value):
with open(self.data_location, 'r+') as data_file:
data = json.load(data_file)
data[key] = value
data_file.seek(0)
data_file.write(json.dumps(data))
data_file.truncate()
return True
def delete(self, key):
with open(self.data_location, 'r+') as data_file:
data = json.load(data_file)
if key not in data:
return False
data.pop(key, None)
data_file.seek(0)
data_file.write(json.dumps(data))
data_file.truncate()
return True
class RedisCache(Cache):
# currently loading is only from config file
def __init__(self, config):
self.redis_uri = config.get_config(None, 'URI', root='redis')
self.redis = None
self.redis_config = config.get_section_config('redis')
def get(self, key):
self._connect()
value = self.redis.get(key)
if value is not None:
value = value.decode('UTF-8')
return value
def set(self, key, value):
self._connect()
result = self.redis.set(key, value)
if result > 0:
return True
else:
return False
def delete(self, key):
self._connect()
result = self.redis.delete(key)
if result > 0:
return True
else:
return False
def _connect(self):
if self.redis is None:
self.redis = redis.StrictRedis.from_url(self.redis_uri)
for name, value in self.redis_config.items():
try:
self.redis.config_set(name, value)
except:
pass
# log...
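# Minimal usage sketch for the JsonCache backend (key and value are illustrative;
# assumes the current working directory is writable so cache.json can be created):
if __name__ == '__main__':
    cache = get_cache('JsonCache')      # creates ./cache.json on first use
    cache.set('greeting', 'hello')      # -> True
    print(cache.get('greeting'))        # -> hello
    cache.delete('greeting')            # -> True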
|
Grungnie/microsoftbotframework
|
microsoftbotframework/cache.py
|
Python
|
mit
| 3,321 | 0.000602 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-17 10:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('basicviz', '0049_auto_20170216_2228'),
]
operations = [
migrations.CreateModel(
name='Decomposition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
('experiment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basicviz.Experiment')),
],
),
migrations.CreateModel(
name='DecompositionFeatureInstance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('intensity', models.FloatField()),
('document', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basicviz.Document')),
],
),
migrations.CreateModel(
name='DocumentGlobalFeature',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('intensity', models.FloatField()),
('document', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basicviz.Document')),
],
),
migrations.CreateModel(
name='FeatureMap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='FeatureSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('description', models.CharField(max_length=1024, null=True)),
],
),
migrations.CreateModel(
name='GlobalFeature',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('min_mz', models.FloatField()),
('max_mz', models.FloatField()),
('featureset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.FeatureSet')),
],
),
migrations.CreateModel(
name='GlobalMotif',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('originalmotif', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basicviz.Mass2Motif')),
],
),
migrations.CreateModel(
name='GlobalMotifGlobalFeature',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('probability', models.FloatField()),
('feature', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.GlobalFeature')),
('motif', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.GlobalMotif')),
],
),
migrations.AddField(
model_name='featuremap',
name='globalfeature',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.GlobalFeature'),
),
migrations.AddField(
model_name='featuremap',
name='localfeature',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basicviz.Feature'),
),
migrations.AddField(
model_name='documentglobalfeature',
name='feature',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.GlobalFeature'),
),
migrations.AddField(
model_name='decompositionfeatureinstance',
name='feature',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.GlobalFeature'),
),
]
|
sdrogers/ms2ldaviz
|
ms2ldaviz/decomposition/migrations/0001_initial.py
|
Python
|
mit
| 4,500 | 0.004222 |
# -*- coding: utf-8 -*-
import django
from django.contrib.auth.models import User
from django.test import TestCase, Client
# compat thing!
if django.VERSION[:2] < (1, 10):
from django.core.urlresolvers import reverse
else:
from django.urls import reverse
class FilerUtilsTests(TestCase):
def setUp(self):
self.client = Client()
self.user = User.objects.create_superuser(
username='fred',
password='test',
email='test@test.fred',
)
def tearDown(self):
pass
def test_has_css(self):
self.client.login(username='fred', password='test')
url = reverse('admin:filer_folder_changelist')
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
|
rouxcode/django-filer-addons
|
filer_addons/tests/test_utils.py
|
Python
|
mit
| 793 | 0 |
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016 Jonathan Labéjof <jonathan.labejof@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------
"""Base schema package."""
from b3j0f.utils.version import OrderedDict
from inspect import getmembers
from six import iteritems
from uuid import uuid4
__all__ = ['Schema', 'DynamicValue']
class DynamicValue(object):
"""Handle a function in order to dynamically lead a value while cleaning a
schema.
For example, the schema attribute ``uuid`` uses a DynamicValue in order to
ensure default generation per instanciation.
"""
__slots__ = ['func']
def __init__(self, func, *args, **kwargs):
""":param func: function to execute while cleaning a schema."""
super(DynamicValue, self).__init__(*args, **kwargs)
self.func = func
def __call__(self):
return self.func()
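# For example (illustrative), wrapping a callable defers evaluation until the
# value is actually needed, so each call produces a fresh result:
#
#     fresh = DynamicValue(lambda: str(uuid4()))
#     fresh()  # -> a new uuid string on every call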
class Schema(property):
"""Schema description.
A schema is identified by a string such as an universal unique identifier,
and optionnally a name.
Any setted value respect those conditions in this order:
1. if the value is a lambda expression, the value equals its execution.
2. the value is validated with this method `validate`.
3. the value is given to a custom setter (`fget` constructor parameter) if
given or setted to this attribute `_value`.
Once you defined your schema inheriting from this class, your schema will
be automatically registered in the registry and becomes accessible from the
`b3j0f.schema.reg.getschemabyuid` function.
"""
name = '' #: schema name. Default is self name.
#: schema universal unique identifier.
uuid = DynamicValue(lambda: str(uuid4()))
doc = '' #: schema description.
default = None #: schema default value.
required = [] #: required schema names.
version = '1' #: schema version.
nullable = True #: if True (default), value can be None.
def __init__(
self, fget=None, fset=None, fdel=None, doc=None, **kwargs
):
"""Instance attributes are setted related to arguments or inner schemas.
:param default: default value. If lambda, called at initialization.
"""
super(Schema, self).__init__(
fget=self._getter, fset=self._setter, fdel=self._deleter,
doc=doc
)
# set custom getter/setter/deleter
if fget or not hasattr(self, '_fget_'):
self._fget_ = fget
if fset or not hasattr(self, '_fset_'):
self._fset_ = fset
if fdel or not hasattr(self, '_fdel_'):
self._fdel_ = fdel
if doc is not None:
kwargs['doc'] = doc
cls = type(self)
# set inner schema values
for name, member in getmembers(cls):
if name[0] != '_' and name not in [
'fget', 'fset', 'fdel', 'setter', 'getter', 'deleter',
'default'
]:
if name in kwargs:
val = kwargs[name]
else:
val = member
if isinstance(val, DynamicValue):
val = val()
if isinstance(val, Schema):
val = val.default
if isinstance(val, DynamicValue):
val = val()
setattr(self, self._attrname(name=name), val)
if member != val:
setattr(self, name, val)
default = kwargs.get('default', self.default)
self._default_ = default
if default is not None:
self.default = default
def _attrname(self, name=None):
"""Get attribute name to set in order to keep the schema value.
:param str name: attribute name. Default is this name or uuid.
:return:
:rtype: str
"""
return '_{0}_'.format(name or self._name_ or self._uuid_)
def __repr__(self):
return '{0}({1}/{2})'.format(type(self).__name__, self.uuid, self.name)
def __hash__(self):
return hash(self.uuid)
def _getter(self, obj):
"""Called when the parent element tries to get this property value.
:param obj: parent object.
"""
result = None
if self._fget_ is not None:
result = self._fget_(obj)
if result is None:
result = getattr(obj, self._attrname(), self._default_)
# notify parent schema about returned value
if isinstance(obj, Schema):
obj._getvalue(self, result)
return result
def _getvalue(self, schema, value):
"""Fired when inner schema returns a value.
:param Schema schema: inner schema.
:param value: returned value.
"""
def _setter(self, obj, value):
"""Called when the parent element tries to set this property value.
:param obj: parent object.
:param value: new value to use. If lambda, updated with the lambda
result.
"""
if isinstance(value, DynamicValue): # execute lambda values.
fvalue = value()
else:
fvalue = value
self._validate(data=fvalue, owner=obj)
if self._fset_ is not None:
self._fset_(obj, fvalue)
else:
setattr(obj, self._attrname(), value)
# notify obj about the new value.
if isinstance(obj, Schema):
obj._setvalue(self, fvalue)
def _setvalue(self, schema, value):
"""Fired when inner schema change of value.
:param Schema schema: inner schema.
:param value: new value.
"""
def _deleter(self, obj):
"""Called when the parent element tries to delete this property value.
:param obj: parent object.
"""
if self._fdel_ is not None:
self._fdel_(obj)
else:
delattr(obj, self._attrname())
# notify parent schema about value deletion.
if isinstance(obj, Schema):
obj._delvalue(self)
def _delvalue(self, schema):
"""Fired when inner schema delete its value.
:param Schema schema: inner schema.
"""
def _validate(self, data, owner=None):
"""Validate input data in returning an empty list if true.
:param data: data to validate with this schema.
:param Schema owner: schema owner.
:raises: Exception if the data is not validated.
"""
if isinstance(data, DynamicValue):
data = data()
if data is None and not self.nullable:
raise ValueError('Value can not be null')
elif data is not None:
isdict = isinstance(data, dict)
for name, schema in iteritems(self.getschemas()):
if name == 'default':
continue
if name in self.required:
if (
(isdict and name not in data) or
(not isdict and not hasattr(data, name))
):
part1 = (
'Mandatory property {0} by {1} is missing in {2}.'.
format(name, self, data)
)
part2 = '{0} expected.'.format(schema)
error = '{0} {1}'.format(part1, part2)
raise ValueError(error)
elif (isdict and name in data) or hasattr(data, name):
value = data[name] if isdict else getattr(data, name)
schema._validate(data=value, owner=self)
@classmethod
def getschemas(cls):
"""Get inner schemas by name.
:return: ordered dict by name.
:rtype: OrderedDict
"""
members = getmembers(cls, lambda member: isinstance(member, Schema))
result = OrderedDict()
for name, member in members:
result[name] = member
return result
@classmethod
def apply(cls, *args, **kwargs):
"""Decorator for schema application with parameters."""
return lambda fget: cls(fget, *args, **kwargs)
|
b3j0f/schema
|
b3j0f/schema/base.py
|
Python
|
mit
| 9,410 | 0.000106 |
import os
from ..helper import freq_to_mel
from ..praat import PraatAnalysisFunction
class PraatMfccFunction(PraatAnalysisFunction):
def __init__(self, praat_path=None, window_length=0.025, time_step=0.01, max_frequency=7800,
num_coefficients=13):
script_dir = os.path.dirname(os.path.abspath(__file__))
script = os.path.join(script_dir, 'mfcc.praat')
arguments = [num_coefficients, window_length, time_step, freq_to_mel(max_frequency)]
super(PraatMfccFunction, self).__init__(script, praat_path=praat_path, arguments=arguments)
|
mmcauliffe/python-acoustic-similarity
|
conch/analysis/mfcc/praat.py
|
Python
|
mit
| 586 | 0.005119 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OperationListResult(Model):
"""The list of available operations for Data Lake Store.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar value: the results of the list operation.
:vartype value: list[~azure.mgmt.datalake.store.models.Operation]
:ivar next_link: the link (url) to the next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(self):
super(OperationListResult, self).__init__()
self.value = None
self.next_link = None
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-datalake-store/azure/mgmt/datalake/store/models/operation_list_result.py
|
Python
|
mit
| 1,314 | 0 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'RssFeed'
db.create_table(u'rsssync_rssfeed', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=256)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal(u'rsssync', ['RssFeed'])
# Adding model 'RssEntry'
db.create_table(u'rsssync_rssentry', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=500)),
('summary', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('link', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('feed', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rsssync.RssFeed'])),
))
db.send_create_signal(u'rsssync', ['RssEntry'])
def backwards(self, orm):
# Deleting model 'RssFeed'
db.delete_table(u'rsssync_rssfeed')
# Deleting model 'RssEntry'
db.delete_table(u'rsssync_rssentry')
models = {
u'rsssync.rssentry': {
'Meta': {'object_name': 'RssEntry'},
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsssync.RssFeed']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'rsssync.rssfeed': {
'Meta': {'object_name': 'RssFeed'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['rsssync']
|
ebrelsford/django-rsssync
|
rsssync/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 2,776 | 0.008285 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import io
import re
from os.path import join, dirname
from setuptools import setup, find_packages
RE_MD_CODE_BLOCK = re.compile(r'```(?P<language>\w+)?\n(?P<lines>.*?)```', re.S)
RE_SELF_LINK = re.compile(r'\[(.*?)\]\[\]')
RE_LINK_TO_URL = re.compile(r'\[(?P<text>.*?)\]\((?P<url>.*?)\)')
RE_LINK_TO_REF = re.compile(r'\[(?P<text>.*?)\]\[(?P<ref>.*?)\]')
RE_LINK_REF = re.compile(r'^\[(?P<key>[^!].*?)\]:\s*(?P<url>.*)$', re.M)
RE_BADGE = re.compile(r'^\[\!\[(?P<text>.*?)\]\[(?P<badge>.*?)\]\]\[(?P<target>.*?)\]$', re.M)
RE_TITLE = re.compile(r'^(?P<level>#+)\s*(?P<title>.*)$', re.M)
BADGES_TO_KEEP = []
RST_TITLE_LEVELS = ['=', '-', '*']
RST_BADGE = '''\
.. image:: {badge}
:target: {target}
:alt: {text}
'''
def md2pypi(filename):
'''
Load .md (markdown) file and sanitize it for PyPI.
Remove unsupported github tags:
- code-block directive
- travis ci build badges
'''
content = io.open(filename).read()
for match in RE_MD_CODE_BLOCK.finditer(content):
rst_block = '\n'.join(
['.. code-block:: {language}'.format(**match.groupdict()), ''] +
[' {0}'.format(l) for l in match.group('lines').split('\n')] +
['']
)
content = content.replace(match.group(0), rst_block)
refs = dict(RE_LINK_REF.findall(content))
content = RE_LINK_REF.sub('.. _\g<key>: \g<url>', content)
content = RE_SELF_LINK.sub('`\g<1>`_', content)
content = RE_LINK_TO_URL.sub('`\g<text> <\g<url>>`_', content)
for match in RE_BADGE.finditer(content):
if match.group('badge') not in BADGES_TO_KEEP:
content = content.replace(match.group(0), '')
else:
params = match.groupdict()
params['badge'] = refs[match.group('badge')]
params['target'] = refs[match.group('target')]
content = content.replace(match.group(0),
RST_BADGE.format(**params))
# Must occur after badges
for match in RE_LINK_TO_REF.finditer(content):
content = content.replace(match.group(0), '`{text} <{url}>`_'.format(
text=match.group('text'),
url=refs[match.group('ref')]
))
for match in RE_TITLE.finditer(content):
underchar = RST_TITLE_LEVELS[len(match.group('level')) - 1]
title = match.group('title')
underline = underchar * len(title)
full_title = '\n'.join((title, underline))
content = content.replace(match.group(0), full_title)
return content
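# For instance (illustrative input), md2pypi rewrites markdown links such as
#     [udata](https://udata.readthedocs.io)
# into reStructuredText like
#     `udata <https://udata.readthedocs.io>`_
# and turns ```python fenced blocks into ".. code-block:: python" directives.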
long_description = '\n'.join((
md2pypi('README.md'),
md2pypi('CHANGELOG.md'),
''
))
setup(
name='my-theme',
version='0.1.0',
description='My awesome theme',
long_description=long_description,
url='https://theme.opendata.team',
author='OpenDataTeam',
author_email='me@somewhere.com',
packages=['my_theme'],
include_package_data=True,
install_requires=[],
entry_points={
'udata.themes': [
'awesome-theme = my_theme'
]
},
license='AGPL',
zip_safe=False,
keywords='udata, theme, My Theme',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Environment :: Web Environment',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Topic :: System :: Software Distribution',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
opendatateam/docker-udata
|
samples/theme/my-theme/setup.py
|
Python
|
mit
| 3,719 | 0.002151 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import unittest
from openerp.tools.translate import quote, unquote, xml_translate
class TranslationToolsTestCase(unittest.TestCase):
def test_quote_unquote(self):
def test_string(str):
quoted = quote(str)
#print "\n1:", repr(str)
#print "2:", repr(quoted)
unquoted = unquote("".join(quoted.split('"\n"')))
#print "3:", repr(unquoted)
self.assertEquals(str, unquoted)
test_string("""test \nall kinds\n \n o\r
\\\\ nope\n\n"
""")
# The ones with 1+ backslashes directly followed by
# a newline or literal N can fail... we would need a
# state-machine parser to handle these, but this would
# be much slower so it's better to avoid them at the moment
self.assertRaises(AssertionError, quote, """test \nall kinds\n\no\r
\\\\nope\n\n"
""")
def test_translate_xml_base(self):
""" Test xml_translate() without formatting elements. """
terms = []
source = """<form string="Form stuff">
<h1>Blah blah blah</h1>
Put some more text here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah blah blah', 'Put some more text here'])
def test_translate_xml_inline1(self):
""" Test xml_translate() with formatting elements. """
terms = []
source = """<form string="Form stuff">
<h1>Blah <i>blah</i> blah</h1>
Put some <b>more text</b> here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah <i>blah</i> blah', 'Put some <b>more text</b> here'])
def test_translate_xml_inline2(self):
""" Test xml_translate() with formatting elements embedding other elements. """
terms = []
source = """<form string="Form stuff">
<b><h1>Blah <i>blah</i> blah</h1></b>
Put <em>some <b>more text</b></em> here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah <i>blah</i> blah', 'Put <em>some <b>more text</b></em> here'])
def test_translate_xml_inline3(self):
""" Test xml_translate() with formatting elements without actual text. """
terms = []
source = """<form string="Form stuff">
<div>
<span class="before"/>
<h1>Blah blah blah</h1>
<span class="after">
<i class="hack"/>
</span>
</div>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah blah blah'])
def test_translate_xml_t(self):
""" Test xml_translate() with t-* attributes. """
terms = []
source = """<t t-name="stuff">
stuff before
<span t-field="o.name"/>
stuff after
</t>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['stuff before', 'stuff after'])
def test_translate_xml_off(self):
""" Test xml_translate() with attribute translate="off". """
terms = []
source = """<div>
stuff before
<div translation="off">Do not translate this</div>
stuff after
</div>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['stuff before', 'stuff after'])
def test_translate_xml_attribute(self):
""" Test xml_translate() with <attribute> elements. """
terms = []
source = """<field name="foo" position="attributes">
<attribute name="string">Translate this</attribute>
<attribute name="option">Do not translate this</attribute>
</field>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Translate this'])
def test_translate_xml_a(self):
""" Test xml_translate() with <a> elements. """
terms = []
source = """<t t-name="stuff">
<ul class="nav navbar-nav">
<li>
<a class="oe_menu_leaf" href="/web#menu_id=42&action=54">
<span class="oe_menu_text">Blah</span>
</a>
</li>
<li class="dropdown" id="menu_more_container" style="display: none;">
<a class="dropdown-toggle" data-toggle="dropdown" href="#">More <b class="caret"/></a>
<ul class="dropdown-menu" id="menu_more"/>
</li>
</ul>
</t>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['<span class="oe_menu_text">Blah</span>', 'More <b class="caret"/>'])
|
minhphung171093/GreenERP
|
openerp/addons/base/tests/test_translate.py
|
Python
|
gpl-3.0
| 6,065 | 0.003462 |
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# from python and deps
from six.moves import StringIO
import json
import os
import shlex
# from Ansible
from ansible import __version__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.parsing.utils.jsonify import jsonify
from ansible.utils.unicode import to_bytes
REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = "# POWERSHELL_COMMON"
REPLACER_WINARGS = "<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>"
REPLACER_JSONARGS = "<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = '# -*- coding: utf-8 -*-'
# we've moved the module_common relative to the snippets, so fix the path
_SNIPPET_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# ******************************************************************************
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % path)
fd = open(path)
data = fd.read()
fd.close()
return data
def _find_snippet_imports(module_data, module_path, strip_comments):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_style = 'old'
if REPLACER in module_data:
module_style = 'new'
elif REPLACER_WINDOWS in module_data:
module_style = 'new'
elif REPLACER_JSONARGS in module_data:
module_style = 'new'
elif 'from ansible.module_utils.' in module_data:
module_style = 'new'
elif 'WANT_JSON' in module_data:
module_style = 'non_native_want_json'
output = StringIO()
lines = module_data.split('\n')
snippet_names = []
for line in lines:
if REPLACER in line:
output.write(_slurp(os.path.join(_SNIPPET_PATH, "basic.py")))
snippet_names.append('basic')
if REPLACER_WINDOWS in line:
ps_data = _slurp(os.path.join(_SNIPPET_PATH, "powershell.ps1"))
output.write(ps_data)
snippet_names.append('powershell')
elif line.startswith('from ansible.module_utils.'):
tokens=line.split(".")
import_error = False
if len(tokens) != 3:
import_error = True
if " import *" not in line:
import_error = True
if import_error:
raise AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path)
snippet_name = tokens[2].split()[0]
snippet_names.append(snippet_name)
output.write(_slurp(os.path.join(_SNIPPET_PATH, snippet_name + ".py")))
else:
            if strip_comments and (line.startswith("#") or line == ''):
                # skip comment and blank lines only when stripping is requested
                continue
output.write(line)
output.write("\n")
if not module_path.endswith(".ps1"):
# Unixy modules
if len(snippet_names) > 0 and not 'basic' in snippet_names:
raise AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path)
else:
# Windows modules
if len(snippet_names) > 0 and not 'powershell' in snippet_names:
raise AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)
return (output.getvalue(), module_style)
# ******************************************************************************
def modify_module(module_path, module_args, task_vars=dict(), strip_comments=False):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
a non-bootstrapping scenario by not moving extra files over the wire and
also takes care of embedding arguments in the transferred modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
All modules are required to import at least basic, though there will also
be other snippets.
For powershell, there's equivalent conventions like this:
# POWERSHELL_COMMON
which results in the inclusion of the common code from powershell.ps1
"""
### TODO: Optimization ideas if this code is actually a source of slowness:
# * Fix comment stripping: Currently doesn't preserve shebangs and encoding info (but we unconditionally add encoding info)
# * Use pyminifier if installed
# * comment stripping/pyminifier needs to have config setting to turn it
# off for debugging purposes (goes along with keep remote but should be
# separate otherwise users wouldn't be able to get info on what the
# minifier output)
# * Only split into lines and recombine into strings once
# * Cache the modified module? If only the args are different and we do
    #   that as the last step we could cache all the work up to that point.
with open(module_path) as f:
# read in the module source
module_data = f.read()
(module_data, module_style) = _find_snippet_imports(module_data, module_path, strip_comments)
module_args_json = json.dumps(module_args).encode('utf-8')
python_repred_args = repr(module_args_json)
# these strings should be part of the 'basic' snippet which is required to be included
module_data = module_data.replace(REPLACER_VERSION, repr(__version__))
module_data = module_data.replace(REPLACER_COMPLEX, python_repred_args)
module_data = module_data.replace(REPLACER_WINARGS, module_args_json)
module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)
if module_style == 'new':
facility = C.DEFAULT_SYSLOG_FACILITY
if 'ansible_syslog_facility' in task_vars:
facility = task_vars['ansible_syslog_facility']
module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
lines = module_data.split(b"\n", 1)
shebang = None
if lines[0].startswith(b"#!"):
shebang = lines[0].strip()
args = shlex.split(str(shebang[2:]))
interpreter = args[0]
interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
if interpreter_config in task_vars:
interpreter = to_bytes(task_vars[interpreter_config], errors='strict')
lines[0] = shebang = b"#!{0} {1}".format(interpreter, b" ".join(args[1:]))
if os.path.basename(interpreter).startswith('python'):
lines.insert(1, ENCODING_STRING)
else:
# No shebang, assume a binary module?
pass
module_data = b"\n".join(lines)
return (module_data, module_style, shebang)
|
attakei/ansible
|
lib/ansible/executor/module_common.py
|
Python
|
gpl-3.0
| 8,252 | 0.003393 |
#!/usr/bin/python
# LICENSE: GPL2
# (c) 2013 Tom Schouten <tom@getbeep.com>
# (c) 2014, Kamil Wartanowicz <k.wartanowicz@gmail.com>
import logging
import os
import threading
import time
import usb
import plac
import sim_shell
import sim_ctrl_2g
import sim_ctrl_3g
import sim_reader
import sim_card
from util import types_g
from util import types
from util import hextools
ROUTER_MODE_DISABLED = 0
ROUTER_MODE_INTERACTIVE = 1
ROUTER_MODE_TELNET = 2
ROUTER_MODE_DBUS = 3
_version_ = 1.1
# SIMtrace slave commands and events, see iso7816_slave.h
CMD_SET_ATR = 0
CMD_SET_SKIP = 1
CMD_HALT = 2
CMD_POLL = 3
CMD_R_APDU = 4
# TODO: check why this event is received, at best remove it
EVT_UNKNOWN = 0
EVT_RESET = 2
EVT_C_APDU = 4
SIMTRACE_OFFLINE = 0
SIMTRACE_ONLINE = 1
MAIN_INTERFACE = 0
CTRL_INTERFACE = 1
INJECT_READY = 0
INJECT_NO_FORWARD = 1
INJECT_WITH_FORWARD = 2
INJECT_RESET = 3
TRY_ANOTHER_CARD_ON_AUTH_FAILURE = True
LOG_NONE_APDU_IN_FILE = True
class SimRouter(object):
def __init__(self,
cards,
atr=None,
type=types.TYPE_USIM,
mode=SIMTRACE_ONLINE):
self.loggingApdu = self.setupLogger()
if LOG_NONE_APDU_IN_FILE:
self.logging = self.loggingApdu
else:
self.logging = logging
self.atr = atr
self.simType = type
self.mode = mode
self.cardsDict = self.addControlCard(cards)
self.lastUpdate = 0
self.apduInjectedCard = None
self.apduInjectedData = None
self.interpreter = None
self.routerMode = ROUTER_MODE_DISABLED
if self.mode != SIMTRACE_OFFLINE:
self.dev = self.usb_find(0x03eb, 0x6119)
if self.dev is None:
self.logging.warning("Simtrace not connected!")
self.mode = SIMTRACE_OFFLINE
self.simCtrl = None
self.loop = None
self.shell = None
self.lock = threading.Lock()
self.rapduInject = None
self.inject = INJECT_READY
def addControlCard(self, cards):
cardDicts = []
for cardMain in cards:
cardCtrl = sim_card.SimCard(mode=cardMain.mode, type=self.simType)
if cardMain.mode == sim_reader.MODE_SIM_SOFT:
#TODO: try to remove
cardCtrl.simReader = cardMain.simReader
#TODO: reimplement to not copy all parameter
cardCtrl.index = cardMain.index
cardCtrl.atr = cardMain.atr
#cardCtrl.swNoError = cardMain.swNoError
cardCtrl.type = cardMain.type
cardCtrl.logicalChannelClosed = cardMain.logicalChannelClosed
# Do not apply ins and file forwarding rules on control interface.
cardCtrl.removeRoutingAttr()
cardDict = {MAIN_INTERFACE : cardMain, CTRL_INTERFACE : cardCtrl}
cardDicts.append(cardDict)
return cardDicts
def usbCtrlOut(self, req, buf):
if self.mode == SIMTRACE_OFFLINE:
return []
return self.dev.ctrl_transfer(0x40,
bRequest=req, # R-APDU
data_or_wLength=buf,
timeout=500)
def usbCtrlIn(self, req):
return self.dev.ctrl_transfer(0xC0,
bRequest=req,
data_or_wLength=512,
timeout=512)
def receiveData(self, cmd):
if self.mode == SIMTRACE_OFFLINE:
return []
try:
return self.usbCtrlIn(cmd)
except:
time.sleep(0.2)
return self.usbCtrlIn(cmd)
def sendData(self, msg):
return self.usbCtrlOut(CMD_R_APDU, msg)
def resetCards(self, soft=True):
if soft:
resetThread = ResetThread(self)
resetThread.setDaemon(True)
# Start handling C-APDUs.
resetThread.start()
else:
for cardDict in self.cardsDict:
cardDict[MAIN_INTERFACE].reset()
def receiveCommandApdu(self):
msg = []
# FIXME: This is the main event loop. Move it to top level.
msg = list(self.receiveData(CMD_POLL))
if not len(msg):
return None, None
data = None
evt = msg[0]
if evt == EVT_C_APDU:
data = msg[4:]
elif evt == EVT_RESET:
pass
elif evt == EVT_UNKNOWN:
return None, None
else:
self.loggingApdu.info("unknown event: %s\n" % hextools.bytes2hex(msg))
return (evt, data)
def sendResponseApdu(self, msg):
self.sendData(msg)
def command(self, tag, payload=[]): # dummy byte
self.loggingApdu.debug("CMD %d %s" % (tag, hextools.bytes2hex(payload)))
self.usbCtrlOut(tag, payload)
def aidCommon(self, card):
if not card.routingAttr:
return False
return set(sim_card.FILES_AID).issubset(set(card.routingAttr.filesCommon))
def getSoftCardDict(self):
for cardDict in self.cardsDict:
if cardDict[MAIN_INTERFACE].mode == sim_reader.MODE_SIM_SOFT:
return cardDict
return None
def getFileHandler(self, file):
#by default execute apdu in card 0
cards = [self.cardsDict[0][MAIN_INTERFACE]]
for cardDict in self.cardsDict:
if cardDict == self.cardsDict[0]:
#cardDict already in cards
continue
card = cardDict[MAIN_INTERFACE]
if file in card.routingAttr.filesCommon:
cards.append(card)
elif file in card.routingAttr.filesReplaced:
return [card]
return cards
def getInsHandler(self, ins, apdu):
#by default execute apdu in card 0
cards = [self.cardsDict[0][MAIN_INTERFACE]]
for cardDict in self.cardsDict:
if cardDict == self.cardsDict[0]:
#cardDict already in cards
continue
card = cardDict[MAIN_INTERFACE]
if (ins == 'GET_RESPONSE' and
card.routingAttr.getFileSelected(apdu[0]) == 'AUTH' and
'INTERNAL_AUTHENTICATE' in card.routingAttr.insReplaced):
return [card]
elif ins in card.routingAttr.insCommon:
if (ins in ['GET_RESPONSE','SELECT_FILE'] and
card.routingAttr.getFileSelected(apdu[0]) in card.routingAttr.filesReplaced):
cards.insert(0, card)
else:
cards.append(card)
elif ins in card.routingAttr.insReplaced:
if ins == 'INTERNAL_AUTHENTICATE':
card.routingAttr.setFileSelected('AUTH', apdu[0])
return [card]
return cards
def addLeftHandlers(self, cards):
for cardDict in self.cardsDict:
card = cardDict[MAIN_INTERFACE]
if card in cards:
continue
cards.append(card)
return cards
def getHandlers(self, apdu, inject=None):
cardsData = []
if inject == INJECT_NO_FORWARD:
if self.apduInjectedCard:
cardsData.append([self.apduInjectedCard, 0])
else:
cardsData.append([self.getCtrlCard(0), 0])
return cardsData
ins = types.insName(apdu)
if ins == 'SELECT_FILE':
for cardDict in self.cardsDict:
card = cardDict[MAIN_INTERFACE]
#TODO: handle read/write/update command with SFI in P1
card.routingAttr.setFileSelected(self.fileName(apdu), apdu[0])
if ins in sim_card.FILE_INS:
cards = self.getFileHandler(self.cardsDict[0][MAIN_INTERFACE].routingAttr.getFileSelected(apdu[0]))
else:
cards = self.getInsHandler(ins, apdu)
i = 0;
forwardApdu = True
for card in cards:
if i != 0:
forwardApdu = False
cardsData.append([card, forwardApdu])
i += 1
return cardsData
def handleApdu(self, cardData, apdu):
card = cardData[0]
sendData = cardData[1]
if card == None:
raise Exception("card not initialized")
ins = types.insName(apdu)
if card != self.getMainCard(0):
            origApdu = list(apdu)  # copy, so in-place edits below (e.g. apdu[4]) are detected
if ( self.aidCommon(card) and
card.routingAttr.aidToSelect and
self.getMainCard(0).routingAttr.aidToSelect == hextools.bytes2hex(apdu) and #origin apdu is AID
int(card.routingAttr.aidToSelect[0:2], 16) == apdu[0]): #check the same class
apdu = hextools.hex2bytes(card.routingAttr.aidToSelect)
card.routingAttr.aidToSelect = None
elif ( self.aidCommon(card) and
card.routingAttr.getFileSelected(apdu[0]) == 'EF_DIR' and
ins == 'READ_RECORD' and
card.routingAttr.recordEfDirLength):
apdu[4] = card.routingAttr.recordEfDirLength
if origApdu != apdu:
self.loggingApdu.info("")
self.loggingApdu.info("*C-APDU%d: %s" %(self.getSimId(card), hextools.bytes2hex(apdu)))
if self.simType == types.TYPE_SIM and (apdu[0] & 0xF0) != 0xA0:
#force 2G on USIM cards
sw = types_g.sw.CLASS_NOT_SUPPORTED
sw1 = sw>>8
sw2 = sw & 0x00FF
responseApdu = [sw1, sw2]
elif ins == 'GET_RESPONSE' and card.routingAttr.getResponse:
responseApdu = card.routingAttr.getResponse
card.routingAttr.getResponse = None
else:
responseApdu = card.apdu(apdu)
if card != self.getMainCard(0):
if (self.aidCommon(card) and
card.routingAttr.getFileSelected(apdu[0]) == 'EF_DIR' and
ins == 'GET_RESPONSE' and
types.swNoError(responseApdu) and
len(responseApdu) > 7):
card.routingAttr.recordEfDirLength = responseApdu[7]
if (TRY_ANOTHER_CARD_ON_AUTH_FAILURE and
self.getNbrOfCards() > 1 and
card.routingAttr.getFileSelected(apdu[0]) == 'AUTH' and
types.sw(responseApdu) == types_g.sw.AUTHENTICATION_ERROR_APPLICATION_SPECIFIC):
sw1Name, swName = types.swName(types.sw(responseApdu) >> 8, types.sw(responseApdu) & 0x00FF)
self.logging.warning("Response not expected. SW1: %s, SW: %s" %(sw1Name, swName))
self.logging.warning("Change card to process AUTHENTICATION")
if card == self.getMainCard(0):
cardTmp = self.getMainCard(1)
else:
cardTmp = self.getMainCard(0)
responseApdu = cardTmp.apdu(apdu)
cardTmp.routingAttr.setFileSelected('AUTH', apdu[0])
card.routingAttr.setFileSelected(None, apdu[0])
# TODO: check whether 'INTERNAL_AUTHENTICATE' is already in insReplaced before appending
cardTmp.routingAttr.insReplaced.append('INTERNAL_AUTHENTICATE')
if types.sw1(responseApdu) in [types_g.sw1.RESPONSE_DATA_AVAILABLE_2G, types_g.sw1.RESPONSE_DATA_AVAILABLE_3G]:
# cache 'GET_RESPONSE'
getResponseLength = types.sw2(responseApdu)
cla = apdu[0]
apduTmp = "%02XC00000%02X" %(cla, getResponseLength)
self.loggingApdu.info("**C-APDU%d: %s" %(self.getSimId(cardTmp), apduTmp))
cardTmp.routingAttr.getResponse = cardTmp.apdu(apduTmp)
if card.routingAttr.getFileSelected(apdu[0]) == 'EF_IMSI' and types.swNoError(responseApdu):
#cache imsi
responseData = types.responseData(responseApdu)
if ins == 'READ_BINARY' and types.p1(apdu) == 0 and types.p2(apdu) == 0:
#When P1=8X then SFI is used to select the file.
#Remove the check when SFI checking is implemented
imsi = hextools.decode_BCD(responseData)[3:]
#TODO: remove length check when name for the file comes from
#the whole path and not fid. 6f07 is also in ADF_ISIM
if len(imsi) > 10:
card.imsi = imsi
#update associated interface
if self.isCardCtrl(card):
self.getRelatedMainCard(card).imsi = imsi
else:
self.getRelatedCtrlCard(card).imsi = imsi
elif ins == 'UPDATE_BINARY':
card.imsi = None
responseApduHex = hextools.bytes2hex(responseApdu)
#example of APDU modification
if responseApduHex == "02542D4D6F62696C652E706CFFFFFFFFFF9000":
#change SPN name 'T-mobile.pl' for 'Tmobile-SPN'
responseApdu = hextools.hex2bytes("02546D6F62696C652D53504EFFFFFFFFFF9000")
if sendData:
if ((types.sw(responseApdu) == types_g.sw.NO_ERROR or
types.sw1(responseApdu) == types_g.sw1.NO_ERROR_PROACTIVE_DATA) and
self.getNbrOfCards() > 1):
# Check for pending SAT command
for cardDict in self.cardsDict:
cardTmp = cardDict[MAIN_INTERFACE]
if card == cardTmp:
continue
if set(sim_card.SAT_INS) <= set(cardTmp.routingAttr.insReplaced):
swNoError = cardTmp.swNoError
if types.unpackSw(swNoError)[0] == types_g.sw1.NO_ERROR_PROACTIVE_DATA:
#update r-apdu with proactive data information
responseApdu[-2] = swNoError >> 8
responseApdu[-1] = swNoError & 0x00FF
break
self.sendResponseApdu(responseApdu)
if card == self.getMainCard(0) or sendData:
self.pretty_apdu(apdu)
responseApduHex = hextools.bytes2hex(responseApdu)
self.loggingApdu.info("R-APDU%d: %s" %(self.getSimId(card), responseApduHex))
# gsmtap.log(apdu,responseApdu) # Uncomment for wireshark
return responseApdu
def updateHandler(self, cardData, apdu, rapdu):
if ( self.aidCommon(cardData[0]) and not
cardData[0].routingAttr.aidToSelect and
cardData[0].routingAttr.getFileSelected(apdu[0]) == 'EF_DIR' and
types.insName(apdu) == 'READ_RECORD' and
len(rapdu) > 3 and rapdu[3] != 0xFF and
types.swNoError(rapdu)):
# keep the same class - apdu[0], change length and value of selected AID
cardData[0].routingAttr.aidToSelect = "%02XA40404%s" %(apdu[0], hextools.bytes2hex(rapdu[3 : (rapdu[3] + 4)]))
if types.sw1(rapdu) in [types_g.sw1.RESPONSE_DATA_AVAILABLE_2G, types_g.sw1.RESPONSE_DATA_AVAILABLE_3G]:
# cache 'GET_RESPONSE'
getResponseLength = types.sw2(rapdu)
cla = apdu[0]
apdu = "%02XC00000%02X" %(cla, getResponseLength)
cardData[0].routingAttr.getResponse = cardData[0].apdu(apdu)
def tick(self):
with self.lock:
inject = INJECT_READY
evt, apdu = self.receiveCommandApdu()
if evt == EVT_RESET:
self.resetCards()
return
if not apdu:
if (not self.inject or
self.rapduInject): # Wait until rapduInject is consumed
return
else:
inject = self.inject
apdu = self.apduInjectedData
self.apduInjectedData = None
if not apdu:
raise Exception("APDU is empty")
self.lastUpdate = time.time()
cardsData = self.getHandlers(apdu, inject)
responseApdu = None
for cardData in cardsData:
if cardData == cardsData[0]:
apduHex = hextools.bytes2hex(apdu)
self.loggingApdu.info("")
self.loggingApdu.info("C-APDU%d: %s" %(self.getSimId(cardData[0]), apduHex))
responseApduTemp = self.handleApdu(cardData, apdu)
if cardData[1]:
if cardData[0] != self.getMainCard(0):
self.loggingApdu.info("*R-APDU%d" %self.getSimId(cardData[0]))
responseApdu = responseApduTemp
self.updateHandler(cardData, apdu, responseApduTemp)
if not responseApdu and not inject:
raise Exception("No response received")
if inject:
self.rapduInject = responseApduTemp
def mainloop(self):
while 1:
if self.mode == ROUTER_MODE_DBUS:
import gevent
gevent.sleep(0.001)
self.tick()
if time.time() - self.lastUpdate > 0.1:
time.sleep(0.1)
def getNbrOfCards(self):
return len(self.cardsDict)
def getSimId(self, card):
i = 0
for cardDict in self.cardsDict:
if card in [cardDict[MAIN_INTERFACE], cardDict[CTRL_INTERFACE]]:
return i
i += 1
raise Exception("Card not found")
def getCardDictFromId(self, simId):
if simId >= self.getNbrOfCards() or simId < 0:
raise Exception("simId: " + str(simId) + " not found")
return self.cardsDict[simId]
def isCardCtrl(self, card):
for cardDict in self.cardsDict:
if cardDict[CTRL_INTERFACE] == card:
return True
return False
def getMainCard(self, simId):
cardDict = self.getCardDictFromId(simId)
return cardDict[MAIN_INTERFACE]
def getCtrlCard(self, simId):
cardDict = self.getCardDictFromId(simId)
return cardDict[CTRL_INTERFACE]
def getRelatedMainCard(self, cardCtrl):
for cardDict in self.cardsDict:
if cardDict[CTRL_INTERFACE] == cardCtrl:
return cardDict[MAIN_INTERFACE]
return None
def getRelatedCtrlCard(self, cardMain):
for cardDict in self.cardsDict:
if cardDict[MAIN_INTERFACE] == cardMain:
return cardDict[CTRL_INTERFACE]
return None
def swapCards(self, simId1, simId2):
cardDict1 = self.getCardDictFromId(simId1)
cardDict2 = self.getCardDictFromId(simId2)
#with self.lock:
self.cardsDict[simId1] = cardDict2
self.cardsDict[simId2] = cardDict1
def copyFiles(self, cardMainFrom, cardMainTo, files):
simIdFrom = self.getSimId(self.getRelatedCtrlCard(cardMainFrom))
simIdTo = self.getSimId(self.getRelatedCtrlCard(cardMainTo))
self.shell.select_sim_card(simIdFrom)
fileDict = {}
for file in files:
status, data = self.shell.read(file)
self.shell.assertOk(status, data)
value = types.getDataValue(data)
fileDict.update({file : value})
self.shell.select_sim_card(simIdTo)
for fileName, value in fileDict.iteritems():
status, data = self.shell.write(fileName, value)
self.shell.assertOk(status, data)
def getATR(self):
if self.atr is not None:
return self.atr
else:
return self.getMainCard(0).getATR()
def waitInjectReady(self, timeout=15):
startTime = time.time()
while True:
with self.lock:
if self.inject == INJECT_READY and not self.rapduInject:
break
currentTime = time.time()
if currentTime - startTime > timeout: #sec
if self.rapduInject:
logging.error("RAPDU injected response not consumed")
self.logging.error("Timeout. Previous apdu injected has not finished within %ds" %timeout)
self.rapduInject = None
self.inject = INJECT_READY
break
time.sleep(0.001)
def waitRapduInject(self, timeout=30):
startTime = time.time()
while True:
with self.lock:
rapduInject = self.rapduInject
if rapduInject:
self.rapduInject = None
self.inject = INJECT_READY
return rapduInject
currentTime = time.time()
if currentTime - startTime > timeout:
self.inject = INJECT_READY
raise Exception("Timeout. No rapdu for injected data received within %ds" %timeout)
time.sleep(0.001)
def injectApdu(self, apdu, card, mode=INJECT_NO_FORWARD):
# TODO: add inject tag to logs
self.waitInjectReady()
with self.lock:
self.apduInjectedCard = card
self.apduInjectedData = hextools.hex2bytes(apdu)
self.inject = mode
return self.waitRapduInject()
def setPowerSkip(self, skip):
self.command(CMD_SET_SKIP, hextools.u32(skip))
def powerHalt(self):
self.command(CMD_HALT)
def run(self, mode=ROUTER_MODE_INTERACTIVE):
if self.loop and self.routerMode == ROUTER_MODE_DISABLED:
self.shell.updateInteractive(self.getInteractiveFromMode(mode))
self.startPlacServer(mode)
return
self.routerMode = mode
time.sleep(0.1) # Truncated logs
self.loggingApdu.info("============")
self.loggingApdu.info("== simLAB ==")
self.loggingApdu.info("== ver %s==" %_version_)
self.loggingApdu.info("============")
self.command(CMD_SET_ATR, self.getATR())
self.setPowerSkip(skip=1)
self.powerHalt()
self.loop = MainLoopThread(self)
self.loop.setDaemon(True)
# Start handling incoming phone C-APDUs.
self.loop.start()
# Default card control interface.
if self.simType == types.TYPE_SIM:
self.simCtrl = sim_ctrl_2g.SimCtrl(self)
else:
self.simCtrl = sim_ctrl_3g.SimCtrl(self)
self.simCtrl.init()
interactive = self.getInteractiveFromMode(mode)
# Plac telnet server works without interactive mode
self.shell = sim_shell.SimShell(self.simCtrl, interactive)
self.startPlacServer(mode)
def getInteractiveFromMode(self, mode):
if mode in [ROUTER_MODE_INTERACTIVE, ROUTER_MODE_DBUS]:
return True
return False
def startPlacServer(self, mode):
if mode == ROUTER_MODE_DISABLED:
return
self.interpreter = plac.Interpreter(self.shell)
if mode == ROUTER_MODE_TELNET:
self.interpreter.start_server() # Loop
elif mode == ROUTER_MODE_DBUS:
from util import dbus_ctrl
dbus_ctrl.startDbusProcess(self) # Loop
elif mode == ROUTER_MODE_INTERACTIVE:
path = self.simCtrl.getCurrentFile().path
self.interpreter.interact(prompt="\n%s>"%path)
else:
raise Exception("Unexpected mode")
def setShellPrompt(self, prompt):
if self.interpreter != None:
self.interpreter.prompt = prompt
def setupLogger(self):
logger = logging.getLogger("router")
#don't propagate to root logger
logger.propagate=False
logger.handlers = []
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
dir = os.path.dirname(__file__)
resultFile = dir + "/../apdu.log"
fileHandler = logging.FileHandler(resultFile, mode='w')
fileHandler.setLevel(logging.INFO)
# create formatter and add it to the handlers
consoleFormatter = logging.Formatter(fmt='%(message)s')
fileFormatter = logging.Formatter(fmt='%(asctime)s %(message)s', datefmt='%H:%M:%S')
consoleHandler.setFormatter(consoleFormatter)
fileHandler.setFormatter(fileFormatter)
# add the handlers to the logger
logger.addHandler(consoleHandler)
logger.addHandler(fileHandler)
if extHandler:
#add handler for test runner
logger.addHandler(extHandler)
return logger
def fileName(self, apdu):
if types.p1(apdu) != types.SELECT_BY_DF_NAME:
fid = types.fileId(apdu)
fid = "%04X" %fid
if fid == "7FFF":
return "ADF"
try:
fileName = self.simCtrl.simFiles.getNameFromFid(fid)
except:
#TODO: try to remove
fileName = fid
else:
# AID
fileName = hextools.bytes2hex(types.aid(apdu)) #'A000'
return fileName
def pretty_apdu(self, apdu):
str = types.insName(apdu)
if str == 'SELECT_FILE':
str += " " + self.fileName(apdu)
self.loggingApdu.info(str)
def usb_find(self, idVendor, idProduct):
LIBUSB_PATH = "/usr/lib/libusb-1.0.so"
try:
dev = usb.core.find(idVendor=idVendor, idProduct=idProduct)
except:
backend = usb.backend.libusb1.get_backend(find_library=lambda x: LIBUSB_PATH)
if not backend:
logging.error("libusb-1.0 not found")
return None
dev = usb.core.find(idVendor=idVendor, idProduct=idProduct, backend=backend)
return dev
extHandler = None
def setLoggerExtHandler(handler):
global extHandler
extHandler = handler
class MainLoopThread(threading.Thread):
def __init__(self, simRouter):
threading.Thread.__init__(self)
self.simRouter = simRouter
threading.Thread.setName(self, 'MainLoopThread')
self.__lock = threading.Lock()
def run(self):
self.__lock.acquire()
for cardDict in self.simRouter.cardsDict:
if cardDict[MAIN_INTERFACE].mode == sim_reader.MODE_SIM_SOFT:
# Set SimRouter class in SatCtrl.
card = cardDict[CTRL_INTERFACE].simReader.getHandler().getCard(cardDict[CTRL_INTERFACE].index)
card.satCtrl.setSimRouter(self.simRouter)
self.simRouter.mainloop()
self.__lock.release()
def stop(self):
self.join()
class ResetThread(threading.Thread):
def __init__(self, simRouter):
threading.Thread.__init__(self)
self.simRouter = simRouter
threading.Thread.setName(self, 'ResetThread')
self.__lock = threading.Lock()
def run(self):
self.__lock.acquire()
self.softReset()
self.__lock.release()
def softReset(self):
self.simRouter.logging.info("\n")
self.simRouter.logging.info("<- Soft reset")
for cardDict in self.simRouter.cardsDict:
if (not cardDict[MAIN_INTERFACE].routingAttr or
#skip SIM with no common instructions
cardDict[MAIN_INTERFACE].routingAttr.insCommon == [] or
not self.simRouter.simCtrl):
continue
#select MF
if self.simRouter.simType == types.TYPE_USIM:
apdu = "00A40004023F00"
else:
apdu = "A0A40000023F00"
rapdu = self.simRouter.injectApdu(apdu, cardDict[MAIN_INTERFACE], mode=INJECT_NO_FORWARD)
if not rapdu:
#Skip resetting if there is a USB apdu to handle
self.simRouter.logging.info("Soft reset not completed, USB apdu ongoing")
return
# Close opened logical channels so they are not exhausted when the UE
# assigns new channels after SIM reset.
ctrlLogicalChannel = self.simRouter.simCtrl.logicalChannel
for channel in range(1,4):
if channel != ctrlLogicalChannel: #skip control logical channel
originChannel = 0
if self.simRouter.simType == types.TYPE_SIM:
cla = 0xA0
else:
cla = 0x00
cla = cla | (originChannel & 0x0F)
apdu = "%02X7080%02X00" %(cla, channel)
rapdu = self.simRouter.injectApdu(apdu, cardDict[MAIN_INTERFACE], mode=INJECT_NO_FORWARD)
if not rapdu:
#Skip resetting if there is a USB apdu to handle
self.simRouter.logging.info("Soft reset not completed, USB apdu ongoing")
break
self.simRouter.logging.info("-> reset end")
def stop(self):
self.join()
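# A minimal sketch of driving the router programmatically, mirroring the
# ResetThread.softReset() pattern above. "router" is assumed to be an
# already-initialized SimRouter with at least one attached card; the APDU is the
# same SELECT MF command softReset() sends to a 3G card (sketch only):
#
#   rapdu = router.injectApdu("00A40004023F00", router.getMainCard(0),
#                             mode=INJECT_NO_FORWARD)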
|
kamwar/simLAB
|
sim/sim_router.py
|
Python
|
gpl-2.0
| 28,870 | 0.004434 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the core layers: Dense, Dropout.
Also contains their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
class Dense(base._Layer): # pylint: disable=protected-access
"""Densely-connected layer class.
This layer implements the operation `outputs = activation(inputs.w + b)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `w` is a weights matrix created by the layer,
and `b` is a bias vector created by the layer (only if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then it is
flattened prior to the initial matrix multiply by `w`.
Arguments:
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
weights_initializer: Initializer function for the weight matrix.
bias_initializer: Initializer function for the bias.
weights_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such cases.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Properties:
units: Python integer, dimensionality of the output space.
activation: Activation function (callable).
use_bias: Boolean, whether the layer uses a bias.
weights_initializer: Initializer instance (or name) for the weight matrix.
bias_initializer: Initializer instance (or name) for the bias.
weights_regularizer: Regularizer instance for the weight matrix (callable)
bias_regularizer: Regularizer instance for the bias (callable).
activity_regularizer: Regularizer instance for the output (callable)
weights: Weight matrix (TensorFlow variable or tensor).
bias: Bias vector, if applicable (TensorFlow variable or tensor).
"""
def __init__(self, units,
activation=None,
use_bias=True,
weights_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
weights_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
super(Dense, self).__init__(trainable=trainable, name=name, **kwargs)
self.units = units
self.activation = activation
self.use_bias = use_bias
self.weights_initializer = weights_initializer
self.bias_initializer = bias_initializer
self.weights_regularizer = weights_regularizer
self.bias_regularizer = bias_regularizer
self.activity_regularizer = activity_regularizer
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if input_shape.ndims is None:
raise ValueError('Inputs to `Dense` should have known rank.')
if len(input_shape) < 2:
raise ValueError('Inputs to `Dense` should have rank >= 2.')
if input_shape[-1].value is None:
raise ValueError('The last dimension of the inputs to `Dense` '
'should be defined. Found `None`.')
# Note that we set `trainable=True` because this is a trainable
# weight of the layer. If the layer is not trainable
# (self.trainable = False), the variable will not be added to
# tf.trainable_variables(), and self.trainable_weights will be empty.
self.w = vs.get_variable('weights',
shape=[input_shape[-1].value, self.units],
initializer=self.weights_initializer,
regularizer=self.weights_regularizer,
dtype=self.dtype,
trainable=True)
if self.use_bias:
self.bias = vs.get_variable('bias',
shape=[self.units,],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
dtype=self.dtype,
trainable=True)
else:
self.bias = None
def call(self, inputs):
shape = inputs.get_shape().as_list()
input_dim = shape[-1]
output_shape = shape[:-1] + [self.units]
if len(output_shape) > 2:
# Reshape the input to 2D.
output_shape_tensors = array_ops.unpack(array_ops.shape(inputs))
output_shape_tensors[-1] = self.units
output_shape_tensor = array_ops.stack(output_shape_tensors)
inputs = array_ops.reshape(inputs, [-1, input_dim])
outputs = standard_ops.matmul(inputs, self.w)
if self.use_bias:
outputs = nn.bias_add(outputs, self.bias)
if len(output_shape) > 2:
# Reshape the output back to the original ndim of the input.
outputs = array_ops.reshape(outputs, output_shape_tensor)
outputs.set_shape(output_shape)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
def dense(
inputs, units,
activation=None,
use_bias=True,
weights_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
weights_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
reuse=False):
"""Functional interface for the densely-connected layer.
This layer implements the operation `outputs = activation(inputs.w + b)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `w` is a weights matrix created by the layer,
and `b` is a bias vector created by the layer (only if `use_bias` is `True`).
Note: if the `inputs` tensor has a rank greater than 2, then it is
flattened prior to the initial matrix multiply by `w`.
Arguments:
inputs: Tensor input.
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
weights_initializer: Initializer function for the weight matrix.
bias_initializer: Initializer function for the bias.
weights_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
name: String, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
"""
layer = Dense(units,
activation=activation,
use_bias=use_bias,
weights_initializer=weights_initializer,
bias_initializer=bias_initializer,
weights_regularizer=weights_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
trainable=trainable,
name=name,
dtype=inputs.dtype.base_dtype,
_scope=name,
_reuse=reuse)
return layer.apply(inputs)
class Dropout(base._Layer): # pylint: disable=protected-access
"""Applies Dropout to the input.
Dropout consists in randomly setting a fraction `rate` of input units to 0
at each update during training time, which helps prevent overfitting.
The units that are kept are scaled by `1 / (1 - rate)`, so that their
sum is unchanged at training time and inference time.
Arguments:
rate: The dropout rate, between 0 and 1. E.g. "rate=0.1" would drop out
10% of input units.
noise_shape: 1D tensor of type `int32` representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)`, and you want the dropout mask
to be the same for all timesteps, you can use
`noise_shape=[batch_size, 1, features]`.
seed: A Python integer. Used to create random seeds. See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: The name of the layer (string).
"""
def __init__(self, rate=0.5,
noise_shape=None,
seed=None,
name=None,
**kwargs):
super(Dropout, self).__init__(name=name, **kwargs)
self.rate = rate
self.noise_shape = noise_shape
self.seed = seed
def call(self, inputs, training=False):
def dropped_inputs():
return nn.dropout(inputs, 1 - self.rate,
noise_shape=self.noise_shape,
seed=self.seed)
return utils.smart_cond(training,
dropped_inputs,
lambda: array_ops.identity(inputs))
def dropout(inputs,
rate=0.5,
noise_shape=None,
seed=None,
training=False,
name=None):
"""Applies Dropout to the input.
Dropout consists in randomly setting a fraction `rate` of input units to 0
at each update during training time, which helps prevent overfitting.
The units that are kept are scaled by `1 / (1 - rate)`, so that their
sum is unchanged at training time and inference time.
Arguments:
inputs: Tensor input.
rate: The dropout rate, between 0 and 1. E.g. "rate=0.1" would drop out
10% of input units.
noise_shape: 1D tensor of type `int32` representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)`, and you want the dropout mask
to be the same for all timesteps, you can use
`noise_shape=[batch_size, 1, features]`.
seed: A Python integer. Used to create random seeds. See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
training: Either a Python boolean, or a TensorFlow boolean scalar tensor
(e.g. a placeholder). Whether to return the output in training mode
(apply dropout) or in inference mode (return the input untouched).
name: The name of the layer (string).
Returns:
Output tensor.
"""
layer = Dropout(rate, noise_shape=noise_shape, seed=seed, name=name)
return layer.apply(inputs, training=training)
# Aliases
FullyConnected = Dense
fully_connected = dense
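# A minimal usage sketch for the functional interfaces defined above (layer
# sizes, tensor shapes and the public "tf" namespace are illustrative
# assumptions, not part of this module):
#
#   import tensorflow as tf
#   x = tf.placeholder(tf.float32, shape=(None, 784))
#   is_training = tf.placeholder(tf.bool)
#   hidden = dense(x, units=128, activation=tf.nn.relu, name='hidden')
#   hidden = dropout(hidden, rate=0.5, training=is_training)
#   logits = dense(hidden, units=10, name='logits')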
|
AndreasMadsen/tensorflow
|
tensorflow/python/layers/core.py
|
Python
|
apache-2.0
| 12,263 | 0.002609 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2022 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Idea: Run or debug an experiment script using exactly the same code,
i.e., for both testing and online data acquisition. To debug timing,
you can emulate sync pulses and user responses.
Limitations: pyglet only; keyboard events only.
"""
import threading
from psychopy import visual, event, core, logging
from psychopy.sound import Sound # for SyncGenerator tone
__author__ = 'Jeremy Gray'
class ResponseEmulator(threading.Thread):
def __init__(self, simResponses=None):
"""Class to allow simulation of a user's keyboard responses
during a scan.
Given a list of response tuples (time, key), the thread will
simulate a user pressing a key at a specific time (relative to
the start of the run).
Author: Jeremy Gray; Idea: Mike MacAskill
"""
if not simResponses:
self.responses = []
else:
self.responses = sorted(simResponses) # sort by onset times
self.clock = core.Clock()
self.stopflag = False
threading.Thread.__init__(self, None, 'ResponseEmulator', None)
self.running = False
def run(self):
self.running = True
self.clock.reset()
last_onset = 0.000
# wait until next event requested, and simulate a key press
for onset, key in self.responses:
core.wait(float(onset) - last_onset)
if type(key) == int:
# avoid cryptic error if int
key = "{}".format(key)[0]
if type(key) == type(""):
event._onPygletKey(symbol=key, modifiers=0, emulated=True)
else:
logging.error('ResponseEmulator: only keyboard events '
'are supported')
last_onset = onset
if self.stopflag:
break
self.running = False
return self
def stop(self):
self.stopflag = True
class SyncGenerator(threading.Thread):
def __init__(self, TR=1.0, TA=1.0, volumes=10, sync='5', skip=0,
sound=False, **kwargs):
"""Class for a character-emitting metronome thread
(emulate MR sync pulse).
Aim: Allow testing of temporal robustness of fMRI scripts by emulating
a hardware sync pulse. Adds an arbitrary 'sync' character to the key
buffer, with sub-millisecond precision (less precise if CPU is maxed).
Recommend: TR=1.000 or higher and less than 100% CPU. Shorter TR
--> higher CPU load.
Parameters:
TR: seconds between volume acquisitions
TA: seconds to acquire one volume
volumes: number of 3D volumes to obtain in a given scanning run
sync: character used as flag for sync timing, default='5'
skip: how many frames to silently omit initially during T1
stabilization, no sync pulse. Not needed to test script
timing, but will give more accurate feel to start of run.
aka "discdacqs".
sound: simulate scanner noise
"""
if TR < 0.1:
msg = 'SyncGenerator: whole-brain TR < 0.1 not supported'
raise ValueError(msg)
self.TR = TR
self.TA = TA
self.hogCPU = 0.035
self.timesleep = self.TR
self.volumes = int(volumes)
self.sync = sync
self.skip = skip
self.playSound = sound
if self.playSound: # pragma: no cover
self.sound1 = Sound(800, secs=self.TA, volume=0.15, autoLog=False)
self.sound2 = Sound(813, secs=self.TA, volume=0.15, autoLog=False)
self.clock = core.Clock()
self.stopflag = False
threading.Thread.__init__(self, None, 'SyncGenerator', None)
self.running = False
def run(self):
self.running = True
if self.skip:
for i in range(int(self.skip)):
if self.playSound: # pragma: no cover
self.sound1.play()
self.sound2.play()
# emulate T1 stabilization without data collection
core.wait(self.TR, hogCPUperiod=0)
self.clock.reset()
for vol in range(1, self.volumes + 1):
if self.playSound: # pragma: no cover
self.sound1.play()
self.sound2.play()
if self.stopflag:
break
# "emit" a sync pulse by placing a key in the buffer:
event._onPygletKey(symbol=self.sync, modifiers=0,
emulated=True)
# wait for start of next volume, doing our own hogCPU for
# tighter sync:
core.wait(self.timesleep - self.hogCPU, hogCPUperiod=0)
while self.clock.getTime() < vol * self.TR:
pass # hogs the CPU for tighter sync
self.running = False
return self
def stop(self):
self.stopflag = True
def launchScan(win, settings, globalClock=None, simResponses=None,
mode=None, esc_key='escape',
instr='select Scan or Test, press enter',
wait_msg="waiting for scanner...",
wait_timeout=300, log=True):
"""Accepts up to four fMRI scan parameters (TR, volumes, sync-key, skip),
and launches an experiment in one of two modes: Scan, or Test.
:Usage:
See Coder Demo -> experiment control -> fMRI_launchScan.py.
In brief: 1) from psychopy.hardware.emulator import launchScan;
2) Define your args; and 3) add 'vol = launchScan(args)'
at the top of your experiment script.
launchScan() waits for the first sync pulse and then returns, allowing
your experiment script to proceed. The key feature is that, in test mode,
it first starts an autonomous thread that emulates sync pulses (i.e.,
emulated by your CPU rather than generated by an MRI machine). The
thread places a character in the key buffer, exactly like a keyboard
event does. launchScan will wait for the first such sync pulse (i.e.,
character in the key buffer). launchScan returns the number of sync pulses
detected so far (i.e., 1), so that a script can account for them
explicitly.
If a globalClock is given (highly recommended), it is reset to 0.0 when
the first sync pulse is detected. If a mode was not specified when calling
launchScan, the operator is prompted to select Scan or Test.
If **scan mode** is selected, the script will wait until the first scan
pulse is detected. Typically this would be coming from the scanner, but
note that it could also be a person manually pressing that key.
If **test mode** is selected, launchScan() starts a separate thread to
emit sync pulses / key presses. Note that this thread is effectively
nothing more than a key-pressing metronome, emitting a key at the start
of every TR, doing so with high temporal precision.
If your MR hardware interface does not deliver a key character as a sync
flag, you can still use launchScan() to test script timing. You have to
code your experiment to trigger on either a sync character (to test
timing) or your usual sync flag (for actual scanning).
:Parameters:
win: a :class:`~psychopy.visual.Window` object (required)
settings : a dict containing up to 5 parameters
(2 required: TR, volumes)
TR :
seconds per whole-brain volume (minimum value = 0.1s)
volumes :
number of whole-brain (3D) volumes to obtain in a given
scanning run.
sync :
(optional) key for sync timing, default = '5'.
skip :
(optional) how many volumes to silently omit initially
(during T1 stabilization, no sync pulse). default = 0.
sound :
(optional) whether to play a sound when simulating scanner
sync pulses
globalClock :
optional but highly recommended :class:`~psychopy.core.Clock` to
be used during the scan; if one is given, it is reset to 0.000
when the first sync pulse is received.
simResponses :
optional list of tuples [(time, key), (time, key), ...]. time
values are seconds after the first scan pulse is received.
esc_key :
key to be used for user-interrupt during launch.
default = 'escape'
mode :
if mode is 'Test' or 'Scan', launchScan() will start in that mode.
instr :
instructions to be displayed to the scan operator during mode
selection.
wait_msg :
message to be displayed to the subject while waiting for the
scan to start (i.e., after operator indicates start but before
the first scan pulse is received).
wait_timeout :
time in seconds that launchScan will wait before assuming
something went wrong and exiting. Defaults to 300sec (5 min).
Raises a RuntimeError if no sync pulse is received in the
allowable time.
"""
if not 'sync' in settings:
settings.update({'sync': '5'})
if not 'skip' in settings:
settings.update({'skip': 0})
try:
wait_timeout = max(0.01, float(wait_timeout))
except ValueError:
msg = "wait_timeout must be number-like, but instead it was {}."
raise ValueError(msg.format(wait_timeout))
settings['sync'] = "{}".format(settings['sync']) # convert to str/unicode
settings['TR'] = float(settings['TR'])
settings['volumes'] = int(settings['volumes'])
settings['skip'] = int(settings['skip'])
msg = "vol: %(volumes)d TR: %(TR).3fs skip: %(skip)d sync: '%(sync)s'"
runInfo = msg % settings
if log: # pragma: no cover
logging.exp('launchScan: ' + runInfo)
instructions = visual.TextStim(
win, text=instr, height=.05, pos=(0, 0), color=.4, autoLog=False)
parameters = visual.TextStim(
win, text=runInfo, height=.05, pos=(0, -0.5), color=.4, autoLog=False)
# if a valid mode was specified, use it; otherwise query via RatingScale:
mode = "{}".format(mode).capitalize()
if mode not in ['Scan', 'Test']:
run_type = visual.RatingScale(win, choices=['Scan', 'Test'],
marker='circle',
markerColor='DarkBlue', size=.8,
stretch=.3, pos=(0.8, -0.9),
markerStart='Test',
lineColor='DarkGray', autoLog=False)
while run_type.noResponse:
instructions.draw()
parameters.draw()
run_type.draw()
win.flip()
if event.getKeys([esc_key]):
break
mode = run_type.getRating()
doSimulation = bool(mode == 'Test')
win.mouseVisible = False
if doSimulation:
wait_msg += ' (simulation)'
msg = visual.TextStim(win, color='DarkGray', text=wait_msg, autoLog=False)
msg.draw()
win.flip()
event.clearEvents() # do before starting the threads
if doSimulation:
syncPulse = SyncGenerator(**settings)
syncPulse.start() # start emitting sync pulses
core.runningThreads.append(syncPulse)
if simResponses:
roboResponses = ResponseEmulator(simResponses)
# start emitting simulated user responses
roboResponses.start()
core.runningThreads.append(roboResponses)
# wait for first sync pulse:
timeoutClock = core.Clock() # zeroed now
allKeys = []
while not settings['sync'] in allKeys:
allKeys = event.getKeys()
if esc_key and esc_key in allKeys: # pragma: no cover
core.quit()
if timeoutClock.getTime() > wait_timeout:
msg = 'Waiting for scanner has timed out in %.3f seconds.'
raise RuntimeError(msg % wait_timeout)
if globalClock:
globalClock.reset()
if log: # pragma: no cover
logging.exp('launchScan: start of scan')
# blank the screen on first sync pulse received:
win.flip()
# one sync pulse has been caught so far:
elapsed = 1
return elapsed
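# A minimal usage sketch (window settings, TR and volume count are illustrative
# assumptions; see the fMRI_launchScan Coder demo referenced in the docstring
# for a full example):
#
#   from psychopy import core, visual
#   from psychopy.hardware.emulator import launchScan
#   win = visual.Window(fullscr=False)
#   MR_settings = {'TR': 2.0, 'volumes': 5, 'sync': '5', 'skip': 0}
#   globalClock = core.Clock()
#   vol = launchScan(win, MR_settings, globalClock=globalClock, mode='Test')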
|
psychopy/psychopy
|
psychopy/hardware/emulator.py
|
Python
|
gpl-3.0
| 12,656 | 0.000237 |
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import tarfile
import platform
import subprocess
import re
import os
import headphones
from headphones import logger, version, request
def runGit(args):
if headphones.CONFIG.GIT_PATH:
git_locations = ['"' + headphones.CONFIG.GIT_PATH + '"']
else:
git_locations = ['git']
if platform.system().lower() == 'darwin':
git_locations.append('/usr/local/git/bin/git')
output = err = None
for cur_git in git_locations:
cmd = cur_git + ' ' + args
try:
logger.debug('Trying to execute: "' + cmd + '" with shell in ' + headphones.PROG_DIR)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True,
cwd=headphones.PROG_DIR)
output, err = p.communicate()
output = output.strip()
logger.debug('Git output: ' + output)
except OSError:
logger.debug('Command failed: %s', cmd)
continue
if 'not found' in output or "not recognized as an internal or external command" in output:
logger.debug('Unable to find git with command ' + cmd)
output = None
elif 'fatal:' in output or err:
logger.error('Git returned bad info. Are you sure this is a git installation?')
output = None
elif output:
break
return (output, err)
def getVersion():
if version.HEADPHONES_VERSION.startswith('win32build'):
headphones.INSTALL_TYPE = 'win'
# Don't have a way to update exe yet, but don't want to set VERSION to None
return 'Windows Install', 'master'
elif os.path.isdir(os.path.join(headphones.PROG_DIR, '.git')):
headphones.INSTALL_TYPE = 'git'
output, err = runGit('rev-parse HEAD')
if not output:
logger.error('Couldn\'t find latest installed version.')
cur_commit_hash = None
cur_commit_hash = str(output)
if not re.match('^[a-z0-9]+$', cur_commit_hash):
logger.error('Output doesn\'t look like a hash, not using it')
cur_commit_hash = None
if headphones.CONFIG.DO_NOT_OVERRIDE_GIT_BRANCH and headphones.CONFIG.GIT_BRANCH:
branch_name = headphones.CONFIG.GIT_BRANCH
else:
branch_name, err = runGit('rev-parse --abbrev-ref HEAD')
branch_name = branch_name
if not branch_name and headphones.CONFIG.GIT_BRANCH:
logger.error(
'Could not retrieve branch name from git. Falling back to %s' % headphones.CONFIG.GIT_BRANCH)
branch_name = headphones.CONFIG.GIT_BRANCH
if not branch_name:
logger.error('Could not retrieve branch name from git. Defaulting to master')
branch_name = 'master'
return cur_commit_hash, branch_name
else:
headphones.INSTALL_TYPE = 'source'
version_file = os.path.join(headphones.PROG_DIR, 'version.txt')
if not os.path.isfile(version_file):
return None, 'master'
with open(version_file, 'r') as f:
current_version = f.read().strip(' \n\r')
if current_version:
return current_version, headphones.CONFIG.GIT_BRANCH
else:
return None, 'master'
def checkGithub():
headphones.COMMITS_BEHIND = 0
# Get the latest version available from github
logger.info('Retrieving latest version information from GitHub')
url = 'https://api.github.com/repos/%s/headphones/commits/%s' % (
headphones.CONFIG.GIT_USER, headphones.CONFIG.GIT_BRANCH)
version = request.request_json(url, timeout=20, validator=lambda x: type(x) == dict)
if version is None:
logger.warn(
'Could not get the latest version from GitHub. Are you running a local development version?')
return headphones.CURRENT_VERSION
headphones.LATEST_VERSION = version['sha']
logger.debug("Latest version is %s", headphones.LATEST_VERSION)
# See how many commits behind we are
if not headphones.CURRENT_VERSION:
logger.info(
'You are running an unknown version of Headphones. Run the updater to identify your version')
return headphones.LATEST_VERSION
if headphones.LATEST_VERSION == headphones.CURRENT_VERSION:
logger.info('Headphones is up to date')
return headphones.LATEST_VERSION
logger.info('Comparing currently installed version with latest GitHub version')
url = 'https://api.github.com/repos/%s/headphones/compare/%s...%s' % (
headphones.CONFIG.GIT_USER, headphones.LATEST_VERSION, headphones.CURRENT_VERSION)
commits = request.request_json(url, timeout=20, whitelist_status_code=404,
validator=lambda x: type(x) == dict)
if commits is None:
logger.warn('Could not get commits behind from GitHub.')
return headphones.LATEST_VERSION
try:
headphones.COMMITS_BEHIND = int(commits['behind_by'])
logger.debug("In total, %d commits behind", headphones.COMMITS_BEHIND)
except KeyError:
logger.info('Cannot compare versions. Are you running a local development version?')
headphones.COMMITS_BEHIND = 0
if headphones.COMMITS_BEHIND > 0:
logger.info(
'New version is available. You are %s commits behind' % headphones.COMMITS_BEHIND)
elif headphones.COMMITS_BEHIND == 0:
logger.info('Headphones is up to date')
return headphones.LATEST_VERSION
def update():
if headphones.INSTALL_TYPE == 'win':
logger.info('Windows .exe updating not supported yet.')
elif headphones.INSTALL_TYPE == 'git':
output, err = runGit('pull origin ' + headphones.CONFIG.GIT_BRANCH)
if not output:
logger.error('Couldn\'t download latest version')
for line in output.split('\n'):
if 'Already up-to-date.' in line:
logger.info('No update available, not updating')
logger.info('Output: ' + str(output))
elif line.endswith('Aborting.'):
logger.error('Unable to update from git: ' + line)
logger.info('Output: ' + str(output))
else:
tar_download_url = 'https://github.com/%s/headphones/tarball/%s' % (
headphones.CONFIG.GIT_USER, headphones.CONFIG.GIT_BRANCH)
update_dir = os.path.join(headphones.PROG_DIR, 'update')
version_path = os.path.join(headphones.PROG_DIR, 'version.txt')
logger.info('Downloading update from: ' + tar_download_url)
data = request.request_content(tar_download_url)
if not data:
logger.error("Unable to retrieve new version from '%s', can't update", tar_download_url)
return
download_name = headphones.CONFIG.GIT_BRANCH + '-github'
tar_download_path = os.path.join(headphones.PROG_DIR, download_name)
# Save tar to disk
with open(tar_download_path, 'wb') as f:
f.write(data)
# Extract the tar to update folder
logger.info('Extracting file: ' + tar_download_path)
tar = tarfile.open(tar_download_path)
tar.extractall(update_dir)
tar.close()
# Delete the tar.gz
logger.info('Deleting file: ' + tar_download_path)
os.remove(tar_download_path)
# Find update dir name
update_dir_contents = [x for x in os.listdir(update_dir) if
os.path.isdir(os.path.join(update_dir, x))]
if len(update_dir_contents) != 1:
logger.error("Invalid update data, update failed: " + str(update_dir_contents))
return
content_dir = os.path.join(update_dir, update_dir_contents[0])
# walk temp folder and move files to main folder
for dirname, dirnames, filenames in os.walk(content_dir):
dirname = dirname[len(content_dir) + 1:]
for curfile in filenames:
old_path = os.path.join(content_dir, dirname, curfile)
new_path = os.path.join(headphones.PROG_DIR, dirname, curfile)
if os.path.isfile(new_path):
os.remove(new_path)
os.renames(old_path, new_path)
# Update version.txt
try:
with open(version_path, 'w') as f:
f.write(str(headphones.LATEST_VERSION))
except IOError as e:
logger.error(
"Unable to write current version to version.txt, update not complete: %s",
e
)
return
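# A sketch of the typical call sequence (assumes the headphones package has
# already been initialized so that CONFIG, PROG_DIR and CURRENT_VERSION are
# populated):
#
#   headphones.CURRENT_VERSION, branch = getVersion()
#   latest = checkGithub()
#   if headphones.COMMITS_BEHIND > 0:
#       update()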
|
maxkoryukov/headphones
|
headphones/versioncheck.py
|
Python
|
gpl-3.0
| 9,322 | 0.002145 |
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nsx_gw_devices
Revision ID: 19180cf98af6
Revises: 117643811bca
Create Date: 2014-02-26 02:46:26.151741
"""
# revision identifiers, used by Alembic.
revision = '19180cf98af6'
down_revision = '117643811bca'
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade():
if not migration.schema_has_table('networkgatewaydevices'):
# Assume that, in the database we are migrating from, the
# configured plugin did not create any nsx tables.
return
op.create_table(
'networkgatewaydevicereferences',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
sa.Column('interface_name', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', 'network_gateway_id', 'interface_name'))
# Copy data from networkgatewaydevices into networkgatewaydevicereferences
op.execute("INSERT INTO networkgatewaydevicereferences SELECT "
"id, network_gateway_id, interface_name FROM "
"networkgatewaydevices")
# drop networkgatewaydevices
op.drop_table('networkgatewaydevices')
op.create_table(
'networkgatewaydevices',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('nsx_id', sa.String(length=36), nullable=True),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('connector_type', sa.String(length=10), nullable=True),
sa.Column('connector_ip', sa.String(length=64), nullable=True),
sa.Column('status', sa.String(length=16), nullable=True),
sa.PrimaryKeyConstraint('id'))
# Create a networkgatewaydevice for each existing reference.
# For existing references nsx_id == neutron_id
# Do not fill connector info as it would be unknown
op.execute("INSERT INTO networkgatewaydevices (id, nsx_id, tenant_id) "
"SELECT gw_dev_ref.id, gw_dev_ref.id as nsx_id, tenant_id "
"FROM networkgatewaydevicereferences AS gw_dev_ref "
"INNER JOIN networkgateways AS net_gw ON "
"gw_dev_ref.network_gateway_id=net_gw.id")
|
yuewko/neutron
|
neutron/db/migration/alembic_migrations/versions/19180cf98af6_nsx_gw_devices.py
|
Python
|
apache-2.0
| 3,023 | 0.000992 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for tf.contrib.data when eager execution is enabled."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.data.util import nest
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops
_uid_counter = 0
_uid_lock = threading.Lock()
def _iterator_shared_name():
with _uid_lock:
global _uid_counter
uid = _uid_counter
_uid_counter += 1
return "eager_iterator_{}".format(uid)
class Iterator(object):
"""An iterator producing tf.Tensor objects from a tf.contrib.data.Dataset."""
def __init__(self, dataset):
"""Creates a new iterator over the given dataset.
For example:
```python
dataset = tf.contrib.data.Dataset.range(4)
for x in Iterator(dataset):
print(x)
```
Args:
dataset: A `tf.contrib.data.Dataset` object.
Raises:
RuntimeError: When invoked without eager execution enabled.
"""
if not context.in_eager_mode():
raise RuntimeError(
"{} objects only make sense when eager execution is enabled".format(
type(self)))
with ops.device("/device:CPU:0"):
ds_variant = dataset._as_variant_tensor() # pylint: disable=protected-access
self._output_types = dataset.output_types
self._flat_output_types = nest.flatten(dataset.output_types)
self._flat_output_shapes = nest.flatten(dataset.output_shapes)
self._resource = gen_dataset_ops.iterator(
container="",
shared_name=_iterator_shared_name(),
output_types=self._flat_output_types,
output_shapes=self._flat_output_shapes)
gen_dataset_ops.make_iterator(ds_variant, self._resource)
def __del__(self):
if self._resource is not None:
with ops.device("/device:CPU:0"):
resource_variable_ops.destroy_resource_op(self._resource)
self._resource = None
def __iter__(self):
return self
def __next__(self): # For Python 3 compatibility
return self.next()
def next(self):
"""Return the next tf.Tensor from the dataset."""
try:
# TODO(ashankar): Consider removing this ops.device() contextmanager
# and instead mimic ops placement in graphs: Operations on resource
# handles execute on the same device as where the resource is placed.
with ops.device("/device:CPU:0"):
ret = gen_dataset_ops.iterator_get_next(
self._resource,
output_types=self._flat_output_types,
output_shapes=self._flat_output_shapes)
return nest.pack_sequence_as(self._output_types, ret)
except errors.OutOfRangeError:
raise StopIteration
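# A usage sketch mirroring the class docstring (eager execution must be enabled
# before constructing the Iterator; the enable call shown reflects the contrib
# API of this era and is an assumption about the caller's setup):
#
#   import tensorflow.contrib.eager as tfe
#   tfe.enable_eager_execution()
#   dataset = tf.contrib.data.Dataset.range(4)
#   for x in Iterator(dataset):
#     print(x)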
|
benoitsteiner/tensorflow-opencl
|
tensorflow/contrib/eager/python/datasets.py
|
Python
|
apache-2.0
| 3,562 | 0.006176 |
####################################
# Sample Receiver Script
# [Usage]
# python receiver.py
# python receiver.py > data.csv
# [Data Format]
# id,time,x,y,z
# [Exaple]
# 1,118.533,-0.398,-0.199,-0.978
####################################
import sys
import os
import math
import time
import SocketServer
PORTNO = 10552
class handler(SocketServer.DatagramRequestHandler):
def handle(self):
newmsg = self.rfile.readline().rstrip()
print newmsg
s = SocketServer.UDPServer(('',PORTNO), handler)
print "Awaiting UDP messages on port %d" % PORTNO
s.serve_forever()
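# A companion sender sketch (illustrative; assumes the receiver above is
# listening on the same machine and that the payload follows the id,time,x,y,z
# format described in the header):
#
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.sendto("1,118.533,-0.398,-0.199,-0.978\n", ('127.0.0.1', PORTNO))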
|
shivekkhurana/learning
|
python/scripts/user_input/receiver.py
|
Python
|
mit
| 590 | 0.018644 |
'''
Servants is of primary interest to Python component developers.
The module names should sufficiently described their intended uses.
'''
__revision__ = "$Id: __init__.py,v 1.4 2005/02/25 23:42:32 dfugate Exp $"
|
ACS-Community/ACS
|
LGPL/CommonSoftware/acspy/src/Acspy/Servants/__init__.py
|
Python
|
lgpl-2.1
| 214 | 0 |
#!/usr/bin/env python
strings=['hey','guys','i','am','a','string']
parameter_list=[[strings]]
def features_string_char (strings):
from shogun import StringCharFeatures, RAWBYTE
from numpy import array
#create string features
f=StringCharFeatures(strings, RAWBYTE)
#and output several stats
#print("max string length", f.get_max_vector_length())
#print("number of strings", f.get_num_vectors())
#print("length of first string", f.get_vector_length(0))
#print("string[5]", ''.join(f.get_feature_vector(5)))
#print("strings", f.get_features())
#replace string 0
f.set_feature_vector(array(['t','e','s','t']), 0)
#print("strings", f.get_features())
return f.get_string_list(), f
if __name__=='__main__':
print('StringCharFeatures')
features_string_char(*parameter_list[0])
|
lambday/shogun
|
examples/undocumented/python/features_string_char.py
|
Python
|
bsd-3-clause
| 792 | 0.050505 |
# Copyright (C) 2006, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Wrappers for PSEA, a program for secondary structure assignment.
See this citation for P-SEA, PMID: 9183534
Labesse G, Colloc'h N, Pothier J, Mornon J-P: P-SEA: a new efficient
assignment of secondary structure from C_alpha.
Comput Appl Biosci 1997 , 13:291-295
ftp://ftp.lmcp.jussieu.fr/pub/sincris/software/protein/p-sea/
"""
import os
from Bio.PDB.Polypeptide import is_aa
def run_psea(fname):
"""Run PSEA and return output filename.
Note that this assumes the P-SEA binary is called "psea" and that it is
on the path.
Note that P-SEA will write an output file in the current directory using
the input filename with extension ".sea".
Note that P-SEA will write output to the terminal while run.
"""
os.system("psea "+fname)
last=fname.split("/")[-1]
base=last.split(".")[0]
return base+".sea"
def psea(pname):
"""Parse PSEA output file."""
fname=run_psea(pname)
start=0
ss=""
fp=open(fname, 'r')
for l in fp.readlines():
if l[0:6]==">p-sea":
start=1
continue
if not start:
continue
if l[0]=="\n":
break
ss=ss+l[0:-1]
fp.close()
return ss
def psea2HEC(pseq):
"""Translate PSEA secondary structure string into HEC."""
seq=[]
for ss in pseq:
if ss=="a":
n="H"
elif ss=="b":
n="E"
elif ss=="c":
n="C"
seq.append(n)
return seq
def annotate(m, ss_seq):
"""Apply seconardary structure information to residues in model."""
c=m.get_list()[0]
all=c.get_list()
residues=[]
# Now remove HOH etc.
for res in all:
if is_aa(res):
residues.append(res)
L=len(residues)
if not (L==len(ss_seq)):
raise ValueError("Length mismatch %i %i" % (L, len(ss_seq)))
for i in range(0, L):
residues[i].xtra["SS_PSEA"]=ss_seq[i]
#os.system("rm "+fname)
class PSEA:
def __init__(self, model, filename):
ss_seq=psea(filename)
ss_seq=psea2HEC(ss_seq)
annotate(model, ss_seq)
self.ss_seq=ss_seq
def get_seq(self):
"""
Return secondary structure string.
"""
return self.ss_seq
if __name__=="__main__":
import sys
from Bio.PDB import PDBParser
# Parse PDB file
p=PDBParser()
s=p.get_structure('X', sys.argv[1])
# Annotate structure with PSEA secondary structure info
PSEA(s[0], sys.argv[1])
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/PDB/PSEA.py
|
Python
|
gpl-2.0
| 2,747 | 0.014197 |
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from ipaddress import ip_network
from operator import itemgetter
from indico.util.i18n import _
from indico.web.forms.fields import MultiStringField
class MultiIPNetworkField(MultiStringField):
"""A field to enter multiple IPv4 or IPv6 networks.
The field data is a set of ``IPNetwork``s not bound to a DB session.
The ``unique`` and ``sortable`` parameters of the parent class cannot be used with this class.
"""
def __init__(self, *args, **kwargs):
super(MultiIPNetworkField, self).__init__(*args, field=('subnet', _("subnet")), **kwargs)
self._data_converted = False
self.data = None
def _value(self):
if self.data is None:
return []
elif self._data_converted:
data = [{self.field_name: unicode(network)} for network in self.data or []]
return sorted(data, key=itemgetter(self.field_name))
else:
return self.data
def process_data(self, value):
if value is not None:
self._data_converted = True
self.data = value
def _fix_network(self, network):
network = network.encode('ascii', 'ignore')
if network.startswith('::ffff:'):
# convert ipv6-style ipv4 to regular ipv4
# the ipaddress library doesn't deal with such IPs properly!
network = network[7:]
return unicode(network)
def process_formdata(self, valuelist):
self._data_converted = False
super(MultiIPNetworkField, self).process_formdata(valuelist)
self.data = {ip_network(self._fix_network(entry[self.field_name])) for entry in self.data}
self._data_converted = True
def pre_validate(self, form):
pass # nothing to do
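# A minimal form sketch (the IndicoForm base class and field label are
# assumptions about the calling code, not part of this module):
#
#   from indico.web.forms.base import IndicoForm
#
#   class IPNetworkGroupForm(IndicoForm):
#       networks = MultiIPNetworkField(_('Subnets'))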
|
eliasdesousa/indico
|
indico/modules/networks/fields.py
|
Python
|
gpl-3.0
| 2,522 | 0.001586 |
import argparse
import hashlib
import json
import csv
import os
MAESTRO_INDEX_PATH = '../mirdata/indexes/maestro_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def make_maestro_index(data_path):
metadata_path = os.path.join(data_path, 'maestro-v2.0.0.json')
print(metadata_path)
maestro_index = {}
with open(metadata_path, 'r') as fhandle:
metadata = json.load(fhandle)
for i, row in enumerate(metadata):
print(i)
trackid = row['midi_filename'].split('.')[0]
maestro_index[trackid] = {}
midi_path = os.path.join(data_path, row['midi_filename'])
midi_checksum = md5(midi_path)
maestro_index[trackid]['midi'] = [row['midi_filename'], midi_checksum]
audio_path = os.path.join(data_path, row['audio_filename'])
audio_checksum = md5(audio_path)
maestro_index[trackid]['audio'] = [row['audio_filename'], audio_checksum]
with open(MAESTRO_INDEX_PATH, 'w') as fhandle:
json.dump(maestro_index, fhandle, indent=2)
def main(args):
print("creating index...")
make_maestro_index(args.maestro_data_path)
print("done!")
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make MAESTRO index file.')
PARSER.add_argument(
'maestro_data_path', type=str, help='Path to MAESTRO data folder.'
)
main(PARSER.parse_args())
|
mir-dataset-loaders/mirdata
|
scripts/legacy/make_maestro_index.py
|
Python
|
bsd-3-clause
| 1,799 | 0.001112 |
import simplejson as json
import xmltodict
import requests
import os
import re
import sys
import base64
from urlparse import urlparse
from os.path import splitext, basename
from BeautifulSoup import BeautifulSoup
nb_token = os.environ.get('NB_TOKEN')
site_slug = os.environ.get('SITE_SLUG')
api_url = "https://" + site_slug + ".nationbuilder.com/api/v1/sites/" + \
site_slug
def youtube_links(input_text):
''' Find youtube links in any wordpress blogpost '''
# This is some horrible regex, see
# http://stackoverflow.com/questions/839994/extracting-a-url-in-python
url = re.findall(r'(src=\S+)', input_text)
if len(url) > 0:
url = url[0]
url = url.replace('src=', '')
url = url.replace('"', '')
# The URL format for youtube videos changed halfway through the life
        # of the blog; this tests for the format and returns the correct URL
if url.startswith("//"):
# If it's the new broken format append http,
# otherwise just return the URL
url = "http:" + url
return url
else:
return url
def read_xml(input_xml):
''' Reads an xml file and turns it into a dictionary '''
f = open(input_xml, 'r')
doc_xml = f.read()
doc = xmltodict.parse(doc_xml)
return doc
def remove_img_tags(input):
''' Removes img tags from text '''
soup = BeautifulSoup(input)
[s.extract() for s in soup('img')]
return str(soup)
def image_links(input):
''' Finds all the image links in a string '''
list_of_images = []
soup = BeautifulSoup(input)
    for i in soup.findAll('img'):
        # keep just the src URL (not the whole tag) so it can be fetched later
        list_of_images.append(i['src'])
return list_of_images
def convert_wp2nb(input_xml):
'''
Extracts the relevant items from wordpress posts and converts it into a
nationbuilder friendly format. If there are any youtube links it appends
them to the end of the post
'''
content = input_xml['content:encoded']
if content is not None:
content = content.replace('\n', '<br>')
# extract image URLs to be uploaded
image_urls = image_links(content)
# remove img tags as they will be brought in using liquid tags
content = remove_img_tags(content)
if content.find('youtube') > 0:
youtube_url = youtube_links(content)
if youtube_url is not None:
content = content + youtube_url
output_dict = {
'post':
{'blog_post': {
'name': input_xml['title'],
'slug': input_xml['wp:post_id'],
'status': 'published',
'content_before_flip': content,
'published_at': input_xml['pubDate'],
'author_id': '2'}},
'images': image_urls}
return output_dict
def upload_blog_post(input_json):
''' Uploads blog posts to the nationbuilder URL '''
url = api_url + '/pages/blogs/1/posts'
payload = input_json
headers = {'Content-Type': 'application/json',
'Accept': 'application/json'}
parameters = {'access_token': nb_token}
r = requests.post(url, data=payload, headers=headers, params=parameters)
response = r.status_code
print response
def delete_post(id):
''' Delete a nationbuilder post '''
url = api_url + '/pages/blogs/1/posts/%s' % id
headers = {'Content-Type': 'application/json',
'Accept': 'application/json'}
parameters = {'access_token': nb_token}
r = requests.delete(url, headers=headers, params=parameters)
response = r.status_code
print response
def get_posts():
''' Get blog post IDs of the blog '''
url = api_url + '/pages/blogs/1/posts/'
headers = {'Content-Type': 'application/json',
'Accept': 'application/json'}
parameters = {'access_token': nb_token, 'per_page': '100'}
r = requests.get(url, headers=headers, params=parameters)
response = json.loads(r.content)
return response
def upload_image(page_slug, image_url):
''' Upload an image attachment to a blog post '''
url = api_url + '/pages/%s/attachments' % page_slug
image = prepare_image(image_url)
headers = {'Content-Type': 'application/json',
'Accept': 'application/json'}
parameters = {'access_token': nb_token}
r = requests.post(url, headers=headers, params=parameters,
data=json.dumps(image))
return r
def prepare_image(url):
'''
Downloads an image, encodes it to base64 for the NB api and sets required
parameters then returns a dictionary
'''
    # Download the image then encode it as base64 per the NB api requirements
image = requests.get(url)
image_base64 = base64.b64encode(image.content)
# This splits the filename from the URL. See
# http://stackoverflow.com/questions/10552188/python-split-url-to-find-image-name-and-extension
image_disassembled = urlparse(image.url)
filename, file_ext = splitext(basename(image_disassembled.path))
image_filename = filename[1:] + file_ext
content = {'attachment': {'filename': image_filename, 'content_type': 'image/jpeg', 'updated_at': '2013-06-06T10:15:02-07:00', 'content': image_base64}}
return content
def delete_all_posts():
''' Removes all posts from the blog '''
posts = get_posts()
post_ids = []
for i in posts['results']:
post_ids.append(i['id'])
for i in post_ids:
delete_post(i)
if __name__ == "__main__":
''' Convert an xml file then upload it to nationbuilder '''
input_file = sys.argv[1]
doc = read_xml(input_file)
# Iterate through the xml entries, if there is any content,
# then upload to the blog
for i in doc['rss']['channel']['item']:
if i['content:encoded']:
output_dict = convert_wp2nb(i)
upload_blog_post(json.dumps(output_dict['post']))
            # If the post contains any images, then go through and
# upload them to the relevant page
if output_dict['images']:
for i in output_dict['images']:
upload_image(output_dict['post']['blog_post']['slug'], i)
|
thmcmahon/wp2nb
|
wp2nb.py
|
Python
|
mit
| 6,200 | 0.000645 |
################################################################################
### Copyright © 2012-2013 BlackDragonHunt
###
### This file is part of the Super Duper Script Editor.
###
### The Super Duper Script Editor is free software: you can redistribute it
### and/or modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation, either version 3 of the License,
### or (at your option) any later version.
###
### The Super Duper Script Editor is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with the Super Duper Script Editor.
### If not, see <http://www.gnu.org/licenses/>.
################################################################################
import logging
import os
import re
import time
try:
import cPickle as pickle
except:
import pickle
import common
import list_files
from script_file import ScriptFile
_LOGGER_NAME = common.LOGGER_NAME + "." + __name__
_LOGGER = logging.getLogger(_LOGGER_NAME)
DATA_FILE = "data/analytics.bin"
DEFAULT_FILTER = re.compile(ur"^\d\d|^e\d\d|^event|^mtb_s\d\d|^script_pak|^voice", re.IGNORECASE | re.DOTALL | re.UNICODE)
SEARCH_ORIGINAL = 0b0001
SEARCH_TRANSLATED = 0b0010
SEARCH_COMMENTS = 0b0100
SEARCH_NOTAGS = 0b1000
DEFAULT_SEARCH_FLAGS = SEARCH_ORIGINAL | SEARCH_TRANSLATED | SEARCH_COMMENTS
MIN_INTERVAL = 0.100
################################################################################
### @class ScriptData
################################################################################
class ScriptData():
####################################################################
### @fn __init__()
####################################################################
def __init__(self, filename = None):
self.filename = ""
self.filesize = None
self.last_edited = None
self.data = None
if filename:
self.load_file(filename)
####################################################################
### @fn load_file(filename)
####################################################################
def load_file(self, filename):
if filename == self.filename and not self.needs_update():
return
if not os.path.isfile(filename):
return
stats = os.stat(filename)
data = ScriptFile(filename)
self.filename = filename
self.filesize = int(stats.st_size)
self.last_edited = int(stats.st_mtime)
self.data = data
####################################################################
### @fn update()
####################################################################
def update(self):
self.load_file(self.filename)
####################################################################
### @fn needs_update()
####################################################################
def needs_update(self):
if not isinstance(self.data, ScriptFile):
_LOGGER.warning("Probably shouldn't be doing this.")
return True
stats = os.stat(self.filename)
filesize = int(stats.st_size)
last_edited = int(stats.st_mtime)
return (filesize != int(self.filesize) or last_edited != int(self.last_edited))
################################################################################
### @class ScriptAnalytics
################################################################################
class ScriptAnalytics():
####################################################################
### @fn __init__()
####################################################################
def __init__(self):
self.script_data = {}
self.load()
####################################################################
### @fn load()
####################################################################
def load(self):
# if os.path.isfile(DATA_FILE):
try:
with open(DATA_FILE, "rb") as f:
self.script_data = pickle.load(f)
# else:
except:
self.script_data = {}
self.update()
####################################################################
### @fn save()
####################################################################
def save(self):
with open(DATA_FILE, "wb") as f:
pickle.dump(self.script_data, f, pickle.HIGHEST_PROTOCOL)
####################################################################
### @fn update(dir_filter)
### Updates files whose directory match the filter.
####################################################################
def update(self, dir_filter = DEFAULT_FILTER):
txt_files = ScriptAnalytics.list_txt_files(dir_filter)
for txt_file in txt_files:
self.update_file(txt_file)
####################################################################
### @fn update_file(filename)
### Updates the given file.
####################################################################
def update_file(self, filename):
try:
self.script_data[filename].update()
except:
self.script_data[filename] = ScriptData(os.path.join(common.editor_config.umdimage_dir, filename))
#print "Probably shouldn't be doing this."
####################################################################
### @fn search_gen(text_filter, dir_filter, search_flags)
### Returns a list of files whose contents match the text filter
### and whose directory matches the directory filter.
### This is a generator which yields:
### * the current file number
### * the total number of files
### * the current filename
### * a list of matches found since the last yield
####################################################################
def search_gen(self, text_filter, dir_filter = DEFAULT_FILTER, search_flags = DEFAULT_SEARCH_FLAGS):
matches = []
original = search_flags & SEARCH_ORIGINAL
translated = search_flags & SEARCH_TRANSLATED
comments = search_flags & SEARCH_COMMENTS
notags = search_flags & SEARCH_NOTAGS
last_update = time.time()
#for i, (path, data) in enumerate(self.script_data.iteritems()):
for i, path in enumerate(self.script_data):
#if i % 500 == 0:
if time.time() - last_update > MIN_INTERVAL:
yield i, len(self.script_data), path, matches
matches = []
last_update = time.time()
if not dir_filter.search(path):
continue
self.update_file(path)
data = self.script_data[path]
to_search = []
if original:
to_search.append(data.data.original_notags if notags else data.data.original)
if translated:
to_search.append(data.data.translated_notags if notags else data.data.translated)
if comments:
to_search.append(data.data.comments)
to_search = "\n".join(to_search)
if text_filter.search(to_search):
matches.append(path)
yield len(self.script_data), len(self.script_data), "", matches
####################################################################
### @fn search(text_filter, dir_filter, search_flags)
### Returns a list of files whose contents match the text filter
### and whose directory matches the directory filter.
####################################################################
def search(self, text_filter, dir_filter = DEFAULT_FILTER, search_flags = DEFAULT_SEARCH_FLAGS):
matches = []
    for index, total, path, cur_matches in self.search_gen(text_filter, dir_filter, search_flags):
matches.extend(cur_matches)
return matches
####################################################################
### @fn get_data(dir_filter)
### A generator which yields:
### * the file number
### * the total number of files
### * the filename
### * and the data field of each file that matches the filter
### or None if there wasn't a match at a periodic interval
####################################################################
def get_data(self, dir_filter = DEFAULT_FILTER):
#self.update(dir_filter)
last_update = time.time()
#for i, (path, data) in enumerate(sorted(self.script_data.iteritems())):
for i, path in enumerate(sorted(self.script_data.keys())):
if not dir_filter.search(path):
#if i % 500 == 0:
if time.time() - last_update > MIN_INTERVAL:
yield i, len(self.script_data), path, None
last_update = time.time()
continue
self.update_file(path)
data = self.script_data[path]
yield i, len(self.script_data), path, data.data
last_update = time.time()
####################################################################
### @fn list_txt_files(dir_filter)
### Returns a list of files whose directory match the filter.
####################################################################
@staticmethod
def list_txt_files(dir_filter = DEFAULT_FILTER):
files = []
for dir in ScriptAnalytics.list_dirs(dir_filter):
temp_files = list_files.list_all_files(os.path.join(common.editor_config.umdimage_dir, dir))
files.extend(temp_files)
# For our dupe database, we need "umdimage" instead of wherever the files
# are really stored, so we strip that part off first.
dir_start = len(common.editor_config.umdimage_dir) + 1
text_files = []
for file in files:
if os.path.splitext(file)[1] == ".txt":
text_files.append(file[dir_start:])
return text_files
####################################################################
### @fn list_dirs(filter)
### Returns a list of directories that match the filter.
####################################################################
@staticmethod
def list_dirs(filter = DEFAULT_FILTER):
dirs = []
base_dir = common.editor_config.umdimage_dir
for item in os.listdir(base_dir):
full_path = os.path.join(base_dir, item)
if os.path.isdir(full_path):
if filter.search(item):
dirs.append(item)
return dirs
SA = ScriptAnalytics()
if __name__ == "__main__":
start_time = None
def lazy_timer():
global start_time
if start_time == None:
start_time = time.time()
else:
old_start = start_time
start_time = time.time()
elapsed = start_time - old_start
print elapsed, "seconds since last call"
lazy_timer()
#analytics = ScriptAnalytics()
#lazy_timer()
#results = common.script_analytics.search_gen(re.compile(ur"バカ", re.IGNORECASE | re.DOTALL | re.UNICODE))
results = []
for i, total, filename, partial_results in common.script_analytics.search_gen(re.compile(ur"バカ", re.IGNORECASE | re.DOTALL | re.UNICODE)):
print i, total, filename, len(partial_results)
results.extend(partial_results)
print len(results)
lazy_timer()
common.script_analytics.save()
### EOF ###
|
ThunderGemios10/The-Super-Duper-Script-Editor
|
script_analytics.py
|
Python
|
gpl-3.0
| 11,293 | 0.027477 |
"""
Django settings for {{ project_name }} project.
"""
import os
import re
MAIN_APPLICATION_PATH = os.path.dirname(
os.path.dirname(os.path.abspath(__file__))
)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(MAIN_APPLICATION_PATH)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get(
'DJANGO_SECRET', '{{ secret_key }}'
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', 'False').lower() in (
"on", "yes", "true", "t", "1"
)
if DEBUG:
ALLOWED_HOSTS = ['*']
else:
ah = os.environ.get('ALLOWED_HOSTS', '*')
ah = re.split(',', ah)
ALLOWED_HOSTS = ah
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
]
LOCAL_APPS = [
'core',
]
INSTALLED_APPS.extend(LOCAL_APPS)
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Asia/Krasnoyarsk'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_files')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
LOCALE_PATHS = [
os.path.join(BASE_DIR, 'translations'),
]
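# ---------------------------------------------------------------------------
# Hedged smoke check (editor addition): this settings module only depends on
# os/re, so once the project template has been rendered it can be executed
# directly to see how the environment-driven toggles above resolve. The
# command line below is only an assumed example invocation:
#   DEBUG=1 ALLOWED_HOSTS=a.example,b.example python common.py
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    print('DEBUG = {0}'.format(DEBUG))
    print('ALLOWED_HOSTS = {0}'.format(ALLOWED_HOSTS))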
|
wolfstein9119/django-skel
|
project_name/settings/common.py
|
Python
|
bsd-2-clause
| 3,448 | 0.00116 |
def powers_of_two(limit):
value = 1
while value < limit:
yield value
value += value
# Use the generator
for i in powers_of_two(70):
print(i)
# Explore the mechanism
g = powers_of_two(100)
assert str(type(powers_of_two)) == "<class 'function'>"
assert str(type(g)) == "<class 'generator'>"
assert g.__next__() == 1
assert g.__next__() == 2
assert next(g) == 4
assert next(g) == 8
|
rtoal/ple
|
python/powers_of_two.py
|
Python
|
mit
| 409 | 0.002445 |
"""
Oracle database backend for Django.
Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""
from __future__ import unicode_literals
import decimal
import re
import sys
import warnings
def _setup_environment(environ):
import platform
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
import os
os.environ.update(environ)
_setup_environment([
# Oracle takes client-side character set encoding from the environment.
('NLS_LANG', '.UTF8'),
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
import cx_Oracle as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
try:
import pytz
except ImportError:
pytz = None
from django.db import utils
from django.db.backends import *
from django.db.backends.oracle.client import DatabaseClient
from django.db.backends.oracle.creation import DatabaseCreation
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils.encoding import force_bytes, force_text
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# Check whether cx_Oracle was compiled with the WITH_UNICODE option if cx_Oracle is pre-5.1. This will
# also be True for cx_Oracle 5.1 and in Python 3.0. See #19606
if int(Database.version.split('.', 1)[0]) >= 5 and \
(int(Database.version.split('.', 2)[1]) >= 1 or
not hasattr(Database, 'UNICODE')):
convert_unicode = force_text
else:
convert_unicode = force_bytes
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
needs_datetime_string_cast = False
interprets_empty_strings_as_nulls = True
uses_savepoints = True
has_select_for_update = True
has_select_for_update_nowait = True
can_return_id_from_insert = True
allow_sliced_subqueries = False
supports_subqueries_in_group_by = False
supports_transactions = True
supports_timezones = False
has_zoneinfo_database = pytz is not None
supports_bitwise_or = False
can_defer_constraint_checks = True
ignores_nulls_in_unique_constraints = False
has_bulk_insert = True
supports_tablespaces = True
supports_sequence_reset = False
atomic_transactions = False
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.oracle.compiler"
def autoinc_sql(self, table, column):
# To simulate auto-incrementing primary keys in Oracle, we have to
# create a sequence and a trigger.
sq_name = self._get_sequence_name(table)
tr_name = self._get_trigger_name(table)
tbl_name = self.quote_name(table)
col_name = self.quote_name(column)
sequence_sql = """
DECLARE
i INTEGER;
BEGIN
SELECT COUNT(*) INTO i FROM USER_CATALOG
WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
IF i = 0 THEN
EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"';
END IF;
END;
/""" % locals()
trigger_sql = """
CREATE OR REPLACE TRIGGER "%(tr_name)s"
BEFORE INSERT ON %(tbl_name)s
FOR EACH ROW
WHEN (new.%(col_name)s IS NULL)
BEGIN
SELECT "%(sq_name)s".nextval
INTO :new.%(col_name)s FROM dual;
END;
/""" % locals()
return sequence_sql, trigger_sql
def cache_key_culling_sql(self):
return """
SELECT cache_key
FROM (SELECT cache_key, rank() OVER (ORDER BY cache_key) AS rank FROM %s)
WHERE rank = %%s + 1
"""
def date_extract_sql(self, lookup_type, field_name):
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
return "TO_CHAR(%s, 'D')" % field_name
else:
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_interval_sql(self, sql, connector, timedelta):
"""
        Implements the interval functionality for expressions.
        Format for Oracle:
(datefield + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6))
"""
minutes, seconds = divmod(timedelta.seconds, 60)
hours, minutes = divmod(minutes, 60)
days = str(timedelta.days)
day_precision = len(days)
fmt = "(%s %s INTERVAL '%s %02d:%02d:%02d.%06d' DAY(%d) TO SECOND(6))"
return fmt % (sql, connector, days, hours, minutes, seconds,
timedelta.microseconds, day_precision)
def date_trunc_sql(self, lookup_type, field_name):
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
if lookup_type in ('year', 'month'):
return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
else:
return "TRUNC(%s)" % field_name
# Oracle crashes with "ORA-03113: end-of-file on communication channel"
# if the time zone name is passed in parameter. Use interpolation instead.
# https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ
# This regexp matches all time zone names from the zoneinfo database.
_tzname_re = re.compile(r'^[\w/:+-]+$')
def _convert_field_to_tz(self, field_name, tzname):
if not self._tzname_re.match(tzname):
raise ValueError("Invalid time zone name: %s" % tzname)
# Convert from UTC to local time, returning TIMESTAMP WITH TIME ZONE.
result = "(FROM_TZ(%s, '0:00') AT TIME ZONE '%s')" % (field_name, tzname)
# Extracting from a TIMESTAMP WITH TIME ZONE ignore the time zone.
# Convert to a DATETIME, which is called DATE by Oracle. There's no
# built-in function to do that; the easiest is to go through a string.
result = "TO_CHAR(%s, 'YYYY-MM-DD HH24:MI:SS')" % result
result = "TO_DATE(%s, 'YYYY-MM-DD HH24:MI:SS')" % result
# Re-convert to a TIMESTAMP because EXTRACT only handles the date part
# on DATE values, even though they actually store the time part.
return "CAST(%s AS TIMESTAMP)" % result
def datetime_extract_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = self._convert_field_to_tz(field_name, tzname)
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
sql = "TO_CHAR(%s, 'D')" % field_name
else:
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm
sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
return sql, []
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = self._convert_field_to_tz(field_name, tzname)
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
if lookup_type in ('year', 'month'):
sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
elif lookup_type == 'day':
sql = "TRUNC(%s)" % field_name
elif lookup_type == 'hour':
sql = "TRUNC(%s, 'HH24')" % field_name
elif lookup_type == 'minute':
sql = "TRUNC(%s, 'MI')" % field_name
else:
sql = field_name # Cast to DATE removes sub-second precision.
return sql, []
def convert_values(self, value, field):
if isinstance(value, Database.LOB):
value = value.read()
if field and field.get_internal_type() == 'TextField':
value = force_text(value)
# Oracle stores empty strings as null. We need to undo this in
# order to adhere to the Django convention of using the empty
# string instead of null, but only if the field accepts the
# empty string.
if value is None and field and field.empty_strings_allowed:
value = ''
# Convert 1 or 0 to True or False
elif value in (1, 0) and field and field.get_internal_type() in ('BooleanField', 'NullBooleanField'):
value = bool(value)
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
# Convert floats to decimals
elif value is not None and field and field.get_internal_type() == 'DecimalField':
value = util.typecast_decimal(field.format_number(value))
# cx_Oracle always returns datetime.datetime objects for
# DATE and TIMESTAMP columns, but Django wants to see a
# python datetime.date, .time, or .datetime. We use the type
# of the Field to determine which to cast to, but it's not
# always available.
# As a workaround, we cast to date if all the time-related
# values are 0, or to time if the date is 1/1/1900.
# This could be cleaned a bit by adding a method to the Field
# classes to normalize values from the database (the to_python
# method is used for validation and isn't what we want here).
elif isinstance(value, Database.Timestamp):
if field and field.get_internal_type() == 'DateTimeField':
pass
elif field and field.get_internal_type() == 'DateField':
value = value.date()
elif field and field.get_internal_type() == 'TimeField' or (value.year == 1900 and value.month == value.day == 1):
value = value.time()
elif value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
return value
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def drop_sequence_sql(self, table):
return "DROP SEQUENCE %s;" % self.quote_name(self._get_sequence_name(table))
def fetch_returned_insert_id(self, cursor):
return int(cursor._insert_id_var.getvalue())
def field_cast_sql(self, db_type, internal_type):
if db_type and db_type.endswith('LOB'):
return "DBMS_LOB.SUBSTR(%s)"
else:
return "%s"
def last_executed_query(self, cursor, sql, params):
# http://cx-oracle.sourceforge.net/html/cursor.html#Cursor.statement
# The DB API definition does not define this attribute.
statement = cursor.statement
if statement and six.PY2 and not isinstance(statement, unicode):
statement = statement.decode('utf-8')
# Unlike Psycopg's `query` and MySQLdb`'s `_last_executed`, CxOracle's
# `statement` doesn't contain the query parameters. refs #20010.
return super(DatabaseOperations, self).last_executed_query(cursor, statement, params)
def last_insert_id(self, cursor, table_name, pk_name):
sq_name = self._get_sequence_name(table_name)
cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_in_list_size(self):
return 1000
def max_name_length(self):
return 30
def prep_for_iexact_query(self, x):
return x
def process_clob(self, value):
if value is None:
return ''
return force_text(value.read())
def quote_name(self, name):
# SQL92 requires delimited (quoted) names to be case-sensitive. When
# not quoted, Oracle has case-insensitive behavior for identifiers, but
# always defaults to uppercase.
# We simplify things by making Oracle identifiers always uppercase.
if not name.startswith('"') and not name.endswith('"'):
name = '"%s"' % util.truncate_name(name.upper(),
self.max_name_length())
# Oracle puts the query text into a (query % args) construct, so % signs
# in names need to be escaped. The '%%' will be collapsed back to '%' at
# that stage so we aren't really making the name longer here.
name = name.replace('%','%%')
return name.upper()
def random_function_sql(self):
return "DBMS_RANDOM.RANDOM"
def regex_lookup_9(self, lookup_type):
raise NotImplementedError("Regexes are not supported in Oracle before version 10g.")
def regex_lookup_10(self, lookup_type):
if lookup_type == 'regex':
match_option = "'c'"
else:
match_option = "'i'"
return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
def regex_lookup(self, lookup_type):
# If regex_lookup is called before it's been initialized, then create
# a cursor to initialize it and recur.
self.connection.cursor()
return self.connection.ops.regex_lookup(lookup_type)
def return_insert_id(self):
return "RETURNING %s INTO %%s", (InsertIdVar(),)
def savepoint_create_sql(self, sid):
return convert_unicode("SAVEPOINT " + self.quote_name(sid))
def savepoint_rollback_sql(self, sid):
return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid))
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
# 'TRUNCATE z;'... style SQL statements
if tables:
# Oracle does support TRUNCATE, but it seems to get us into
# FK referential trouble, whereas DELETE FROM table works.
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Since we've just deleted all the rows, running our sequence
# ALTER code will reset the sequence to 0.
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def sequence_reset_by_name_sql(self, style, sequences):
sql = []
for sequence_info in sequences:
sequence_name = self._get_sequence_name(sequence_info['table'])
table_name = self.quote_name(sequence_info['table'])
column_name = self.quote_name(sequence_info['column'] or 'id')
query = _get_sequence_reset_sql() % {'sequence': sequence_name,
'table': table_name,
'column': column_name}
sql.append(query)
return sql
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
query = _get_sequence_reset_sql()
for model in model_list:
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
table_name = self.quote_name(model._meta.db_table)
sequence_name = self._get_sequence_name(model._meta.db_table)
column_name = self.quote_name(f.column)
output.append(query % {'sequence': sequence_name,
'table': table_name,
'column': column_name})
# Only one AutoField is allowed per model, so don't
# continue to loop
break
for f in model._meta.many_to_many:
if not f.rel.through:
table_name = self.quote_name(f.m2m_db_table())
sequence_name = self._get_sequence_name(f.m2m_db_table())
column_name = self.quote_name('id')
output.append(query % {'sequence': sequence_name,
'table': table_name,
'column': column_name})
return output
def start_transaction_sql(self):
return ''
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def value_to_db_datetime(self, value):
if value is None:
return None
# Oracle doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.")
return six.text_type(value)
def value_to_db_time(self, value):
if value is None:
return None
if isinstance(value, six.string_types):
return datetime.datetime.strptime(value, '%H:%M:%S')
# Oracle doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("Oracle backend does not support timezone-aware times.")
return datetime.datetime(1900, 1, 1, value.hour, value.minute,
value.second, value.microsecond)
def year_lookup_bounds_for_date_field(self, value):
first = '%s-01-01'
second = '%s-12-31'
return [first % value, second % value]
def year_lookup_bounds_for_datetime_field(self, value):
# The default implementation uses datetime objects for the bounds.
# This must be overridden here, to use a formatted date (string) as
# 'second' instead -- cx_Oracle chops the fraction-of-second part
# off of datetime objects, leaving almost an entire second out of
# the year under the default implementation.
bounds = super(DatabaseOperations, self).year_lookup_bounds_for_datetime_field(value)
if settings.USE_TZ:
bounds = [b.astimezone(timezone.utc).replace(tzinfo=None) for b in bounds]
return [b.isoformat(b' ') for b in bounds]
def combine_expression(self, connector, sub_expressions):
"Oracle requires special cases for %% and & operators in query expressions"
if connector == '%%':
return 'MOD(%s)' % ','.join(sub_expressions)
elif connector == '&':
return 'BITAND(%s)' % ','.join(sub_expressions)
elif connector == '|':
raise NotImplementedError("Bit-wise or is not supported in Oracle.")
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
def _get_sequence_name(self, table):
name_length = self.max_name_length() - 3
return '%s_SQ' % util.truncate_name(table, name_length).upper()
def _get_trigger_name(self, table):
name_length = self.max_name_length() - 3
return '%s_TR' % util.truncate_name(table, name_length).upper()
def bulk_insert_sql(self, fields, num_values):
items_sql = "SELECT %s FROM DUAL" % ", ".join(["%s"] * len(fields))
return " UNION ALL ".join([items_sql] * num_values)
class _UninitializedOperatorsDescriptor(object):
def __get__(self, instance, owner):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'oracle'
operators = _UninitializedOperatorsDescriptor()
_standard_operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
}
_likec_operators = _standard_operators.copy()
_likec_operators.update({
'contains': "LIKEC %s ESCAPE '\\'",
'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
'startswith': "LIKEC %s ESCAPE '\\'",
'endswith': "LIKEC %s ESCAPE '\\'",
'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
})
Database = Database
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
self.features.can_return_id_from_insert = use_returning_into
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _connect_string(self):
settings_dict = self.settings_dict
if not settings_dict['HOST'].strip():
settings_dict['HOST'] = 'localhost'
if settings_dict['PORT'].strip():
dsn = Database.makedsn(settings_dict['HOST'],
int(settings_dict['PORT']),
settings_dict['NAME'])
else:
dsn = settings_dict['NAME']
return "%s/%s@%s" % (settings_dict['USER'],
settings_dict['PASSWORD'], dsn)
def get_connection_params(self):
conn_params = self.settings_dict['OPTIONS'].copy()
if 'use_returning_into' in conn_params:
del conn_params['use_returning_into']
return conn_params
def get_new_connection(self, conn_params):
conn_string = convert_unicode(self._connect_string())
return Database.connect(conn_string, **conn_params)
def init_connection_state(self):
cursor = self.create_cursor()
# Set the territory first. The territory overrides NLS_DATE_FORMAT
# and NLS_TIMESTAMP_FORMAT to the territory default. When all of
# these are set in single statement it isn't clear what is supposed
# to happen.
cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
# Set oracle date to ansi date format. This only needs to execute
# once when we create a new connection. We also set the Territory
# to 'AMERICA' which forces Sunday to evaluate to a '1' in
# TO_CHAR().
cursor.execute(
"ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
" NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'"
+ (" TIME_ZONE = 'UTC'" if settings.USE_TZ else ''))
cursor.close()
if 'operators' not in self.__dict__:
# Ticket #14149: Check whether our LIKE implementation will
# work for this connection or we need to fall back on LIKEC.
# This check is performed only once per DatabaseWrapper
# instance per thread, since subsequent connections will use
# the same settings.
cursor = self.create_cursor()
try:
cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
% self._standard_operators['contains'],
['X'])
except DatabaseError:
self.operators = self._likec_operators
else:
self.operators = self._standard_operators
cursor.close()
# There's no way for the DatabaseOperations class to know the
# currently active Oracle version, so we do some setups here.
# TODO: Multi-db support will need a better solution (a way to
# communicate the current version).
if self.oracle_version is not None and self.oracle_version <= 9:
self.ops.regex_lookup = self.ops.regex_lookup_9
else:
self.ops.regex_lookup = self.ops.regex_lookup_10
try:
self.connection.stmtcachesize = 20
except:
# Django docs specify cx_Oracle version 4.3.1 or higher, but
# stmtcachesize is available only in 4.3.2 and up.
pass
def create_cursor(self):
return FormatStylePlaceholderCursor(self.connection)
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.DatabaseError as e:
# cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception
# with the following attributes and values:
# code = 2091
# message = 'ORA-02091: transaction rolled back
# 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
# _C00102056) violated - parent key not found'
# We convert that particular case to our IntegrityError exception
x = e.args[0]
if hasattr(x, 'code') and hasattr(x, 'message') \
and x.code == 2091 and 'ORA-02291' in x.message:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
# Oracle doesn't support savepoint commits. Ignore them.
def _savepoint_commit(self, sid):
pass
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
        To check constraints, we set constraints to immediate. Then, when we're done, we must ensure they
are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
if hasattr(self.connection, 'ping'): # Oracle 10g R2 and higher
self.connection.ping()
else:
# Use a cx_Oracle cursor directly, bypassing Django's utilities.
self.connection.cursor().execute("SELECT 1 FROM DUAL")
except Database.Error:
return False
else:
return True
@cached_property
def oracle_version(self):
with self.temporary_connection():
version = self.connection.version
try:
return int(version.split('.')[0])
except ValueError:
return None
class OracleParam(object):
"""
Wrapper object for formatting parameters for Oracle. If the string
representation of the value is large enough (greater than 4000 characters)
the input size needs to be set as CLOB. Alternatively, if the parameter
has an `input_size` attribute, then the value of the `input_size` attribute
will be used instead. Otherwise, no input size will be set for the
parameter when executing the query.
"""
def __init__(self, param, cursor, strings_only=False):
# With raw SQL queries, datetimes can reach this function
# without being converted by DateTimeField.get_db_prep_value.
if settings.USE_TZ and isinstance(param, datetime.datetime):
if timezone.is_naive(param):
warnings.warn("Oracle received a naive datetime (%s)"
" while time zone support is active." % param,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
param = timezone.make_aware(param, default_timezone)
param = param.astimezone(timezone.utc).replace(tzinfo=None)
# Oracle doesn't recognize True and False correctly in Python 3.
# The conversion done below works both in 2 and 3.
if param is True:
param = "1"
elif param is False:
param = "0"
if hasattr(param, 'bind_parameter'):
self.force_bytes = param.bind_parameter(cursor)
elif isinstance(param, six.memoryview):
self.force_bytes = param
else:
self.force_bytes = convert_unicode(param, cursor.charset,
strings_only)
if hasattr(param, 'input_size'):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif isinstance(param, six.string_types) and len(param) > 4000:
# Mark any string param greater than 4000 characters as a CLOB.
self.input_size = Database.CLOB
else:
self.input_size = None
class VariableWrapper(object):
"""
An adapter class for cursor variables that prevents the wrapped object
    from being converted into a string when used to instantiate an OracleParam.
This can be used generally for any other object that should be passed into
Cursor.execute as-is.
"""
def __init__(self, var):
self.var = var
def bind_parameter(self, cursor):
return self.var
def __getattr__(self, key):
return getattr(self.var, key)
def __setattr__(self, key, value):
if key == 'var':
self.__dict__[key] = value
else:
setattr(self.var, key, value)
class InsertIdVar(object):
"""
A late-binding cursor variable that can be passed to Cursor.execute
as a parameter, in order to receive the id of the row created by an
insert statement.
"""
def bind_parameter(self, cursor):
param = cursor.cursor.var(Database.NUMBER)
cursor._insert_id_var = param
return param
class FormatStylePlaceholderCursor(object):
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
We also do automatic conversion between Unicode on the Python side and
UTF-8 -- for talking to Oracle -- in here.
"""
charset = 'utf-8'
def __init__(self, connection):
self.cursor = connection.cursor()
# Necessary to retrieve decimal values without rounding error.
self.cursor.numbersAsStrings = True
# Default arraysize of 1 is highly sub-optimal.
self.cursor.arraysize = 100
def _format_params(self, params):
try:
return dict((k,OracleParam(v, self, True)) for k,v in params.items())
except AttributeError:
return tuple([OracleParam(p, self, True) for p in params])
def _guess_input_sizes(self, params_list):
# Try dict handling; if that fails, treat as sequence
if hasattr(params_list[0], 'keys'):
sizes = {}
for params in params_list:
for k, value in params.items():
if value.input_size:
sizes[k] = value.input_size
self.setinputsizes(**sizes)
else:
# It's not a list of dicts; it's a list of sequences
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
self.setinputsizes(*sizes)
def _param_generator(self, params):
# Try dict handling; if that fails, treat as sequence
if hasattr(params, 'items'):
return dict((k, v.force_bytes) for k,v in params.items())
else:
return [p.force_bytes for p in params]
def _fix_for_params(self, query, params):
        # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
if params is None:
params = []
query = convert_unicode(query, self.charset)
elif hasattr(params, 'keys'):
# Handle params as dict
args = dict((k, ":%s"%k) for k in params.keys())
query = convert_unicode(query % args, self.charset)
else:
# Handle params as sequence
args = [(':arg%d' % i) for i in range(len(params))]
query = convert_unicode(query % tuple(args), self.charset)
return query, self._format_params(params)
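    # Hedged illustration (editor addition, query and values assumed): for a
    # sequence of params,
    #   _fix_for_params("SELECT 1 FROM t WHERE a = %s AND b = %s", [1, 'x'])
    # rewrites the query to use Oracle named binds, roughly
    #   "SELECT 1 FROM t WHERE a = :arg0 AND b = :arg1"
    # and returns the params wrapped in OracleParam objects for execute/executemany.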
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params)
self._guess_input_sizes([params])
try:
return self.cursor.execute(query, self._param_generator(params))
except Database.DatabaseError as e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def executemany(self, query, params=None):
if not params:
# No params given, nothing to do
return None
# uniform treatment for sequences and iterables
params_iter = iter(params)
query, firstparams = self._fix_for_params(query, next(params_iter))
# we build a list of formatted params; as we're going to traverse it
# more than once, we can't make it lazy by using a generator
formatted = [firstparams]+[self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
try:
return self.cursor.executemany(query,
[self._param_generator(p) for p in formatted])
except Database.DatabaseError as e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def fetchone(self):
row = self.cursor.fetchone()
if row is None:
return row
return _rowfactory(row, self.cursor)
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
return tuple([_rowfactory(r, self.cursor)
for r in self.cursor.fetchmany(size)])
def fetchall(self):
return tuple([_rowfactory(r, self.cursor)
for r in self.cursor.fetchall()])
def var(self, *args):
return VariableWrapper(self.cursor.var(*args))
def arrayvar(self, *args):
return VariableWrapper(self.cursor.arrayvar(*args))
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return CursorIterator(self.cursor)
class CursorIterator(six.Iterator):
"""Cursor iterator wrapper that invokes our custom row factory."""
def __init__(self, cursor):
self.cursor = cursor
self.iter = iter(cursor)
def __iter__(self):
return self
def __next__(self):
return _rowfactory(next(self.iter), self.cursor)
def _rowfactory(row, cursor):
# Cast numeric values as the appropriate Python type based upon the
# cursor description, and convert strings to unicode.
casted = []
for value, desc in zip(row, cursor.description):
if value is not None and desc[1] is Database.NUMBER:
precision, scale = desc[4:6]
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point
# This will normally be an integer from a sequence,
# but it could be a decimal value.
if '.' in value:
value = decimal.Decimal(value)
else:
value = int(value)
else:
# FLOAT column: binary-precision floating point.
# This comes from FloatField columns.
value = float(value)
elif precision > 0:
# NUMBER(p,s) column: decimal-precision fixed point.
# This comes from IntField and DecimalField columns.
if scale == 0:
value = int(value)
else:
value = decimal.Decimal(value)
elif '.' in value:
# No type information. This normally comes from a
# mathematical expression in the SELECT list. Guess int
# or Decimal based on whether it has a decimal point.
value = decimal.Decimal(value)
else:
value = int(value)
# datetimes are returned as TIMESTAMP, except the results
# of "dates" queries, which are returned as DATETIME.
elif desc[1] in (Database.TIMESTAMP, Database.DATETIME):
# Confirm that dt is naive before overwriting its tzinfo.
if settings.USE_TZ and value is not None and timezone.is_naive(value):
value = value.replace(tzinfo=timezone.utc)
elif desc[1] in (Database.STRING, Database.FIXED_CHAR,
Database.LONG_STRING):
value = to_unicode(value)
casted.append(value)
return tuple(casted)
def to_unicode(s):
"""
Convert strings to Unicode objects (and return all other data types
unchanged).
"""
if isinstance(s, six.string_types):
return force_text(s)
return s
def _get_sequence_reset_sql():
# TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
return """
DECLARE
table_value integer;
seq_value integer;
BEGIN
SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
WHERE sequence_name = '%(sequence)s';
WHILE table_value > seq_value LOOP
SELECT "%(sequence)s".nextval INTO seq_value FROM dual;
END LOOP;
END;
/"""
|
atruberg/django-custom
|
django/db/backends/oracle/base.py
|
Python
|
bsd-3-clause
| 39,830 | 0.001456 |
"""
Import prescribing data from CSV files into SQLite
"""
from collections import namedtuple
import csv
from itertools import groupby
import logging
import os
import sqlite3
import gzip
import heapq
from matrixstore.matrix_ops import sparse_matrix, finalise_matrix
from matrixstore.serializer import serialize_compressed
from .common import get_prescribing_filename
logger = logging.getLogger(__name__)
MatrixRow = namedtuple("MatrixRow", "bnf_code items quantity actual_cost net_cost")
class MissingHeaderError(Exception):
pass
def import_prescribing(filename):
if not os.path.exists(filename):
raise RuntimeError("No SQLite file at: {}".format(filename))
connection = sqlite3.connect(filename)
# Trade crash-safety for insert speed
connection.execute("PRAGMA synchronous=OFF")
dates = [date for (date,) in connection.execute("SELECT date FROM date")]
prescriptions = get_prescriptions_for_dates(dates)
write_prescribing(connection, prescriptions)
connection.commit()
connection.close()
def write_prescribing(connection, prescriptions):
cursor = connection.cursor()
# Map practice codes and date strings to their corresponding row/column
# offset in the matrix
practices = dict(cursor.execute("SELECT code, offset FROM practice"))
dates = dict(cursor.execute("SELECT date, offset FROM date"))
matrices = build_matrices(prescriptions, practices, dates)
rows = format_as_sql_rows(matrices, connection)
cursor.executemany(
"""
UPDATE presentation SET items=?, quantity=?, actual_cost=?, net_cost=?
WHERE bnf_code=?
""",
rows,
)
def get_prescriptions_for_dates(dates):
"""
Yield all prescribing data for the given dates as tuples of the form:
bnf_code, practice_code, date, items, quantity, actual_cost, net_cost
sorted by bnf_code, practice and date.
"""
dates = sorted(dates)
filenames = [get_prescribing_filename(date) for date in dates]
missing_files = [f for f in filenames if not os.path.exists(f)]
if missing_files:
raise RuntimeError(
"Some required CSV files were missing:\n {}".format(
"\n ".join(missing_files)
)
)
prescribing_streams = [read_gzipped_prescribing_csv(f) for f in filenames]
# We assume that the input files are already sorted by (bnf_code, practice,
# month) so to ensure that the combined stream is sorted we just need to
# merge them correctly, which heapq.merge handles nicely for us
return heapq.merge(*prescribing_streams)
def read_gzipped_prescribing_csv(filename):
with gzip.open(filename, "rt") as f:
for row in parse_prescribing_csv(f):
yield row
def parse_prescribing_csv(input_stream):
"""
Accepts a stream of CSV and yields prescribing data as tuples of the form:
bnf_code, practice_code, date, items, quantity, actual_cost, net_cost
"""
reader = csv.reader(input_stream)
headers = next(reader)
try:
bnf_code_col = headers.index("bnf_code")
practice_col = headers.index("practice")
date_col = headers.index("month")
items_col = headers.index("items")
quantity_col = headers.index("quantity")
actual_cost_col = headers.index("actual_cost")
net_cost_col = headers.index("net_cost")
except ValueError as e:
raise MissingHeaderError(str(e))
for row in reader:
yield (
# These sometimes have trailing spaces in the CSV
row[bnf_code_col].strip(),
row[practice_col].strip(),
# We only need the YYYY-MM-DD part of the date
row[date_col][:10],
int(row[items_col]),
float(row[quantity_col]),
pounds_to_pence(row[actual_cost_col]),
pounds_to_pence(row[net_cost_col]),
)
def pounds_to_pence(value):
return int(round(float(value) * 100))
def build_matrices(prescriptions, practices, dates):
"""
    Accepts an iterable of prescriptions plus mappings of practice codes and
date strings to their respective row/column offsets. Yields tuples of the
form:
bnf_code, items_matrix, quantity_matrix, actual_cost_matrix, net_cost_matrix
Where the matrices contain the prescribed values for that presentation for
every practice and date.
"""
max_row = max(practices.values())
max_col = max(dates.values())
shape = (max_row + 1, max_col + 1)
grouped_by_bnf_code = groupby(prescriptions, lambda row: row[0])
for bnf_code, row_group in grouped_by_bnf_code:
items_matrix = sparse_matrix(shape, integer=True)
quantity_matrix = sparse_matrix(shape, integer=False)
actual_cost_matrix = sparse_matrix(shape, integer=True)
net_cost_matrix = sparse_matrix(shape, integer=True)
for _, practice, date, items, quantity, actual_cost, net_cost in row_group:
practice_offset = practices[practice]
date_offset = dates[date]
items_matrix[practice_offset, date_offset] = items
quantity_matrix[practice_offset, date_offset] = quantity
actual_cost_matrix[practice_offset, date_offset] = actual_cost
net_cost_matrix[practice_offset, date_offset] = net_cost
yield MatrixRow(
bnf_code,
finalise_matrix(items_matrix),
finalise_matrix(quantity_matrix),
finalise_matrix(actual_cost_matrix),
finalise_matrix(net_cost_matrix),
)
def format_as_sql_rows(matrices, connection):
"""
Given an iterable of MatrixRows (which contain a BNF code plus all
prescribing data for that presentation) yield tuples of values ready for
insertion into SQLite
"""
cursor = connection.cursor()
num_presentations = next(cursor.execute("SELECT COUNT(*) FROM presentation"))[0]
count = 0
for row in matrices:
count += 1
# We make sure we have a row for every BNF code in the data, even ones
# we didn't know about previously. This is a hack that we won't need
# once we can use SQLite v3.24.0 which has proper UPSERT support.
cursor.execute(
"INSERT OR IGNORE INTO presentation (bnf_code) VALUES (?)", [row.bnf_code]
)
if should_log_message(count):
logger.info(
"Writing data for %s (%s/%s)", row.bnf_code, count, num_presentations
)
yield (
serialize_compressed(row.items),
serialize_compressed(row.quantity),
serialize_compressed(row.actual_cost),
serialize_compressed(row.net_cost),
row.bnf_code,
)
logger.info("Finished writing data for %s presentations", count)
def should_log_message(n):
"""
To avoid cluttering log output we don't log the insertion of every single
presentation
"""
if n <= 10:
return True
if n == 100:
return True
return n % 200 == 0
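# For example, progress is logged for presentations 1-10, again at 100, and
# then for every 200th presentation (200, 400, 600, ...).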
|
ebmdatalab/openprescribing
|
openprescribing/matrixstore/build/import_prescribing.py
|
Python
|
mit
| 7,057 | 0.00085 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from contextable import Contextable
class Terms(Contextable):
"""
An address in the invoice
"""
def __init__(self, days, string):
self.days = days
self.string = string
def context(self):
return {
'terms': {
'string': self.string,
'days': self.days
}
}
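# Illustrative usage with hypothetical values:
# Terms(days=30, string="Net 30").context()
# == {'terms': {'string': 'Net 30', 'days': 30}}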
|
lambdal/envois
|
envois/terms.py
|
Python
|
mit
| 396 | 0.005051 |
from django.db import models
from django.db.models import Q
class FriendshipManager(models.Manager):
"""
Provides an interface to friends
"""
def friends_for_user(self, user):
"""
Returns friends for specific user
"""
friends = []
qs = self.filter(Q(from_user=user) | Q(to_user=user)).select_related(depth=1)
for friendship in qs:
if friendship.from_user == user:
friends.append(friendship.to_user)
else:
friends.append(friendship.from_user)
return friends
def are_friends(self, user1, user2):
"""
Returns boolean value of whether user1 and user2 are currently friends.
"""
return self.filter(
Q(from_user=user1, to_user=user2) |
Q(from_user=user2, to_user=user1)
).count() > 0
def remove(self, user1, user2):
"""
Removes specific user from another specific users friends list.
"""
friendships = self.filter(from_user=user1, to_user=user2)
if not friendships:
friendships = self.filter(from_user=user2, to_user=user1)
if friendships:
friendships.delete()
class FriendshipInvitationManager(models.Manager):
"""
Provides an interface to friendship invitations
"""
def is_invited(self, user1, user2):
"""
Returns boolean value of whether user1 has been invited to a friendship by user2
"""
return self.filter(
Q(from_user=user1, to_user=user2) |
Q(from_user=user2, to_user=user1)
).count() > 0
def remove(self, user1, user2):
"""
Removes friendship request from user1 to user2.
"""
invitations = self.filter(from_user=user1, to_user=user2)
if not invitations:
invitations = self.filter(from_user=user2, to_user=user1)
if invitations:
invitations.delete()
class BlockingManager(models.Manager):
"""
Provides an interface to blocked users for all users.
"""
def blocked_for_user(self, user):
"""
        Returns the users that the specified user has blocked.
"""
blocked = []
qs = self.filter(from_user=user).select_related(depth=1)
for blocking in qs:
blocked.append(blocking.to_user)
return blocked
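# Illustrative usage, assuming these managers are attached to the Friendship,
# FriendshipInvitation and Blocking models as their default managers:
# Friendship.objects.are_friends(alice, bob) -> True or False
# Friendship.objects.remove(alice, bob)  # deletes the friendship in either direction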
|
chrisfranklin/dinnertime
|
friends/managers.py
|
Python
|
gpl-3.0
| 2,411 | 0.001659 |
# Copyright 2012 Dan Smith <dsmith@danplanet.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import struct
import logging
from chirp import chirp_common, errors, util, directory, memmap
from chirp import bitwise
from chirp.settings import RadioSetting, RadioSettingGroup, \
RadioSettingValueInteger, RadioSettingValueList, \
RadioSettingValueBoolean, RadioSettingValueString, \
RadioSettings
LOG = logging.getLogger(__name__)
def uvf1_identify(radio):
"""Do identify handshake with TYT TH-UVF1"""
radio.pipe.write("PROG333")
ack = radio.pipe.read(1)
if ack != "\x06":
raise errors.RadioError("Radio did not respond")
radio.pipe.write("\x02")
ident = radio.pipe.read(16)
LOG.info("Ident:\n%s" % util.hexprint(ident))
radio.pipe.write("\x06")
ack = radio.pipe.read(1)
if ack != "\x06":
raise errors.RadioError("Radio did not ack identification")
return ident
def uvf1_download(radio):
"""Download from TYT TH-UVF1"""
data = uvf1_identify(radio)
for i in range(0, 0x1000, 0x10):
msg = struct.pack(">BHB", ord("R"), i, 0x10)
radio.pipe.write(msg)
block = radio.pipe.read(0x10 + 4)
if len(block) != (0x10 + 4):
raise errors.RadioError("Radio sent a short block")
radio.pipe.write("\x06")
ack = radio.pipe.read(1)
if ack != "\x06":
raise errors.RadioError("Radio NAKed block")
data += block[4:]
status = chirp_common.Status()
status.cur = i
status.max = 0x1000
status.msg = "Cloning from radio"
radio.status_fn(status)
radio.pipe.write("\x45")
return memmap.MemoryMap(data)
def uvf1_upload(radio):
"""Upload to TYT TH-UVF1"""
data = uvf1_identify(radio)
radio.pipe.timeout = 1
if data != radio._mmap[:16]:
raise errors.RadioError("Unable to talk to this model")
for i in range(0, 0x1000, 0x10):
addr = i + 0x10
msg = struct.pack(">BHB", ord("W"), i, 0x10)
msg += radio._mmap[addr:addr+0x10]
radio.pipe.write(msg)
ack = radio.pipe.read(1)
if ack != "\x06":
LOG.debug(repr(ack))
raise errors.RadioError("Radio did not ack block %i" % i)
status = chirp_common.Status()
status.cur = i
status.max = 0x1000
status.msg = "Cloning to radio"
radio.status_fn(status)
# End of clone?
radio.pipe.write("\x45")
THUV1F_MEM_FORMAT = """
struct mem {
bbcd rx_freq[4];
bbcd tx_freq[4];
lbcd rx_tone[2];
lbcd tx_tone[2];
u8 unknown1:1,
pttid:2,
unknown2:2,
ishighpower:1,
unknown3:2;
u8 unknown4:4,
isnarrow:1,
vox:1,
bcl:2;
u8 unknown5:1,
scan:1,
unknown6:3,
scramble_code:3;
u8 unknown7;
};
struct name {
char name[7];
};
#seekto 0x0020;
struct mem memory[128];
#seekto 0x0840;
struct {
u8 scans:2,
autolk:1,
unknown1:5;
u8 light:2,
unknown6:2,
disnm:1,
voice:1,
beep:1,
rxsave:1;
u8 led:2,
unknown5:3,
ani:1,
roger:1,
dw:1;
u8 opnmsg:2,
unknown4:1,
dwait:1,
unknown9:4;
u8 squelch;
u8 unknown2:4,
tot:4;
u8 unknown3:4,
vox_level:4;
u8 pad[10];
char ponmsg[6];
} settings;
#seekto 0x08D0;
struct name names[128];
"""
LED_LIST = ["Off", "On", "Auto"]
LIGHT_LIST = ["Purple", "Orange", "Blue"]
VOX_LIST = ["1", "2", "3", "4", "5", "6", "7", "8"]
TOT_LIST = ["Off", "30s", "60s", "90s", "120s", "150s", "180s", "210s",
"240s", "270s"]
SCANS_LIST = ["Time", "Carry", "Seek"]
OPNMSG_LIST = ["Off", "DC", "Message"]
POWER_LEVELS = [chirp_common.PowerLevel("High", watts=5),
chirp_common.PowerLevel("Low", watts=1),
]
PTTID_LIST = ["Off", "BOT", "EOT", "Both"]
BCL_LIST = ["Off", "CSQ", "QT/DQT"]
CODES_LIST = [x for x in range(1, 9)]
@directory.register
class TYTTHUVF1Radio(chirp_common.CloneModeRadio):
"""TYT TH-UVF1"""
VENDOR = "TYT"
MODEL = "TH-UVF1"
def get_features(self):
rf = chirp_common.RadioFeatures()
rf.memory_bounds = (1, 128)
rf.has_bank = False
rf.has_ctone = True
rf.has_tuning_step = False
rf.has_cross = True
rf.has_rx_dtcs = True
rf.has_settings = True
rf.can_odd_split = True
rf.valid_duplexes = ["", "-", "+", "split", "off"]
rf.valid_tmodes = ["", "Tone", "TSQL", "DTCS", "Cross"]
rf.valid_characters = chirp_common.CHARSET_UPPER_NUMERIC + "-"
rf.valid_bands = [(136000000, 174000000),
(420000000, 470000000)]
rf.valid_skips = ["", "S"]
rf.valid_power_levels = POWER_LEVELS
rf.valid_modes = ["FM", "NFM"]
rf.valid_name_length = 7
rf.valid_cross_modes = ["Tone->Tone", "DTCS->DTCS",
"Tone->DTCS", "DTCS->Tone",
"->Tone", "->DTCS", "DTCS->"]
return rf
def sync_in(self):
try:
self._mmap = uvf1_download(self)
except errors.RadioError:
raise
except Exception, e:
raise errors.RadioError("Failed to communicate with radio: %s" % e)
self.process_mmap()
def sync_out(self):
try:
uvf1_upload(self)
except errors.RadioError:
raise
except Exception, e:
raise errors.RadioError("Failed to communicate with radio: %s" % e)
@classmethod
def match_model(cls, filedata, filename):
# TYT TH-UVF1 original
if filedata.startswith("\x13\x60\x17\x40\x40\x00\x48\x00" +
"\x35\x00\x39\x00\x47\x00\x52\x00"):
return True
# TYT TH-UVF1 V2
elif filedata.startswith("\x14\x40\x14\x80\x43\x00\x45\x00" +
"\x13\x60\x17\x40\x40\x00\x47\x00"):
return True
else:
return False
def process_mmap(self):
self._memobj = bitwise.parse(THUV1F_MEM_FORMAT, self._mmap)
def _decode_tone(self, toneval):
pol = "N"
rawval = (toneval[1].get_bits(0xFF) << 8) | toneval[0].get_bits(0xFF)
if toneval[0].get_bits(0xFF) == 0xFF:
mode = ""
val = 0
elif toneval[1].get_bits(0xC0) == 0xC0:
mode = "DTCS"
val = int("%x" % (rawval & 0x3FFF))
pol = "R"
elif toneval[1].get_bits(0x80):
mode = "DTCS"
val = int("%x" % (rawval & 0x3FFF))
else:
mode = "Tone"
val = int(toneval) / 10.0
return mode, val, pol
def _encode_tone(self, _toneval, mode, val, pol):
toneval = 0
if mode == "Tone":
toneval = int("%i" % (val * 10), 16)
elif mode == "DTCS":
toneval = int("%i" % val, 16)
toneval |= 0x8000
if pol == "R":
toneval |= 0x4000
else:
toneval = 0xFFFF
_toneval[0].set_raw(toneval & 0xFF)
_toneval[1].set_raw((toneval >> 8) & 0xFF)
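        # Encoding sketch with assumed example values: an 88.5 Hz CTCSS tone is
        # stored as the BCD-style word 0x0885 (low byte 0x85 written first, high
        # byte 0x08 second); DTCS code 023 becomes 0x0023 with bit 0x8000 set,
        # plus 0x4000 when the polarity is reversed, and 0xFFFF means "no tone".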
def get_raw_memory(self, number):
return repr(self._memobj.memory[number - 1])
def _is_txinh(self, _mem):
raw_tx = ""
for i in range(0, 4):
raw_tx += _mem.tx_freq[i].get_raw()
return raw_tx == "\xFF\xFF\xFF\xFF"
def get_memory(self, number):
_mem = self._memobj.memory[number - 1]
mem = chirp_common.Memory()
mem.number = number
if _mem.get_raw().startswith("\xFF\xFF\xFF\xFF"):
mem.empty = True
return mem
mem.freq = int(_mem.rx_freq) * 10
txfreq = int(_mem.tx_freq) * 10
if self._is_txinh(_mem):
mem.duplex = "off"
mem.offset = 0
elif txfreq == mem.freq:
mem.duplex = ""
elif abs(txfreq - mem.freq) > 70000000:
mem.duplex = "split"
mem.offset = txfreq
elif txfreq < mem.freq:
mem.duplex = "-"
mem.offset = mem.freq - txfreq
elif txfreq > mem.freq:
mem.duplex = "+"
mem.offset = txfreq - mem.freq
txmode, txval, txpol = self._decode_tone(_mem.tx_tone)
rxmode, rxval, rxpol = self._decode_tone(_mem.rx_tone)
chirp_common.split_tone_decode(
mem, (txmode, txval, txpol), (rxmode, rxval, rxpol))
mem.name = str(self._memobj.names[number - 1].name)
mem.name = mem.name.replace("\xFF", " ").rstrip()
mem.skip = not _mem.scan and "S" or ""
mem.mode = _mem.isnarrow and "NFM" or "FM"
mem.power = POWER_LEVELS[1 - _mem.ishighpower]
mem.extra = RadioSettingGroup("extra", "Extra Settings")
rs = RadioSetting("pttid", "PTT ID",
RadioSettingValueList(PTTID_LIST,
PTTID_LIST[_mem.pttid]))
mem.extra.append(rs)
rs = RadioSetting("vox", "VOX",
RadioSettingValueBoolean(_mem.vox))
mem.extra.append(rs)
rs = RadioSetting("bcl", "Busy Channel Lockout",
RadioSettingValueList(BCL_LIST,
BCL_LIST[_mem.bcl]))
mem.extra.append(rs)
rs = RadioSetting("scramble_code", "Scramble Code",
RadioSettingValueList(
CODES_LIST, CODES_LIST[_mem.scramble_code]))
mem.extra.append(rs)
return mem
def set_memory(self, mem):
_mem = self._memobj.memory[mem.number - 1]
if mem.empty:
_mem.set_raw("\xFF" * 16)
return
if _mem.get_raw() == ("\xFF" * 16):
LOG.debug("Initializing empty memory")
_mem.set_raw("\x00" * 16)
_mem.rx_freq = mem.freq / 10
if mem.duplex == "off":
for i in range(0, 4):
_mem.tx_freq[i].set_raw("\xFF")
elif mem.duplex == "split":
_mem.tx_freq = mem.offset / 10
elif mem.duplex == "-":
_mem.tx_freq = (mem.freq - mem.offset) / 10
elif mem.duplex == "+":
_mem.tx_freq = (mem.freq + mem.offset) / 10
else:
_mem.tx_freq = mem.freq / 10
(txmode, txval, txpol), (rxmode, rxval, rxpol) = \
chirp_common.split_tone_encode(mem)
self._encode_tone(_mem.tx_tone, txmode, txval, txpol)
self._encode_tone(_mem.rx_tone, rxmode, rxval, rxpol)
self._memobj.names[mem.number - 1].name = mem.name.ljust(7, "\xFF")
_mem.scan = mem.skip == ""
_mem.isnarrow = mem.mode == "NFM"
_mem.ishighpower = mem.power == POWER_LEVELS[0]
for element in mem.extra:
setattr(_mem, element.get_name(), element.value)
def get_settings(self):
_settings = self._memobj.settings
group = RadioSettingGroup("basic", "Basic")
top = RadioSettings(group)
group.append(
RadioSetting("led", "LED Mode",
RadioSettingValueList(LED_LIST,
LED_LIST[_settings.led])))
group.append(
RadioSetting("light", "Light Color",
RadioSettingValueList(LIGHT_LIST,
LIGHT_LIST[_settings.light])))
group.append(
RadioSetting("squelch", "Squelch Level",
RadioSettingValueInteger(0, 9, _settings.squelch)))
group.append(
RadioSetting("vox_level", "VOX Level",
RadioSettingValueList(VOX_LIST,
VOX_LIST[_settings.vox_level])))
group.append(
RadioSetting("beep", "Beep",
RadioSettingValueBoolean(_settings.beep)))
group.append(
RadioSetting("ani", "ANI",
RadioSettingValueBoolean(_settings.ani)))
group.append(
RadioSetting("dwait", "D.WAIT",
RadioSettingValueBoolean(_settings.dwait)))
group.append(
RadioSetting("tot", "Timeout Timer",
RadioSettingValueList(TOT_LIST,
TOT_LIST[_settings.tot])))
group.append(
RadioSetting("roger", "Roger Beep",
RadioSettingValueBoolean(_settings.roger)))
group.append(
RadioSetting("dw", "Dual Watch",
RadioSettingValueBoolean(_settings.dw)))
group.append(
RadioSetting("rxsave", "RX Save",
RadioSettingValueBoolean(_settings.rxsave)))
group.append(
RadioSetting("scans", "Scans",
RadioSettingValueList(SCANS_LIST,
SCANS_LIST[_settings.scans])))
group.append(
RadioSetting("autolk", "Auto Lock",
RadioSettingValueBoolean(_settings.autolk)))
group.append(
RadioSetting("voice", "Voice",
RadioSettingValueBoolean(_settings.voice)))
group.append(
RadioSetting("opnmsg", "Opening Message",
RadioSettingValueList(OPNMSG_LIST,
OPNMSG_LIST[_settings.opnmsg])))
group.append(
RadioSetting("disnm", "Display Name",
RadioSettingValueBoolean(_settings.disnm)))
def _filter(name):
LOG.debug(repr(str(name)))
return str(name).rstrip("\xFF").rstrip()
group.append(
RadioSetting("ponmsg", "Power-On Message",
RadioSettingValueString(0, 6,
_filter(_settings.ponmsg))))
return top
def set_settings(self, settings):
_settings = self._memobj.settings
for element in settings:
if not isinstance(element, RadioSetting):
self.set_settings(element)
continue
setattr(_settings, element.get_name(), element.value)
|
mach327/chirp_fork
|
chirp/drivers/thuv1f.py
|
Python
|
gpl-3.0
| 14,900 | 0 |
from django.test import TestCase
from django.core.exceptions import FieldError
from models import User, Poll, Choice
class ReverseLookupTests(TestCase):
def setUp(self):
john = User.objects.create(name="John Doe")
jim = User.objects.create(name="Jim Bo")
first_poll = Poll.objects.create(
question="What's the first question?",
creator=john
)
second_poll = Poll.objects.create(
question="What's the second question?",
creator=jim
)
new_choice = Choice.objects.create(
poll=first_poll,
related_poll=second_poll,
name="This is the answer."
)
def test_reverse_by_field(self):
u1 = User.objects.get(
poll__question__exact="What's the first question?"
)
self.assertEqual(u1.name, "John Doe")
u2 = User.objects.get(
poll__question__exact="What's the second question?"
)
self.assertEqual(u2.name, "Jim Bo")
def test_reverse_by_related_name(self):
p1 = Poll.objects.get(poll_choice__name__exact="This is the answer.")
self.assertEqual(p1.question, "What's the first question?")
p2 = Poll.objects.get(
related_choice__name__exact="This is the answer.")
self.assertEqual(p2.question, "What's the second question?")
def test_reverse_field_name_disallowed(self):
"""
If a related_name is given you can't use the field name instead
"""
self.assertRaises(FieldError, Poll.objects.get,
choice__name__exact="This is the answer")
|
mzdaniel/oh-mainline
|
vendor/packages/Django/tests/modeltests/reverse_lookup/tests.py
|
Python
|
agpl-3.0
| 1,645 | 0.001216 |
#!/usr/bin/env python
# update-dependencies-bad.py - Fails on bad.swift -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# Fails if the input file is named "bad.swift" or "crash.swift"; otherwise
# dispatches to update-dependencies.py. "crash.swift" gives an exit code
# other than 1.
#
# ----------------------------------------------------------------------------
from __future__ import print_function
import os
import shutil
import sys
assert sys.argv[1] == '-frontend'
primaryFile = sys.argv[sys.argv.index('-primary-file') + 1]
if (os.path.basename(primaryFile) == 'bad.swift' or
os.path.basename(primaryFile) == 'crash.swift'):
print("Handled", os.path.basename(primaryFile))
# Replace the dependencies file with the input file.
try:
depsFile = sys.argv[sys.argv.index(
'-emit-reference-dependencies-path') + 1]
shutil.copyfile(primaryFile, depsFile)
except ValueError:
pass
if os.path.basename(primaryFile) == 'bad.swift':
sys.exit(1)
else:
sys.exit(129)
execDir = os.path.dirname(os.path.abspath(__file__))
execfile(os.path.join(execDir, "update-dependencies.py"))
|
gottesmm/swift
|
test/Driver/Dependencies/Inputs/update-dependencies-bad.py
|
Python
|
apache-2.0
| 1,560 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myhpom', '0012_auto_20180718_1140'),
]
operations = [
migrations.AlterModelOptions(
name='staterequirement',
options={'ordering': ['-state', 'id']},
),
migrations.AlterUniqueTogether(
name='staterequirement',
unique_together=set([('state', 'text')]),
),
migrations.RemoveField(
model_name='staterequirement',
name='position',
),
]
|
ResearchSoftwareInstitute/MyHPOM
|
myhpom/migrations/0013_auto_20180720_0942.py
|
Python
|
bsd-3-clause
| 643 | 0 |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import json
import random
import sys
from multiprocessing import Process
# from autobahn.asyncio.websocket import WebSocketClientProtocol, \
# WebSocketClientFactory
from autobahn.twisted.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
# import trollius
class MyClientProtocol(WebSocketClientProtocol):
def __init__(self, *args, **kwargs):
self.handle = ''
self.pair_handle = ''
self.open_positions = ''
self.my_move = False
super(MyClientProtocol, self).__init__(*args, **kwargs)
def onConnect(self, response):
print("Server connected: {0}".format(response.peer))
# @trollius.coroutine
def onOpen(self):
print("WebSocket connection open.")
# start sending messages every second ..
def onMessage(self, payload, isBinary):
data = json.loads(payload)
print data
if data['action'] == 'connect':
self.handle = data['handle']
data = {}
data['action'] = 'ready'
data['handle'] = self.handle
self.sendMessage(json.dumps(data))
# yield trollius.sleep(1)
elif data['action'] == 'paired':
self.pair_handle = data['pair']
elif data['action'] == 'game-start':
if data['next_handle'] == self.handle:
self.my_move = True
else:
self.my_move = False
self.open_positions = data['valid-moves']
elif data['action'] == 'valid-moves':
if data['next_handle'] == self.handle:
self.open_positions = data['valid-moves']
self.my_move = True
elif data['action'] == 'player-move':
pass
elif data['action'] == 'game-end':
print 'My Handle: ', self.handle, 'Pair Handle: ', self.pair_handle, 'Result: ', data['result'], ' : ', data['win_handle']
# Game Over
self.my_move = False
data = {}
data['action'] = 'ready'
data['handle'] = self.handle
self.sendMessage(json.dumps(data))
# yield trollius.sleep(1)
if self.my_move:
# select a piece to move
self.my_move = False
data = {}
data['action'] = 'player-move'
data['handle'] = self.handle
data['move'] = random.choice(self.open_positions.split(';'))
self.sendMessage(json.dumps(data))
# yield trollius.sleep(1)
def onClose(self, wasClean, code, reason):
print("WebSocket connection closed: {0}".format(reason))
def main_async(worker_numb):
print sys.argv
if len(sys.argv) > 1 and sys.argv[1] == 'local':
ws_host = '127.0.0.1'
ws_port = 8001
else:
ws_host = "websocket-ha-test.ovlizj.0001.usw1.cache.amazonaws.com"
ws_port = 80
# + ':' + str(ws_port) +
ws_host_url = 'ws://' + ws_host + ':' + str(ws_port) + '/tic-tac-toe/'
factory = WebSocketClientFactory(ws_host_url, debug=False)
factory.protocol = MyClientProtocol
loop = trollius.get_event_loop()
coro = loop.create_connection(factory, ws_host, ws_port)
loop.run_until_complete(coro)
loop.run_forever()
loop.close()
def main(worker_numb):
from twisted.python import log
from twisted.internet import reactor
log.startLogging(sys.stdout)
print sys.argv
if len(sys.argv) > 1 and sys.argv[1] == 'local':
ws_host = '127.0.0.1'
ws_port = 8001
else:
ws_host = "websocket-ha-test.ovlizj.0001.usw1.cache.amazonaws.com"
ws_port = 80
# + ':' + str(ws_port) +
ws_host_url = 'ws://' + ws_host + ':' + str(ws_port) + '/tic-tac-toe/'
factory = WebSocketClientFactory(ws_host_url, debug=False)
factory.protocol = MyClientProtocol
reactor.connectTCP(ws_host, ws_port, factory)
reactor.run()
if __name__ == '__main__':
# Setup a list of processes that we want to run
processes = [Process(target=main, args=(x,)) for x in range(4)]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
|
sampathweb/game-server
|
pybot_game_autobahn.py
|
Python
|
mit
| 5,481 | 0.000182 |
# Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
# This script generates the packer.c file from the gl_header.parsed file.
import sys, string, re
import apiutil
def WriteData( offset, arg_type, arg_name, is_swapped ):
"""Return a string to write a variable to the packing buffer."""
retval = 9
if apiutil.IsPointer(arg_type):
retval = "\tWRITE_NETWORK_POINTER( %d, (void *) %s );" % (offset, arg_name )
else:
if is_swapped:
if arg_type == "GLfloat" or arg_type == "GLclampf":
retval = "\tWRITE_DATA( %d, GLuint, SWAPFLOAT(%s) );" % (offset, arg_name)
elif arg_type == "GLdouble" or arg_type == "GLclampd":
retval = "\tWRITE_SWAPPED_DOUBLE( %d, %s );" % (offset, arg_name)
elif apiutil.sizeof(arg_type) == 1:
retval = "\tWRITE_DATA( %d, %s, %s );" % (offset, arg_type, arg_name)
elif apiutil.sizeof(arg_type) == 2:
retval = "\tWRITE_DATA( %d, %s, SWAP16(%s) );" % (offset, arg_type, arg_name)
elif apiutil.sizeof(arg_type) == 4:
retval = "\tWRITE_DATA( %d, %s, SWAP32(%s) );" % (offset, arg_type, arg_name)
else:
if arg_type == "GLdouble" or arg_type == "GLclampd":
retval = "\tWRITE_DOUBLE( %d, %s );" % (offset, arg_name)
else:
retval = "\tWRITE_DATA( %d, %s, %s );" % (offset, arg_type, arg_name)
if retval == 9:
print >>sys.stderr, "no retval for %s %s" % (arg_name, arg_type)
assert 0
return retval
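# Illustrative output with hypothetical arguments: WriteData(0, 'GLenum', 'cap', 0)
# returns '\tWRITE_DATA( 0, GLenum, cap );', while the byte-swapped variant for a
# GLfloat argument emits WRITE_DATA with SWAPFLOAT() applied to the value.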
def UpdateCurrentPointer( func_name ):
m = re.search( r"^(Color|Normal)([1234])(ub|b|us|s|ui|i|f|d)$", func_name )
if m :
k = m.group(1)
name = '%s%s' % (k[:1].lower(),k[1:])
type = m.group(3) + m.group(2)
print "\tpc->current.c.%s.%s = data_ptr;" % (name,type)
return
m = re.search( r"^(SecondaryColor)(3)(ub|b|us|s|ui|i|f|d)EXT$", func_name )
if m :
k = m.group(1)
name = 'secondaryColor'
type = m.group(3) + m.group(2)
print "\tpc->current.c.%s.%s = data_ptr;" % (name,type)
return
m = re.search( r"^(TexCoord)([1234])(ub|b|us|s|ui|i|f|d)$", func_name )
if m :
k = m.group(1)
name = 'texCoord'
type = m.group(3) + m.group(2)
print "\tpc->current.c.%s.%s[0] = data_ptr;" % (name,type)
return
m = re.search( r"^(MultiTexCoord)([1234])(ub|b|us|s|ui|i|f|d)ARB$", func_name )
if m :
k = m.group(1)
name = 'texCoord'
type = m.group(3) + m.group(2)
print "\tpc->current.c.%s.%s[texture-GL_TEXTURE0_ARB] = data_ptr + 4;" % (name,type)
return
m = re.match( r"^(Index)(ub|b|us|s|ui|i|f|d)$", func_name )
if m :
k = m.group(1)
name = 'index'
type = m.group(2) + "1"
print "\tpc->current.c.%s.%s = data_ptr;" % (name,type)
return
m = re.match( r"^(EdgeFlag)$", func_name )
if m :
k = m.group(1)
name = 'edgeFlag'
type = "l1"
print "\tpc->current.c.%s.%s = data_ptr;" % (name,type)
return
m = re.match( r"^(FogCoord)(f|d)EXT$", func_name )
if m :
k = m.group(1)
name = 'fogCoord'
type = m.group(2) + "1"
print "\tpc->current.c.%s.%s = data_ptr;" % (name,type)
return
m = re.search( r"^(VertexAttrib)([1234])N?(ub|b|s|f|d)(NV|ARB)$", func_name )
if m :
k = m.group(1)
name = 'vertexAttrib'
type = m.group(3) + m.group(2)
# Add 12 to skip the packet length, opcode and index fields
print "\tpc->current.c.%s.%s[index] = data_ptr + 12;" % (name,type)
if m.group(4) == "ARB" or m.group(4) == "NV":
print "\tpc->current.attribsUsedMask |= (1 << index);"
return
def PrintFunc( func_name, params, is_swapped, can_have_pointers ):
"""Emit a packer function."""
if is_swapped:
print 'void PACK_APIENTRY crPack%sSWAP( %s )' % (func_name, apiutil.MakeDeclarationString(params))
else:
print 'void PACK_APIENTRY crPack%s( %s )' % (func_name, apiutil.MakeDeclarationString(params))
print '{'
print '\tCR_GET_PACKER_CONTEXT(pc);'
# Save original function name
orig_func_name = func_name
# Convert to a non-vector version of the function if possible
func_name = apiutil.NonVectorFunction( func_name )
if not func_name:
func_name = orig_func_name
# Check if there are any pointer parameters.
# That's usually a problem so we'll emit an error function.
nonVecParams = apiutil.Parameters(func_name)
bail_out = 0
for (name, type, vecSize) in nonVecParams:
if apiutil.IsPointer(type) and vecSize == 0 and not can_have_pointers:
bail_out = 1
if bail_out:
for (name, type, vecSize) in nonVecParams:
print '\t(void)%s;' % (name)
print '\tcrError ( "%s needs to be special cased %d %d!");' % (func_name, vecSize, can_have_pointers)
print '\t(void) pc;'
print '}'
# XXX we should really abort here
return
if "extpack" in apiutil.ChromiumProps(func_name):
is_extended = 1
else:
is_extended = 0
print "\tunsigned char *data_ptr;"
print '\t(void) pc;'
#if func_name == "Enable" or func_name == "Disable":
# print "\tCRASSERT(!pc->buffer.geometry_only); /* sanity check */"
for index in range(0,len(params)):
(name, type, vecSize) = params[index]
if vecSize>0 and func_name!=orig_func_name:
print " if (!%s) {" % name
# Know the reason for this one, so avoid the spam.
if orig_func_name != "SecondaryColor3fvEXT":
print " crDebug(\"App passed NULL as %s for %s\");" % (name, orig_func_name)
print " return;"
print " }"
packet_length = apiutil.PacketLength(nonVecParams)
if packet_length == 0 and not is_extended:
print "\tCR_GET_BUFFERED_POINTER_NO_ARGS( pc );"
elif func_name[:9] == "Translate" or func_name[:5] == "Color":
# XXX WTF is the purpose of this?
if is_extended:
packet_length += 8
print "\tCR_GET_BUFFERED_POINTER_NO_BEGINEND_FLUSH( pc, %d, GL_TRUE );" % packet_length
else:
if is_extended:
packet_length += 8
print "\tCR_GET_BUFFERED_POINTER( pc, %d );" % packet_length
UpdateCurrentPointer( func_name )
if is_extended:
counter = 8
print WriteData( 0, 'GLint', packet_length, is_swapped )
print WriteData( 4, 'GLenum', apiutil.ExtendedOpcodeName( func_name ), is_swapped )
else:
counter = 0
# Now emit the WRITE_() macros for all parameters
for index in range(0,len(params)):
(name, type, vecSize) = params[index]
# if we're converting a vector-valued function to a non-vector func:
if vecSize > 0 and func_name != orig_func_name:
ptrType = apiutil.PointerType(type)
for i in range(0, vecSize):
print WriteData( counter + i * apiutil.sizeof(ptrType),
ptrType, "%s[%d]" % (name, i), is_swapped )
# XXX increment counter here?
else:
print WriteData( counter, type, name, is_swapped )
if apiutil.IsPointer(type):
counter += apiutil.PointerSize()
else:
counter += apiutil.sizeof(type)
# finish up
if is_extended:
print "\tWRITE_OPCODE( pc, CR_EXTEND_OPCODE );"
else:
print "\tWRITE_OPCODE( pc, %s );" % apiutil.OpcodeName( func_name )
print '\tCR_UNLOCK_PACKER_CONTEXT(pc);'
print '}\n'
apiutil.CopyrightC()
print """
/* DO NOT EDIT - THIS FILE GENERATED BY THE packer.py SCRIPT */
/* For each of the OpenGL functions we have a packer function which
* packs the function's opcode and arguments into a buffer.
*/
#include "packer.h"
#include "cr_opcodes.h"
"""
keys = apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt")
for func_name in keys:
if apiutil.FindSpecial( "packer", func_name ):
continue
if not apiutil.HasPackOpcode(func_name):
continue
pointers_ok = 0
return_type = apiutil.ReturnType(func_name)
params = apiutil.Parameters(func_name)
if return_type != 'void':
# Yet another gross hack for glGetString
if string.find( return_type, '*' ) == -1:
return_type = return_type + " *"
params.append(("return_value", return_type, 0))
if "get" in apiutil.Properties(func_name):
pointers_ok = 1
params.append(("writeback", "int *", 0))
if func_name == 'Writeback':
pointers_ok = 1
PrintFunc( func_name, params, 0, pointers_ok )
PrintFunc( func_name, params, 1, pointers_ok )
|
dezelin/vbox-haiku
|
src/VBox/GuestHost/OpenGL/packer/packer.py
|
Python
|
gpl-2.0
| 8,980 | 0.010245 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import mrp_byproduct
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
diogocs1/comps
|
web/addons/mrp_byproduct/__init__.py
|
Python
|
apache-2.0
| 1,074 | 0.001862 |
"""Python interface to GenoLogics LIMS via its REST API.
LIMS interface.
Per Kraulis, Science for Life Laboratory, Stockholm, Sweden.
Copyright (C) 2012 Per Kraulis
"""
__all__ = ['Lab', 'Researcher', 'Project', 'Sample',
'Containertype', 'Container', 'Processtype', 'Process',
'Artifact', 'Lims']
import os
import re
from io import BytesIO
import requests
# python 2.7, 3+ compatibility
from sys import version_info
if version_info[0] == 2:
from urlparse import urljoin
from urllib import urlencode
else:
from urllib.parse import urljoin
from urllib.parse import urlencode
from .entities import *
# Python 2.6 support work-arounds
# - Exception ElementTree.ParseError does not exist
# - ElementTree.ElementTree.write does not take arg. xml_declaration
if version_info[:2] < (2,7):
from xml.parsers import expat
ElementTree.ParseError = expat.ExpatError
p26_write = ElementTree.ElementTree.write
def write_with_xml_declaration(self, file, encoding, xml_declaration):
assert xml_declaration is True # Support our use case only
file.write("<?xml version='1.0' encoding='utf-8'?>\n")
p26_write(self, file, encoding=encoding)
ElementTree.ElementTree.write = write_with_xml_declaration
TIMEOUT = 16
class Lims(object):
"LIMS interface through which all entity instances are retrieved."
VERSION = 'v2'
def __init__(self, baseuri, username, password, version=VERSION):
"""baseuri: Base URI for the GenoLogics server, excluding
the 'api' or version parts!
For example: https://genologics.scilifelab.se:8443/
username: The account name of the user to login as.
password: The password for the user account to login as.
version: The optional LIMS API version, by default 'v2'
"""
self.baseuri = baseuri.rstrip('/') + '/'
self.username = username
self.password = password
self.VERSION = version
self.cache = dict()
# For optimization purposes, enables requests to persist connections
self.request_session = requests.Session()
# The connection pool has a default size of 10
self.adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
self.request_session.mount('http://', self.adapter)
def get_uri(self, *segments, **query):
"Return the full URI given the path segments and optional query."
segments = ['api', self.VERSION] + list(segments)
url = urljoin(self.baseuri, '/'.join(segments))
if query:
url += '?' + urlencode(query)
return url
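    # Illustrative result against a hypothetical server: with baseuri
    # 'https://lims.example.com:8443/', get_uri('artifacts', 'ART2A-40') returns
    # 'https://lims.example.com:8443/api/v2/artifacts/ART2A-40'.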
def get(self, uri, params=dict()):
"GET data from the URI. Return the response XML as an ElementTree."
try:
r = self.request_session.get(uri, params=params,
auth=(self.username, self.password),
headers=dict(accept='application/xml'),
timeout=TIMEOUT)
except requests.exceptions.Timeout as e:
raise type(e)("{0}, Error trying to reach {1}".format(str(e), uri))
else:
return self.parse_response(r)
def get_file_contents(self, id=None, uri=None):
"""Returns the contents of the file of <ID> or <uri>"""
if id:
segments = ['api', self.VERSION, 'files', id, 'download']
elif uri:
segments = [uri, 'download']
else:
raise ValueError("id or uri required")
url = urljoin(self.baseuri, '/'.join(segments))
r = self.request_session.get(url, auth=(self.username, self.password), timeout=TIMEOUT, stream=True)
self.validate_response(r)
if 'text' in r.headers['Content-Type']:
return r.text
else:
return r.raw
def upload_new_file(self, entity, file_to_upload):
"""Upload a file and attach it to the provided entity."""
file_to_upload = os.path.abspath(file_to_upload)
if not os.path.isfile(file_to_upload):
raise IOError("{} not found".format(file_to_upload))
# Request the storage space on glsstorage
# Create the xml to describe the file
root = ElementTree.Element(nsmap('file:file'))
s = ElementTree.SubElement(root, 'attached-to')
s.text = entity.uri
s = ElementTree.SubElement(root, 'original-location')
s.text = file_to_upload
root = self.post(
uri=self.get_uri('glsstorage'),
data=self.tostring(ElementTree.ElementTree(root))
)
# Create the file object
root = self.post(
uri=self.get_uri('files'),
data=self.tostring(ElementTree.ElementTree(root))
)
file = File(self, uri=root.attrib['uri'])
# Actually upload the file
uri = self.get_uri('files', file.id, 'upload')
r = requests.post(uri, files={'file': (file_to_upload, open(file_to_upload, 'rb'))},
auth=(self.username, self.password))
self.validate_response(r)
return file
def put(self, uri, data, params=dict()):
"""PUT the serialized XML to the given URI.
Return the response XML as an ElementTree.
"""
r = requests.put(uri, data=data, params=params,
auth=(self.username, self.password),
headers={'content-type': 'application/xml',
'accept': 'application/xml'})
return self.parse_response(r)
def post(self, uri, data, params=dict()):
"""POST the serialized XML to the given URI.
Return the response XML as an ElementTree.
"""
r = requests.post(uri, data=data, params=params,
auth=(self.username, self.password),
headers={'content-type': 'application/xml',
'accept': 'application/xml'})
return self.parse_response(r, accept_status_codes=[200, 201, 202])
def delete(self, uri, params=dict()):
"""sends a DELETE to the given URI.
Return the response XML as an ElementTree.
"""
r = requests.delete(uri, params=params,
auth=(self.username, self.password),
headers={'content-type': 'application/xml',
'accept': 'application/xml'})
return self.validate_response(r, accept_status_codes=[204])
def check_version(self):
"""Raise ValueError if the version for this interface
does not match any of the versions given for the API.
"""
uri = urljoin(self.baseuri, 'api')
r = requests.get(uri, auth=(self.username, self.password))
root = self.parse_response(r)
tag = nsmap('ver:versions')
assert tag == root.tag
for node in root.findall('version'):
if node.attrib['major'] == self.VERSION: return
raise ValueError('version mismatch')
def validate_response(self, response, accept_status_codes=[200]):
"""Parse the XML returned in the response.
Raise an HTTP error if the response status is not one of the
specified accepted status codes.
"""
if response.status_code not in accept_status_codes:
try:
root = ElementTree.fromstring(response.content)
node = root.find('message')
if node is None:
response.raise_for_status()
message = "%s" % (response.status_code)
else:
message = "%s: %s" % (response.status_code, node.text)
node = root.find('suggested-actions')
if node is not None:
message += ' ' + node.text
except ElementTree.ParseError: # some error messages might not follow the xml standard
message = response.content
raise requests.exceptions.HTTPError(message)
return True
def parse_response(self, response, accept_status_codes=[200]):
"""Parse the XML returned in the response.
Raise an HTTP error if the response status is not 200.
"""
self.validate_response(response, accept_status_codes)
root = ElementTree.fromstring(response.content)
return root
def get_udfs(self, name=None, attach_to_name=None, attach_to_category=None, start_index=None, add_info=False):
"""Get a list of udfs, filtered by keyword arguments.
name: name of udf
attach_to_name: item in the system, to wich the udf is attached, such as
Sample, Project, Container, or the name of a process.
attach_to_category: If 'attach_to_name' is the name of a process, such as 'CaliperGX QC (DNA)',
then you need to set attach_to_category='ProcessType'. Must not be provided otherwise.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(name=name,
attach_to_name=attach_to_name,
attach_to_category=attach_to_category,
start_index=start_index)
return self._get_instances(Udfconfig, add_info=add_info, params=params)
def get_reagent_types(self, name=None, start_index=None):
"""Get a list of reqgent types, filtered by keyword arguments.
name: reagent type name, or list of names.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(name=name,
start_index=start_index)
return self._get_instances(ReagentType, params=params)
def get_labs(self, name=None, last_modified=None,
udf=dict(), udtname=None, udt=dict(), start_index=None, add_info=False):
"""Get a list of labs, filtered by keyword arguments.
name: Lab name, or list of names.
last_modified: Since the given ISO format datetime.
udf: dictionary of UDFs with 'UDFNAME[OPERATOR]' as keys.
udtname: UDT name, or list of names.
udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(name=name,
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
return self._get_instances(Lab, add_info=add_info, params=params)
def get_researchers(self, firstname=None, lastname=None, username=None,
last_modified=None,
udf=dict(), udtname=None, udt=dict(), start_index=None,
add_info=False):
"""Get a list of researchers, filtered by keyword arguments.
firstname: Researcher first name, or list of names.
lastname: Researcher last name, or list of names.
username: Researcher account name, or list of names.
last_modified: Since the given ISO format datetime.
udf: dictionary of UDFs with 'UDFNAME[OPERATOR]' as keys.
udtname: UDT name, or list of names.
udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(firstname=firstname,
lastname=lastname,
username=username,
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
return self._get_instances(Researcher, add_info=add_info, params=params)
def get_projects(self, name=None, open_date=None, last_modified=None,
udf=dict(), udtname=None, udt=dict(), start_index=None,
add_info=False):
"""Get a list of projects, filtered by keyword arguments.
name: Project name, or list of names.
open_date: Since the given ISO format date.
last_modified: Since the given ISO format datetime.
udf: dictionary of UDFs with 'UDFNAME[OPERATOR]' as keys.
udtname: UDT name, or list of names.
udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(name=name,
open_date=open_date,
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
return self._get_instances(Project, add_info=add_info, params=params)
def get_sample_number(self, name=None, projectname=None, projectlimsid=None,
udf=dict(), udtname=None, udt=dict(), start_index=None):
"""Gets the number of samples matching the query without fetching every
        sample, so it should be faster than len(get_samples())."""
params = self._get_params(name=name,
projectname=projectname,
projectlimsid=projectlimsid,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
root = self.get(self.get_uri(Sample._URI), params=params)
total = 0
while params.get('start-index') is None: # Loop over all pages.
total += len(root.findall("sample"))
node = root.find('next-page')
if node is None: break
root = self.get(node.attrib['uri'], params=params)
return total
def get_samples(self, name=None, projectname=None, projectlimsid=None,
udf=dict(), udtname=None, udt=dict(), start_index=None):
"""Get a list of samples, filtered by keyword arguments.
name: Sample name, or list of names.
projectlimsid: Samples for the project of the given LIMS id.
        projectname: Samples for the project of the given name.
udf: dictionary of UDFs with 'UDFNAME[OPERATOR]' as keys.
udtname: UDT name, or list of names.
udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(name=name,
projectname=projectname,
projectlimsid=projectlimsid,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
return self._get_instances(Sample, params=params)
def get_artifacts(self, name=None, type=None, process_type=None,
artifact_flag_name=None, working_flag=None, qc_flag=None,
sample_name=None, samplelimsid=None, artifactgroup=None, containername=None,
containerlimsid=None, reagent_label=None,
udf=dict(), udtname=None, udt=dict(), start_index=None,
resolve=False):
"""Get a list of artifacts, filtered by keyword arguments.
name: Artifact name, or list of names.
type: Artifact type, or list of types.
process_type: Produced by the process type, or list of types.
artifact_flag_name: Tagged with the genealogy flag, or list of flags.
working_flag: Having the given working flag; boolean.
qc_flag: Having the given QC flag: UNKNOWN, PASSED, FAILED.
sample_name: Related to the given sample name.
samplelimsid: Related to the given sample id.
artifactgroup: Belonging to the artifact group (experiment in client).
containername: Residing in given container, by name, or list.
containerlimsid: Residing in given container, by LIMS id, or list.
reagent_label: having attached reagent labels.
udf: dictionary of UDFs with 'UDFNAME[OPERATOR]' as keys.
udtname: UDT name, or list of names.
udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(name=name,
type=type,
process_type=process_type,
artifact_flag_name=artifact_flag_name,
working_flag=working_flag,
qc_flag=qc_flag,
sample_name=sample_name,
samplelimsid=samplelimsid,
artifactgroup=artifactgroup,
containername=containername,
containerlimsid=containerlimsid,
reagent_label=reagent_label,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
if resolve:
return self.get_batch(self._get_instances(Artifact, params=params))
else:
return self._get_instances(Artifact, params=params)
def get_container_types(self, name=None, start_index=None):
"""Get a list of container types, filtered by keyword arguments.
name: Container Type name.
start-index: Page to retrieve, all if None."""
params = self._get_params(name=name, start_index=start_index)
return self._get_instances(Containertype, params=params)
def get_containers(self, name=None, type=None,
state=None, last_modified=None,
udf=dict(), udtname=None, udt=dict(), start_index=None,
add_info=False):
"""Get a list of containers, filtered by keyword arguments.
name: Containers name, or list of names.
type: Container type, or list of types.
state: Container state: Empty, Populated, Discarded, Reagent-Only.
last_modified: Since the given ISO format datetime.
udf: dictionary of UDFs with 'UDFNAME[OPERATOR]' as keys.
udtname: UDT name, or list of names.
udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(name=name,
type=type,
state=state,
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
return self._get_instances(Container, add_info=add_info, params=params)
def get_processes(self, last_modified=None, type=None,
inputartifactlimsid=None,
techfirstname=None, techlastname=None, projectname=None,
udf=dict(), udtname=None, udt=dict(), start_index=None):
"""Get a list of processes, filtered by keyword arguments.
last_modified: Since the given ISO format datetime.
type: Process type, or list of types.
inputartifactlimsid: Input artifact LIMS id, or list of.
udf: dictionary of UDFs with 'UDFNAME[OPERATOR]' as keys.
udtname: UDT name, or list of names.
udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
techfirstname: First name of researcher, or list of.
techlastname: Last name of researcher, or list of.
projectname: Name of project, or list of.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(last_modified=last_modified,
type=type,
inputartifactlimsid=inputartifactlimsid,
techfirstname=techfirstname,
techlastname=techlastname,
projectname=projectname,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
return self._get_instances(Process, params=params)
def get_automations(self, name=None, add_info=False):
"""Get the list of configured automations on the system """
params = self._get_params(name=name)
return self._get_instances(Automation, add_info=add_info, params=params)
def get_workflows(self, name=None, add_info=False):
"""Get the list of existing workflows on the system """
params = self._get_params(name=name)
return self._get_instances(Workflow, add_info=add_info, params=params)
def get_process_types(self, displayname=None, add_info=False):
"""Get a list of process types with the specified name."""
params = self._get_params(displayname=displayname)
return self._get_instances(Processtype, add_info=add_info, params=params)
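    # Note: the get_reagent_types defined below shadows the earlier definition
    # of the same name higher up in this class.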
def get_reagent_types(self, name=None, add_info=False):
params = self._get_params(name=name)
return self._get_instances(ReagentType, add_info=add_info, params=params)
def get_protocols(self, name=None, add_info=False):
"""Get the list of existing protocols on the system """
params = self._get_params(name=name)
return self._get_instances(Protocol, add_info=add_info, params=params)
def get_reagent_kits(self, name=None, start_index=None, add_info=False):
"""Get a list of reagent kits, filtered by keyword arguments.
name: reagent kit name, or list of names.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(name=name,
start_index=start_index)
return self._get_instances(ReagentKit, add_info=add_info, params=params)
def get_reagent_lots(self, name=None, kitname=None, number=None,
start_index=None):
"""Get a list of reagent lots, filtered by keyword arguments.
name: reagent kit name, or list of names.
kitname: name of the kit this lots belong to
        number: lot number, or list of lot numbers.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(name=name, kitname=kitname, number=number,
start_index=start_index)
return self._get_instances(ReagentLot, params=params)
def get_instruments(self, name=None):
"""Returns a list of Instruments, can be filtered by name"""
params = self._get_params(name=name)
return self._get_instances(Instrument, params=params)
def _get_params(self, **kwargs):
"Convert keyword arguments to a kwargs dictionary."
result = dict()
for key, value in kwargs.items():
if value is None: continue
result[key.replace('_', '-')] = value
return result
def _get_params_udf(self, udf=dict(), udtname=None, udt=dict()):
"Convert UDF-ish arguments to a params dictionary."
result = dict()
for key, value in udf.items():
result["udf.%s" % key] = value
if udtname is not None:
result['udt.name'] = udtname
for key, value in udt.items():
result["udt.%s" % key] = value
return result
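    # Illustrative conversion with hypothetical UDF names:
    # _get_params_udf(udf={'Library Size': 350}, udtname='QC')
    # returns {'udf.Library Size': 350, 'udt.name': 'QC'}.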
def _get_instances(self, klass, add_info=None, params=dict()):
results = []
additionnal_info_dicts = []
tag = klass._TAG
if tag is None:
tag = klass.__name__.lower()
root = self.get(self.get_uri(klass._URI), params=params)
while params.get('start-index') is None: # Loop over all pages.
for node in root.findall(tag):
results.append(klass(self, uri=node.attrib['uri']))
info_dict = {}
for attrib_key in node.attrib:
info_dict[attrib_key] = node.attrib['uri']
for subnode in node:
info_dict[subnode.tag] = subnode.text
additionnal_info_dicts.append(info_dict)
node = root.find('next-page')
if node is None: break
root = self.get(node.attrib['uri'], params=params)
if add_info:
return results, additionnal_info_dicts
else:
return results
def get_batch(self, instances, force=False):
"""Get the content of a set of instances using the efficient batch call.
Returns the list of requested instances in arbitrary order, with duplicates removed
(duplicates=entities occurring more than once in the instances argument).
For Artifacts it is possible to have multiple instances with the same LIMSID but
different URI, differing by a query parameter ?state=XX. If state is not
given for an input URI, a state is added in the data returned by the batch
API. In this case, the URI of the Entity object is not updated by this function
(this is similar to how Entity.get() works). This may help with caching.
The batch request API call collapses all requested Artifacts with different
state into a single result with state equal to the state of the Artifact
occurring at the last position in the list.
"""
if not instances:
return []
root = ElementTree.Element(nsmap('ri:links'))
needs_request = False
instance_map = {}
for instance in instances:
instance_map[instance.id] = instance
if force or instance.root is None:
ElementTree.SubElement(root, 'link', dict(uri=instance.uri,
rel=instance.__class__._URI))
needs_request = True
if needs_request:
uri = self.get_uri(instance.__class__._URI, 'batch/retrieve')
data = self.tostring(ElementTree.ElementTree(root))
root = self.post(uri, data)
for node in root.getchildren():
instance = instance_map[node.attrib['limsid']]
instance.root = node
return list(instance_map.values())
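    # Illustrative usage (assumed entity list): fetch many stub artifacts with a
    # single round trip instead of one GET per artifact, e.g.
    #   artifacts = lims.get_artifacts(sample_name='Sample-1')
    #   artifacts = lims.get_batch(artifacts)  # populates .root for each entity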
def put_batch(self, instances):
"""Update multiple instances using a single batch request."""
if not instances:
return
root = None # XML root element for batch request
for instance in instances:
if root is None:
klass = instance.__class__
# Tag is art:details, con:details, etc.
example_root = instance.root
ns_uri = re.match("{(.*)}.*", example_root.tag).group(1)
root = ElementTree.Element("{%s}details" % (ns_uri))
root.append(instance.root)
uri = self.get_uri(klass._URI, 'batch/update')
data = self.tostring(ElementTree.ElementTree(root))
root = self.post(uri, data)
def route_artifacts(self, artifact_list, workflow_uri=None, stage_uri=None, unassign=False):
root = ElementTree.Element(nsmap('rt:routing'))
if unassign:
s = ElementTree.SubElement(root, 'unassign')
else:
s = ElementTree.SubElement(root, 'assign')
if workflow_uri:
s.set('workflow-uri', workflow_uri)
if stage_uri:
s.set('stage-uri', stage_uri)
for artifact in artifact_list:
a = ElementTree.SubElement(s, 'artifact')
a.set('uri', artifact.uri)
uri = self.get_uri('route', 'artifacts')
r = requests.post(uri, data=self.tostring(ElementTree.ElementTree(root)),
auth=(self.username, self.password),
headers={'content-type': 'application/xml',
'accept': 'application/xml'})
self.validate_response(r)
def tostring(self, etree):
"Return the ElementTree contents as a UTF-8 encoded XML string."
outfile = BytesIO()
self.write(outfile, etree)
return outfile.getvalue()
def write(self, outfile, etree):
"Write the ElementTree contents as UTF-8 encoded XML to the open file."
etree.write(outfile, encoding='utf-8', xml_declaration=True)
def create_container(self, container_type, name=None):
"""Create a new container of type container_type and returns it
Akin to Container.create(lims type=container_type, name=name)"""
el = ElementTree.Element(nsmap('con:container'))
if name is not None:
nm = ElementTree.SubElement(el, 'name')
nm.text = name
ty = ElementTree.SubElement(el, 'type', attrib={'uri':container_type.uri, 'name':container_type.name})
ret_el = self.post(uri=self.get_uri('containers'), data=ElementTree.tostring(el))
ret_con = Container(self, uri=ret_el.attrib['uri'])
ret_con.root = ret_el
return ret_con
|
SciLifeLab/genologics
|
genologics/lims.py
|
Python
|
mit
| 29,765 | 0.001478 |
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class FileStoreTo(SimpleHoster):
__name__ = "FileStoreTo"
__type__ = "hoster"
__version__ = "0.07"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?filestore\.to/\?d=(?P<ID>\w+)'
__config__ = [("activated" , "bool", "Activated" , True),
("use_premium", "bool", "Use premium account if available", True)]
__description__ = """FileStore.to hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com"),
("stickell", "l.stickell@yahoo.it")]
INFO_PATTERN = r'File: <span.*?>(?P<N>.+?)<.*>Size: (?P<S>[\d.,]+) (?P<U>[\w^_]+)'
OFFLINE_PATTERN = r'>Download-Datei wurde nicht gefunden<'
TEMP_OFFLINE_PATTERN = r'>Der Download ist nicht bereit !<'
def setup(self):
self.resume_download = True
self.multiDL = True
def handle_free(self, pyfile):
self.wait(10)
self.link = self.load("http://filestore.to/ajax/download.php",
get={'D': re.search(r'"D=(\w+)', self.html).group(1)})
getInfo = create_getInfo(FileStoreTo)
|
fzimmermann89/pyload
|
module/plugins/hoster/FileStoreTo.py
|
Python
|
gpl-3.0
| 1,290 | 0.013953 |
###############################################################################
# Name: fileutil.py #
# Purpose: File Management Utilities. #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2009 Cody Precord <staff@editra.org> #
# Licence: wxWindows Licence #
###############################################################################
"""
Editra Business Model Library: File Utilities
Utility functions for managing and working with files.
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: fileutil.py 70034 2011-12-17 20:11:27Z CJP $"
__revision__ = "$Revision: 70034 $"
__all__ = [ 'GetAbsPath', 'GetFileExtension', 'GetFileModTime', 'GetFileName',
'GetFileSize', 'GetPathName', 'GetPathFromURI', 'GetUniqueName',
'IsLink', 'MakeNewFile', 'MakeNewFolder', 'PathExists',
'ResolveRealPath', 'IsExecutable', 'Which', 'ComparePaths',
'AddFileExtension', 'GetDirectoryObject', 'File', 'Directory',
'GetFileManagerCmd', 'OpenWithFileManager']
#-----------------------------------------------------------------------------#
# Imports
import wx
import os
import platform
import urllib2
import stat
import subprocess
UNIX = WIN = False
if platform.system().lower() in ['windows', 'microsoft']:
WIN = True
try:
# Check for if win32 extensions are available
import win32com.client as win32client
except ImportError:
win32client = None
try:
# Check for win32api
import win32api
except ImportError:
win32api = None
else:
UNIX = True
#-----------------------------------------------------------------------------#
def uri2path(func):
"""Decorator method to convert path arguments that may be uri's to
real file system paths. Arg 0 must be a file path or uri.
"""
def WrapURI(*args, **kwargs):
args = list(args)
args[0] = GetPathFromURI(args[0])
return func(*args, **kwargs)
WrapURI.__name__ = func.__name__
WrapURI.__doc__ = func.__doc__
return WrapURI
#-----------------------------------------------------------------------------#
def AddFileExtension(path, ext):
"""Add a file extension to a path if it doesn't already exist
@param path: file path
@param ext: file extension
"""
assert isinstance(ext, basestring)
if not ext.startswith('.'):
ext = '.' + ext
if not path.endswith(ext):
path = path + ext
return path
def ComparePaths(path1, path2):
"""Determine whether the two given paths are equivalent
@param path1: unicode
@param path2: unicode
@return: bool
"""
path1 = GetAbsPath(path1)
path2 = GetAbsPath(path2)
if WIN:
path1 = path1.lower()
path2 = path2.lower()
return path1 == path2
def CopyFile(orig, dest):
"""Copy the given file to the destination
@param orig: file to copy (full path)
@param dest: where to copy to
"""
raise NotImplementedError
@uri2path
def GetAbsPath(path):
"""Get the absolute path of a file of a file.
@param path: string
@return: string
@note: on windows if win32api is available short notation paths will be
converted to the proper long name.
"""
rpath = os.path.abspath(path)
# Resolve short path notation on Windows when possible
if WIN and win32api is not None and u"~" in rpath:
try:
rpath = win32api.GetLongPathNameW(rpath)
except Exception:
# Ignore errors from win32api calls
pass
return rpath
def GetFileExtension(file_str):
"""Gets last atom at end of string as extension if
no extension whole string is returned
@param file_str: path or file name to get extension from
"""
return file_str.split('.')[-1]
def GetFileModTime(file_name):
"""Returns the time that the given file was last modified on
@param file_name: path of file to get mtime of
"""
try:
mod_time = os.path.getmtime(file_name)
except (OSError, EnvironmentError):
mod_time = 0
return mod_time
def GetFileName(path):
"""Gets last atom on end of string as filename
@param path: full path to get filename from
"""
return os.path.split(path)[-1]
@uri2path
def GetFileSize(path):
"""Get the size of the file at a given path
@param path: Path to file
@return: long
"""
try:
return os.stat(path)[stat.ST_SIZE]
except:
return 0
def GetPathFromURI(path):
"""Get a local path from a file:// uri
@return: normalized path
"""
if path.startswith(u"file:"):
path = path.replace(u"file:", u"")
path = path.lstrip(u"/")
if platform.system().lower() in ('windows', 'microsoft'):
path = path.replace(u"/", u"\\")
if len(path) >= 2 and path[1] != u':':
# A valid windows file uri should start with the drive
# letter. If not make the assumption that it should be
# the C: drive.
path = u"C:\\\\" + path
else:
path = u"/" + path
path = urllib2.unquote(path)
return path
@uri2path
def GetPathName(path):
"""Gets the path minus filename
@param path: full path to get base of
"""
return os.path.split(path)[0]
@uri2path
def IsLink(path):
"""Is the file a link
@return: bool
"""
if WIN:
return path.endswith(".lnk") or os.path.islink(path)
else:
return os.path.islink(path)
@uri2path
def PathExists(path):
"""Does the path exist.
@param path: file path or uri
@return: bool
"""
return os.path.exists(path)
@uri2path
def IsExecutable(path):
"""Is the file at the given path an executable file
@param path: file path
@return: bool
"""
return os.path.isfile(path) and os.access(path, os.X_OK)
@uri2path
def ResolveRealPath(link):
"""Return the real path of the link file
@param link: path of link file
@return: string
"""
assert IsLink(link), "ResolveRealPath expects a link file!"
realpath = link
if WIN and win32client is not None:
shell = win32client.Dispatch("WScript.Shell")
shortcut = shell.CreateShortCut(link)
realpath = shortcut.Targetpath
else:
realpath = os.path.realpath(link)
return realpath
def GetFileManagerCmd():
"""Get the file manager open command for the current os. Under linux
it will check for xdg-open, nautilus, konqueror, and Thunar, it will then
return which one it finds first or 'nautilus' it finds nothing.
@return: string
"""
if wx.Platform == '__WXMAC__':
return 'open'
elif wx.Platform == '__WXMSW__':
return 'explorer'
else:
# Check for common linux filemanagers returning first one found
# Gnome/ubuntu KDE/kubuntu xubuntu
for cmd in ('xdg-open', 'nautilus', 'konqueror', 'Thunar'):
result = os.system("which %s > /dev/null" % cmd)
if result == 0:
return cmd
else:
return 'nautilus'
def OpenWithFileManager(path):
"""Open the given path with the systems file manager
@param path: file/directory path
"""
cmd = GetFileManagerCmd()
subprocess.call([cmd, path])
def Which(program):
"""Find the path of the given executable
@param program: executable name (i.e 'python')
@return: executable path or None
"""
# Check local directory first
if IsExecutable(program):
return program
else:
# Start looking on the $PATH
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if IsExecutable(exe_file):
return exe_file
return None
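# Illustrative usage: Which('python') returns the full path of the first
# matching executable found on $PATH (e.g. '/usr/bin/python'), or None if
# nothing executable with that name is found.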
def GetDirectoryObject(path, recurse=True, includedot=False):
"""Gets a L{Directory} object representing the filesystem of the
given path.
@param path: base path to list
@keyword recurse: recurse into subdirectories
@keyword includedot: include '.' files
@return: L{Directory} object instance
"""
assert os.path.isdir(path)
pjoin = os.path.join
def _BuildDir(thedir):
for fname in os.listdir(thedir.Path):
if not includedot and fname.startswith('.'):
continue
fpath = pjoin(thedir.Path, fname)
if os.path.isdir(fpath):
newobj = Directory(fpath)
if recurse:
_BuildDir(newobj)
else:
newobj = File(fpath)
thedir.Files.append(newobj)
dobj = Directory(path)
_BuildDir(dobj)
return dobj
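# Illustrative traversal of the returned structure (the path is hypothetical):
#     root = GetDirectoryObject(u"/tmp/project", recurse=True)
#     names = [f.Name for f in root.Files]  # File and Directory instances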
#-----------------------------------------------------------------------------#
class File(object):
"""Basic file data structure"""
__slots__ = ('path', 'modtime')
def __init__(self, path):
super(File, self).__init__()
self.path = path
self.modtime = GetFileModTime(self.path)
Path = property(lambda self: self.path)
Name = property(lambda self: os.path.basename(self.Path))
ModTime = property(lambda self: self.modtime,
lambda self, mod: setattr(self, 'modtime', mod))
def __str__(self):
return self.Path
def __eq__(self, other):
assert isinstance(other, File)
return ComparePaths(self.Path, other.Path)
class Directory(File):
"""Basic directory data structure.
Is a container class that provides a simple in memory representation of
a file system.
"""
__slots__ = ('files',)
def __init__(self, path):
super(Directory, self).__init__(path)
self.files = list()
Files = property(lambda self: self.files)
#-----------------------------------------------------------------------------#
def GetUniqueName(path, name):
"""Make a file name that will be unique in case a file of the
same name already exists at that path.
@param path: Root path to folder of files destination
@param name: desired file name base
@return: string
"""
tmpname = os.path.join(path, name)
if os.path.exists(tmpname):
if '.' not in name:
ext = ''
fbase = name
else:
ext = '.' + name.split('.')[-1]
fbase = name[:-1 * len(ext)]
inc = len([x for x in os.listdir(path) if x.startswith(fbase)])
tmpname = os.path.join(path, "%s-%d%s" % (fbase, inc, ext))
while os.path.exists(tmpname):
inc = inc + 1
tmpname = os.path.join(path, "%s-%d%s" % (fbase, inc, ext))
return tmpname
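# Illustrative behaviour (the file name is hypothetical): if "report.txt"
# already exists under path, GetUniqueName(path, "report.txt") returns a
# name of the form "report-<N>.txt" that does not exist yet.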
#-----------------------------------------------------------------------------#
def MakeNewFile(path, name):
"""Make a new file at the given path with the given name.
If the file already exists, the given name will be changed to
a unique name in the form of name + -NUMBER + .extension
@param path: path to directory to create file in
@param name: desired name of file
@return: Tuple of (success?, Path of new file OR Error message)
"""
if not os.path.isdir(path):
path = os.path.dirname(path)
fname = GetUniqueName(path, name)
try:
open(fname, 'w').close()
except (IOError, OSError), msg:
return (False, str(msg))
return (True, fname)
def MakeNewFolder(path, name):
"""Make a new folder at the given path with the given name.
If the folder already exists, the given name will be changed to
a unique name in the form of name + -NUMBER.
@param path: path to create folder on
@param name: desired name for folder
@return: Tuple of (success?, new dirname OR Error message)
"""
if not os.path.isdir(path):
path = os.path.dirname(path)
folder = GetUniqueName(path, name)
try:
os.mkdir(folder)
except (OSError, IOError), msg:
return (False, str(msg))
return (True, folder)
|
iut-ibk/P8-WSC-GUI
|
3dparty/Editra/src/ebmlib/fileutil.py
|
Python
|
gpl-2.0
| 12,224 | 0.003027 |
# -*- coding: utf-8 -*-
"""
Created on 11 Apr 2014
@author: Kimon Tsitsikas
Copyright © 2013-2016 Kimon Tsitsikas and Éric Piel, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License version 2 as published by the Free
Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
"""
from __future__ import division
import collections
from concurrent.futures._base import CancelledError, CANCELLED, FINISHED, \
RUNNING
import logging
import numpy
from odemis import model
from odemis.acq._futures import executeTask
from odemis.model import InstantaneousFuture
from odemis.util.img import Subtract
from scipy import ndimage
import threading
import time
import cv2
MAX_STEPS_NUMBER = 100 # Max steps to perform autofocus
MAX_BS_NUMBER = 1 # Maximum number of applying binary search with a smaller max_step
def _convertRBGToGrayscale(image):
"""
    Quick and dirty conversion of RGB data to grayscale
image (numpy array of shape YX3)
return (numpy array of shape YX)
"""
r, g, b = image[:, :, 0], image[:, :, 1], image[:, :, 2]
gray = numpy.empty(image.shape[0:2], dtype="uint16")
gray[...] = r
gray += g
gray += b
return gray
def MeasureSEMFocus(image):
"""
Given an image, focus measure is calculated using the standard deviation of
the raw data.
image (model.DataArray): SEM image
returns (float): The focus level of the SEM image (higher is better)
"""
# Handle RGB image
if len(image.shape) == 3:
# TODO find faster/better solution
image = _convertRBGToGrayscale(image)
return ndimage.standard_deviation(image)
def MeasureOpticalFocus(image):
"""
Given an image, focus measure is calculated using the variance of Laplacian
of the raw data.
image (model.DataArray): Optical image
returns (float): The focus level of the optical image (higher is better)
"""
# Handle RGB image
if len(image.shape) == 3:
# TODO find faster/better solution
image = _convertRBGToGrayscale(image)
return cv2.Laplacian(image, cv2.CV_64F).var()
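# Illustrative sanity check of the two measures (not executed here): blurring
# an image should lower its focus level, e.g.
#     blurred = ndimage.gaussian_filter(image, sigma=5)
#     MeasureOpticalFocus(image) > MeasureOpticalFocus(blurred)  # expected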
def AcquireNoBackground(ccd, dfbkg=None):
"""
Performs optical acquisition with background subtraction if possible.
Particularly used in order to eliminate the e-beam source background in the
Delphi.
ccd (model.DigitalCamera): detector from which to acquire an image
dfbkg (model.DataFlow or None): dataflow of se- or bs- detector to
start/stop the source. If None, a standard acquisition is performed (without
background subtraction)
returns (model.DataArray):
Image (with subtracted background if requested)
"""
if dfbkg is not None:
bg_image = ccd.data.get(asap=False)
dfbkg.subscribe(_discard_data)
image = ccd.data.get(asap=False)
dfbkg.unsubscribe(_discard_data)
ret_data = Subtract(image, bg_image)
return ret_data
else:
image = ccd.data.get(asap=False)
return image
def _discard_data(df, data):
"""
Does nothing, just discard the SEM data received (for spot mode)
"""
pass
def _DoAutoFocus(future, detector, emt, focus, dfbkg, good_focus):
"""
Iteratively acquires an optical image, measures its focus level and adjusts
the optical focus with respect to the focus level.
future (model.ProgressiveFuture): Progressive future provided by the wrapper
detector: model.DigitalCamera or model.Detector
emt (None or model.Emitter): In case of a SED this is the scanner used
focus (model.Actuator): The optical focus
dfbkg (model.DataFlow): dataflow of se- or bs- detector
good_focus (float): if provided, an already known good focus position to be
taken into consideration while autofocusing
returns:
(float): Focus position (m)
(float): Focus level
raises:
CancelledError if cancelled
IOError if procedure failed
"""
# TODO: dfbkg is mis-named, as it's the dataflow to use to _activate_ the
# emitter. To acquire the background, it's specifically not used.
# It does a dichotomy search on the focus level. In practice, it means it
    # will start going in the direction that increases the focus with big steps
# until the focus decreases again. Then it'll bounce back and forth with
# smaller and smaller steps.
# The tricky parts are:
# * it's hard to estimate the focus level (on a random image)
# * two acquisitions at the same focus position can have (slightly) different
# focus levels (due to noise and sample degradation)
# * if the focus actuator is not precise (eg, open loop), it's hard to
# even go back to the same focus position when wanted
logging.debug("Starting Autofocus...")
try:
# use the .depthOfField on detector or emitter as maximum stepsize
avail_depths = (detector, emt)
if model.hasVA(emt, "dwellTime"):
# Hack in case of using the e-beam with a DigitalCamera detector.
# All the digital cameras have a depthOfField, which is updated based
# on the optical lens properties... but the depthOfField in this
# case depends on the e-beam lens.
avail_depths = (emt, detector)
for c in avail_depths:
if model.hasVA(c, "depthOfField"):
dof = c.depthOfField.value
break
else:
logging.debug("No depth of field info found")
dof = 1e-6 # m, not too bad value
logging.debug("Depth of field is %f", dof)
min_step = dof / 2
rng = focus.axes["z"].range
max_step = (rng[1] - rng[0]) / 2
if max_step <= 0:
raise ValueError("Unexpected focus range %s" % (rng,))
max_reached = False # True once we've passed the maximum level (ie, start bouncing)
# It's used to cache the focus level, to avoid reacquiring at the same
# position. We do it only for the 'rough' max search because for the fine
# search, the actuator and acquisition delta are likely to play a role
focus_levels = {} # focus pos (float) -> focus level (float)
best_pos = focus.position.value['z']
best_fm = 0
last_pos = None
# Pick measurement method based on the heuristics that SEM detectors
# are typically just a point (ie, shape == data depth).
# TODO: is this working as expected? Alternatively, we could check
# MD_DET_TYPE.
if len(detector.shape) > 1:
logging.debug("Using Optical method to estimate focus")
Measure = MeasureOpticalFocus
else:
logging.debug("Using SEM method to estimate focus")
Measure = MeasureSEMFocus
step_factor = 2 ** 7
if good_focus is not None:
current_pos = focus.position.value['z']
image = AcquireNoBackground(detector, dfbkg)
fm_current = Measure(image)
logging.debug("Focus level at %f is %f", current_pos, fm_current)
focus_levels[current_pos] = fm_current
focus.moveAbsSync({"z": good_focus})
image = AcquireNoBackground(detector, dfbkg)
fm_good = Measure(image)
logging.debug("Focus level at %f is %f", good_focus, fm_good)
focus_levels[good_focus] = fm_good
last_pos = good_focus
if fm_good < fm_current:
# Move back to current position if good_pos is not that good
# after all
focus.moveAbsSync({"z": current_pos})
# it also means we are pretty close
step_factor = 2 ** 4
if step_factor * min_step > max_step:
# Large steps would be too big. We can reduce step_factor and/or
# min_step. => let's take our time, and maybe find finer focus
min_step = max_step / step_factor
logging.debug("Reducing min step to %g", min_step)
# TODO: to go a bit faster, we could use synchronised acquisition on
# the detector (if it supports it)
# TODO: we could estimate the quality of the autofocus by looking at the
        # standard deviation of the focus levels (and the standard deviation
# of the focus levels measured for the same focus position)
logging.debug("Step factor used for autofocus: %g", step_factor)
step_cntr = 1
while step_factor >= 1 and step_cntr <= MAX_STEPS_NUMBER:
# TODO: update the estimated time (based on how long it takes to
# move + acquire, and how many steps are approximately left)
# Start at the current focus position
center = focus.position.value['z']
# Don't redo the acquisition either if we've just done it, or if it
# was already done and we are still doing a rough search
if (not max_reached or last_pos == center) and center in focus_levels:
fm_center = focus_levels[center]
else:
image = AcquireNoBackground(detector, dfbkg)
fm_center = Measure(image)
logging.debug("Focus level (center) at %f is %f", center, fm_center)
focus_levels[center] = fm_center
# Move to right position
right = center + step_factor * min_step
right = max(rng[0], min(right, rng[1])) # clip
if not max_reached and right in focus_levels:
fm_right = focus_levels[right]
else:
focus.moveAbsSync({"z": right})
right = focus.position.value["z"]
image = AcquireNoBackground(detector, dfbkg)
fm_right = Measure(image)
logging.debug("Focus level (right) at %f is %f", right, fm_right)
focus_levels[right] = fm_right
# Move to left position
left = center - step_factor * min_step
left = max(rng[0], min(left, rng[1])) # clip
if not max_reached and left in focus_levels:
fm_left = focus_levels[left]
else:
focus.moveAbsSync({"z": left})
left = focus.position.value["z"]
image = AcquireNoBackground(detector, dfbkg)
fm_left = Measure(image)
logging.debug("Focus level (left) at %f is %f", left, fm_left)
focus_levels[left] = fm_left
last_pos = left
fm_range = (fm_left, fm_center, fm_right)
pos_range = (left, center, right)
best_fm = max(fm_range)
i_max = fm_range.index(best_fm)
best_pos = pos_range[i_max]
if future._autofocus_state == CANCELLED:
raise CancelledError()
# if best focus was found at the center
if i_max == 1:
step_factor /= 2
if not max_reached:
logging.debug("Now zooming in on improved focus")
max_reached = True
elif (rng[0] > best_pos - step_factor * min_step or
rng[1] < best_pos + step_factor * min_step):
step_factor /= 1.5
logging.debug("Reducing step factor to %g because the focus (%g) is near range limit %s",
step_factor, best_pos, rng)
if step_factor <= 8:
max_reached = True # Force re-checking data
focus.moveAbsSync({"z": best_pos})
step_cntr += 1
if step_cntr == MAX_STEPS_NUMBER:
logging.info("Auto focus gave up after %d steps @ %g m", step_cntr, best_pos)
else:
logging.info("Auto focus found best level %g @ %g m", best_fm, best_pos)
return best_pos, best_fm
except CancelledError:
# Go to the best position known so far
focus.moveAbsSync({"z": best_pos})
finally:
with future._autofocus_lock:
if future._autofocus_state == CANCELLED:
raise CancelledError()
future._autofocus_state = FINISHED
def _CancelAutoFocus(future):
"""
Canceller of _DoAutoFocus task.
"""
logging.debug("Cancelling autofocus...")
with future._autofocus_lock:
if future._autofocus_state == FINISHED:
return False
future._autofocus_state = CANCELLED
logging.debug("Autofocus cancellation requested.")
return True
# TODO: drop steps, which is unused, or use it
def estimateAutoFocusTime(exposure_time, steps=MAX_STEPS_NUMBER):
"""
    Estimates the autofocus procedure duration
"""
return steps * exposure_time
def AutoFocus(detector, emt, focus, dfbkg=None, good_focus=None):
"""
Wrapper for DoAutoFocus. It provides the ability to check the progress of autofocus
procedure or even cancel it.
detector (model.DigitalCamera or model.Detector): Detector on which to
improve the focus quality
emt (None or model.Emitter): In case of a SED this is the scanner used
focus (model.Actuator): The focus actuator
dfbkg (model.DataFlow or None): If provided, will be used to start/stop
the e-beam emission (it must be the dataflow of se- or bs-detector) in
order to do background subtraction. If None, no background subtraction is
performed.
good_focus (float): if provided, an already known good focus position to be
taken into consideration while autofocusing
returns (model.ProgressiveFuture): Progress of DoAutoFocus, whose result() will return:
Focus position (m)
Focus level
"""
# Create ProgressiveFuture and update its state to RUNNING
est_start = time.time() + 0.1
# Check if the emitter is a scanner (focusing = SEM)
if model.hasVA(emt, "dwellTime"):
et = emt.dwellTime.value * numpy.prod(emt.resolution.value)
elif model.hasVA(detector, "exposureTime"):
et = detector.exposureTime.value
else:
# Completely random... but we are in a case where probably that's the last
# thing the caller will care about.
et = 1
f = model.ProgressiveFuture(start=est_start,
end=est_start + estimateAutoFocusTime(et))
f._autofocus_state = RUNNING
f._autofocus_lock = threading.Lock()
f.task_canceller = _CancelAutoFocus
# Run in separate thread
autofocus_thread = threading.Thread(target=executeTask,
name="Autofocus",
args=(f, _DoAutoFocus, f, detector, emt,
focus, dfbkg, good_focus))
autofocus_thread.start()
return f
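# Typical call pattern (cf. its use in _DoAutoFocusSpectrometer below):
#     f = AutoFocus(detector, None, focuser)
#     focus_pos, focus_level = f.result()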
def AutoFocusSpectrometer(spectrograph, focuser, detectors, selector=None):
"""
Run autofocus for a spectrograph. It will actually run autofocus on each
gratings, and for each detectors. The input slit should already be in a
good position (typically, almost closed), and a light source should be
active.
Note: it's currently tailored to the Andor Shamrock SR-193i. It's recommended
to put the detector on the "direct" output as first detector.
spectrograph (Actuator): should have grating and wavelength.
focuser (Actuator): should have a z axis
detectors (Detector or list of Detectors): all the detectors available on
the spectrometer. The first detector will be used to autofocus all the
gratings, and each other detector will be focused with the original
grating.
selector (Actuator or None): must have a rx axis with each position corresponding
to one of the detectors. If there is only one detector, selector can be None.
return (ProgressiveFuture -> dict((grating, detector)->focus position)): a progressive future
which will eventually return a map of grating/detector -> focus position.
"""
if not isinstance(detectors, collections.Iterable):
detectors = [detectors]
if not detectors:
raise ValueError("At least one detector must be provided")
# Create ProgressiveFuture and update its state to RUNNING
est_start = time.time() + 0.1
detector = detectors[0]
if model.hasVA(detector, "exposureTime"):
et = detector.exposureTime.value
else:
# Completely random... but we are in a case where probably that's the last
# thing the caller will care about.
et = 1
# 1 time / grating + 1 time / extra detector
cnts = len(spectrograph.axes["grating"].choices) + (len(detectors) - 1)
f = model.ProgressiveFuture(start=est_start,
end=est_start + cnts * estimateAutoFocusTime(et))
f.task_canceller = _CancelAutoFocusSpectrometer
# Extra info for the canceller
f._autofocus_state = RUNNING
f._autofocus_lock = threading.Lock()
f._subfuture = InstantaneousFuture()
# Run in separate thread
autofocus_thread = threading.Thread(target=executeTask,
name="Spectrometer Autofocus",
args=(f, _DoAutoFocusSpectrometer, f,
spectrograph, focuser, detectors, selector))
autofocus_thread.start()
return f
def _moveSelectorToDetector(selector, detector):
"""
Move the selector to have the given detector receive light
selector (Actuator): a rx axis with a position
detector (Component): the component to receive light
return (position): the new position of the selector
raise LookupError: if no position on the selector affects the detector
"""
# TODO: handle every way of indicating affect position in acq.path? -> move to odemis.util
mv = {}
for an, ad in selector.axes.items():
if hasattr(ad, "choices") and isinstance(ad.choices, dict):
for pos, value in ad.choices.items():
if detector.name in value:
# set the position so it points to the target
mv[an] = pos
if mv:
selector.moveAbsSync(mv)
return mv
raise LookupError("Failed to find detector '%s' in positions of selector axes %s" %
(detector.name, selector.axes.keys()))
def _updateAFSProgress(future, last_dur, left):
"""
Update the progress of the future based on duration of the previous autofocus
future (ProgressiveFuture)
last_dur (0< float): duration of the latest autofocusing action
left (0<= int): number of autofocus actions still left
"""
# Estimate that all the other autofocusing will take the same amount of time
tleft = left * last_dur + 5 # 5 s to go back to original pos
future.set_progress(end=time.time() + tleft)
def _DoAutoFocusSpectrometer(future, spectrograph, focuser, detectors, selector):
"""
cf AutoFocusSpectrometer
return dict((grating, detector) -> focus pos)
"""
ret = {}
# Record the wavelength and grating position
pos_orig = {k: v for k, v in spectrograph.position.value.items()
if k in ("wavelength", "grating")}
gratings = spectrograph.axes["grating"].choices.keys()
if selector:
sel_orig = selector.position.value
# For progress update
cnts = len(gratings) + (len(detectors) - 1)
# Note: this procedure works well with the SR-193i. In particular, it
# records the focus position for each grating (in absolute) and each
# detector (as an offset). It needs to be double checked if used with
# other detectors.
if "Shamrock" not in spectrograph.hwVersion:
logging.warning("Spectrometer autofocusing has not been tested on"
"this type of spectrograph (%s)", spectrograph.hwVersion)
try:
# Autofocus each grating, using the first detector
detector = detectors[0]
if selector:
_moveSelectorToDetector(selector, detector)
if future._autofocus_state == CANCELLED:
raise CancelledError()
# start with the current grating, to save the move time
gratings.sort(key=lambda g: 0 if g == pos_orig["grating"] else 1)
for g in gratings:
logging.debug("Autofocusing on grating %s", g)
tstart = time.time()
try:
# 0th order is not absolutely necessary for focusing, but it
# typically gives the best results
spectrograph.moveAbsSync({"wavelength": 0, "grating": g})
except Exception:
logging.exception("Failed to move to 0th order for grating %s", g)
future._subfuture = AutoFocus(detector, None, focuser)
fp, flvl = future._subfuture.result()
ret[(g, detector)] = fp
cnts -= 1
_updateAFSProgress(future, time.time() - tstart, cnts)
if future._autofocus_state == CANCELLED:
raise CancelledError()
# Autofocus each additional detector
grating = pos_orig["grating"]
for d in detectors[1:]:
logging.debug("Autofocusing on detector %s", d)
tstart = time.time()
_moveSelectorToDetector(selector, d)
try:
# 0th order + original grating
# TODO: instead of using original grating, use mirror grating if
# available
spectrograph.moveAbsSync({"wavelength": 0, "grating": grating})
except Exception:
logging.exception("Failed to move to 0th order and grating %s", grating)
future._subfuture = AutoFocus(detector, None, focuser)
fp, flvl = future._subfuture.result()
ret[(grating, d)] = fp
cnts -= 1
_updateAFSProgress(future, time.time() - tstart, cnts)
if future._autofocus_state == CANCELLED:
raise CancelledError()
return ret
except CancelledError:
logging.debug("AutofocusSpectrometer cancelled")
finally:
spectrograph.moveAbsSync(pos_orig)
if selector:
selector.moveAbsSync(sel_orig)
with future._autofocus_lock:
if future._autofocus_state == CANCELLED:
raise CancelledError()
future._autofocus_state = FINISHED
def _CancelAutoFocusSpectrometer(future):
"""
    Canceller of _DoAutoFocusSpectrometer task.
"""
logging.debug("Cancelling autofocus...")
with future._autofocus_lock:
if future._autofocus_state == FINISHED:
return False
future._autofocus_state = CANCELLED
future._subfuture.cancel()
logging.debug("AutofocusSpectrometer cancellation requested.")
return True
|
ktsitsikas/odemis
|
src/odemis/acq/align/autofocus.py
|
Python
|
gpl-2.0
| 23,236 | 0.001679 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
from openerp.osv import fields, osv
class project_compute_phases(osv.osv_memory):
_name = 'project.compute.phases'
_description = 'Project Compute Phases'
_columns = {
'target_project': fields.selection([
('all', 'Compute All My Projects'),
('one', 'Compute a Single Project'),
], 'Action', required=True),
'project_id': fields.many2one('project.project', 'Project')
}
_defaults = {
'target_project': 'one'
}
def check_selection(self, cr, uid, ids, context=None):
return self.compute_date(cr, uid, ids, context=context)
def compute_date(self, cr, uid, ids, context=None):
"""
Compute the phases for scheduling.
"""
project_pool = self.pool.get('project.project')
data = self.read(cr, uid, ids, [], context=context)[0]
if not data['project_id'] and data['target_project'] == 'one':
raise osv.except_osv(_('Error!'), _('Please specify a project to schedule.'))
if data['target_project'] == 'one':
project_ids = [data['project_id'][0]]
else:
project_ids = project_pool.search(cr, uid, [('user_id','=',uid)], context=context)
project_pool.schedule_phases(cr, uid, project_ids, context=context)
return self._open_phases_list(cr, uid, data, context=context)
def _open_phases_list(self, cr, uid, data, context=None):
"""
Return the scheduled phases list.
"""
if context is None:
context = {}
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj._get_id(cr, uid, 'project_long_term', 'act_project_phase')
id = mod_obj.read(cr, uid, [result], ['res_id'])[0]['res_id']
result = act_obj.read(cr, uid, [id], context=context)[0]
result['target'] = 'current'
project_id = data.get('project_id') and data.get('project_id')[0] or False
result['context'] = {"search_default_project_id":project_id, "default_project_id":project_id, "search_default_current": 1}
return result
project_compute_phases()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jeffery9/mixprint_addons
|
project_long_term/wizard/project_compute_phases.py
|
Python
|
agpl-3.0
| 3,262 | 0.003372 |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 20) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
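# Illustrative usage (the error code and message shown are only examples):
#     assert_raises_rpc_error(-8, "Invalid parameter",
#                             node.getblock, "not-a-hash")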
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:" + e.error['message'])
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
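# For example, satoshi_round("0.123456789") == Decimal("0.12345678"):
# amounts are truncated (rounded down) to 8 decimal places.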
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
timeout += time.time()
while attempt < attempts and time.time() < timeout:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.5)
# Print the cause of the timeout
assert_greater_than(attempts, attempt)
assert_greater_than(timeout, time.time())
raise RuntimeError('Unreachable')
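# Illustrative usage (the node object is hypothetical): poll every 0.5s until
# the node reports at least one peer, failing after 30 seconds:
#     wait_until(lambda: len(node.getpeerinfo()) > 0, timeout=30)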
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node" + str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
rpc_u, rpc_p = rpc_auth_pair(n)
with open(os.path.join(datadir, "ioncoin.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("rpcuser=" + rpc_u + "\n")
f.write("rpcpassword=" + rpc_p + "\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("listenonion=0\n")
f.write("litemode=1\n")
f.write("enablezeromint=0\n")
f.write("staking=0\n")
f.write("spendzeroconfchange=1\n")
return datadir
def rpc_auth_pair(n):
    return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n)
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "ioncoin.conf")):
with open(os.path.join(datadir, "ioncoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, "regtest", ".cookie"))
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for addr in [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
try:
from_connection.disconnectnode(addr)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(lambda: [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
    sync_blocks needs to be called with an rpc_connections set that has at least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
# Use getblockcount() instead of waitforblockheight() to determine the
# initial max height because the two RPCs look at different internal global
# variables (chainActive vs latestBlock) and the former gets updated
# earlier.
time.sleep(5)
maxheight = max(x.getblockcount() for x in rpc_connections)
start_time = cur_time = time.time()
while cur_time <= start_time + timeout:
tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
if all(t["height"] == maxheight for t in tips):
if all(t["hash"] == tips[0]["hash"] for t in tips):
return
raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
"".join("\n {!r}".format(tip) for tip in tips)))
cur_time = time.time()
raise AssertionError("Block sync to height {} timed out:{}".format(
maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same best block
"""
while timeout > 0:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash == [best_hash[0]] * len(best_hash):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match + 1
if num_match == len(rpc_connections):
#if flush_scheduler:
#for r in rpc_connections:
# r.syncwithvalidationinterfacequeue()
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = float(satoshi_round(send_value / 2))
outputs[addr2] = float(satoshi_round(send_value / 2))
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{"txid": coinbase, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = float(satoshi_round(change))
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
    # and 14 of them are close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
|
cevap/ion
|
test/functional/test_framework/util.py
|
Python
|
mit
| 22,607 | 0.004336 |
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
import boto.jsonresponse
from boto.compat import json
from boto.regioninfo import RegionInfo
from boto.connection import AWSQueryConnection
class Layer1(AWSQueryConnection):
APIVersion = '2010-12-01'
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'elasticbeanstalk.us-east-1.amazonaws.com'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None,
proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
api_version=None, security_token=None, profile_name=None):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
super(Layer1, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token, profile_name=profile_name)
def _required_auth_capability(self):
return ['hmac-v4']
def _encode_bool(self, v):
v = bool(v)
return {True: "true", False: "false"}[v]
def _get_response(self, action, params, path='/', verb='GET'):
params['ContentType'] = 'JSON'
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
raise self.ResponseError(response.status, response.reason, body)
def check_dns_availability(self, cname_prefix):
"""Checks if the specified CNAME is available.
:type cname_prefix: string
:param cname_prefix: The prefix used when this CNAME is
reserved.
"""
params = {'CNAMEPrefix': cname_prefix}
return self._get_response('CheckDNSAvailability', params)
def create_application(self, application_name, description=None):
"""
Creates an application that has one configuration template
named default and no application versions.
:type application_name: string
:param application_name: The name of the application.
Constraint: This name must be unique within your account. If the
specified name already exists, the action returns an
InvalidParameterValue error.
:type description: string
:param description: Describes the application.
:raises: TooManyApplicationsException
"""
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('CreateApplication', params)
def create_application_version(self, application_name, version_label,
description=None, s3_bucket=None,
s3_key=None, auto_create_application=None):
"""Creates an application version for the specified application.
:type application_name: string
:param application_name: The name of the application. If no
application is found with this name, and AutoCreateApplication is
false, returns an InvalidParameterValue error.
:type version_label: string
:param version_label: A label identifying this version. Constraint:
Must be unique per application. If an application version already
exists with this label for the specified application, AWS Elastic
Beanstalk returns an InvalidParameterValue error.
:type description: string
:param description: Describes this version.
:type s3_bucket: string
:param s3_bucket: The Amazon S3 bucket where the data is located.
:type s3_key: string
:param s3_key: The Amazon S3 key where the data is located. Both
s3_bucket and s3_key must be specified in order to use a specific
source bundle. If both of these values are not specified the
sample application will be used.
:type auto_create_application: boolean
:param auto_create_application: Determines how the system behaves if
the specified application for this version does not already exist:
true: Automatically creates the specified application for this
version if it does not already exist. false: Returns an
InvalidParameterValue if the specified application for this version
does not already exist. Default: false Valid Values: true | false
:raises: TooManyApplicationsException,
TooManyApplicationVersionsException,
InsufficientPrivilegesException,
S3LocationNotInServiceRegionException
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
if s3_bucket and s3_key:
params['SourceBundle.S3Bucket'] = s3_bucket
params['SourceBundle.S3Key'] = s3_key
if auto_create_application:
params['AutoCreateApplication'] = self._encode_bool(
auto_create_application)
return self._get_response('CreateApplicationVersion', params)
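# Illustrative usage sketch (not part of the original boto source): registering a
# version from an existing S3 bundle. The connection helper, region, application,
# bucket and key names below are placeholder assumptions; note that s3_bucket and
# s3_key must be passed together, otherwise the sample application bundle is used.
#
#   import boto.beanstalk
#   conn = boto.beanstalk.connect_to_region('us-east-1')
#   conn.create_application_version('my-app', 'v1',
#                                   s3_bucket='my-bundles',
#                                   s3_key='my-app-v1.zip',
#                                   auto_create_application=True)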
def create_configuration_template(self, application_name, template_name,
solution_stack_name=None,
source_configuration_application_name=None,
source_configuration_template_name=None,
environment_id=None, description=None,
option_settings=None):
"""Creates a configuration template.
Templates are associated with a specific application and are used to
deploy different versions of the application with the same
configuration settings.
:type application_name: string
:param application_name: The name of the application to associate with
this configuration template. If no application is found with this
name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
:type template_name: string
:param template_name: The name of the configuration template.
Constraint: This name must be unique per application. Default: If
a configuration template already exists with this name, AWS Elastic
Beanstalk returns an InvalidParameterValue error.
:type solution_stack_name: string
:param solution_stack_name: The name of the solution stack used by this
configuration. The solution stack specifies the operating system,
architecture, and application server for a configuration template.
It determines the set of configuration options as well as the
possible and default values. Use ListAvailableSolutionStacks to
obtain a list of available solution stacks. Default: If the
SolutionStackName is not specified and the source configuration
parameter is blank, AWS Elastic Beanstalk uses the default solution
stack. If not specified and the source configuration parameter is
specified, AWS Elastic Beanstalk uses the same solution stack as
the source configuration template.
:type source_configuration_application_name: string
:param source_configuration_application_name: The name of the
application associated with the configuration.
:type source_configuration_template_name: string
:param source_configuration_template_name: The name of the
configuration template.
:type environment_id: string
:param environment_id: The ID of the environment used with this
configuration template.
:type description: string
:param description: Describes this configuration.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk sets the
specified configuration option to the requested value. The new
value overrides the value obtained from the solution stack or the
source configuration template.
:raises: InsufficientPrivilegesException,
TooManyConfigurationTemplatesException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if source_configuration_application_name:
params['SourceConfiguration.ApplicationName'] = source_configuration_application_name
if source_configuration_template_name:
params['SourceConfiguration.TemplateName'] = source_configuration_template_name
if environment_id:
params['EnvironmentId'] = environment_id
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
return self._get_response('CreateConfigurationTemplate', params)
def create_environment(self, application_name, environment_name,
version_label=None, template_name=None,
solution_stack_name=None, cname_prefix=None,
description=None, option_settings=None,
options_to_remove=None, tier_name=None,
tier_type=None, tier_version='1.0'):
"""Launches an environment for the application using a configuration.
:type application_name: string
:param application_name: The name of the application that contains the
version to be deployed. If no application is found with this name,
CreateEnvironment returns an InvalidParameterValue error.
:type environment_name: string
:param environment_name: A unique name for the deployment environment.
Used in the application URL. Constraint: Must be from 4 to 23
characters in length. The name can contain only letters, numbers,
and hyphens. It cannot start or end with a hyphen. This name must
be unique in your account. If the specified name already exists,
AWS Elastic Beanstalk returns an InvalidParameterValue error.
Default: If the CNAME parameter is not specified, the environment
name becomes part of the CNAME, and therefore part of the visible
URL for your application.
:type version_label: string
:param version_label: The name of the application version to deploy. If
the specified application has no associated application versions,
AWS Elastic Beanstalk UpdateEnvironment returns an
InvalidParameterValue error. Default: If not specified, AWS
Elastic Beanstalk attempts to launch the most recently created
application version.
:type template_name: string
:param template_name: The name of the configuration template to
use in deployment. If no configuration template is found with this
name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
Condition: You must specify either this parameter or a
SolutionStackName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type solution_stack_name: string
:param solution_stack_name: This is an alternative to specifying a
configuration name. If specified, AWS Elastic Beanstalk sets the
configuration values to the default values associated with the
specified solution stack. Condition: You must specify either this
or a TemplateName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type cname_prefix: string
:param cname_prefix: If specified, the environment attempts to use this
value as the prefix for the CNAME. If not specified, the
environment uses the environment name.
:type description: string
:param description: Describes this environment.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk sets the
specified configuration options to the requested value in the
configuration set for the new environment. These override the
values obtained from the solution stack or the configuration
template. Each element in the list is a tuple of (Namespace,
OptionName, Value), for example::
[('aws:autoscaling:launchconfiguration',
'Ec2KeyName', 'mykeypair')]
:type options_to_remove: list
:param options_to_remove: A list of custom user-defined configuration
options to remove from the configuration set for this new
environment.
:type tier_name: string
:param tier_name: The name of the tier. Valid values are
"WebServer" and "Worker". Defaults to "WebServer".
The ``tier_name`` and ``tier_type`` parameters are
related and the values provided must be valid.
The possible combinations are:
* "WebServer" and "Standard" (the default)
* "Worker" and "SQS/HTTP"
:type tier_type: string
:param tier_type: The type of the tier. Valid values are
"Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
if ``tier_name`` is "Worker". Defaults to "Standard".
:type tier_version: string
:param tier_version: The version of the tier. Valid values
currently are "1.0". Defaults to "1.0".
:raises: TooManyEnvironmentsException, InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name,
'EnvironmentName': environment_name}
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if cname_prefix:
params['CNAMEPrefix'] = cname_prefix
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
if tier_name and tier_type and tier_version:
params['Tier.Name'] = tier_name
params['Tier.Type'] = tier_type
params['Tier.Version'] = tier_version
return self._get_response('CreateEnvironment', params)
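# Illustrative usage sketch (assumptions: the connection object and all names are
# placeholders): option_settings is a list of (Namespace, OptionName, Value)
# tuples, and tier_name/tier_type/tier_version must form one of the valid
# combinations documented above.
#
#   conn.create_environment(
#       'my-app', 'my-app-env',
#       version_label='v1',
#       solution_stack_name='64bit Amazon Linux running Python',
#       option_settings=[('aws:autoscaling:launchconfiguration',
#                         'Ec2KeyName', 'mykeypair')],
#       tier_name='WebServer', tier_type='Standard', tier_version='1.0')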
def create_storage_location(self):
"""
Creates the Amazon S3 storage location for the account. This
location is used to store user log files.
:raises: TooManyBucketsException,
S3SubscriptionRequiredException,
InsufficientPrivilegesException
"""
return self._get_response('CreateStorageLocation', params={})
def delete_application(self, application_name,
terminate_env_by_force=None):
"""
Deletes the specified application along with all associated
versions and configurations. The application versions will not
be deleted from your Amazon S3 bucket.
:type application_name: string
:param application_name: The name of the application to delete.
:type terminate_env_by_force: boolean
:param terminate_env_by_force: When set to true, running
environments will be terminated before deleting the application.
:raises: OperationInProgressException
"""
params = {'ApplicationName': application_name}
if terminate_env_by_force:
params['TerminateEnvByForce'] = self._encode_bool(
terminate_env_by_force)
return self._get_response('DeleteApplication', params)
def delete_application_version(self, application_name, version_label,
delete_source_bundle=None):
"""Deletes the specified version from the specified application.
:type application_name: string
:param application_name: The name of the application to delete
releases from.
:type version_label: string
:param version_label: The label of the version to delete.
:type delete_source_bundle: boolean
:param delete_source_bundle: Indicates whether to delete the
associated source bundle from Amazon S3. Valid Values: true |
false
:raises: SourceBundleDeletionException,
InsufficientPrivilegesException,
OperationInProgressException,
S3LocationNotInServiceRegionException
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if delete_source_bundle:
params['DeleteSourceBundle'] = self._encode_bool(
delete_source_bundle)
return self._get_response('DeleteApplicationVersion', params)
def delete_configuration_template(self, application_name, template_name):
"""Deletes the specified configuration template.
:type application_name: string
:param application_name: The name of the application to delete
the configuration template from.
:type template_name: string
:param template_name: The name of the configuration template to
delete.
:raises: OperationInProgressException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
return self._get_response('DeleteConfigurationTemplate', params)
def delete_environment_configuration(self, application_name,
environment_name):
"""
Deletes the draft configuration associated with the running
environment. Updating a running environment with any
configuration changes creates a draft configuration set. You can
get the draft configuration using DescribeConfigurationSettings
while the update is in progress or if the update fails. The
DeploymentStatus for the draft configuration indicates whether
the deployment is in process or has failed. The draft
configuration remains in existence until it is deleted with this
action.
:type application_name: string
:param application_name: The name of the application the
environment is associated with.
:type environment_name: string
:param environment_name: The name of the environment to delete
the draft configuration from.
"""
params = {'ApplicationName': application_name,
'EnvironmentName': environment_name}
return self._get_response('DeleteEnvironmentConfiguration', params)
def describe_application_versions(self, application_name=None,
version_labels=None):
"""Returns descriptions for existing application versions.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to only include ones that are associated
with the specified application.
:type version_labels: list
:param version_labels: If specified, restricts the returned
descriptions to only include ones that have the specified version
labels.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_labels:
self.build_list_params(params, version_labels,
'VersionLabels.member')
return self._get_response('DescribeApplicationVersions', params)
def describe_applications(self, application_names=None):
"""Returns the descriptions of existing applications.
:type application_names: list
:param application_names: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to only include those with the specified
names.
"""
params = {}
if application_names:
self.build_list_params(params, application_names,
'ApplicationNames.member')
return self._get_response('DescribeApplications', params)
def describe_configuration_options(self, application_name=None,
template_name=None,
environment_name=None,
solution_stack_name=None, options=None):
"""Describes configuration options used in a template or environment.
Describes the configuration options that are used in a
particular configuration template or environment, or that a
specified solution stack defines. The description includes the
values of the options, their default values, and an indication of
the required action on a running environment if an option value
is changed.
:type application_name: string
:param application_name: The name of the application associated with
the configuration template or environment. Only needed if you want
to describe the configuration options associated with either the
configuration template or environment.
:type template_name: string
:param template_name: The name of the configuration template whose
configuration options you want to describe.
:type environment_name: string
:param environment_name: The name of the environment whose
configuration options you want to describe.
:type solution_stack_name: string
:param solution_stack_name: The name of the solution stack whose
configuration options you want to describe.
:type options: list
:param options: If specified, restricts the descriptions to only
the specified options.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if options:
self.build_list_params(params, options, 'Options.member')
return self._get_response('DescribeConfigurationOptions', params)
def describe_configuration_settings(self, application_name,
template_name=None,
environment_name=None):
"""
Returns a description of the settings for the specified
configuration set, that is, either a configuration template or
the configuration set associated with a running environment.
When describing the settings for the configuration set
associated with a running environment, it is possible to receive
two sets of setting descriptions. One is the deployed
configuration set, and the other is a draft configuration of an
environment that is either in the process of deployment or that
failed to deploy.
:type application_name: string
:param application_name: The application for the environment or
configuration template.
:type template_name: string
:param template_name: The name of the configuration template to
describe. Conditional: You must specify either this parameter or
an EnvironmentName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to describe.
Condition: You must specify either this or a TemplateName, but not
both. If you specify both, AWS Elastic Beanstalk returns an
InvalidParameterCombination error. If you do not specify either,
AWS Elastic Beanstalk returns MissingRequiredParameter error.
"""
params = {'ApplicationName': application_name}
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeConfigurationSettings', params)
def describe_environment_resources(self, environment_id=None,
environment_name=None):
"""Returns AWS resources for this environment.
:type environment_id: string
:param environment_id: The ID of the environment to retrieve AWS
resource usage data. Condition: You must specify either this or an
EnvironmentName, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to retrieve
AWS resource usage data. Condition: You must specify either this
or an EnvironmentId, or both. If you do not specify either, AWS
Elastic Beanstalk returns MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeEnvironmentResources', params)
def describe_environments(self, application_name=None, version_label=None,
environment_ids=None, environment_names=None,
include_deleted=None,
included_deleted_back_to=None):
"""Returns descriptions for existing environments.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that are associated
with this application.
:type version_label: string
:param version_label: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to include only those that are associated
with this application version.
:type environment_ids: list
:param environment_ids: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that have the
specified IDs.
:type environment_names: list
:param environment_names: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that have the
specified names.
:type include_deleted: boolean
:param include_deleted: Indicates whether to include deleted
environments: true: Environments that have been deleted after
IncludedDeletedBackTo are displayed. false: Do not include deleted
environments.
:type included_deleted_back_to: timestamp
:param included_deleted_back_to: If specified when IncludeDeleted is
set to true, then environments deleted after this date are
displayed.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if environment_ids:
self.build_list_params(params, environment_ids,
'EnvironmentIds.member')
if environment_names:
self.build_list_params(params, environment_names,
'EnvironmentNames.member')
if include_deleted:
params['IncludeDeleted'] = self._encode_bool(include_deleted)
if included_deleted_back_to:
params['IncludedDeletedBackTo'] = included_deleted_back_to
return self._get_response('DescribeEnvironments', params)
def describe_events(self, application_name=None, version_label=None,
template_name=None, environment_id=None,
environment_name=None, request_id=None, severity=None,
start_time=None, end_time=None, max_records=None,
next_token=None):
"""Returns event descriptions matching criteria up to the last 6 weeks.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those associated with
this application.
:type version_label: string
:param version_label: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those associated with this application
version.
:type template_name: string
:param template_name: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that are associated with this
environment configuration.
:type environment_id: string
:param environment_id: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to those associated with this
environment.
:type environment_name: string
:param environment_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to those associated with this
environment.
:type request_id: string
:param request_id: If specified, AWS Elastic Beanstalk restricts the
described events to include only those associated with this request
ID.
:type severity: string
:param severity: If specified, limits the events returned from this
call to include only those with the specified severity or higher.
:type start_time: timestamp
:param start_time: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that occur on or after this time.
:type end_time: timestamp
:param end_time: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that occur up to, but not including,
the EndTime.
:type max_records: integer
:param max_records: Specifies the maximum number of events that can be
returned, beginning with the most recent event.
:type next_token: string
:param next_token: Pagination token. If specified, the events return
the next batch of results.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if request_id:
params['RequestId'] = request_id
if severity:
params['Severity'] = severity
if start_time:
params['StartTime'] = start_time
if end_time:
params['EndTime'] = end_time
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self._get_response('DescribeEvents', params)
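# Illustrative usage sketch (assumptions: conn and the application name are
# placeholders): event listings are paginated, so the NextToken returned in one
# response can be passed back via next_token to fetch the following batch.
#
#   events = conn.describe_events('my-app', severity='WARN', max_records=100)
#   # the parsed JSON includes the event list plus a NextToken when more
#   # pages are available; feed it back with next_token=... to continue.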
def list_available_solution_stacks(self):
"""Returns a list of the available solution stack names."""
return self._get_response('ListAvailableSolutionStacks', params={})
def rebuild_environment(self, environment_id=None, environment_name=None):
"""
Deletes and recreates all of the AWS resources (for example:
the Auto Scaling group, load balancer, etc.) for a specified
environment and forces a restart.
:type environment_id: string
:param environment_id: The ID of the environment to rebuild.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to rebuild.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RebuildEnvironment', params)
def request_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
"""
Initiates a request to compile the specified type of
information of the deployed environment. Setting the InfoType
to tail compiles the last lines from the application server log
files of every Amazon EC2 instance in your environment. Use
RetrieveEnvironmentInfo to access the compiled information.
:type info_type: string
:param info_type: The type of information to request.
:type environment_id: string
:param environment_id: The ID of the environment of the
requested data. If no such environment is found,
RequestEnvironmentInfo returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment of the
requested data. If no such environment is found,
RequestEnvironmentInfo returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RequestEnvironmentInfo', params)
def restart_app_server(self, environment_id=None, environment_name=None):
"""
Causes the environment to restart the application container
server running on each Amazon EC2 instance.
:type environment_id: string
:param environment_id: The ID of the environment to restart the server
for. Condition: You must specify either this or an
EnvironmentName, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to restart the
server for. Condition: You must specify either this or an
EnvironmentId, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RestartAppServer', params)
def retrieve_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
"""
Retrieves the compiled information from a RequestEnvironmentInfo
request.
:type info_type: string
:param info_type: The type of information to retrieve.
:type environment_id: string
:param environment_id: The ID of the data's environment. If no such
environment is found, returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the data's environment. If no such
environment is found, returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RetrieveEnvironmentInfo', params)
def swap_environment_cnames(self, source_environment_id=None,
source_environment_name=None,
destination_environment_id=None,
destination_environment_name=None):
"""Swaps the CNAMEs of two environments.
:type source_environment_id: string
:param source_environment_id: The ID of the source environment.
Condition: You must specify at least the SourceEnvironmentID or the
SourceEnvironmentName. You may also specify both. If you specify
the SourceEnvironmentId, you must specify the
DestinationEnvironmentId.
:type source_environment_name: string
:param source_environment_name: The name of the source environment.
Condition: You must specify at least the SourceEnvironmentID or the
SourceEnvironmentName. You may also specify both. If you specify
the SourceEnvironmentName, you must specify the
DestinationEnvironmentName.
:type destination_environment_id: string
:param destination_environment_id: The ID of the destination
environment. Condition: You must specify at least the
DestinationEnvironmentID or the DestinationEnvironmentName. You may
also specify both. You must specify the SourceEnvironmentId with
the DestinationEnvironmentId.
:type destination_environment_name: string
:param destination_environment_name: The name of the destination
environment. Condition: You must specify at least the
DestinationEnvironmentID or the DestinationEnvironmentName. You may
also specify both. You must specify the SourceEnvironmentName with
the DestinationEnvironmentName.
"""
params = {}
if source_environment_id:
params['SourceEnvironmentId'] = source_environment_id
if source_environment_name:
params['SourceEnvironmentName'] = source_environment_name
if destination_environment_id:
params['DestinationEnvironmentId'] = destination_environment_id
if destination_environment_name:
params['DestinationEnvironmentName'] = destination_environment_name
return self._get_response('SwapEnvironmentCNAMEs', params)
def terminate_environment(self, environment_id=None, environment_name=None,
terminate_resources=None):
"""Terminates the specified environment.
:type environment_id: string
:param environment_id: The ID of the environment to terminate.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to terminate.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type terminate_resources: boolean
:param terminate_resources: Indicates whether the associated AWS
resources should shut down when the environment is terminated:
true: (default) The user AWS resources (for example, the Auto
Scaling group, LoadBalancer, etc.) are terminated along with the
environment. false: The environment is removed from the AWS
Elastic Beanstalk but the AWS resources continue to operate. For
more information, see the AWS Elastic Beanstalk User Guide.
Default: true Valid Values: true | false
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if terminate_resources:
params['TerminateResources'] = self._encode_bool(
terminate_resources)
return self._get_response('TerminateEnvironment', params)
def update_application(self, application_name, description=None):
"""
Updates the specified application to have the specified
properties.
:type application_name: string
:param application_name: The name of the application to update.
If no such application is found, UpdateApplication returns an
InvalidParameterValue error.
:type description: string
:param description: A new description for the application. Default: If
not specified, AWS Elastic Beanstalk does not update the
description.
"""
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('UpdateApplication', params)
def update_application_version(self, application_name, version_label,
description=None):
"""Updates the application version to have the properties.
:type application_name: string
:param application_name: The name of the application associated with
this version. If no application is found with this name,
UpdateApplication returns an InvalidParameterValue error.
:type version_label: string
:param version_label: The name of the version to update. If no
application version is found with this label, UpdateApplication
returns an InvalidParameterValue error.
:type description: string
:param description: A new description for this release.
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
return self._get_response('UpdateApplicationVersion', params)
def update_configuration_template(self, application_name, template_name,
description=None, option_settings=None,
options_to_remove=None):
"""
Updates the specified configuration template to have the
specified properties or configuration option values.
:type application_name: string
:param application_name: The name of the application associated with
the configuration template to update. If no application is found
with this name, UpdateConfigurationTemplate returns an
InvalidParameterValue error.
:type template_name: string
:param template_name: The name of the configuration template to update.
If no configuration template is found with this name,
UpdateConfigurationTemplate returns an InvalidParameterValue error.
:type description: string
:param description: A new description for the configuration.
:type option_settings: list
:param option_settings: A list of configuration option settings to
update with the new specified option value.
:type options_to_remove: list
:param options_to_remove: A list of configuration options to remove
from the configuration set. Constraint: You can remove only
UserDefined configuration options.
:raises: InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
return self._get_response('UpdateConfigurationTemplate', params)
def update_environment(self, environment_id=None, environment_name=None,
version_label=None, template_name=None,
description=None, option_settings=None,
options_to_remove=None, tier_name=None,
tier_type=None, tier_version='1.0'):
"""
Updates the environment description, deploys a new application
version, updates the configuration settings to an entirely new
configuration template, or updates select configuration option
values in the running environment. Attempting to update both
the release and configuration is not allowed and AWS Elastic
Beanstalk returns an InvalidParameterCombination error. When
updating the configuration settings to a new template or
individual settings, a draft configuration is created and
DescribeConfigurationSettings for this environment returns two
setting descriptions with different DeploymentStatus values.
:type environment_id: string
:param environment_id: The ID of the environment to update. If no
environment with this ID exists, AWS Elastic Beanstalk returns an
InvalidParameterValue error. Condition: You must specify either
this or an EnvironmentName, or both. If you do not specify either,
AWS Elastic Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to update. If no
environment with this name exists, AWS Elastic Beanstalk returns an
InvalidParameterValue error. Condition: You must specify either
this or an EnvironmentId, or both. If you do not specify either,
AWS Elastic Beanstalk returns MissingRequiredParameter error.
:type version_label: string
:param version_label: If this parameter is specified, AWS Elastic
Beanstalk deploys the named application version to the environment.
If no such application version is found, returns an
InvalidParameterValue error.
:type template_name: string
:param template_name: If this parameter is specified, AWS Elastic
Beanstalk deploys this configuration template to the environment.
If no such configuration template is found, AWS Elastic Beanstalk
returns an InvalidParameterValue error.
:type description: string
:param description: If this parameter is specified, AWS Elastic
Beanstalk updates the description of this environment.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk updates the
configuration set associated with the running environment and sets
the specified configuration options to the requested value.
:type options_to_remove: list
:param options_to_remove: A list of custom user-defined configuration
options to remove from the configuration set for this environment.
:type tier_name: string
:param tier_name: The name of the tier. Valid values are
"WebServer" and "Worker". Defaults to "WebServer".
The ``tier_name`` and ``tier_type`` parameters are
related and the values provided must be valid.
The possible combinations are:
* "WebServer" and "Standard" (the default)
* "Worker" and "SQS/HTTP"
:type tier_type: string
:param tier_type: The type of the tier. Valid values are
"Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
if ``tier_name`` is "Worker". Defaults to "Standard".
:type tier_version: string
:param tier_version: The version of the tier. Valid values
currently are "1.0". Defaults to "1.0".
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
if tier_name and tier_type and tier_version:
params['Tier.Name'] = tier_name
params['Tier.Type'] = tier_type
params['Tier.Version'] = tier_version
return self._get_response('UpdateEnvironment', params)
def validate_configuration_settings(self, application_name,
option_settings, template_name=None,
environment_name=None):
"""
Takes a set of configuration settings and either a
configuration template or environment, and determines whether
those values are valid. This action returns a list of messages
indicating any errors or warnings associated with the selection
of option values.
:type application_name: string
:param application_name: The name of the application that the
configuration template or environment belongs to.
:type template_name: string
:param template_name: The name of the configuration template to
validate the settings against. Condition: You cannot specify both
this and an environment name.
:type environment_name: string
:param environment_name: The name of the environment to validate the
settings against. Condition: You cannot specify both this and a
configuration template name.
:type option_settings: list
:param option_settings: A list of the options and desired values to
evaluate.
:raises: InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name}
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('ValidateConfigurationSettings', params)
def _build_list_params(self, params, user_values, prefix, tuple_names):
# For params such as the ConfigurationOptionSettings,
# they can specify a list of tuples where each tuple maps to a specific
# arg. For example:
# user_values = [('foo', 'bar', 'baz')]
# prefix=MyOption.member
# tuple_names=('One', 'Two', 'Three')
# would result in:
# MyOption.member.1.One = foo
# MyOption.member.1.Two = bar
# MyOption.member.1.Three = baz
for i, user_value in enumerate(user_values, 1):
current_prefix = '%s.%s' % (prefix, i)
for key, value in zip(tuple_names, user_value):
full_key = '%s.%s' % (current_prefix, key)
params[full_key] = value
|
harshilasu/GraphicMelon
|
y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/beanstalk/layer1.py
|
Python
|
gpl-3.0
| 56,243 | 0.000338 |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class coinbasepro(Exchange):
def describe(self):
return self.deep_extend(super(coinbasepro, self).describe(), {
'id': 'coinbasepro',
'name': 'Coinbase Pro',
'countries': ['US'],
'rateLimit': 100,
'userAgent': self.userAgents['chrome'],
'pro': True,
'has': {
'CORS': True,
'spot': True,
'margin': None, # has but not fully implemented
'swap': None, # has but not fully implemented
'future': None, # has but not fully implemented
'option': None,
'cancelAllOrders': True,
'cancelOrder': True,
'createDepositAddress': True,
'createOrder': True,
'deposit': True,
'fetchAccounts': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': None, # the exchange does not have this method, only createDepositAddress, see https://github.com/ccxt/ccxt/pull/7405
'fetchDeposits': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTransactions': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'timeframes': {
'1m': 60,
'5m': 300,
'15m': 900,
'1h': 3600,
'6h': 21600,
'1d': 86400,
},
'hostname': 'pro.coinbase.com',
'urls': {
'test': {
'public': 'https://api-public.sandbox.pro.coinbase.com',
'private': 'https://api-public.sandbox.pro.coinbase.com',
},
'logo': 'https://user-images.githubusercontent.com/1294454/41764625-63b7ffde-760a-11e8-996d-a6328fa9347a.jpg',
'api': {
'public': 'https://api.{hostname}',
'private': 'https://api.{hostname}',
},
'www': 'https://pro.coinbase.com/',
'doc': 'https://docs.pro.coinbase.com',
'fees': [
'https://docs.pro.coinbase.com/#fees',
'https://support.pro.coinbase.com/customer/en/portal/articles/2945310-fees',
],
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'password': True,
},
'api': {
'public': {
'get': [
'currencies',
'products',
'products/{id}',
'products/{id}/book',
'products/{id}/candles',
'products/{id}/stats',
'products/{id}/ticker',
'products/{id}/trades',
'time',
'products/spark-lines', # experimental
],
},
'private': {
'get': [
'accounts',
'accounts/{id}',
'accounts/{id}/holds',
'accounts/{id}/ledger',
'accounts/{id}/transfers',
'coinbase-accounts',
'fills',
'funding',
'fees',
'margin/profile_information',
'margin/buying_power',
'margin/withdrawal_power',
'margin/withdrawal_power_all',
'margin/exit_plan',
'margin/liquidation_history',
'margin/position_refresh_amounts',
'margin/status',
'oracle',
'orders',
'orders/{id}',
'orders/client:{client_oid}',
'otc/orders',
'payment-methods',
'position',
'profiles',
'profiles/{id}',
'reports/{report_id}',
'transfers',
'transfers/{transfer_id}',
'users/self/exchange-limits',
'users/self/hold-balances',
'users/self/trailing-volume',
'withdrawals/fee-estimate',
],
'post': [
'conversions',
'deposits/coinbase-account',
'deposits/payment-method',
'coinbase-accounts/{id}/addresses',
'funding/repay',
'orders',
'position/close',
'profiles/margin-transfer',
'profiles/transfer',
'reports',
'withdrawals/coinbase',
'withdrawals/coinbase-account',
'withdrawals/crypto',
'withdrawals/payment-method',
],
'delete': [
'orders',
'orders/client:{client_oid}',
'orders/{id}',
],
},
},
'commonCurrencies': {
'CGLD': 'CELO',
},
'precisionMode': TICK_SIZE,
'fees': {
'trading': {
'tierBased': True, # complicated tier system per coin
'percentage': True,
'maker': 0.5 / 100, # highest fee of all tiers
'taker': 0.5 / 100, # highest fee of all tiers
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'BCH': 0,
'BTC': 0,
'LTC': 0,
'ETH': 0,
'EUR': 0.15,
'USD': 25,
},
'deposit': {
'BCH': 0,
'BTC': 0,
'LTC': 0,
'ETH': 0,
'EUR': 0.15,
'USD': 10,
},
},
},
'exceptions': {
'exact': {
'Insufficient funds': InsufficientFunds,
'NotFound': OrderNotFound,
'Invalid API Key': AuthenticationError,
'invalid signature': AuthenticationError,
'Invalid Passphrase': AuthenticationError,
'Invalid order id': InvalidOrder,
'Private rate limit exceeded': RateLimitExceeded,
'Trading pair not available': PermissionDenied,
'Product not found': InvalidOrder,
},
'broad': {
'Order already done': OrderNotFound,
'order not found': OrderNotFound,
'price too small': InvalidOrder,
'price too precise': InvalidOrder,
'under maintenance': OnMaintenance,
'size is too small': InvalidOrder,
'Cancel only mode': OnMaintenance, # https://github.com/ccxt/ccxt/issues/7690
},
},
})
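# Illustrative usage sketch (not part of the original ccxt source; credentials
# are placeholders): private endpoints require apiKey, secret and password (the
# API passphrase), and the sandbox URLs declared above can be selected with the
# generic set_sandbox_mode helper from the base Exchange class.
#
#   import ccxt
#   exchange = ccxt.coinbasepro({
#       'apiKey': 'YOUR_KEY',
#       'secret': 'YOUR_SECRET',
#       'password': 'YOUR_PASSPHRASE',
#   })
#   exchange.set_sandbox_mode(True)  # route requests to the sandbox hostname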
def fetch_currencies(self, params={}):
response = self.publicGetCurrencies(params)
#
# [
# {
# id: 'XTZ',
# name: 'Tezos',
# min_size: '0.000001',
# status: 'online',
# message: '',
# max_precision: '0.000001',
# convertible_to: [],
# details: {
# type: 'crypto',
# symbol: 'Τ',
# network_confirmations: 60,
# sort_order: 53,
# crypto_address_link: 'https://tzstats.com/{{address}}',
# crypto_transaction_link: 'https://tzstats.com/{{txId}}',
# push_payment_methods: ['crypto'],
# group_types: [],
# display_name: '',
# processing_time_seconds: 0,
# min_withdrawal_amount: 1
# }
# }
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'id')
name = self.safe_string(currency, 'name')
code = self.safe_currency_code(id)
details = self.safe_value(currency, 'details', {})
precision = self.safe_number(currency, 'max_precision')
status = self.safe_string(currency, 'status')
active = (status == 'online')
result[code] = {
'id': id,
'code': code,
'info': currency,
'type': self.safe_string(details, 'type'),
'name': name,
'active': active,
'deposit': None,
'withdraw': None,
'fee': None,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_number(details, 'min_size'),
'max': None,
},
'withdraw': {
'min': self.safe_number(details, 'min_withdrawal_amount'),
'max': None,
},
},
}
return result
def fetch_markets(self, params={}):
response = self.publicGetProducts(params)
#
# [
# {
# "id": "ZEC-BTC",
# "base_currency": "ZEC",
# "quote_currency": "BTC",
# "base_min_size": "0.0056",
# "base_max_size": "3600",
# "quote_increment": "0.000001",
# "base_increment": "0.0001",
# "display_name": "ZEC/BTC",
# "min_market_funds": "0.000016",
# "max_market_funds": "12",
# "margin_enabled": False,
# "fx_stablecoin": False,
# "max_slippage_percentage": "0.03000000",
# "post_only": False,
# "limit_only": False,
# "cancel_only": False,
# "trading_disabled": False,
# "status": "online",
# "status_message": "",
# "auction_mode": False
# },
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quote_currency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
result.append(self.extend(self.fees['trading'], {
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': self.safe_value(market, 'margin_enabled'),
'swap': False,
'future': False,
'option': False,
'active': (status == 'online'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.safe_number(market, 'base_increment'),
'price': self.safe_number(market, 'quote_increment'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'base_min_size'),
'max': self.safe_number(market, 'base_max_size'),
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'min_market_funds'),
'max': self.safe_number(market, 'max_market_funds'),
},
},
'info': market,
}))
return result
def fetch_accounts(self, params={}):
self.load_markets()
response = self.privateGetAccounts(params)
#
# [
# {
# id: '4aac9c60-cbda-4396-9da4-4aa71e95fba0',
# currency: 'BTC',
# balance: '0.0000000000000000',
# available: '0',
# hold: '0.0000000000000000',
# profile_id: 'b709263e-f42a-4c7d-949a-a95c83d065da'
# },
# {
# id: 'f75fa69a-1ad1-4a80-bd61-ee7faa6135a3',
# currency: 'USDC',
# balance: '0.0000000000000000',
# available: '0',
# hold: '0.0000000000000000',
# profile_id: 'b709263e-f42a-4c7d-949a-a95c83d065da'
# },
# ]
#
result = []
for i in range(0, len(response)):
account = response[i]
accountId = self.safe_string(account, 'id')
currencyId = self.safe_string(account, 'currency')
code = self.safe_currency_code(currencyId)
result.append({
'id': accountId,
'type': None,
'currency': code,
'info': account,
})
return result
def parse_balance(self, response):
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'hold')
account['total'] = self.safe_string(balance, 'balance')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetAccounts(params)
return self.parse_balance(response)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
# level 1 - only the best bid and ask
# level 2 - top 50 bids and asks(aggregated)
# level 3 - full order book(non aggregated)
request = {
'id': self.market_id(symbol),
'level': 2, # 1 best bid/ask, 2 aggregated, 3 full
}
response = self.publicGetProductsIdBook(self.extend(request, params))
#
# {
# "sequence":1924393896,
# "bids":[
# ["0.01825","24.34811287",2],
# ["0.01824","72.5463",3],
# ["0.01823","424.54298049",6],
# ],
# "asks":[
# ["0.01826","171.10414904",4],
# ["0.01827","22.60427028",1],
# ["0.01828","397.46018784",7],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'sequence')
return orderbook
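# Illustrative usage sketch (not part of the original ccxt source; the symbol is
# a placeholder): the aggregation level can be overridden through the extra
# params argument, e.g. level 1 to fetch only the best bid and ask.
#
#   exchange = ccxt.coinbasepro()
#   book = exchange.fetch_order_book('BTC/USD', None, {'level': 1})
#   best_bid, best_ask = book['bids'][0], book['asks'][0]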
def parse_ticker(self, ticker, market=None):
#
# fetchTickers
#
# [
# 1639472400, # timestamp
# 4.26, # low
# 4.38, # high
# 4.35, # open
# 4.27 # close
# ]
#
# fetchTicker
#
# publicGetProductsIdTicker
#
# {
# "trade_id":843439,
# "price":"0.997999",
# "size":"80.29769",
# "time":"2020-01-28T02:13:33.012523Z",
# "bid":"0.997094",
# "ask":"0.998",
# "volume":"1903188.03750000"
# }
#
# publicGetProductsIdStats
#
# {
# "open": "34.19000000",
# "high": "95.70000000",
# "low": "7.06000000",
# "volume": "2.41000000"
# }
#
timestamp = None
bid = None
ask = None
last = None
high = None
low = None
open = None
volume = None
symbol = None if (market is None) else market['symbol']
if isinstance(ticker, list):
last = self.safe_string(ticker, 4)
timestamp = self.milliseconds()
else:
timestamp = self.parse8601(self.safe_value(ticker, 'time'))
bid = self.safe_string(ticker, 'bid')
ask = self.safe_string(ticker, 'ask')
high = self.safe_string(ticker, 'high')
low = self.safe_string(ticker, 'low')
open = self.safe_string(ticker, 'open')
last = self.safe_string_2(ticker, 'price', 'last')
volume = self.safe_string(ticker, 'volume')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': high,
'low': low,
'bid': bid,
'bidVolume': None,
'ask': ask,
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': volume,
'quoteVolume': None,
'info': ticker,
}, market, False)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
request = {}
response = self.publicGetProductsSparkLines(self.extend(request, params))
#
# {
# YYY-USD: [
# [
# 1639472400, # timestamp
# 4.26, # low
# 4.38, # high
# 4.35, # open
# 4.27 # close
# ],
# [
# 1639468800,
# 4.31,
# 4.45,
# 4.35,
# 4.35
# ],
# ]
# }
#
result = {}
marketIds = list(response.keys())
delimiter = '-'
for i in range(0, len(marketIds)):
marketId = marketIds[i]
entry = self.safe_value(response, marketId, [])
first = self.safe_value(entry, 0, [])
market = self.safe_market(marketId, None, delimiter)
symbol = market['symbol']
result[symbol] = self.parse_ticker(first, market)
return self.filter_by_array(result, 'symbol', symbols)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'],
}
# publicGetProductsIdTicker or publicGetProductsIdStats
method = self.safe_string(self.options, 'fetchTickerMethod', 'publicGetProductsIdTicker')
response = getattr(self, method)(self.extend(request, params))
#
# publicGetProductsIdTicker
#
# {
# "trade_id":843439,
# "price":"0.997999",
# "size":"80.29769",
# "time":"2020-01-28T02:13:33.012523Z",
# "bid":"0.997094",
# "ask":"0.998",
# "volume":"1903188.03750000"
# }
#
# publicGetProductsIdStats
#
# {
# "open": "34.19000000",
# "high": "95.70000000",
# "low": "7.06000000",
# "volume": "2.41000000"
# }
#
return self.parse_ticker(response, market)
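# Illustrative usage sketch (not part of the original ccxt source; the symbol is
# a placeholder): the endpoint used by fetch_ticker can be switched through the
# exchange options, e.g. to the 24h stats endpoint instead of the default
# ticker endpoint.
#
#   exchange = ccxt.coinbasepro()
#   exchange.options['fetchTickerMethod'] = 'publicGetProductsIdStats'
#   ticker = exchange.fetch_ticker('ETH/USD')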
def parse_trade(self, trade, market=None):
#
# {
# type: 'match',
# trade_id: 82047307,
# maker_order_id: '0f358725-2134-435e-be11-753912a326e0',
# taker_order_id: '252b7002-87a3-425c-ac73-f5b9e23f3caf',
# order_id: 'd50ec984-77a8-460a-b958-66f114b0de9b',
# side: 'sell',
# size: '0.00513192',
# price: '9314.78',
# product_id: 'BTC-USD',
# profile_id: '6244401d-c078-40d9-b305-7ad3551bc3b0',
# sequence: 12038915443,
# time: '2020-01-31T20:03:41.158814Z'
# created_at: '2014-11-07T22:19:28.578544Z',
# liquidity: 'T',
# fee: '0.00025',
# settled: True,
# usd_volume: '0.0924556000000000',
# user_id: '595eb864313c2b02ddf2937d'
# }
#
timestamp = self.parse8601(self.safe_string_2(trade, 'time', 'created_at'))
marketId = self.safe_string(trade, 'product_id')
market = self.safe_market(marketId, market, '-')
feeRate = None
takerOrMaker = None
cost = None
feeCurrencyId = self.safe_string_lower(market, 'quoteId')
if feeCurrencyId is not None:
costField = feeCurrencyId + '_value'
cost = self.safe_string(trade, costField)
liquidity = self.safe_string(trade, 'liquidity')
if liquidity is not None:
takerOrMaker = 'taker' if (liquidity == 'T') else 'maker'
feeRate = self.safe_string(market, takerOrMaker)
feeCost = self.safe_string_2(trade, 'fill_fees', 'fee')
fee = {
'cost': feeCost,
'currency': market['quote'],
'rate': feeRate,
}
id = self.safe_string(trade, 'trade_id')
side = 'sell' if (trade['side'] == 'buy') else 'buy'
orderId = self.safe_string(trade, 'order_id')
# Coinbase Pro returns inverted side to fetchMyTrades vs fetchTrades
makerOrderId = self.safe_string(trade, 'maker_order_id')
takerOrderId = self.safe_string(trade, 'taker_order_id')
if (orderId is not None) or ((makerOrderId is not None) and (takerOrderId is not None)):
side = 'buy' if (trade['side'] == 'buy') else 'sell'
price = self.safe_string(trade, 'price')
amount = self.safe_string(trade, 'size')
return self.safe_trade({
'id': id,
'order': orderId,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'fee': fee,
'cost': cost,
}, market)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
# as of 2018-08-23
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'product_id': market['id'],
}
if limit is not None:
request['limit'] = limit
response = self.privateGetFills(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'], # fixes issue #2
}
if limit is not None:
request['limit'] = limit # default 100
response = self.publicGetProductsIdTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1591514160,
# 0.02507,
# 0.02507,
# 0.02507,
# 0.02507,
# 0.02816506
# ]
#
return [
self.safe_timestamp(ohlcv, 0),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
granularity = self.timeframes[timeframe]
request = {
'id': market['id'],
'granularity': granularity,
}
if since is not None:
request['start'] = self.iso8601(since)
if limit is None:
# https://docs.pro.coinbase.com/#get-historic-rates
limit = 300 # max = 300
else:
limit = min(300, limit)
request['end'] = self.iso8601(self.sum((limit - 1) * granularity * 1000, since))
response = self.publicGetProductsIdCandles(self.extend(request, params))
#
# [
# [1591514160,0.02507,0.02507,0.02507,0.02507,0.02816506],
# [1591514100,0.02507,0.02507,0.02507,0.02507,1.63830323],
# [1591514040,0.02505,0.02507,0.02505,0.02507,0.19918178]
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def fetch_time(self, params={}):
response = self.publicGetTime(params)
#
# {
# "iso":"2020-05-12T08:00:51.504Z",
# "epoch":1589270451.504
# }
#
return self.safe_timestamp(response, 'epoch')
def parse_order_status(self, status):
statuses = {
'pending': 'open',
'active': 'open',
'open': 'open',
'done': 'closed',
'canceled': 'canceled',
'canceling': 'open',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
# "price": "0.10000000",
# "size": "0.01000000",
# "product_id": "BTC-USD",
# "side": "buy",
# "stp": "dc",
# "type": "limit",
# "time_in_force": "GTC",
# "post_only": False,
# "created_at": "2016-12-08T20:02:28.53864Z",
# "fill_fees": "0.0000000000000000",
# "filled_size": "0.00000000",
# "executed_value": "0.0000000000000000",
# "status": "pending",
# "settled": False
# }
#
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
marketId = self.safe_string(order, 'product_id')
market = self.safe_market(marketId, market, '-')
status = self.parse_order_status(self.safe_string(order, 'status'))
doneReason = self.safe_string(order, 'done_reason')
if (status == 'closed') and (doneReason == 'canceled'):
status = 'canceled'
price = self.safe_string(order, 'price')
filled = self.safe_string(order, 'filled_size')
amount = self.safe_string(order, 'size', filled)
cost = self.safe_string(order, 'executed_value')
feeCost = self.safe_number(order, 'fill_fees')
fee = None
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': market['quote'],
'rate': None,
}
id = self.safe_string(order, 'id')
type = self.safe_string(order, 'type')
side = self.safe_string(order, 'side')
timeInForce = self.safe_string(order, 'time_in_force')
postOnly = self.safe_value(order, 'post_only')
stopPrice = self.safe_number(order, 'stop_price')
clientOrderId = self.safe_string(order, 'client_oid')
return self.safe_order({
'id': id,
'clientOrderId': clientOrderId,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': market['symbol'],
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': None,
'fee': fee,
'average': None,
'trades': None,
}, market)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_oid')
method = None
if clientOrderId is None:
method = 'privateGetOrdersId'
request['id'] = id
else:
method = 'privateGetOrdersClientClientOid'
request['client_oid'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'client_oid'])
response = getattr(self, method)(self.extend(request, params))
return self.parse_order(response)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'order_id': id,
}
response = self.privateGetFills(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'status': 'all',
}
return self.fetch_open_orders(symbol, since, limit, self.extend(request, params))
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['id']
if limit is not None:
request['limit'] = limit # default 100
response = self.privateGetOrders(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'status': 'done',
}
return self.fetch_open_orders(symbol, since, limit, self.extend(request, params))
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
# common params --------------------------------------------------
# 'client_oid': clientOrderId,
'type': type,
'side': side,
'product_id': market['id'],
# 'size': self.amount_to_precision(symbol, amount),
# 'stp': 'dc', # self-trade prevention, dc = decrease and cancel, co = cancel oldest, cn = cancel newest, cb = cancel both
# 'stop': 'loss', # "loss" = stop loss below price, "entry" = take profit above price
# 'stop_price': self.price_to_precision(symbol, price),
# limit order params ---------------------------------------------
# 'price': self.price_to_precision(symbol, price),
# 'size': self.amount_to_precision(symbol, amount),
# 'time_in_force': 'GTC', # GTC, GTT, IOC, or FOK
# 'cancel_after' [optional]* min, hour, day, requires time_in_force to be GTT
# 'post_only': False, # invalid when time_in_force is IOC or FOK
# market order params --------------------------------------------
# 'size': self.amount_to_precision(symbol, amount),
# 'funds': self.cost_to_precision(symbol, amount),
}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_oid')
if clientOrderId is not None:
request['client_oid'] = clientOrderId
stopPrice = self.safe_number_2(params, 'stopPrice', 'stop_price')
if stopPrice is not None:
request['stop_price'] = self.price_to_precision(symbol, stopPrice)
timeInForce = self.safe_string_2(params, 'timeInForce', 'time_in_force')
if timeInForce is not None:
request['time_in_force'] = timeInForce
postOnly = self.safe_value_2(params, 'postOnly', 'post_only', False)
if postOnly:
request['post_only'] = True
params = self.omit(params, ['timeInForce', 'time_in_force', 'stopPrice', 'stop_price', 'clientOrderId', 'client_oid', 'postOnly', 'post_only'])
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['size'] = self.amount_to_precision(symbol, amount)
elif type == 'market':
cost = self.safe_number_2(params, 'cost', 'funds')
if cost is None:
if price is not None:
cost = amount * price
else:
params = self.omit(params, ['cost', 'funds'])
if cost is not None:
request['funds'] = self.cost_to_precision(symbol, cost)
else:
request['size'] = self.amount_to_precision(symbol, amount)
response = self.privatePostOrders(self.extend(request, params))
#
# {
# "id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
# "price": "0.10000000",
# "size": "0.01000000",
# "product_id": "BTC-USD",
# "side": "buy",
# "stp": "dc",
# "type": "limit",
# "time_in_force": "GTC",
# "post_only": False,
# "created_at": "2016-12-08T20:02:28.53864Z",
# "fill_fees": "0.0000000000000000",
# "filled_size": "0.00000000",
# "executed_value": "0.0000000000000000",
# "status": "pending",
# "settled": False
# }
#
return self.parse_order(response, market)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
# 'product_id': market['id'], # the request will be more performant if you include it
}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_oid')
method = None
if clientOrderId is None:
method = 'privateDeleteOrdersId'
request['id'] = id
else:
method = 'privateDeleteOrdersClientClientOid'
request['client_oid'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'client_oid'])
market = None
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['symbol'] # the request will be more performant if you include it
return getattr(self, method)(self.extend(request, params))
def cancel_all_orders(self, symbol=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['symbol'] # the request will be more performant if you include it
return self.privateDeleteOrders(self.extend(request, params))
def fetch_payment_methods(self, params={}):
return self.privateGetPaymentMethods(params)
def deposit(self, code, amount, address, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'amount': amount,
}
method = 'privatePostDeposits'
if 'payment_method_id' in params:
# deposit from a payment_method, like a bank account
method += 'PaymentMethod'
elif 'coinbase_account_id' in params:
# deposit into Coinbase Pro account from a Coinbase account
method += 'CoinbaseAccount'
else:
            # otherwise we did not receive a supported deposit location
# relevant docs link for the Googlers
# https://docs.pro.coinbase.com/#deposits
raise NotSupported(self.id + ' deposit() requires one of `coinbase_account_id` or `payment_method_id` extra params')
response = getattr(self, method)(self.extend(request, params))
if not response:
raise ExchangeError(self.id + ' deposit() error: ' + self.json(response))
return {
'info': response,
'id': response['id'],
}
def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'amount': amount,
}
method = 'privatePostWithdrawals'
if 'payment_method_id' in params:
method += 'PaymentMethod'
elif 'coinbase_account_id' in params:
method += 'CoinbaseAccount'
else:
method += 'Crypto'
request['crypto_address'] = address
if tag is not None:
request['destination_tag'] = tag
response = getattr(self, method)(self.extend(request, params))
if not response:
raise ExchangeError(self.id + ' withdraw() error: ' + self.json(response))
return {
'info': response,
'id': response['id'],
}
def parse_ledger_entry_type(self, type):
types = {
'transfer': 'transfer', # Funds moved between portfolios
'match': 'trade', # Funds moved as a result of a trade
'fee': 'fee', # Fee as a result of a trade
'rebate': 'rebate', # Fee rebate
'conversion': 'trade', # Funds converted between fiat currency and a stablecoin
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
# {
# id: '12087495079',
# amount: '-0.0100000000000000',
# balance: '0.0645419900000000',
# created_at: '2021-10-28T17:14:32.593168Z',
# type: 'transfer',
# details: {
# from: '2f74edf7-1440-4586-86dc-ae58c5693691',
# profile_transfer_id: '3ef093ad-2482-40d1-8ede-2f89cff5099e',
# to: 'dda99503-4980-4b60-9549-0b770ee51336'
# }
# },
# {
# id: '11740725774',
# amount: '-1.7565669701255000',
# balance: '0.0016490047745000',
# created_at: '2021-10-22T03:47:34.764122Z',
# type: 'fee',
# details: {
# order_id: 'ad06abf4-95ab-432a-a1d8-059ef572e296',
# product_id: 'ETH-DAI',
# trade_id: '1740617'
# }
# }
id = self.safe_string(item, 'id')
amountString = self.safe_string(item, 'amount')
direction = None
afterString = self.safe_string(item, 'balance')
beforeString = Precise.string_sub(afterString, amountString)
if Precise.string_lt(amountString, '0'):
direction = 'out'
amountString = Precise.string_abs(amountString)
else:
direction = 'in'
amount = self.parse_number(amountString)
after = self.parse_number(afterString)
before = self.parse_number(beforeString)
timestamp = self.parse8601(self.safe_value(item, 'created_at'))
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
code = self.safe_currency_code(None, currency)
details = self.safe_value(item, 'details', {})
account = None
referenceAccount = None
referenceId = None
if type == 'transfer':
account = self.safe_string(details, 'from')
referenceAccount = self.safe_string(details, 'to')
referenceId = self.safe_string(details, 'profile_transfer_id')
else:
referenceId = self.safe_string(details, 'order_id')
status = 'ok'
return {
'id': id,
'currency': code,
'account': account,
'referenceAccount': referenceAccount,
'referenceId': referenceId,
'status': status,
'amount': amount,
'before': before,
'after': after,
'fee': None,
'direction': direction,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'type': type,
'info': item,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
# https://docs.cloud.coinbase.com/exchange/reference/exchangerestapi_getaccountledger
if code is None:
raise ArgumentsRequired(self.id + ' fetchLedger() requires a code param')
self.load_markets()
self.load_accounts()
currency = self.currency(code)
accountsByCurrencyCode = self.index_by(self.accounts, 'currency')
account = self.safe_value(accountsByCurrencyCode, code)
if account is None:
raise ExchangeError(self.id + ' fetchLedger() could not find account id for ' + code)
request = {
'id': account['id'],
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(self.milliseconds()),
# 'before': 'cursor', # sets start cursor to before date
# 'after': 'cursor', # sets end cursor to after date
# 'limit': limit, # default 100
# 'profile_id': 'string'
}
if since is not None:
request['start_date'] = self.iso8601(since)
if limit is not None:
request['limit'] = limit # default 100
response = self.privateGetAccountsIdLedger(self.extend(request, params))
for i in range(0, len(response)):
response[i]['currency'] = code
return self.parse_ledger(response, currency, since, limit)
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
self.load_accounts()
currency = None
id = self.safe_string(params, 'id') # account id
if id is None:
if code is not None:
currency = self.currency(code)
accountsByCurrencyCode = self.index_by(self.accounts, 'currency')
account = self.safe_value(accountsByCurrencyCode, code)
if account is None:
raise ExchangeError(self.id + ' fetchTransactions() could not find account id for ' + code)
id = account['id']
request = {}
if id is not None:
request['id'] = id
if limit is not None:
request['limit'] = limit
response = None
if id is None:
response = self.privateGetTransfers(self.extend(request, params))
for i in range(0, len(response)):
account_id = self.safe_string(response[i], 'account_id')
account = self.safe_value(self.accountsById, account_id)
code = self.safe_string(account, 'currency')
response[i]['currency'] = code
else:
response = self.privateGetAccountsIdTransfers(self.extend(request, params))
for i in range(0, len(response)):
response[i]['currency'] = code
return self.parse_transactions(response, currency, since, limit)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
return self.fetch_transactions(code, since, limit, self.extend({'type': 'deposit'}, params))
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
return self.fetch_transactions(code, since, limit, self.extend({'type': 'withdraw'}, params))
def parse_transaction_status(self, transaction):
canceled = self.safe_value(transaction, 'canceled_at')
if canceled:
return 'canceled'
processed = self.safe_value(transaction, 'processed_at')
completed = self.safe_value(transaction, 'completed_at')
if completed:
return 'ok'
elif processed and not completed:
return 'failed'
else:
return 'pending'
def parse_transaction(self, transaction, currency=None):
details = self.safe_value(transaction, 'details', {})
id = self.safe_string(transaction, 'id')
txid = self.safe_string(details, 'crypto_transaction_hash')
timestamp = self.parse8601(self.safe_string(transaction, 'created_at'))
updated = self.parse8601(self.safe_string(transaction, 'processed_at'))
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(transaction)
amount = self.safe_number(transaction, 'amount')
type = self.safe_string(transaction, 'type')
address = self.safe_string(details, 'crypto_address')
tag = self.safe_string(details, 'destination_tag')
address = self.safe_string(transaction, 'crypto_address', address)
fee = None
if type == 'withdraw':
type = 'withdrawal'
address = self.safe_string(details, 'sent_to_address', address)
feeCost = self.safe_number(details, 'fee')
if feeCost is not None:
if amount is not None:
amount -= feeCost
fee = {
'cost': feeCost,
'currency': code,
}
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'network': None,
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': tag,
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
def create_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
accounts = self.safe_value(self.options, 'coinbaseAccounts')
if accounts is None:
accounts = self.privateGetCoinbaseAccounts()
self.options['coinbaseAccounts'] = accounts # cache it
self.options['coinbaseAccountsByCurrencyId'] = self.index_by(accounts, 'currency')
currencyId = currency['id']
account = self.safe_value(self.options['coinbaseAccountsByCurrencyId'], currencyId)
if account is None:
# eslint-disable-next-line quotes
raise InvalidAddress(self.id + " fetchDepositAddress() could not find currency code " + code + " with id = " + currencyId + " in self.options['coinbaseAccountsByCurrencyId']")
request = {
'id': account['id'],
}
response = self.privatePostCoinbaseAccountsIdAddresses(self.extend(request, params))
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'destination_tag')
return {
'currency': code,
'address': self.check_address(address),
'tag': tag,
'info': response,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if method == 'GET':
if query:
request += '?' + self.urlencode(query)
url = self.implode_hostname(self.urls['api'][api]) + request
if api == 'private':
self.check_required_credentials()
nonce = str(self.nonce())
payload = ''
if method != 'GET':
if query:
body = self.json(query)
payload = body
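            # Coinbase Pro signs private requests with HMAC-SHA256 over timestamp + method + request path + body,
            # keyed with the base64-decoded API secret; the signature is sent base64-encoded in CB-ACCESS-SIGN.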
what = nonce + method + request + payload
secret = None
try:
secret = self.base64_to_binary(self.secret)
except Exception as e:
raise AuthenticationError(self.id + ' sign() invalid base64 secret')
signature = self.hmac(self.encode(what), secret, hashlib.sha256, 'base64')
headers = {
'CB-ACCESS-KEY': self.apiKey,
'CB-ACCESS-SIGN': signature,
'CB-ACCESS-TIMESTAMP': nonce,
'CB-ACCESS-PASSPHRASE': self.password,
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if (code == 400) or (code == 404):
if body[0] == '{':
message = self.safe_string(response, 'message')
feedback = self.id + ' ' + message
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
raise ExchangeError(feedback) # unknown message
raise ExchangeError(self.id + ' ' + body)
def request(self, path, api='public', method='GET', params={}, headers=None, body=None, config={}, context={}):
response = self.fetch2(path, api, method, params, headers, body, config, context)
if not isinstance(response, basestring):
if 'message' in response:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
|
ccxt/ccxt
|
python/ccxt/coinbasepro.py
|
Python
|
mit
| 53,861 | 0.00104 |
from django.template import Library
from django.utils.functional import Promise
import json
register = Library()
@register.filter
def json_dumps(json_object):
if isinstance(json_object, Promise):
json_object = dict(json_object)
return json.dumps(json_object)
@register.filter
def json_dumps_pretty(json_object):
if isinstance(json_object, Promise):
json_object = dict(json_object)
return json.dumps(json_object, indent=4, separators=(',', ': '))
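# Illustrative usage (not part of the original module): once the app providing this
# template tag library is installed, the filters can be loaded in a template with
# ``{% load json_tools %}`` and applied as ``{{ some_dict|json_dumps }}`` or
# ``{{ some_dict|json_dumps_pretty }}``; the load name comes from this file's name.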
|
DylanMcCall/stuartmccall.ca
|
common/templatetags/json_tools.py
|
Python
|
mit
| 482 | 0.004149 |
# Copyright (c) 2014, The Boovix authors that are listed
# in the AUTHORS file. All rights reserved. Use of this
# source code is governed by the BSD 3-clause license that
# can be found in the LICENSE file.
"""
Function annotations in Python 3:
http://legacy.python.org/dev/peps/pep-3107/
Type checking in Python 3:
* http://code.activestate.com/recipes/578528
* http://stackoverflow.com/questions/1275646
mypy static type checking during compilation:
https://mail.python.org/pipermail/python-ideas/2014-August/028618.html
from typing import List, Dict
def word_count(input: List[str]) -> Dict[str, int]:
result = {} #type: Dict[str, int]
for line in input:
for word in line.split():
result[word] = result.get(word, 0) + 1
return result
Note that the #type: comment is part of the mypy syntax
"""
|
cztomczak/boovix
|
boovix1/utils/static_types.py
|
Python
|
bsd-3-clause
| 868 | 0 |
# -*- coding: utf-8 -*-
REPO_BACKENDS = {}
REPO_TYPES = []
class RepositoryTypeNotAvailable(Exception):
pass
try:
from brigitte.backends import libgit
REPO_BACKENDS['git'] = libgit.Repo
REPO_TYPES.append(('git', 'GIT'))
except ImportError:
from brigitte.backends import git
REPO_BACKENDS['git'] = git.Repo
REPO_TYPES.append(('git', 'GIT'))
try:
from brigitte.backends import hg
REPO_BACKENDS['hg'] = hg.Repo
REPO_TYPES.append(('hg', 'Mercurial'))
except ImportError:
pass
def get_backend(repo_type):
if not repo_type in REPO_BACKENDS:
raise RepositoryTypeNotAvailable(repo_type)
return REPO_BACKENDS[repo_type]
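# Illustrative usage (an assumption, not part of the original module): callers are
# expected to look up the backend class for a repository's stored type and
# instantiate it themselves, e.g.
#
#     backend_cls = get_backend(repository.repo_type)
#     repo = backend_cls(repository.path)
#
# the constructor argument shown here is assumed purely for the example.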
|
stephrdev/brigitte
|
brigitte/backends/__init__.py
|
Python
|
bsd-3-clause
| 680 | 0.001471 |
# -*- coding: utf-8 -*-
#
# documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 27 13:23:22 2008-2009.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import os
# pip install sphinx_rtd_theme
# import sphinx_rtd_theme
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# sys.path.append(os.path.abspath('some/directory'))
#
sys.path.insert(0, os.path.join('ansible', 'lib'))
sys.path.append(os.path.abspath(os.path.join('..', '_extensions')))
# We want sphinx to document the ansible modules contained in this repository,
# not those that may happen to be installed in the version
# of Python used to run sphinx. When sphinx loads in order to document,
# the repository version needs to be the one that is loaded:
sys.path.insert(0, os.path.abspath(os.path.join('..', '..', '..', 'lib')))
VERSION = '2.10'
AUTHOR = 'Ansible, Inc'
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# TEST: 'sphinxcontrib.fulltoc'
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'pygments_lexer', 'notfound.extension']
# Later on, add 'sphinx.ext.viewcode' to the list if you want to have
# colorized code generated too for references.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Ansible'
copyright = "2021 Red Hat, Inc."
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
# exclude_dirs = []
# A list of glob-style patterns that should be excluded when looking
# for source files.
exclude_patterns = [
'2.10_index.rst',
'ansible_index.rst',
'core_index.rst',
'porting_guides/core_porting_guides',
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'YAML+Jinja'
# Substitutions, variables, entities, & shortcuts for text which do not need to link to anything.
# For titles which should be a link, use the intersphinx anchors set at the index, chapter, and section levels, such as qi_start_:
# |br| is useful for formatting fields inside of tables
# |_| is a nonbreaking space; similarly useful inside of tables
rst_epilog = """
.. |br| raw:: html
<br>
.. |_| unicode:: 0xA0
:trim:
"""
# Options for HTML output
# -----------------------
html_theme_path = ['../_themes']
html_theme = 'sphinx_rtd_theme'
html_short_title = 'Ansible Documentation'
html_show_sphinx = False
html_theme_options = {
'canonical_url': "https://docs.ansible.com/ansible/latest/",
'vcs_pageview_mode': 'edit'
}
html_context = {
'display_github': 'True',
'github_user': 'ansible',
'github_repo': 'ansible',
'github_version': 'devel/docs/docsite/rst/',
'github_module_version': 'devel/lib/ansible/modules/',
'github_root_dir': 'devel/lib/ansible',
'github_cli_version': 'devel/lib/ansible/cli/',
'current_version': version,
'latest_version': '2.10',
# list specifically out of order to make latest work
'available_versions': ('latest', '2.9', '2.9_ja', '2.8', 'devel'),
'css_files': ('_static/ansible.css', # overrides to the standard theme
),
}
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'solar.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Ansible Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
# html_logo =
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = 'https://docs.ansible.com/ansible/latest'
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Poseidodoc'
# Configuration for sphinx-notfound-pages
# with no 'notfound_template' and no 'notfound_context' set,
# the extension builds 404.rst into a location-agnostic 404 page
#
# default is `en` - using this for the sub-site:
notfound_default_language = "ansible"
# default is `latest`:
# setting explicitly - docsite serves up /ansible/latest/404.html
# so keep this set to `latest` even on the `devel` branch
# then no maintenance is needed when we branch a new stable_x.x
notfound_default_version = "latest"
# makes default setting explicit:
notfound_no_urls_prefix = False
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
('index', 'ansible.tex', 'Ansible 2.2 Documentation', AUTHOR, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
autoclass_content = 'both'
# Note: Our strategy for intersphinx mappings is to have the upstream build location as the
# canonical source and then cached copies of the mapping stored locally in case someone is building
# when disconnected from the internet. We then have a script to update the cached copies.
#
# Because of that, each entry in this mapping should have this format:
# name: ('http://UPSTREAM_URL', (None, 'path/to/local/cache.inv'))
#
# The update script depends on this format so deviating from this (for instance, adding a third
# location for the mapping to live) will confuse it.
intersphinx_mapping = {'python': ('https://docs.python.org/2/', (None, '../python2.inv')),
'python3': ('https://docs.python.org/3/', (None, '../python3.inv')),
'jinja2': ('http://jinja.palletsprojects.com/', (None, '../jinja2.inv')),
'ansible_2_10': ('https://docs.ansible.com/ansible/2.10/', (None, '../ansible_2_10.inv')),
'ansible_2_9': ('https://docs.ansible.com/ansible/2.9/', (None, '../ansible_2_9.inv')),
'ansible_2_8': ('https://docs.ansible.com/ansible/2.8/', (None, '../ansible_2_8.inv')),
'ansible_2_7': ('https://docs.ansible.com/ansible/2.7/', (None, '../ansible_2_7.inv')),
'ansible_2_6': ('https://docs.ansible.com/ansible/2.6/', (None, '../ansible_2_6.inv')),
'ansible_2_5': ('https://docs.ansible.com/ansible/2.5/', (None, '../ansible_2_5.inv')),
}
# linkchecker settings
linkcheck_ignore = [
r'http://irc\.freenode\.net',
]
linkcheck_workers = 25
# linkcheck_anchors = False
|
dmsimard/ansible
|
docs/docsite/sphinx_conf/2.10_conf.py
|
Python
|
gpl-3.0
| 10,553 | 0.001516 |
"""
WSGI config for twitter-tools project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "twitter-tools.settings")
application = get_wsgi_application()
|
jparicka/twitter-tools
|
twitter-tools/wsgi.py
|
Python
|
mit
| 403 | 0 |
# Copyright 2008-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
class XmlElementHandler(object):
def __init__(self, execution_result, root_handler=None):
self._stack = [(execution_result, root_handler or RootHandler())]
def start(self, elem):
result, handler = self._stack[-1]
self._stack.append(handler.handle_child(elem, result))
def end(self, elem):
result, handler = self._stack.pop()
handler.end(elem, result)
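# Illustrative wiring (an assumption, not part of the original file): the handler is
# meant to be driven by SAX-style start/end events, for example while looping over
# xml.etree.ElementTree.iterparse(source, events=('start', 'end')) and calling
# handler.start(elem) on 'start' events and handler.end(elem) on 'end' events.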
class _Handler(object):
def __init__(self):
self._child_map = dict((c.tag, c) for c in self._children())
def _children(self):
return []
def handle_child(self, elem, result):
try:
handler = self._child_map[elem.tag]
except KeyError:
raise DataError("Incompatible XML element '%s'" % elem.tag)
return handler.start(elem, result), handler
def start(self, elem, result):
return result
def end(self, elem, result):
pass
def _timestamp(self, elem, attr_name):
timestamp = elem.get(attr_name)
return timestamp if timestamp != 'N/A' else None
class RootHandler(_Handler):
def _children(self):
return [RobotHandler()]
class RobotHandler(_Handler):
tag = 'robot'
def start(self, elem, result):
generator = elem.get('generator', 'unknown').split()[0].upper()
result.generated_by_robot = generator == 'ROBOT'
return result
def _children(self):
return [RootSuiteHandler(), StatisticsHandler(), ErrorsHandler()]
class SuiteHandler(_Handler):
tag = 'suite'
def start(self, elem, result):
return result.suites.create(name=elem.get('name'),
source=elem.get('source', ''))
def _children(self):
return [DocHandler(), MetadataHandler(), SuiteStatusHandler(),
KeywordHandler(), TestCaseHandler(), self]
class RootSuiteHandler(SuiteHandler):
def start(self, elem, result):
result.suite.name = elem.get('name')
result.suite.source = elem.get('source')
return result.suite
def _children(self):
return SuiteHandler._children(self)[:-1] + [SuiteHandler()]
class TestCaseHandler(_Handler):
tag = 'test'
def start(self, elem, result):
return result.tests.create(name=elem.get('name'),
timeout=elem.get('timeout'))
def _children(self):
return [DocHandler(), TagsHandler(), TestStatusHandler(), KeywordHandler()]
class KeywordHandler(_Handler):
tag = 'kw'
def start(self, elem, result):
return result.keywords.create(name=elem.get('name'),
timeout=elem.get('timeout'),
type=elem.get('type'))
def _children(self):
return [DocHandler(), ArgumentsHandler(), KeywordStatusHandler(),
MessageHandler(), self]
class MessageHandler(_Handler):
tag = 'msg'
def end(self, elem, result):
result.messages.create(elem.text or '',
elem.get('level'),
elem.get('html', 'no') == 'yes',
self._timestamp(elem, 'timestamp'))
class _StatusHandler(_Handler):
tag = 'status'
def _set_status(self, elem, result):
result.status = elem.get('status', 'FAIL')
def _set_message(self, elem, result):
result.message = elem.text or ''
def _set_times(self, elem, result):
result.starttime = self._timestamp(elem, 'starttime')
result.endtime = self._timestamp(elem, 'endtime')
class KeywordStatusHandler(_StatusHandler):
def end(self, elem, result):
self._set_status(elem, result)
self._set_times(elem, result)
if result.type == result.TEARDOWN_TYPE:
self._set_message(elem, result)
class SuiteStatusHandler(_StatusHandler):
def end(self, elem, result):
self._set_message(elem, result)
self._set_times(elem, result)
class TestStatusHandler(_StatusHandler):
def end(self, elem, result):
self._set_status(elem, result)
self._set_message(elem, result)
self._set_times(elem, result)
class DocHandler(_Handler):
tag = 'doc'
def end(self, elem, result):
result.doc = elem.text or ''
class MetadataHandler(_Handler):
tag = 'metadata'
def _children(self):
return [MetadataItemHandler()]
class MetadataItemHandler(_Handler):
tag = 'item'
def end(self, elem, result):
result.metadata[elem.get('name')] = elem.text or ''
class TagsHandler(_Handler):
tag = 'tags'
def _children(self):
return [TagHandler()]
class TagHandler(_Handler):
tag = 'tag'
def end(self, elem, result):
result.tags.add(elem.text or '')
class ArgumentsHandler(_Handler):
tag = 'arguments'
def _children(self):
return [ArgumentHandler()]
class ArgumentHandler(_Handler):
tag = 'arg'
def end(self, elem, result):
result.args += (elem.text or '',)
class ErrorsHandler(_Handler):
tag = 'errors'
def start(self, elem, result):
return result.errors
def _children(self):
return [MessageHandler()]
class StatisticsHandler(_Handler):
tag = 'statistics'
def handle_child(self, elem, result):
return result, self
|
ktan2020/legacy-automation
|
win/Lib/site-packages/robot/result/xmlelementhandlers.py
|
Python
|
mit
| 6,002 | 0.000167 |
#!/usr/bin/env python
# ~*~ encoding: utf-8 ~*~
"""
This file is part of SOCSIM.
SOCSIM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SOCSIM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SOCSIM. If not, see <http://www.gnu.org/licenses/>.
"""
#===
# name extract.py
# date: 2013AUG10
# prog: pr
# desc: input data from ini, export to meta & record
# copy: copyright (C) 2013 Peter Renshaw
#===
import configparser
import record
# ---
# read file
# generate data
# build meta
#
# build temp record of meta
# save to file
# ---
# * extract
# - sections
# - key, values
# * organise
# - meta
# - others
class ini:
def __init__(self, data, config):
self.data = data
self.store = []
self.c = config
def build(self):
"""build up store"""
if self.data:
if self.meta():
if self.sections():
return True
return False
def meta(self):
"""parse meta section"""
if 'meta' in self.c.sections():
meta = self.c.sections()[0]
self.store.append(meta)
return True
return False
def section(self, name):
"""parse info section by name"""
print(dir(name))
return True
def sections(self):
"""extract all sections"""
for section in self.c.sections():
self.section(section)
return True
def all(self):
"""return all sections in store"""
return self.store
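# Illustrative driver (an assumption, not in the original file) for the class above:
# parse an .ini file with configparser, hand the raw text and the parser to ``ini``,
# then collect whatever sections were organised into the store.
#
#     c = configparser.ConfigParser()
#     c.read('notes.ini')
#     with open('notes.ini') as fh:
#         notes = ini(fh.read(), c)
#     if notes.build():
#         everything = notes.all()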
def main():
pass
if __name__ == "__main__":
main()
# vim: ff=unix:ts=4:sw=4:tw=78:noai:expandtab
|
peterrenshaw/socsim
|
socsim/misc/extract.py
|
Python
|
gpl-3.0
| 2,087 | 0.004792 |
#!/usr/bin/python
# Copyright (C) 2014 Alex Nitz, Andrew Miller, Tito Dal Canton
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import copy
from glue import segments
from pycbc.psd.read import *
from pycbc.psd.analytical import *
from pycbc.psd.estimate import *
from pycbc.psd.variation import *
from pycbc.types import float32,float64
from pycbc.types import MultiDetOptionAppendAction, MultiDetOptionAction
from pycbc.types import copy_opts_for_single_ifo
from pycbc.types import required_opts, required_opts_multi_ifo
from pycbc.types import ensure_one_opt, ensure_one_opt_multi_ifo
def from_cli(opt, length, delta_f, low_frequency_cutoff,
strain=None, dyn_range_factor=1, precision=None):
"""Parses the CLI options related to the noise PSD and returns a
FrequencySeries with the corresponding PSD. If necessary, the PSD is
linearly interpolated to achieve the resolution specified in the CLI.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length,
psd_output).
length : int
The length in samples of the output PSD.
delta_f : float
The frequency step of the output PSD.
low_frequency_cutoff: float
        The low frequency cutoff to use when calculating the PSD.
strain : {None, TimeSeries}
Time series containing the data from which the PSD should be measured,
when psd_estimation is in use.
dyn_range_factor : {1, float}
For PSDs taken from models or text files, if `dyn_range_factor` is
not None, then the PSD is multiplied by `dyn_range_factor` ** 2.
precision : str, choices (None,'single','double')
If not specified, or specified as None, the precision of the returned
PSD will match the precision of the data, if measuring a PSD, or will
match the default precision of the model if using an analytical PSD.
If 'single' the PSD will be converted to float32, if not already in
that precision. If 'double' the PSD will be converted to float64, if
not already in that precision.
Returns
-------
psd : FrequencySeries
The frequency series containing the PSD.
"""
f_low = low_frequency_cutoff
sample_rate = int((length -1) * 2 * delta_f)
try:
psd_estimation = opt.psd_estimation is not None
except AttributeError:
psd_estimation = False
exclusive_opts = [opt.psd_model, opt.psd_file, opt.asd_file,
psd_estimation]
if sum(map(bool, exclusive_opts)) != 1:
err_msg = "You must specify exactly one of '--psd-file', "
err_msg += "'--psd-model', '--asd-file', '--psd-estimation'"
raise ValueError(err_msg)
if (opt.psd_model or opt.psd_file or opt.asd_file):
# PSD from lalsimulation or file
if opt.psd_model:
psd = from_string(opt.psd_model, length, delta_f, f_low)
elif opt.psd_file or opt.asd_file:
if opt.asd_file:
psd_file_name = opt.asd_file
else:
psd_file_name = opt.psd_file
if psd_file_name.endswith(('.dat', '.txt')):
is_asd_file = bool(opt.asd_file)
psd = from_txt(psd_file_name, length,
delta_f, f_low, is_asd_file=is_asd_file)
elif opt.asd_file:
err_msg = "ASD files are only valid as ASCII files (.dat or "
err_msg += ".txt). Supplied {}.".format(psd_file_name)
elif psd_file_name.endswith(('.xml', '.xml.gz')):
psd = from_xml(psd_file_name, length, delta_f, f_low,
ifo_string=opt.psd_file_xml_ifo_string,
root_name=opt.psd_file_xml_root_name)
# Set values < flow to the value at flow
kmin = int(low_frequency_cutoff / psd.delta_f)
psd[0:kmin] = psd[kmin]
psd *= dyn_range_factor ** 2
elif psd_estimation:
# estimate PSD from data
psd = welch(strain, avg_method=opt.psd_estimation,
seg_len=int(opt.psd_segment_length * sample_rate),
seg_stride=int(opt.psd_segment_stride * sample_rate),
num_segments=opt.psd_num_segments,
require_exact_data_fit=False)
if delta_f != psd.delta_f:
psd = interpolate(psd, delta_f)
else:
# Shouldn't be possible to get here
raise ValueError("Shouldn't be possible to raise this!")
if opt.psd_inverse_length:
psd = inverse_spectrum_truncation(psd,
int(opt.psd_inverse_length * sample_rate),
low_frequency_cutoff=f_low)
if hasattr(opt, 'psd_output') and opt.psd_output:
(psd.astype(float64) / (dyn_range_factor ** 2)).save(opt.psd_output)
if precision is None:
return psd
elif precision == 'single':
return psd.astype(float32)
elif precision == 'double':
return psd.astype(float64)
else:
err_msg = "If provided the precision kwarg must be either 'single' "
err_msg += "or 'double'. You provided %s." %(precision)
raise ValueError(err_msg)
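# Illustrative call pattern (an assumption, not part of the original module): ``opt``
# is normally the argparse namespace produced by the option group added further below,
# for example
#
#     parser = argparse.ArgumentParser()
#     insert_psd_option_group(parser)
#     opt = parser.parse_args()
#     verify_psd_options(opt, parser)
#     psd = from_cli(opt, length, delta_f, low_frequency_cutoff=20.0, strain=strain)
#
# with ``length``, ``delta_f`` and ``strain`` supplied by the calling workflow.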
def from_cli_single_ifo(opt, length, delta_f, low_frequency_cutoff, ifo,
**kwargs):
"""
Get the PSD for a single ifo when using the multi-detector CLI
"""
single_det_opt = copy_opts_for_single_ifo(opt, ifo)
return from_cli(single_det_opt, length, delta_f, low_frequency_cutoff,
**kwargs)
def from_cli_multi_ifos(opt, length_dict, delta_f_dict,
low_frequency_cutoff_dict, ifos, strain_dict=None,
**kwargs):
"""
Get the PSD for all ifos when using the multi-detector CLI
"""
psd = {}
for ifo in ifos:
if strain_dict is not None:
strain = strain_dict[ifo]
else:
strain = None
psd[ifo] = from_cli_single_ifo(opt, length_dict[ifo], delta_f_dict[ifo],
low_frequency_cutoff_dict[ifo], ifo,
strain=strain, **kwargs)
return psd
def insert_psd_option_group(parser, output=True, include_data_options=True):
"""
Adds the options used to call the pycbc.psd.from_cli function to an
optparser as an OptionGroup. This should be used if you
want to use these options in your code.
Parameters
-----------
parser : object
OptionParser instance.
"""
psd_options = parser.add_argument_group(
"Options to select the method of PSD generation",
"The options --psd-model, --psd-file, --asd-file, "
"and --psd-estimation are mutually exclusive.")
psd_options.add_argument("--psd-model",
help="Get PSD from given analytical model. ",
choices=get_psd_model_list())
psd_options.add_argument("--psd-file",
help="Get PSD using given PSD ASCII file")
psd_options.add_argument("--asd-file",
help="Get PSD using given ASD ASCII file")
psd_options.add_argument("--psd-inverse-length", type=float,
help="(Optional) The maximum length of the "
"impulse response of the overwhitening "
"filter (s)")
# Options specific to XML PSD files
psd_options.add_argument("--psd-file-xml-ifo-string",
help="If using an XML PSD file, use the PSD in "
"the file's PSD dictionary with this "
"ifo string. If not given and only one "
"PSD present in the file return that, if "
"not given and multiple (or zero) PSDs "
"present an exception will be raised.")
psd_options.add_argument("--psd-file-xml-root-name", default='psd',
help="If given use this as the root name for "
"the PSD XML file. If this means nothing "
"to you, then it is probably safe to "
"ignore this option.")
# Options for PSD variation
psd_options.add_argument("--psdvar_short_segment", type=float,
metavar="SECONDS", help="Length of short segment "
"when calculating the PSD variability.")
psd_options.add_argument("--psdvar_long_segment", type=float,
metavar="SECONDS", help="Length of long segment "
"when calculating the PSD variability.")
psd_options.add_argument("--psdvar_overlap", type=float, metavar="SECONDS", help="Sample length of the PSD.")
psd_options.add_argument("--psdvar_low_freq", type=float, metavar="HERTZ", help="Minimum frequency to consider in PSD "
"comparison.")
psd_options.add_argument("--psdvar_high_freq", type=float, metavar="HERTZ", help="Maximum frequency to consider in PSD "
"comparison.")
if include_data_options :
psd_options.add_argument("--psd-estimation",
help="Measure PSD from the data, using "
"given average method.",
choices=["mean", "median", "median-mean"])
psd_options.add_argument("--psd-segment-length", type=float,
help="(Required for --psd-estimation) The "
"segment length for PSD estimation (s)")
psd_options.add_argument("--psd-segment-stride", type=float,
help="(Required for --psd-estimation) "
"The separation between consecutive "
"segments (s)")
psd_options.add_argument("--psd-num-segments", type=int, default=None,
help="(Optional, used only with "
"--psd-estimation). If given, PSDs will "
"be estimated using only this number of "
"segments. If more data is given than "
"needed to make this number of segments "
"then excess data will not be used in "
"the PSD estimate. If not enough data "
"is given, the code will fail.")
if output:
psd_options.add_argument("--psd-output",
help="(Optional) Write PSD to specified file")
return psd_options
def insert_psd_option_group_multi_ifo(parser):
"""
Adds the options used to call the pycbc.psd.from_cli function to an
optparser as an OptionGroup. This should be used if you
want to use these options in your code.
Parameters
-----------
parser : object
OptionParser instance.
"""
psd_options = parser.add_argument_group(
"Options to select the method of PSD generation",
"The options --psd-model, --psd-file, --asd-file, "
"and --psd-estimation are mutually exclusive.")
psd_options.add_argument("--psd-model", nargs="+",
action=MultiDetOptionAction, metavar='IFO:MODEL',
help="Get PSD from given analytical model. "
"Choose from %s" %(', '.join(get_psd_model_list()),))
psd_options.add_argument("--psd-file", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="Get PSD using given PSD ASCII file")
psd_options.add_argument("--asd-file", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="Get PSD using given ASD ASCII file")
psd_options.add_argument("--psd-estimation", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="Measure PSD from the data, using given "
"average method. Choose from "
"mean, median or median-mean.")
psd_options.add_argument("--psd-segment-length", type=float, nargs="+",
action=MultiDetOptionAction, metavar='IFO:LENGTH',
help="(Required for --psd-estimation) The segment "
"length for PSD estimation (s)")
psd_options.add_argument("--psd-segment-stride", type=float, nargs="+",
action=MultiDetOptionAction, metavar='IFO:STRIDE',
help="(Required for --psd-estimation) The separation"
" between consecutive segments (s)")
psd_options.add_argument("--psd-num-segments", type=int, nargs="+",
default=None,
action=MultiDetOptionAction, metavar='IFO:NUM',
help="(Optional, used only with --psd-estimation). "
"If given PSDs will be estimated using only "
"this number of segments. If more data is "
"given than needed to make this number of "
"segments than excess data will not be used in "
"the PSD estimate. If not enough data is given "
"the code will fail.")
psd_options.add_argument("--psd-inverse-length", type=float, nargs="+",
action=MultiDetOptionAction, metavar='IFO:LENGTH',
help="(Optional) The maximum length of the impulse"
" response of the overwhitening filter (s)")
psd_options.add_argument("--psd-output", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="(Optional) Write PSD to specified file")
# Options for PSD variation
psd_options.add_argument("--psdvar_short_segment", type=float,
metavar="SECONDS", help="Length of short segment "
"when calculating the PSD variability.")
psd_options.add_argument("--psdvar_long_segment", type=float,
metavar="SECONDS", help="Length of long segment "
"when calculating the PSD variability.")
psd_options.add_argument("--psdvar_overlap", type=float, metavar="SECONDS", help="Sample length of the PSD.")
psd_options.add_argument("--psdvar_low_freq", type=float, metavar="HERTZ", help="Minimum frequency to consider in PSD "
"comparison.")
psd_options.add_argument("--psdvar_high_freq", type=float, metavar="HERTZ", help="Maximum frequency to consider in PSD "
"comparison.")
return psd_options
ensure_one_opt_groups = []
ensure_one_opt_groups.append(['--psd-file', '--psd-model',
'--psd-estimation', '--asd-file'])
def verify_psd_options(opt, parser):
"""Parses the CLI options and verifies that they are consistent and
reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
parser : object
OptionParser instance.
"""
try:
psd_estimation = opt.psd_estimation is not None
except AttributeError:
psd_estimation = False
for opt_group in ensure_one_opt_groups:
ensure_one_opt(opt, parser, opt_group)
if psd_estimation:
required_opts(opt, parser,
['--psd-segment-stride', '--psd-segment-length'],
required_by = "--psd-estimation")
def verify_psd_options_multi_ifo(opt, parser, ifos):
"""Parses the CLI options and verifies that they are consistent and
reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
parser : object
OptionParser instance.
"""
for ifo in ifos:
for opt_group in ensure_one_opt_groups:
ensure_one_opt_multi_ifo(opt, parser, ifo, opt_group)
if opt.psd_estimation[ifo]:
required_opts_multi_ifo(opt, parser, ifo,
['--psd-segment-stride', '--psd-segment-length'],
required_by = "--psd-estimation")
def generate_overlapping_psds(opt, gwstrain, flen, delta_f, flow,
dyn_range_factor=1., precision=None):
"""Generate a set of overlapping PSDs to cover a stretch of data. This
allows one to analyse a long stretch of data with PSD measurements that
change with time.
Parameters
-----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
gwstrain : Strain object
The timeseries of raw data on which to estimate PSDs.
flen : int
The length in samples of the output PSDs.
delta_f : float
The frequency step of the output PSDs.
flow: float
        The low frequency cutoff to use when calculating the PSD.
dyn_range_factor : {1, float}
For PSDs taken from models or text files, if `dyn_range_factor` is
not None, then the PSD is multiplied by `dyn_range_factor` ** 2.
precision : str, choices (None,'single','double')
If not specified, or specified as None, the precision of the returned
PSD will match the precision of the data, if measuring a PSD, or will
match the default precision of the model if using an analytical PSD.
If 'single' the PSD will be converted to float32, if not already in
that precision. If 'double' the PSD will be converted to float64, if
not already in that precision.
Returns
--------
psd_and_times : list of (start, end, PSD) tuples
This is a list of tuples containing one entry for each PSD. The first
and second entries (start, end) in each tuple represent the index
range of the gwstrain data that was used to estimate that PSD. The
third entry (psd) contains the PSD estimate between that interval.
"""
if not opt.psd_estimation:
psd = from_cli(opt, flen, delta_f, flow, strain=gwstrain,
dyn_range_factor=dyn_range_factor, precision=precision)
psds_and_times = [ (0, len(gwstrain), psd) ]
return psds_and_times
# Figure out the data length used for PSD generation
seg_stride = int(opt.psd_segment_stride * gwstrain.sample_rate)
seg_len = int(opt.psd_segment_length * gwstrain.sample_rate)
input_data_len = len(gwstrain)
if opt.psd_num_segments is None:
# FIXME: Should we make --psd-num-segments mandatory?
# err_msg = "You must supply --num-segments."
# raise ValueError(err_msg)
num_segments = int(input_data_len // seg_stride) - 1
else:
num_segments = int(opt.psd_num_segments)
psd_data_len = (num_segments - 1) * seg_stride + seg_len
# How many unique PSD measurements is this?
psds_and_times = []
if input_data_len < psd_data_len:
err_msg = "Input data length must be longer than data length needed "
err_msg += "to estimate a PSD. You specified that a PSD should be "
err_msg += "estimated with %d seconds. " %(psd_data_len)
err_msg += "Input data length is %d seconds. " %(input_data_len)
raise ValueError(err_msg)
elif input_data_len == psd_data_len:
num_psd_measurements = 1
psd_stride = 0
else:
num_psd_measurements = int(2 * (input_data_len-1) / psd_data_len)
psd_stride = int((input_data_len - psd_data_len) / num_psd_measurements)
for idx in range(num_psd_measurements):
if idx == (num_psd_measurements - 1):
start_idx = input_data_len - psd_data_len
end_idx = input_data_len
else:
start_idx = psd_stride * idx
end_idx = psd_data_len + psd_stride * idx
strain_part = gwstrain[start_idx:end_idx]
psd = from_cli(opt, flen, delta_f, flow, strain=strain_part,
dyn_range_factor=dyn_range_factor, precision=precision)
psds_and_times.append( (start_idx, end_idx, psd) )
return psds_and_times
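# Worked example with illustrative numbers (not from the original source), counting
# everything in samples: if seg_len = 256, seg_stride = 128 and num_segments = 15,
# then psd_data_len = 14 * 128 + 256 = 2048. For input_data_len = 4096,
# num_psd_measurements = int(2 * (4096 - 1) / 2048) = 3 and
# psd_stride = int((4096 - 2048) / 3) = 682, so PSDs are measured over the index
# ranges [0, 2048), [682, 2730) and, for the final measurement which is pinned to
# the end of the data, [2048, 4096).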
def associate_psds_to_segments(opt, fd_segments, gwstrain, flen, delta_f, flow,
dyn_range_factor=1., precision=None):
"""Generate a set of overlapping PSDs covering the data in GWstrain.
Then associate these PSDs with the appropriate segment in strain_segments.
Parameters
-----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
fd_segments : StrainSegments.fourier_segments() object
The fourier transforms of the various analysis segments. The psd
attribute of each segment is updated to point to the appropriate PSD.
gwstrain : Strain object
The timeseries of raw data on which to estimate PSDs.
flen : int
The length in samples of the output PSDs.
delta_f : float
The frequency step of the output PSDs.
flow: float
        The low frequency cutoff to use when calculating the PSD.
dyn_range_factor : {1, float}
For PSDs taken from models or text files, if `dyn_range_factor` is
not None, then the PSD is multiplied by `dyn_range_factor` ** 2.
precision : str, choices (None,'single','double')
If not specified, or specified as None, the precision of the returned
PSD will match the precision of the data, if measuring a PSD, or will
match the default precision of the model if using an analytical PSD.
If 'single' the PSD will be converted to float32, if not already in
that precision. If 'double' the PSD will be converted to float64, if
not already in that precision.
"""
psds_and_times = generate_overlapping_psds(opt, gwstrain, flen, delta_f,
flow, dyn_range_factor=dyn_range_factor,
precision=precision)
for fd_segment in fd_segments:
best_psd = None
psd_overlap = 0
inp_seg = segments.segment(fd_segment.seg_slice.start,
fd_segment.seg_slice.stop)
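        # Choose the PSD whose measurement interval overlaps this analysis
        # segment the most.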
for start_idx, end_idx, psd in psds_and_times:
psd_seg = segments.segment(start_idx, end_idx)
if psd_seg.intersects(inp_seg):
curr_overlap = abs(inp_seg & psd_seg)
if curr_overlap > psd_overlap:
psd_overlap = curr_overlap
best_psd = psd
if best_psd is None:
err_msg = "No PSDs found intersecting segment!"
raise ValueError(err_msg)
fd_segment.psd = best_psd
def associate_psds_to_single_ifo_segments(opt, fd_segments, gwstrain, flen,
delta_f, flow, ifo,
dyn_range_factor=1., precision=None):
"""
Associate PSDs to segments for a single ifo when using the multi-detector
CLI
"""
single_det_opt = copy_opts_for_single_ifo(opt, ifo)
associate_psds_to_segments(single_det_opt, fd_segments, gwstrain, flen,
delta_f, flow, dyn_range_factor=dyn_range_factor,
precision=precision)
def associate_psds_to_multi_ifo_segments(opt, fd_segments, gwstrain, flen,
delta_f, flow, ifos,
dyn_range_factor=1., precision=None):
"""
Associate PSDs to segments for all ifos when using the multi-detector CLI
"""
for ifo in ifos:
if gwstrain is not None:
strain = gwstrain[ifo]
else:
strain = None
if fd_segments is not None:
segments = fd_segments[ifo]
else:
segments = None
associate_psds_to_single_ifo_segments(opt, segments, strain, flen,
delta_f, flow, ifo, dyn_range_factor=dyn_range_factor,
precision=precision)
|
tjma12/pycbc
|
pycbc/psd/__init__.py
|
Python
|
gpl-3.0
| 26,178 | 0.00275 |
try:
from account.decorators import login_required
except ImportError:
from django.contrib.auth.decorators import login_required # noqa
try:
from account.mixins import LoginRequiredMixin
except ImportError:
from django.contrib.auth.mixins import LoginRequiredMixin # noqa
|
pinax/pinax-forums
|
pinax/forums/compat.py
|
Python
|
mit
| 292 | 0 |
#! /usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import json
import os.path
import re
import sys
from json_parse import OrderedDict
# This file is a peer to json_schema.py. Each of these files understands a
# certain format describing APIs (either JSON or IDL), reads files written
# in that format into memory, and emits them as a Python array of objects
# corresponding to those APIs, where the objects are formatted in a way that
# the JSON schema compiler understands. compiler.py drives both idl_schema.py
# and json_schema.py.
# idl_parser expects to be able to import certain files in its directory,
# so let's set things up the way it wants.
_idl_generators_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.pardir, os.pardir, 'ppapi', 'generators')
if _idl_generators_path in sys.path:
import idl_parser
else:
sys.path.insert(0, _idl_generators_path)
try:
import idl_parser
finally:
sys.path.pop(0)
def ProcessComment(comment):
'''
Convert a comment into a parent comment and a list of parameter comments.
Function comments are of the form:
Function documentation. May contain HTML and multiple lines.
|arg1_name|: Description of arg1. Use <var>argument</var> to refer
to other arguments.
|arg2_name|: Description of arg2...
Newlines are removed, and leading and trailing whitespace is stripped.
Args:
comment: The string from a Comment node.
Returns: A tuple that looks like:
(
"The processed comment, minus all |parameter| mentions.",
{
'parameter_name_1': "The comment that followed |parameter_name_1|:",
...
}
)
'''
# Find all the parameter comments of the form '|name|: comment'.
parameter_starts = list(re.finditer(r' *\|([^|]*)\| *: *', comment))
  # Get the parent comment (everything before the first parameter comment).
first_parameter_location = (parameter_starts[0].start()
if parameter_starts else len(comment))
parent_comment = comment[:first_parameter_location]
# We replace \n\n with <br/><br/> here and below, because the documentation
# needs to know where the newlines should be, and this is easier than
# escaping \n.
parent_comment = (parent_comment.strip().replace('\n\n', '<br/><br/>')
.replace('\n', ''))
params = OrderedDict()
for (cur_param, next_param) in itertools.izip_longest(parameter_starts,
parameter_starts[1:]):
param_name = cur_param.group(1)
# A parameter's comment goes from the end of its introduction to the
# beginning of the next parameter's introduction.
param_comment_start = cur_param.end()
param_comment_end = next_param.start() if next_param else len(comment)
params[param_name] = (comment[param_comment_start:param_comment_end
].strip().replace('\n\n', '<br/><br/>')
.replace('\n', ''))
return (parent_comment, params)
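# Illustrative example with a hypothetical comment string (not taken from a real
# IDL file):
#
#   ProcessComment('Adds two numbers.\n|a|: first operand.\n|b|: second operand.')
#   returns ('Adds two numbers.',
#            {'a': 'first operand.', 'b': 'second operand.'})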
class Callspec(object):
'''
Given a Callspec node representing an IDL function declaration, converts into
a tuple:
(name, list of function parameters, return type)
'''
def __init__(self, callspec_node, comment):
self.node = callspec_node
self.comment = comment
def process(self, callbacks):
parameters = []
return_type = None
if self.node.GetProperty('TYPEREF') not in ('void', None):
return_type = Typeref(self.node.GetProperty('TYPEREF'),
self.node.parent,
{'name': self.node.GetName()}).process(callbacks)
# The IDL parser doesn't allow specifying return types as optional.
# Instead we infer any object return values to be optional.
# TODO(asargent): fix the IDL parser to support optional return types.
if return_type.get('type') == 'object' or '$ref' in return_type:
return_type['optional'] = True
for node in self.node.GetChildren():
parameter = Param(node).process(callbacks)
if parameter['name'] in self.comment:
parameter['description'] = self.comment[parameter['name']]
parameters.append(parameter)
return (self.node.GetName(), parameters, return_type)
class Param(object):
'''
Given a Param node representing a function parameter, converts into a Python
dictionary that the JSON schema compiler expects to see.
'''
def __init__(self, param_node):
self.node = param_node
def process(self, callbacks):
return Typeref(self.node.GetProperty('TYPEREF'),
self.node,
{'name': self.node.GetName()}).process(callbacks)
class Dictionary(object):
'''
Given an IDL Dictionary node, converts into a Python dictionary that the JSON
schema compiler expects to see.
'''
def __init__(self, dictionary_node):
self.node = dictionary_node
def process(self, callbacks):
properties = OrderedDict()
for node in self.node.GetChildren():
if node.cls == 'Member':
k, v = Member(node).process(callbacks)
properties[k] = v
result = {'id': self.node.GetName(),
'properties': properties,
'type': 'object'}
if self.node.GetProperty('nodoc'):
result['nodoc'] = True
elif self.node.GetProperty('inline_doc'):
result['inline_doc'] = True
elif self.node.GetProperty('noinline_doc'):
result['noinline_doc'] = True
return result
class Member(object):
'''
Given an IDL dictionary or interface member, converts into a name/value pair
where the value is a Python dictionary that the JSON schema compiler expects
to see.
'''
def __init__(self, member_node):
self.node = member_node
def process(self, callbacks):
properties = OrderedDict()
name = self.node.GetName()
if self.node.GetProperty('deprecated'):
properties['deprecated'] = self.node.GetProperty('deprecated')
for property_name in ('OPTIONAL', 'nodoc', 'nocompile', 'nodart'):
if self.node.GetProperty(property_name):
properties[property_name.lower()] = True
for option_name, sanitizer in [
('maxListeners', int),
('supportsFilters', lambda s: s == 'true'),
('supportsListeners', lambda s: s == 'true'),
('supportsRules', lambda s: s == 'true')]:
if self.node.GetProperty(option_name):
if 'options' not in properties:
properties['options'] = {}
properties['options'][option_name] = sanitizer(self.node.GetProperty(
option_name))
is_function = False
parameter_comments = OrderedDict()
for node in self.node.GetChildren():
if node.cls == 'Comment':
(parent_comment, parameter_comments) = ProcessComment(node.GetName())
properties['description'] = parent_comment
elif node.cls == 'Callspec':
is_function = True
name, parameters, return_type = (Callspec(node, parameter_comments)
.process(callbacks))
properties['parameters'] = parameters
if return_type is not None:
properties['returns'] = return_type
properties['name'] = name
if is_function:
properties['type'] = 'function'
else:
properties = Typeref(self.node.GetProperty('TYPEREF'),
self.node, properties).process(callbacks)
enum_values = self.node.GetProperty('legalValues')
if enum_values:
if properties['type'] == 'integer':
enum_values = map(int, enum_values)
elif properties['type'] == 'double':
enum_values = map(float, enum_values)
properties['enum'] = enum_values
return name, properties
class Typeref(object):
'''
Given a TYPEREF property representing the type of dictionary member or
function parameter, converts into a Python dictionary that the JSON schema
compiler expects to see.
'''
def __init__(self, typeref, parent, additional_properties):
self.typeref = typeref
self.parent = parent
self.additional_properties = additional_properties
def process(self, callbacks):
properties = self.additional_properties
result = properties
if self.parent.GetPropertyLocal('OPTIONAL'):
properties['optional'] = True
# The IDL parser denotes array types by adding a child 'Array' node onto
# the Param node in the Callspec.
for sibling in self.parent.GetChildren():
if sibling.cls == 'Array' and sibling.GetName() == self.parent.GetName():
properties['type'] = 'array'
properties['items'] = OrderedDict()
properties = properties['items']
break
if self.typeref == 'DOMString':
properties['type'] = 'string'
elif self.typeref == 'boolean':
properties['type'] = 'boolean'
elif self.typeref == 'double':
properties['type'] = 'number'
elif self.typeref == 'long':
properties['type'] = 'integer'
elif self.typeref == 'any':
properties['type'] = 'any'
elif self.typeref == 'object':
properties['type'] = 'object'
if 'additionalProperties' not in properties:
properties['additionalProperties'] = OrderedDict()
properties['additionalProperties']['type'] = 'any'
instance_of = self.parent.GetProperty('instanceOf')
if instance_of:
properties['isInstanceOf'] = instance_of
elif self.typeref == 'ArrayBuffer':
properties['type'] = 'binary'
properties['isInstanceOf'] = 'ArrayBuffer'
elif self.typeref == 'FileEntry':
properties['type'] = 'object'
properties['isInstanceOf'] = 'FileEntry'
if 'additionalProperties' not in properties:
properties['additionalProperties'] = OrderedDict()
properties['additionalProperties']['type'] = 'any'
elif self.parent.GetPropertyLocal('Union'):
choices = []
properties['choices'] = [Typeref(node.GetProperty('TYPEREF'),
node,
OrderedDict()).process(callbacks)
for node in self.parent.GetChildren()
if node.cls == 'Option']
elif self.typeref is None:
properties['type'] = 'function'
else:
if self.typeref in callbacks:
# Do not override name and description if they are already specified.
name = properties.get('name', None)
description = properties.get('description', None)
properties.update(callbacks[self.typeref])
if description is not None:
properties['description'] = description
if name is not None:
properties['name'] = name
else:
properties['$ref'] = self.typeref
return result
class Enum(object):
'''
Given an IDL Enum node, converts into a Python dictionary that the JSON
schema compiler expects to see.
'''
def __init__(self, enum_node):
self.node = enum_node
self.description = ''
def process(self, callbacks):
enum = []
for node in self.node.GetChildren():
if node.cls == 'EnumItem':
enum_value = {'name': node.GetName()}
for child in node.GetChildren():
if child.cls == 'Comment':
enum_value['description'] = ProcessComment(child.GetName())[0]
else:
raise ValueError('Did not process %s %s' % (child.cls, child))
enum.append(enum_value)
elif node.cls == 'Comment':
self.description = ProcessComment(node.GetName())[0]
else:
sys.exit('Did not process %s %s' % (node.cls, node))
result = {'id' : self.node.GetName(),
'description': self.description,
'type': 'string',
'enum': enum}
for property_name in (
'inline_doc', 'noinline_doc', 'nodoc', 'cpp_omit_enum_type',):
if self.node.GetProperty(property_name):
result[property_name] = True
if self.node.GetProperty('deprecated'):
      result['deprecated'] = self.node.GetProperty('deprecated')
return result
class Namespace(object):
'''
Given an IDLNode representing an IDL namespace, converts into a Python
dictionary that the JSON schema compiler expects to see.
'''
def __init__(self,
namespace_node,
description,
nodoc=False,
internal=False,
platforms=None,
compiler_options=None,
deprecated=None):
self.namespace = namespace_node
self.nodoc = nodoc
self.internal = internal
self.platforms = platforms
self.compiler_options = compiler_options
self.events = []
self.functions = []
self.types = []
self.callbacks = OrderedDict()
self.description = description
self.deprecated = deprecated
def process(self):
for node in self.namespace.GetChildren():
if node.cls == 'Dictionary':
self.types.append(Dictionary(node).process(self.callbacks))
elif node.cls == 'Callback':
k, v = Member(node).process(self.callbacks)
self.callbacks[k] = v
elif node.cls == 'Interface' and node.GetName() == 'Functions':
self.functions = self.process_interface(node)
elif node.cls == 'Interface' and node.GetName() == 'Events':
self.events = self.process_interface(node)
elif node.cls == 'Enum':
self.types.append(Enum(node).process(self.callbacks))
else:
sys.exit('Did not process %s %s' % (node.cls, node))
if self.compiler_options is not None:
compiler_options = self.compiler_options
else:
compiler_options = {}
return {'namespace': self.namespace.GetName(),
'description': self.description,
'nodoc': self.nodoc,
'types': self.types,
'functions': self.functions,
'internal': self.internal,
'events': self.events,
'platforms': self.platforms,
'compiler_options': compiler_options,
'deprecated': self.deprecated}
def process_interface(self, node):
members = []
for member in node.GetChildren():
if member.cls == 'Member':
name, properties = Member(member).process(self.callbacks)
members.append(properties)
return members
class IDLSchema(object):
'''
Given a list of IDLNodes and IDLAttributes, converts into a Python list
of api_defs that the JSON schema compiler expects to see.
'''
def __init__(self, idl):
self.idl = idl
def process(self):
namespaces = []
nodoc = False
internal = False
description = None
platforms = None
compiler_options = None
deprecated = None
for node in self.idl:
if node.cls == 'Namespace':
if not description:
# TODO(kalman): Go back to throwing an error here.
print('%s must have a namespace-level comment. This will '
'appear on the API summary page.' % node.GetName())
description = ''
namespace = Namespace(node, description, nodoc, internal,
platforms=platforms,
compiler_options=compiler_options,
deprecated=deprecated)
namespaces.append(namespace.process())
nodoc = False
internal = False
platforms = None
compiler_options = None
elif node.cls == 'Copyright':
continue
elif node.cls == 'Comment':
description = node.GetName()
elif node.cls == 'ExtAttribute':
if node.name == 'nodoc':
nodoc = bool(node.value)
elif node.name == 'internal':
internal = bool(node.value)
elif node.name == 'platforms':
platforms = list(node.value)
elif node.name == 'implemented_in':
compiler_options = {'implemented_in': node.value}
elif node.name == 'deprecated':
deprecated = str(node.value)
else:
continue
else:
sys.exit('Did not process %s %s' % (node.cls, node))
return namespaces
def Load(filename):
'''
Given the filename of an IDL file, parses it and returns an equivalent
Python dictionary in a format that the JSON schema compiler expects to see.
'''
f = open(filename, 'r')
contents = f.read()
f.close()
idl = idl_parser.IDLParser().ParseData(contents, filename)
idl_schema = IDLSchema(idl)
return idl_schema.process()
def Main():
'''
Dump a json serialization of parse result for the IDL files whose names
were passed in on the command line.
'''
for filename in sys.argv[1:]:
schema = Load(filename)
print json.dumps(schema, indent=2)
if __name__ == '__main__':
Main()
|
patrickm/chromium.src
|
tools/json_schema_compiler/idl_schema.py
|
Python
|
bsd-3-clause
| 16,913 | 0.008159 |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import unittest
from pykit.parsing import cirparser
from pykit.ir import verify, interp
source = """
#include <pykit_ir.h>
Int32 myglobal = 10;
float simple(float x) {
return x * x;
}
Int32 loop() {
Int32 i, sum = 0;
for (i = 0; i < 10; i = i + 1) {
sum = sum + i;
}
return sum;
}
Int32 raise() {
Exception exc = new_exc("TypeError", "");
exc_throw(exc);
return 0;
}
"""
mod = cirparser.from_c(source)
verify(mod)
class TestInterp(unittest.TestCase):
def test_simple(self):
f = mod.get_function('simple')
result = interp.run(f, args=[10.0])
assert result == 100.0, result
def test_loop(self):
loop = mod.get_function('loop')
result = interp.run(loop)
assert result == 45, result
def test_exceptions(self):
f = mod.get_function('raise')
try:
result = interp.run(f)
except interp.UncaughtException as e:
exc, = e.args
assert isinstance(exc, TypeError), exc
else:
assert False, result
if __name__ == '__main__':
unittest.main()
|
flypy/pykit
|
pykit/ir/tests/test_interp.py
|
Python
|
bsd-3-clause
| 1,210 | 0.001653 |
"""
Assignment T2 for the Graph Theory course, taught in 2014/02
'All Hail Gabe Newell'
Students:
Daniel Nobusada 344443
Thales Eduardo Adair Menato 407976
Jorge Augusto Bernardo 407844
"""
import networkx as nx
import numpy as np
import plotly.plotly as py
from plotly.graph_objs import *
py.sign_in("thamenato", "aq0t3czzut")
# Import the Zachary's Karate Club graph
graphG = nx.read_gml('karate.gml')
"""
1) Computation of the theoretical stationary (steady state) distribution of the graph
w(i) = d(vi) / 2|E|
"""
w_real = []
for i in graphG.nodes_iter():
aux = float(graphG.degree(i)) / float((2 * graphG.number_of_edges()))
w_real.append(aux)
"""
2) Computation using The Power Method
http://college.cengage.com/mathematics/larson/elementary_linear/4e/shared/
downloads/c10s3.pdf
"""
# Matrix P receives the adjacency matrix of graphG
matrixP = nx.adjacency_matrix(graphG)
# The sum of each row is computed
sum_linha = []
for i in matrixP:
sum_linha.append(i.sum())
# For each p(i,j) of P we set p(i,j) = p(i,j)/sum_linha(i)
for i in range(0, matrixP.shape[0]):
for j in range(0, matrixP.shape[1]):
matrixP[i, j] = float(matrixP[i, j]) / float(sum_linha[i])
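# After this normalisation every row of matrixP sums to 1, i.e. matrixP is the
# transition (row-stochastic) matrix of the random walk.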
# Vector w_inicial whose entries sum to 1, with uniform probability 1/G.order()
# For the graph used here, G.order() = 34
w_inicial = np.array([1.0/float(graphG.order())
for i in range(0, graphG.order())])
# Compute w_power5
w_power5 = np.dot(w_inicial, matrixP)
for i in range(0, 4):
w_power5 = np.dot(w_power5, matrixP)
# Compute w_power100
w_power100 = np.dot(w_inicial, matrixP)
for i in range(0, 99):
w_power100 = np.dot(w_power100, matrixP)
# The sum of all elements of these vectors is 1
"""
3) Pick 2 distinct vertices and perform a random walk starting from each of them
"""
# Random Walk function
def random_walk(node, numPassos):
    # Vector with one position per vertex (node) of the graph
caminhada = [0.0 for i in range(0, graphG.number_of_nodes())]
    # For the desired number of steps, a list with the current node's neighbours is
    # built, a random index of that list is selected as the next node, which then
    # becomes the current node, and the visit count of that node is incremented
for i in range(0, numPassos):
vizinhos = graphG.neighbors(node)
proxNo = vizinhos[np.random.randint(0, len(vizinhos))]
node = proxNo
caminhada[node-1] += 1
    # Divide every entry of the list by the number of steps
for i in range(0, len(caminhada)):
caminhada[i] /= numPassos
    # Returns a vector with (number of visits / number of steps) for each vertex (node)
return caminhada
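# For a long enough walk, the visit frequencies returned by random_walk approximate
# the theoretical stationary distribution w(i) = d(vi) / 2|E| from step 1.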
# Pick two random vertices (nodes)
nodeA = np.random.random_integers(1, graphG.number_of_nodes())
nodeB = np.random.random_integers(1, graphG.number_of_nodes())
# If vertex B equals A, draw new numbers until the two are distinct
while nodeB == nodeA:
nodeB = np.random.random_integers(1, graphG.number_of_nodes())
# 2 random walks of length N = 100
w_random100a = random_walk(nodeA, 100)
w_random100b = random_walk(nodeB, 100)
# 2 random walks of length N = 10000
w_random10000a = random_walk(nodeA, 10000)
w_random10000b = random_walk(nodeB, 10000)
# Print all the obtained data to the console
print "w_power5: "
w_power5_lista = []
for i in range(0, w_power5.size):
w_power5_lista.append('%.4f'%w_power5[0, i])
print w_power5_lista
print "w_power100: "
w_power100_lista = []
for i in range(0, w_power100.size):
w_power100_lista.append('%.4f'%w_power100[0, i])
print w_power100_lista
print "w_random100a:"
print w_random100a
print "w_random100b:"
print w_random100b
print "w_random10000a:"
print w_random10000a
print "w_random10000b:"
print w_random10000b
# To plot to the link https://plot.ly/~thamenato/2/t2-random-walk/
# just uncomment this block and run the code again.
# The library must be installed (https://plot.ly/python/getting-started/);
# on Windows just open the Python(x,y) menu, choose interactive consoles: IPython(sh)
# and run: pip install plotly
"""
trace_power5 = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_power5)),
name = 'w_power5',
marker = Marker(
color='rgb(51,102,255)'
)
)
trace_power100 = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_power100)),
name = 'w_power100',
marker = Marker(
color='rgb(0,184,245)'
)
)
trace_random100a = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_random100a)),
name = 'w_random100a',
marker = Marker(
color='rgb(138,184,0)'
)
)
trace_random100b = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_random100b)),
name = 'w_random100b',
marker = Marker(
color='rgb(184,245,0)'
)
)
trace_random10000a = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_random10000a)),
name = 'w_random10000a',
marker = Marker(
color='rgb(245,184,0)'
)
)
trace_random10000b = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_random10000b)),
name = 'w_random10000b',
marker = Marker(
color='rgb(255,102,51)'
)
)
data = Data([trace_power5, trace_power100, trace_random100a,
trace_random100b, trace_random10000a, trace_random10000b])
layout = Layout(
title = 'T2: Random Walk',
xaxis = XAxis(
title = 'Nodes',
titlefont = Font(
size = 16,
color = 'rgb(107, 107, 107)'
),
tickfont = Font(
size = 14,
color = 'rgb(107, 107, 107)'
)
),
yaxis = YAxis(
title = 'Probability',
titlefont = Font(
size = 16,
color = 'rgb(107, 107, 107)'
),
tickfont = Font(
size = 14,
color = 'rgb(107, 107, 107)'
)
),
legend = Legend(
x = 0.25,
y = 1.0,
bgcolor = 'rgba(255, 255, 255, 0)',
bordercolor = 'rgba(255, 255, 255, 0)'
),
barmode = 'group',
bargap = 0.15,
bargroupgap = 0.1
)
fig = Figure(data = data, layout = layout)
plot_url = py.plot(fig, filename='T2_Random_Walks')
"""
|
UFSCar-CS-011/graph-theory-2012-2
|
tasks/task2/t2-random-walks.py
|
Python
|
mit
| 6,342 | 0.001577 |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import proboscis
from trove.tests.api import backups
from trove.tests.api import configurations
from trove.tests.api import databases
from trove.tests.api import datastores
from trove.tests.api import flavors
from trove.tests.api import instances
from trove.tests.api import instances_actions
from trove.tests.api.mgmt import accounts
from trove.tests.api.mgmt import admin_required
from trove.tests.api.mgmt import hosts
from trove.tests.api.mgmt import instances as mgmt_instances
from trove.tests.api.mgmt import storage
from trove.tests.api import replication
from trove.tests.api import root
from trove.tests.api import user_access
from trove.tests.api import users
from trove.tests.api import versions
GROUP_SERVICES_INITIALIZE = "services.initialize"
black_box_groups = [
flavors.GROUP,
users.GROUP,
user_access.GROUP,
databases.GROUP,
root.GROUP,
GROUP_SERVICES_INITIALIZE,
instances.GROUP_START,
instances.GROUP_QUOTAS,
instances.GROUP_SECURITY_GROUPS,
backups.GROUP,
replication.GROUP,
configurations.GROUP,
datastores.GROUP,
instances_actions.GROUP_RESIZE,
# TODO(SlickNik): The restart tests fail intermittently so pulling
# them out of the blackbox group temporarily. Refer to Trove bug:
# https://bugs.launchpad.net/trove/+bug/1204233
# instances_actions.GROUP_RESTART,
instances_actions.GROUP_STOP_MYSQL,
instances.GROUP_STOP,
versions.GROUP,
instances.GROUP_GUEST,
]
proboscis.register(groups=["blackbox", "mysql"],
depends_on_groups=black_box_groups)
simple_black_box_groups = [
GROUP_SERVICES_INITIALIZE,
flavors.GROUP,
versions.GROUP,
instances.GROUP_START_SIMPLE,
admin_required.GROUP,
]
proboscis.register(groups=["simple_blackbox"],
depends_on_groups=simple_black_box_groups)
black_box_mgmt_groups = [
accounts.GROUP,
hosts.GROUP,
storage.GROUP,
instances_actions.GROUP_REBOOT,
admin_required.GROUP,
mgmt_instances.GROUP,
]
proboscis.register(groups=["blackbox_mgmt"],
depends_on_groups=black_box_mgmt_groups)
# Datastores groups for int-tests
datastore_group = [
GROUP_SERVICES_INITIALIZE,
flavors.GROUP,
versions.GROUP,
instances.GROUP_START_SIMPLE,
]
proboscis.register(groups=["cassandra", "couchbase", "mongodb", "postgresql"],
depends_on_groups=datastore_group)
|
changsimon/trove
|
trove/tests/int_tests.py
|
Python
|
apache-2.0
| 3,057 | 0 |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import sys
import os
from gpu_tests import gpu_integration_test
test_harness_script = r"""
function VerifyHardwareAccelerated(feature) {
feature += ': '
var list = document.querySelector('.feature-status-list');
for (var i=0; i < list.childElementCount; i++) {
var span_list = list.children[i].getElementsByTagName('span');
var feature_str = span_list[0].textContent;
var value_str = span_list[1].textContent;
if ((feature_str == feature) &&
(value_str == 'Hardware accelerated')) {
return true;
}
}
return false;
};
"""
def safe_feature_name(feature):
return feature.lower().replace(' ', '_')
class HardwareAcceleratedFeatureIntegrationTest(
gpu_integration_test.GpuIntegrationTest):
"""Tests GPU acceleration is reported as active for various features."""
@classmethod
def Name(cls):
"""The name by which this test is invoked on the command line."""
return 'hardware_accelerated_feature'
@classmethod
def SetUpProcess(cls):
super(cls, HardwareAcceleratedFeatureIntegrationTest).SetUpProcess()
cls.CustomizeBrowserArgs([])
cls.StartBrowser()
cls.SetStaticServerDirs([])
def _Navigate(self, url):
# It's crucial to use the action_runner, rather than the tab's
# Navigate method directly. It waits for the document ready state
# to become interactive or better, avoiding critical race
# conditions.
self.tab.action_runner.Navigate(
url, script_to_evaluate_on_commit=test_harness_script)
@classmethod
def GenerateGpuTests(cls, options):
tests = ('WebGL', 'Canvas')
for feature in tests:
yield ('HardwareAcceleratedFeature_%s_accelerated' %
safe_feature_name(feature), 'chrome://gpu', (feature))
def RunActualGpuTest(self, test_path, *args):
feature = args[0]
self._Navigate(test_path)
tab = self.tab
tab.WaitForJavaScriptCondition('window.gpuPagePopulated', timeout=30)
if not tab.EvaluateJavaScript(
'VerifyHardwareAccelerated({{ feature }})', feature=feature):
print('Test failed. Printing page contents:')
print(tab.EvaluateJavaScript('document.body.innerHTML'))
self.fail('%s not hardware accelerated' % feature)
@classmethod
def ExpectationsFiles(cls):
return [
os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'test_expectations',
'hardware_accelerated_feature_expectations.txt')
]
def load_tests(loader, tests, pattern):
del loader, tests, pattern # Unused.
return gpu_integration_test.LoadAllTestsInModule(sys.modules[__name__])
|
scheib/chromium
|
content/test/gpu/gpu_tests/hardware_accelerated_feature_integration_test.py
|
Python
|
bsd-3-clause
| 2,830 | 0.007067 |
from django.db import models
class AssetField(models.ForeignKey):
description = 'A file asset'
def __init__(self, **kwargs):
kwargs.setdefault('blank', True)
kwargs.setdefault('null', True)
kwargs.setdefault('default', None)
# Normally, Django creates a backwards relationship, so you can have
#
# class Student(models.Model):
# classroom = models.ForeignKey(Classroom)
#
# and then access students like this:
#
# classroom.student_set.filter(...)
#
# This doesn't work when you have two ForeignKeys that point at the same
# model:
#
# class Student(models.Model):
# study_hall_room = models.ForeignKey(Classroom)
# homeroom = models.ForeignKey(Classroom)
#
# Because now the backwards relationship has the same name
# (`student_set`) on Classroom. We don't want this for assets, so we
# strip it off by setting `related_name` to `+`.
kwargs.setdefault('related_name', '+')
kwargs['to'] = 'assets.Asset'
kwargs['on_delete'] = models.SET_NULL
super(AssetField, self).__init__(**kwargs)
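# Illustrative usage only (the model below is hypothetical, not part of this app):
#
#     class Episode(models.Model):
#         cover_art = AssetField()
#         audio = AssetField()
#
# Both fields point at assets.Asset; because related_name is forced to '+', the
# two foreign keys do not clash over a backwards relation on Asset.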
|
AlmostBetterNetwork/podmaster-host
|
assets/fields.py
|
Python
|
apache-2.0
| 1,224 | 0.000817 |
import htmlPy
import os
import json
from naoqi import ALProxy
from pptx import Presentation
import time
import math
#linked class with front end
class PepperApp(htmlPy.Object):
#GUI callable functions need to be in a class, inherited from htmlPy.Object and binded to the GUI
def __init__(self, app):
super(PepperApp, self).__init__()
self.app = app
self.my_path = ""
self.my_ip = ""
self.current_slide =""
self.start_slide =""
self.slides_path =""
self.tts =""
self.my_pres =""
self.number_of_slides = ""
self.gestures_dict = {":)":"happy", ":(":"sad", ":|":"unknown"}
self.pointing_right = {"[point=1]":"uf", "[point=2]":"un", "[point=3]":"ln", "[point=4]":"lf"}
self.pointing_left = {"[point=1]":"un", "[point=2]":"uf", "[point=3]":"lf", "[point=4]":"ln"}
self.isRobotPointing = False
self.leds = {":)":"green", ":(":"magenta", ":|":"yellow"}
#functions are defined here, under the .Slot decoration
############################################################
############################################################
#
# STARTING PRESENTATION (ON CLICKING START)
# imports everything, shows first slide, calculates pointing angles
#
############################################################
############################################################
@htmlPy.Slot(str)
def start_presentation(self, json_data): #initializes everything
#retrieve pointing data and calculates turn angle
def retrieve_pointing_data():
self.width = float(form_data['screen-width'])
self.height = float(form_data['screen-height'])
self.x = float(form_data['x'])
self.y = float(form_data['y'])
self.z = float(form_data['z'])
self.near = self.x + self.width/4
self.far = self.x + 3*self.width/4
self.upper = self.z + 3*self.height/4 -0.91
self.lower = self.z + self.height/4 -0.91
print(self.near)
print(self.far)
if form_data['side'] == '':
self.side ='left'
self.shoulder = 'LShoulderPitch'
self.pointing_dict = self.pointing_left
self.zNear = (math.atan(self.y/self.near)+math.pi/2)
self.zFar = (math.atan(self.y/self.far)+math.pi/2)
print(self.zNear)
print(self.zFar)
elif form_data['side'] =='on':
self.side = 'right'
self.shoulder = 'RShoulderPitch'
self.pointing_dict = self.pointing_right
self.zNear = -(math.atan(self.y/self.near)+math.pi/2)
self.zFar = -(math.atan(self.y/self.far)+math.pi/2)
print(self.side)
return
#calculates pointing angles vector = {'un':[angleZ,angleShoulder],'uf':[angleZ,angleShoulder],'ln':[angleZ,angleShoulder], 'lf':[angleZ,angleShoulder]}
def calculate_angles():
self.upperNear = -math.atan(self.upper/(math.sqrt(math.pow(self.near,2)+math.pow(self.y,2))))
self.upperFar = -math.atan(self.upper/(math.sqrt(math.pow(self.far,2)+math.pow(self.y,2))))
self.lowerNear = -math.atan(self.lower/(math.sqrt(math.pow(self.near,2)+math.pow(self.y,2))))
self.lowerFar = -math.atan(self.lower/(math.sqrt(math.pow(self.far,2)+math.pow(self.y,2))))
self.angles = {'un':[self.zNear,self.upperNear],'uf':[self.zFar,self.upperFar],'ln':[self.zNear,self.lowerNear], 'lf':[self.zFar,self.lowerFar]}
print(self.angles)
return
#read the json data from the form (ip, file, start)
form_data = json.loads(json_data)
self.my_path = form_data['file']
self.my_ip = form_data['ip']
self.start_slide = form_data['start']
self.point_check = form_data['enable_point']
if self.point_check == '':
self.point_enabled = False
#if gesture is enable then get the coordinates variables
elif self.point_check == 'on':
self.point_enabled = True
retrieve_pointing_data()
calculate_angles()
#initialize the presentation and variables
self.start_slide = int(self.start_slide)
self.current_slide = self.start_slide
self.slides_path = os.path.dirname(self.my_path) + '/slides/slide'
self.my_pres = Presentation(self.my_path)
self.number_of_slides = len(self.my_pres.slides)
notes = []
#connect to the robot and show initial slide
        #COMMENT THIS WHEN TESTING OUTSIDE THE ROBOT
########################################################################
self.tts = ALProxy("ALAnimatedSpeech", str(self.my_ip), 9559)
self.motion = ALProxy("ALMotion", str(self.my_ip), 9559)
self.posture = ALProxy("ALRobotPosture", str(self.my_ip), 9559)
self.aw = ALProxy("ALBasicAwareness", str(self.my_ip), 9559)
self.motion.moveInit()
self.motion.setStiffnesses(self.shoulder, 1)
self.posture.goToPosture("StandInit", 0.5)
self.aw.setTrackingMode("Head")
self.aw.setEngagementMode("Unengaged")
# self.aw.resumeAwareness()
self.aw.pauseAwareness()
########################################################################
slide_src = self.slides_path + str(self.start_slide) + '.jpg'
self.app.evaluate_javascript("document.getElementById('presentation_image').style.display='block'")
self.app.evaluate_javascript("document.getElementById('presentation_content').innerHTML = 'Log:<br>Starting presentation at: %s<br>IP: %s<br>Notes: '" %(self.my_path, self.my_ip))
self.app.evaluate_javascript("document.getElementById('slide').src = '%s'" %(slide_src))
self.app.evaluate_javascript("document.getElementById('presentation_image').style.display = 'block'")
self.app.evaluate_javascript("scroll(0,0)")
print('Showing slide ' + str(self.current_slide) +'. Source: '+ slide_src)
#the calculations of angles for the pointing function should be done here, define them as self variables to access from present_slide
return
############################################################
############################################################
#
# PRESENTING SLIDE (ON CLICK SLIDE)
#
############################################################
############################################################
@htmlPy.Slot()
def present_slide(self):
self.aw.setTrackingMode("Head")
self.aw.setEngagementMode("Unengaged")
#this will use the dictionary to check for gestures FIX IT TO ONLY ADD ONE CLOSING TAG
def check_gestures(text):
for key in self.gestures_dict:
if text.find(key) != -1:
# text = text.replace(key, "^startTag(" + self.gestures_dict[key] + ")" ) + " ^stopTag(" + self.gestures_dict[key] + ")"
#here we try to make the emotion at the end of the sentence
#not sure if the double \\ is needed as escape character
text = '^call(ALLeds.fadeRGB("FaceLeds", "'+ self.leds[key] + '", 3)) ' + text.replace(key, "" ) + ' \\pau=1000\\ '+ ' ^startTag(' + self.gestures_dict[key] + ')' + ' ^waitTag(' + self.gestures_dict[key] + ')'
return text
def check_point(text):
for key in self.pointing_dict:
if text.find(key) != -1:
text = text.replace(key, "")
print('Pointing to ' + self.pointing_dict[key])
                    #COMMENT THIS WHEN TESTING OUTSIDE THE ROBOT
####################################################################
# self.aw.pauseAwareness()
####################################################################
#point to that position
point(self.pointing_dict[key])
return text
def point(position):
self.angles_vector = self.angles[position] #now we have the vector with the Z angle and the shoulder angle for a given position
            #COMMENT THIS WHEN TESTING OUTSIDE THE ROBOT
####################################################################
self.motion.setStiffnesses(self.shoulder, 1)
self.motion.moveTo(0,0,self.angles_vector[0])
self.motion.setAngles(self.shoulder, self.angles_vector[1],0.3)
####################################################################
self.isRobotPointing = True
return
#with calls inside the text
################################################################################
# def point(position, text):
# self.angles_vector = self.angles[position] #now we have the vector with the Z angle and the shoulder angle for a given position
# my_z_str = str(self.angles_vector[0])
# my_shoulder_str = str(self.angles_vector[1])
# #^pcall() = asynchronous, ^call() = ^synchronous
# animated_text = '^call(ALMotion.moveTo(0,0,'+ my_z_str+')) ^call(ALMotion.setAngles("' + self.shoulder + '",' + my_shoulder_str+',0.3))' + text
# self.isRobotPointing = True
# return animated_text
        #Original code; the version above is the modified one
################################################################################
# def check_point(text):
# for key in self.pointing_dict:
# if text.find(key) != -1:
# text = text.replace(key, "")
# print('Pointing to ' + self.pointing_dict[key])
# #point to that position
# point(self.pointing_dict[key])
# return text
# #the function to point should be added here also
# def point(position):
# self.angles_vector = self.angles[position] #now we have the vector with the Z angle and the shoulder angle for a given position
# #COMMENT THIS WHEN TESTING OUTISDE ROBOT
# ####################################################################
# self.motion.setStiffnesses(self.shoulder, 1)
# self.motion.moveTo(0,0,self.angles_vector[0])
# self.motion.setAngles(self.shoulder, self.angles_vector[1],0.5)
# ####################################################################
# self.isRobotPointing = True
# return
###########################################################################################
        #the slide is showing; when you click on it, the notes of the slide are read aloud
        #if it is not the last slide, the next slide is shown; if it is the last one, some time elapses and the image view is closed
slide = self.my_pres.slides[self.current_slide-1]
if slide.has_notes_slide:
notes_slide = slide.notes_slide
text_frame = notes_slide.notes_text_frame
for paragraph in text_frame.paragraphs:
if self.point_enabled:
after_pointing_txt = check_point(paragraph.text)
else:
after_pointing_txt = paragraph.text
modified_text = check_gestures(after_pointing_txt)
self.app.evaluate_javascript("document.getElementById('presentation_content').innerHTML += '<br> %s - %s '" %(paragraph.text, modified_text))
print('Notes line of slide ' + str(self.current_slide) +': ' + paragraph.text)
print('Modified notes line of slide ' + str(self.current_slide) +': ' + modified_text)
                #COMMENT THIS WHEN TESTING OUTSIDE THE ROBOT
####################################################################
self.tts.say(str(modified_text))
if self.isRobotPointing:
self.motion.moveTo(0,0,-self.angles_vector[0], _async = True)
self.isRobotPointing = False
# self.aw.resumeAwareness()
####################################################################
time.sleep(0.5)
self.current_slide +=1
if self.current_slide <=self.number_of_slides:
slide_src = self.slides_path + str(self.current_slide) + '.jpg'
self.app.evaluate_javascript("document.getElementById('slide').src = '%s'" %(slide_src))
else:
time.sleep(2)
            #COMMENT THIS WHEN TESTING OUTSIDE THE ROBOT
####################################################################
self.aw.resumeAwareness()
self.aw.setTrackingMode("Head")
self.aw.setEngagementMode("Unengaged")
####################################################################
self.app.evaluate_javascript("document.getElementById('presentation_image').style.display = 'none'")
return
# #WORKING!
|
wladiarce/PepperPresenter
|
Presenter app (computer side)/back_end/BackEnd.py
|
Python
|
mit
| 11,402 | 0.029205 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2002-2018, Neo4j
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from unittest import TestCase
from cypy.graph import Node, relationship_type, Path
from cypy.encoding import cypher_repr, cypher_escape
KNOWS = relationship_type("KNOWS")
LOVES = relationship_type("LOVES")
HATES = relationship_type("HATES")
KNOWS_FR = relationship_type(u"CONNAÎT")
class CypherEscapeTestCase(TestCase):
def test_can_write_simple_identifier(self):
escaped = cypher_escape("foo")
assert escaped == "foo"
def test_can_write_identifier_with_odd_chars(self):
escaped = cypher_escape("foo bar")
assert escaped == "`foo bar`"
def test_can_write_identifier_containing_back_ticks(self):
escaped = cypher_escape("foo `bar`")
assert escaped == "`foo ``bar```"
def test_cannot_write_empty_identifier(self):
with self.assertRaises(ValueError):
_ = cypher_escape("")
def test_cannot_write_none_identifier(self):
with self.assertRaises(TypeError):
_ = cypher_escape(None)
class CypherNoneRepresentationTestCase(TestCase):
def test_should_encode_none(self):
encoded = cypher_repr(None)
assert encoded == u"null"
class CypherBooleanRepresentationTestCase(TestCase):
def test_should_encode_true(self):
encoded = cypher_repr(True)
assert encoded == u"true"
def test_should_encode_false(self):
encoded = cypher_repr(False)
assert encoded == u"false"
class CypherIntegerRepresentationTestCase(TestCase):
def test_should_encode_zero(self):
encoded = cypher_repr(0)
assert encoded == u"0"
def test_should_encode_positive_integer(self):
encoded = cypher_repr(123)
assert encoded == u"123"
def test_should_encode_negative_integer(self):
encoded = cypher_repr(-123)
assert encoded == u"-123"
class CypherFloatRepresentationTestCase(TestCase):
def test_should_encode_zero(self):
encoded = cypher_repr(0.0)
assert encoded == u"0.0"
def test_should_encode_positive_float(self):
encoded = cypher_repr(123.456)
assert encoded == u"123.456"
def test_should_encode_negative_float(self):
encoded = cypher_repr(-123.456)
assert encoded == u"-123.456"
class CypherStringRepresentationTestCase(TestCase):
def test_should_encode_bytes(self):
encoded = cypher_repr(b"hello, world")
assert encoded == u"'hello, world'"
def test_should_encode_unicode(self):
encoded = cypher_repr(u"hello, world")
assert encoded == u"'hello, world'"
def test_should_encode_bytes_with_escaped_chars(self):
encoded = cypher_repr(b"hello, 'world'", quote=u"'")
assert encoded == u"'hello, \\'world\\''"
def test_should_encode_unicode_with_escaped_chars(self):
encoded = cypher_repr(u"hello, 'world'", quote=u"'")
assert encoded == u"'hello, \\'world\\''"
def test_should_encode_empty_string(self):
encoded = cypher_repr(u"")
assert encoded == u"''"
def test_should_encode_bell(self):
encoded = cypher_repr(u"\a")
assert encoded == u"'\\u0007'"
def test_should_encode_backspace(self):
encoded = cypher_repr(u"\b")
assert encoded == u"'\\b'"
def test_should_encode_form_feed(self):
encoded = cypher_repr(u"\f")
assert encoded == u"'\\f'"
def test_should_encode_new_line(self):
encoded = cypher_repr(u"\n")
assert encoded == u"'\\n'"
def test_should_encode_carriage_return(self):
encoded = cypher_repr(u"\r")
assert encoded == u"'\\r'"
def test_should_encode_horizontal_tab(self):
encoded = cypher_repr(u"\t")
assert encoded == u"'\\t'"
def test_should_encode_double_quote_when_single_quoted(self):
encoded = cypher_repr(u"\"")
assert encoded == u"'\"'"
def test_should_encode_single_quote_when_single_quoted(self):
encoded = cypher_repr(u"'", quote=u"'")
assert encoded == u"'\\''"
def test_should_encode_double_quote_when_double_quoted(self):
encoded = cypher_repr(u"\"", quote=u"\"")
assert encoded == u'"\\""'
def test_should_encode_single_quote_when_double_quoted(self):
encoded = cypher_repr(u"'", quote=u"\"")
assert encoded == u'"\'"'
def test_should_encode_2_byte_extended_character(self):
encoded = cypher_repr(u"\xAB")
assert encoded == u"'\\u00ab'"
def test_should_encode_4_byte_extended_character(self):
encoded = cypher_repr(u"\uABCD")
assert encoded == u"'\\uabcd'"
def test_should_encode_8_byte_extended_character(self):
encoded = cypher_repr(u"\U0010ABCD")
assert encoded == u"'\\U0010abcd'"
def test_should_encode_complex_sequence(self):
encoded = cypher_repr(u"' '' '''")
assert encoded == u"\"' '' '''\""
class CypherListRepresentationTestCase(TestCase):
def test_should_encode_list(self):
encoded = cypher_repr([1, 2.0, u"three"])
assert encoded == u"[1, 2.0, 'three']"
def test_should_encode_empty_list(self):
encoded = cypher_repr([])
assert encoded == u"[]"
class CypherMapRepresentationTestCase(TestCase):
def test_should_encode_map(self):
encoded = cypher_repr(OrderedDict([("one", 1), ("two", 2.0), ("number three", u"three")]))
assert encoded == u"{one: 1, two: 2.0, `number three`: 'three'}"
def test_should_encode_empty_map(self):
encoded = cypher_repr({})
assert encoded == u"{}"
class CypherNodeRepresentationTestCase(TestCase):
def test_should_encode_empty_node(self):
a = Node()
encoded = cypher_repr(a, node_template="{labels} {properties}")
assert encoded == u"({})"
def test_should_encode_node_with_property(self):
a = Node(name="Alice")
encoded = cypher_repr(a, node_template="{labels} {properties}")
assert encoded == u"({name: 'Alice'})"
def test_should_encode_node_with_label(self):
a = Node("Person")
encoded = cypher_repr(a, node_template="{labels} {properties}")
assert encoded == u"(:Person {})"
def test_should_encode_node_with_label_and_property(self):
a = Node("Person", name="Alice")
encoded = cypher_repr(a, node_template="{labels} {properties}")
assert encoded == u"(:Person {name: 'Alice'})"
class CypherRelationshipRepresentationTestCase(TestCase):
def test_can_encode_relationship(self):
a = Node(name="Alice")
b = Node(name="Bob")
ab = KNOWS(a, b)
encoded = cypher_repr(ab, related_node_template="{property.name}")
self.assertEqual("(Alice)-[:KNOWS {}]->(Bob)", encoded)
def test_can_encode_relationship_with_names(self):
a = Node("Person", name="Alice")
b = Node("Person", name="Bob")
ab = KNOWS(a, b)
encoded = cypher_repr(ab, related_node_template="{property.name}")
self.assertEqual("(Alice)-[:KNOWS {}]->(Bob)", encoded)
def test_can_encode_relationship_with_alternative_names(self):
a = Node("Person", nom=u"Aimée")
b = Node("Person", nom=u"Baptiste")
ab = KNOWS_FR(a, b)
encoded = cypher_repr(ab, related_node_template=u"{property.nom}")
self.assertEqual(u"(Aimée)-[:CONNAÎT {}]->(Baptiste)", encoded)
def test_can_encode_relationship_with_properties(self):
a = Node("Person", name="Alice")
b = Node("Person", name="Bob")
ab = KNOWS(a, b, since=1999)
encoded = cypher_repr(ab, related_node_template="{property.name}")
self.assertEqual("(Alice)-[:KNOWS {since: 1999}]->(Bob)", encoded)
class CypherPathRepresentationTestCase(TestCase):
def test_can_write_path(self):
alice, bob, carol, dave = Node(name="Alice"), Node(name="Bob"), \
Node(name="Carol"), Node(name="Dave")
ab = LOVES(alice, bob)
cb = HATES(carol, bob)
cd = KNOWS(carol, dave)
path = Path(alice, ab, bob, cb, carol, cd, dave)
encoded = cypher_repr(path, related_node_template="{property.name}")
self.assertEqual("(Alice)-[:LOVES {}]->(Bob)<-[:HATES {}]-(Carol)-[:KNOWS {}]->(Dave)", encoded)
|
technige/cypy
|
test/test_encoding.py
|
Python
|
apache-2.0
| 8,939 | 0.000224 |
# This is a sample configuration file for an ISAPI filter and extension
# written in Python.
#
# Please see README.txt in this directory, and specifically the
# information about the "loader" DLL - installing this sample will create
# "_redirector_with_filter.dll" in the current directory. The readme explains
# this.
# Executing this script (or any server config script) will install the extension
# into your web server. As the server executes, the PyISAPI framework will load
# this module and create your Extension and Filter objects.
# This sample provides sample redirector:
# It is implemented by a filter and an extension, so that some requests can
# be ignored. Compare with 'redirector_simple' which avoids the filter, but
# is unable to selectively ignore certain requests.
# The process this sample uses is:
# * The filter is installed globally, as all filters are.
# * A Virtual Directory named "python" is setup. This dir has our ISAPI
# extension as the only application, mapped to file-extension '*'. Thus, our
# extension handles *all* requests in this directory.
# The basic process is that the filter does URL rewriting, redirecting every
# URL to our Virtual Directory. Our extension then handles this request,
# forwarding the data from the proxied site.
# For example:
# * URL of "index.html" comes in.
# * Filter rewrites this to "/python/index.html"
# * Our extension sees the full "/python/index.html", removes the leading
# portion, and opens and forwards the remote URL.
# This sample is very small - it avoids most error handling, etc. It is for
# demonstration purposes only.
from isapi import isapicon, threaded_extension
from isapi.simple import SimpleFilter
import sys
import traceback
import urllib.request, urllib.parse, urllib.error
# sys.isapidllhandle will exist when we are loaded by the IIS framework.
# In this case we redirect our output to the win32traceutil collector.
if hasattr(sys, "isapidllhandle"):
import win32traceutil
# The site we are proxying.
proxy = "http://www.python.org"
# The name of the virtual directory we install in, and redirect from.
virtualdir = "/python"
# The key feature of this redirector over the simple redirector is that it
# can choose to ignore certain responses by having the filter not rewrite them
# to our virtual dir. For this sample, we just exclude the IIS help directory.
# The ISAPI extension - handles requests in our virtual dir, and sends the
# response to the client.
class Extension(threaded_extension.ThreadPoolExtension):
"Python sample Extension"
def Dispatch(self, ecb):
# Note that our ThreadPoolExtension base class will catch exceptions
# in our Dispatch method, and write the traceback to the client.
# That is perfect for this sample, so we don't catch our own.
#print 'IIS dispatching "%s"' % (ecb.GetServerVariable("URL"),)
url = ecb.GetServerVariable("URL")
if url.startswith(virtualdir):
new_url = proxy + url[len(virtualdir):]
print("Opening", new_url)
fp = urllib.request.urlopen(new_url)
headers = fp.info()
ecb.SendResponseHeaders("200 OK", str(headers) + "\r\n", False)
ecb.WriteClient(fp.read())
ecb.DoneWithSession()
print("Returned data from '%s'!" % (new_url,))
else:
# this should never happen - we should only see requests that
# start with our virtual directory name.
print("Not proxying '%s'" % (url,))
# The ISAPI filter.
class Filter(SimpleFilter):
"Sample Python Redirector"
filter_flags = isapicon.SF_NOTIFY_PREPROC_HEADERS | \
isapicon.SF_NOTIFY_ORDER_DEFAULT
def HttpFilterProc(self, fc):
#print "Filter Dispatch"
nt = fc.NotificationType
if nt != isapicon.SF_NOTIFY_PREPROC_HEADERS:
return isapicon.SF_STATUS_REQ_NEXT_NOTIFICATION
pp = fc.GetData()
url = pp.GetHeader("url")
#print "URL is '%s'" % (url,)
prefix = virtualdir
if not url.startswith(prefix):
new_url = prefix + url
print("New proxied URL is '%s'" % (new_url,))
pp.SetHeader("url", new_url)
# For the sake of demonstration, show how the FilterContext
# attribute is used. It always starts out life as None, and
# any assignments made are automatically decref'd by the
# framework during a SF_NOTIFY_END_OF_NET_SESSION notification.
if fc.FilterContext is None:
fc.FilterContext = 0
fc.FilterContext += 1
print("This is request number %d on this connection" % fc.FilterContext)
return isapicon.SF_STATUS_REQ_HANDLED_NOTIFICATION
else:
print("Filter ignoring URL '%s'" % (url,))
# Some older code that handled SF_NOTIFY_URL_MAP.
#~ print "Have URL_MAP notify"
#~ urlmap = fc.GetData()
#~ print "URI is", urlmap.URL
#~ print "Path is", urlmap.PhysicalPath
#~ if urlmap.URL.startswith("/UC/"):
#~ # Find the /UC/ in the physical path, and nuke it (except
#~ # as the path is physical, it is \)
#~ p = urlmap.PhysicalPath
#~ pos = p.index("\\UC\\")
#~ p = p[:pos] + p[pos+3:]
#~ p = r"E:\src\pyisapi\webroot\PyTest\formTest.htm"
#~ print "New path is", p
#~ urlmap.PhysicalPath = p
# The entry points for the ISAPI extension.
def __FilterFactory__():
return Filter()
def __ExtensionFactory__():
return Extension()
if __name__=='__main__':
# If run from the command-line, install ourselves.
from isapi.install import *
params = ISAPIParameters()
# Setup all filters - these are global to the site.
params.Filters = [
FilterParameters(Name="PythonRedirector",
Description=Filter.__doc__),
]
# Setup the virtual directories - this is a list of directories our
# extension uses - in this case only 1.
# Each extension has a "script map" - this is the mapping of ISAPI
# extensions.
sm = [
ScriptMapParams(Extension="*", Flags=0)
]
vd = VirtualDirParameters(Name=virtualdir[1:],
Description = Extension.__doc__,
ScriptMaps = sm,
ScriptMapUpdate = "replace"
)
params.VirtualDirs = [vd]
HandleCommandLine(params)
|
sserrot/champion_relationships
|
venv/Lib/site-packages/isapi/samples/redirector_with_filter.py
|
Python
|
mit
| 6,630 | 0.006184 |
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import logging
from base64 import b64decode, b64encode
from werkzeug.datastructures import FileStorage
from odoo import models, fields, _
_logger = logging.getLogger(__name__)
try:
import magic
except ImportError:
_logger.warning("Please install magic in order to use Muskathlon module")
class OrderMaterialForm(models.AbstractModel):
_name = "cms.form.order.material.mixin"
_inherit = "cms.form"
_form_model = "crm.lead"
_form_model_fields = ["partner_id", "description"]
_form_required_fields = ["flyer_german", "flyer_french"]
partner_id = fields.Many2one("res.partner", readonly=False)
event_id = fields.Many2one("crm.event.compassion", readonly=False)
form_id = fields.Char()
flyers_select = [(i, str(i)) for i in (0, 5, 10, 15, 20, 30)]
flyer_german = fields.Selection(flyers_select, string="Number of flyers in german", default=0)
flyer_french = fields.Selection(flyers_select, string="Number of flyers in french", default=0)
@property
def _form_fieldsets(self):
return [
{"id": "flyers", "fields": ["flyer_german", "flyer_french", "form_id"]},
]
@property
def form_msg_success_created(self):
return _(
"Thank you for your request. You will hear back from us "
"within the next days."
)
@property
def form_widgets(self):
# Hide fields
res = super(OrderMaterialForm, self).form_widgets
res.update(
{
"form_id": "cms_form_compassion.form.widget.hidden",
"partner_id": "cms_form_compassion.form.widget.hidden",
"event_id": "cms_form_compassion.form.widget.hidden",
"description": "cms_form_compassion.form.widget.hidden",
}
)
return res
@staticmethod
    def create_description(material, values, languages=("french", "german")):
lines = []
for lang in languages:
if int(values[f'flyer_{lang}']) > 0:
lines.append(f"<li>{values[f'flyer_{lang}']} <b>{material}</b> in {lang}</li>")
description = f"<ul>{''.join(lines)}</ul>"
return description
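    # Illustrative output (not part of the original source): assuming the submitted
    # values contain flyer_french='5' and flyer_german='10', create_description('flyer', values)
    # returns:
    #   <ul><li>5 <b>flyer</b> in french</li><li>10 <b>flyer</b> in german</li></ul>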
def form_init(self, request, main_object=None, **kw):
form = super(OrderMaterialForm, self).form_init(request, main_object, **kw)
# Set default values
registration = kw.get("registration")
form.partner_id = registration and registration.partner_id
form.event_id = registration and registration.compassion_event_id
return form
def form_before_create_or_update(self, values, extra_values):
""" Dismiss any pending status message, to avoid multiple
        messages when multiple forms are present on the same page.
"""
super(OrderMaterialForm, self).form_before_create_or_update(
values, extra_values
)
self.o_request.website.get_status_message()
staff_id = (
self.env["res.config.settings"]
.sudo()
.get_param("muskathlon_order_notify_id")
)
values.update(
{
"name": f"Muskathlon flyer order - {self.partner_id.name}",
"description": self.create_description("flyer", extra_values),
"user_id": staff_id,
"event_ids": [(4, self.event_id.id, None)],
"partner_id": self.partner_id.id,
}
)
def form_check_empty_value(self, fname, field, value, **req_values):
"""Invalidate the form if they order 0 flyers"""
is_valid = super().form_check_empty_value(fname, field, value, **req_values)
is_valid |= int(req_values["flyer_french"]) + int(req_values["flyer_german"]) <= 0
return is_valid
def _form_create(self, values):
""" Run as Muskathlon user to authorize lead creation,
and prevents default mail notification to staff
(a better one is sent just after)."""
uid = self.env.ref("muskathlon.user_muskathlon_portal").id
self.main_object = self.form_model\
.sudo(uid).with_context(tracking_disable=True).create(values.copy())
def form_after_create_or_update(self, values, extra_values):
super(OrderMaterialForm, self).form_after_create_or_update(
values, extra_values
)
# Update contact fields on lead
self.main_object._onchange_partner_id()
# Send mail
email_template = self.env.ref("muskathlon.order_material_mail_template")
email_template.sudo().send_mail(
self.main_object.id,
raise_exception=False,
force_send=True,
email_values={
"attachments": [("picture.jpg", self.main_object.partner_id.image)],
"email_to": self.main_object.user_email,
},
)
return True
class OrderMaterialFormFlyer(models.AbstractModel):
_name = "cms.form.order.material"
_inherit = "cms.form.order.material.mixin"
form_id = fields.Char(default="order_material")
class OrderMaterialFormChildpack(models.AbstractModel):
_name = "cms.form.order.muskathlon.childpack"
_inherit = "cms.form.order.material.mixin"
form_id = fields.Char(default="muskathlon_childpack")
flyer_german = fields.Selection(string="Number of childpacks in german", default=0)
flyer_french = fields.Selection(string="Number of childpacks in french", default=0)
def form_before_create_or_update(self, values, extra_values):
super(OrderMaterialFormChildpack, self).form_before_create_or_update(
values, extra_values
)
values.update(
{
"name": f"Muskathlon childpack order - {self.partner_id.name}",
"description": self.create_description("childpack", extra_values),
}
)
|
CompassionCH/compassion-switzerland
|
muskathlon/forms/order_material_form.py
|
Python
|
agpl-3.0
| 6,229 | 0.002087 |
#################################################################################
#
# Copyright (c) 2013 Genome Research Ltd.
#
# Author: Irina Colgiu <ic4@sanger.ac.uk>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
from serapis.controller.db import models, data_access
from serapis.com import constants
#from bson.objectid import ObjectId
from collections import namedtuple
#from mongoengine.base import ObjectIdField
########################################################################
########## General classes ###############
TaskInfo = namedtuple('TaskInfo', ['id', 'type', 'status'])
class Result:
def __init__(self, result, error_dict=None, warning_dict=None, message=None):
self.result = result
self.error_dict = error_dict
self.warning_dict = warning_dict
self.message = message
class SerapisModel(object):
''' Parent class for all the model classes.'''
def get_internal_fields(self):
''' Method that returns a list of fields that have an internal usage
and shouldn't be exposed to the user at all.'''
return []
class EntityModel(SerapisModel):
def __init__(self, internal_id=None, name=None):
self.internal_id = internal_id
self.name = name
def __eq__(self, other):
if other == None:
return False
for id_field in constants.ENTITY_IDENTIFYING_FIELDS:
if id_field in other and hasattr(self, id_field) and other[id_field] != None and getattr(self, id_field) != None:
return other[id_field] == getattr(self, id_field)
return False
def get_entity_identifying_field(self):
if self.internal_id:
return str(self.internal_id)
elif self.name:
return self.name
return None
@staticmethod
def build_from_db_model(db_model):
raise NotImplementedError("Any entity subclass should implement the build_from_db_model method!")
class StudyModel(EntityModel):
@staticmethod
def build_from_db_model(db_study):
study_model = StudyModel()
study_model = StudyModel.copy_fields(db_study, study_model)
return study_model
@staticmethod
def copy_fields(src_study, dest_study):
dest_study.accession_number = src_study.accession_number
dest_study.study_type = src_study.study_type
dest_study.study_title = src_study.study_title
dest_study.faculty_sponsor = src_study.faculty_sponsor
dest_study.ena_project_id = src_study.ena_project_id
        dest_study.study_visibility = src_study.study_visibility
dest_study.description = src_study.description
dest_study.pi_list = src_study.pi_list
return dest_study
def get_entity_identifying_field(self):
if self.name:
return str(self.name)
elif self.internal_id:
return self.internal_id
return None
class AbstractLibraryModel(SerapisModel):
@staticmethod
def build_from_db_model(db_alib):
alib_model = AbstractLibraryModel()
alib_model = AbstractLibraryModel.copy_fields(db_alib, alib_model)
return alib_model
@staticmethod
def copy_fields(src_alib, dest_alib):
dest_alib.coverage = src_alib.coverage
dest_alib.library_source = src_alib.library_source
dest_alib.library_strategy = src_alib.library_strategy
dest_alib.instrument_model = src_alib.instrument_model
return dest_alib
class LibraryModel(AbstractLibraryModel, EntityModel):
@staticmethod
def build_from_db_model(db_lib):
lib_model = LibraryModel()
lib_model = LibraryModel.copy_fields(db_lib,lib_model)
return lib_model
@staticmethod
def copy_fields(src_lib, dest_lib):
dest_lib.library_type = src_lib.library_type
dest_lib.public_name = src_lib.public_name
dest_lib.sample_internal_id = src_lib.sample_internal_id
return dest_lib
class ReferenceGenomeModel(SerapisModel):
@staticmethod
def build_from_db_model(db_reference_genome):
ref_model = ReferenceGenomeModel()
ref_model = ReferenceGenomeModel.copy_fields(db_reference_genome, ref_model)
#ref_model.md5 = db_reference_genome.id
return ref_model
@staticmethod
def copy_fields(old_ref, new_ref):
#new_ref.md5 = old_ref.md5
new_ref.paths = old_ref.paths
new_ref.name = old_ref.name
return new_ref
class SampleModel(EntityModel):
@staticmethod
def build_from_db_model(db_sample):
sample_model = SampleModel()
sample_model = SampleModel.copy_fields(db_sample, sample_model)
return sample_model
@staticmethod
def copy_fields(old_sample, new_sample):
new_sample.accession_number = old_sample.accession_number
new_sample.sanger_sample_id = old_sample.sanger_sample_id
new_sample.public_name = old_sample.public_name
new_sample.sample_tissue_type = old_sample.sample_tissue_type
new_sample.reference_genome = old_sample.reference_genome
new_sample.taxon_id = old_sample.taxon_id
new_sample.gender = old_sample.gender
new_sample.cohort = old_sample.cohort
new_sample.ethnicity = old_sample.ethnicity
new_sample.country_of_origin = old_sample.country_of_origin
new_sample.geographical_region = old_sample.geographical_region
new_sample.organism = old_sample.organism
new_sample.common_name = old_sample.common_name
return new_sample
class IndexFileModel(SerapisModel):
@staticmethod
def build_from_db_model(db_index):
index_model = IndexFileModel()
index_model = IndexFileModel.copy_fields(db_index, index_model)
return index_model
@staticmethod
def copy_fields(src_index, dest_index):
dest_index.dest_path = src_index.dest_path
dest_index.file_path_client = src_index.file_path_client
dest_index.md5 = src_index.md5
return dest_index
class SubmittedFileModel(SerapisModel):
@staticmethod
def build_from_db_model(db_file):
''' Receives a database model as parameter and extracts from it
the information needed to create this (model) object.
'''
file_model = SubmittedFileModel()
file_model = SubmittedFileModel.copy_fields(db_file, file_model)
ref_genome = data_access.ReferenceGenomeDataAccess.retrieve_reference_by_id(db_file.file_reference_genome_id)
file_model.reference_genome = ReferenceGenomeModel.build_from_db_model(ref_genome)
return SubmittedFileModel.copy_fields(db_file, file_model)
@staticmethod
def copy_fields(src_file, dest_file):
dest_file.file_id = src_file.file_id
dest_file.file_type = src_file.file_type
dest_file.file_path_client = src_file.file_path_client
dest_file.dest_path = src_file.dest_path
dest_file.md5 = src_file.md5
dest_file.data_type = src_file.data_type
dest_file.data_subtype_tags = src_file.data_subtype_tags
dest_file.access_group = src_file.access_group
dest_file.security_level = src_file.security_level
dest_file.pmid_list = src_file.pmid_list
# Nested:
dest_file.study_list = [ StudyModel.build_from_db_model(a) for a in src_file.study_list]
dest_file.library_list = [ LibraryModel.build_from_db_model(a) for a in src_file.library_list]
dest_file.entity_set = [SampleModel.build_from_db_model(a) for a in src_file.entity_set]
dest_file.abstract_library = AbstractLibraryModel.build_from_db_model(src_file.abstract_library)
dest_file.index_file = IndexFileModel.build_from_db_model(src_file.index_file)
return dest_file
class BAMFileModel(SubmittedFileModel):
@staticmethod
def build_from_db_model(db_bamfile):
bamfile_model = BAMFileModel()
bamfile_model = BAMFileModel.copy_fields(db_bamfile, bamfile_model)
return SubmittedFileModel.copy_fields(db_bamfile, bamfile_model)
@staticmethod
def copy_fields(src_file, dest_file):
dest_file.seq_centers = src_file.seq_centers
dest_file.run_list = src_file.run_list
dest_file.platform_list = src_file.platform_list
dest_file.seq_date_list = src_file.seq_date_list
dest_file.library_well_list = src_file.library_well_list
dest_file.multiplex_lib_list = src_file.multiplex_lib_list
return dest_file
class VCFFileModel(SubmittedFileModel):
@staticmethod
def build_from_db_model(db_vcffile):
vcf_model = VCFFileModel()
vcf_model = VCFFileModel.copy_fields(db_vcffile, vcf_model)
return SubmittedFileModel.copy_fields(db_vcffile, vcf_model)
@staticmethod
def copy_fields(src_file, dest_file):
dest_file.file_format = src_file.file_format
dest_file.used_samtools = src_file.used_samtools
dest_file.used_unified_genotyper = src_file.used_unified_genotyper
return dest_file
class Submission(SerapisModel):
@staticmethod
def build_from_db_model(db_submission):
submission_model = Submission()
submission_model = Submission.copy_fields(db_submission, submission_model)
files = data_access.SubmissionDataAccess.retrieve_all_files_for_submission(db_submission.id)
submission_model.files_list = [f.file_id for f in files]
return submission_model
@staticmethod
def copy_fields(src_subm, dest_subm):
dest_subm.sanger_user_id = src_subm.sanger_user_id
dest_subm.access_group = src_subm.access_group
dest_subm.submission_date = src_subm.submission_date
dest_subm.file_type = src_subm.file_type
dest_subm.irods_collection = src_subm.irods_collection
return dest_subm
# def __init__(self, md5=None, paths=None, name=None):
# self.md5 = md5
# self.paths = paths
# self.name = name
# def __init__(self, accession_number=None, sanger_sample_id=None, public_name=None, sample_tissue_type=None,
# reference_genome=None,taxon_id=None, gender=None, cohort=None, ethnicity=None, country_of_origin=None,
# geographical_region=None, organism=None, common_name=None):
# self.accession_number = accession_number
# self.sanger_sample_id = sanger_sample_id
# self.public_name = public_name
# self.sample_tissue_type = sample_tissue_type
# self.reference_genome = reference_genome
# self.taxon_id = taxon_id
# self.gender = gender
# self.cohort = cohort
# self.ethnicity = ethnicity
# self.country_of_origin = country_of_origin
# self.geographical_region = geographical_region
# self.organism = organism
# self.common_name = common_name
# def __init__(self, dest_path=None, file_path_client=None, md5=None):
# self.dest_path = dest_path
# self.file_path_client = file_path_client
# self.md5 = md5
# def __init__(self, library_type=None, public_name=None, sample_internal_id=None):
# self.library_type = library_type
# self.public_name = public_name
# self.sample_internal_id = sample_internal_id
# def __init__(self, accession_number=None, study_type=None, study_title=None, faculty_sponsor=None,
# ena_project_id=None, study_visibility=None, description=None, pi_list=None):
# self.accession_number = accession_number
# self.study_type = study_type
# self.study_title = study_title
# self.faculty_sponsor = faculty_sponsor
# self.ena_project_id = ena_project_id
# self.study_visibility = ena_project_id
# self.description = description
# self.pi_list = pi_list
# def __init__(self, library_source=None, library_strategy=None, instrument_model=None, coverage=None):
# self.library_source = library_source
# self.library_strategy = library_strategy
# self.instrument_model = instrument_model
# self.coverage = coverage
|
wtsi-hgi/serapis
|
serapis/controller/logic/serapis_models.py
|
Python
|
agpl-3.0
| 13,059 | 0.008423 |
'''
TODO (29.05.2012):
1) show 1x, 2x, 3x threshold (as line)
2) auto scale in y axis? (calc and save min & max values of buffer)
3) draw y axis?
4) 'max_nbr_buffers_transmitted' must be 1 and 'framesize' must be 512 otherwise we get in trouble in RT mode.
5) set 'SHIFT_VIEW' in update() and dequeue in 'do_draw'? does this get rid of the shift / lag? --> IT DOES NOT!
6) how do I connect points across VBOs? currently only points inside a VBO are connected.
7) make code modular so that I don't have keep to versions up-to-date.
0) WINDOWS only:
A) if you are planning to run this program on Windows (32bit and 64 bit), make
sure to install python 32bit - 64bit python on Windows won't work with pyglet!
B) install 32bit installer of 'setuptools' http://pypi.python.org/pypi/setuptools
C) $ cd c:\python27\Scripts
$ easy_install numpy
D) set the nvidia driver 3D settings to 'performance' if you want highest FPS
1) you need to install a recent version of pyglet to run this program:
$ hg clone https://pyglet.googlecode.com/hg/ pyglet
$ sudo python setup.py install
# on windows do:
# d:
# cd d:\code\pyglet
# c:\Python27\python.exe setup.py install
2) you also need numpy to be installed; on ubuntu do:
$ sudo apt-get install python-numpy
3) Ubuntu / Linux only: in case this applications freezes make sure the following
points are met:
- Nvidia driver 280.13; I had lots of problems with version 290 & 295
- latest pyglet dev version is installed (see point 1). I tried both pyglet-1.1.2 and
pyglet-1.1.4 that come with ubuntu but I get very poor performance.
4) check remaining 'TODO' sections
Profiling)
A) per function
$ python -m cProfile pyglet_vbo_test7.py
B) per line
$ sudo /usr/bin/easy_install line_profiler
# add decorator '@profile' in front of each function
$ kernprof.py -l pyglet_vbo_test7.py
$ python /usr/local/lib/python2.7/dist-packages/line_profiler-1.0b3-py2.7-linux-x86_64.egg/line_profiler.py pyglet_vbo_test7.py.lprof > prof.txt
$ python /usr/local/lib/python2.7/dist-packages/RunSnakeRun-2.0.2a1-py2.7.egg/runsnakerun/runsnake.py prof.txt
C) with runsnakerun GUI - not compatible with method B)
$ sudo /usr/bin/easy_install RunSnakeRun
$ python -m cProfile -o pyglet_vbo_test7.profile pyglet_vbo_test7.py
$ python /usr/local/lib/python2.7/dist-packages/RunSnakeRun-2.0.2a1-py2.7.egg/runsnakerun/runsnake.py pyglet_vbo_test7.profile
'''
''' turn on debugger if necessary
import pdb
pdb.set_trace()
'''
import pyglet
from pyglet.gl import *
from ctypes import pointer, sizeof
import numpy as np
import random
from time import time
from math import ceil, floor
''' mmap stuff '''
import os, sys
import mmap
from datetime import datetime
from struct import unpack, pack
# switch between drawing modes. all modes render ~ the same amount of data points.
# mode = 0; few segments -> high FPS since not many gl* calls
# mode = 1; many segments -> low FPS since gl* calls are executed many more times.
MODE = 1
# default window dimensions
WIN_HEIGHT_DEFAULT = 800
WIN_WIDTH_DEFAULT = 800
# 512 is neuralynx specific.
NBR_DATA_POINTS_PER_BUFFER = 1.0
NBR_DATA_POINTS_PER_BUFFER_INT = int(NBR_DATA_POINTS_PER_BUFFER)
SCANRATE = 1
SECONDS_TO_VISUALIZE_PER_PANEL = 1.0
# approximate number of data point per VBO. will change and be adjusted so that
# this number is a multiple of NBR_DATA_POINTS_PER_BUFFER
NBR_DATA_POINTS_PER_VBO = 200
# how many times per second should we call the update function?
#CALL_UPDATE_X_TIMES_PER_SECOND = 67.0
# TODO: check what a reasonable value for 'CALL_UPDATE_X_TIMES_PER_SECOND' is.
# going from 67.0 to 60.0 gives me a huge performance improvement.
CALL_UPDATE_X_TIMES_PER_SECOND = 60.0
# into how many data panels should we split up the window?
NBR_PANELS = 1
# use same color for all segments?
USE_UNIFORM_COLOR = True
# default color to be used by 'USE_UNIFORM_COLOR'
DEFAULT_COLOR = [1, 0, 0]
# y scaling factors for spike and noise values.
SPIKE_SIZE = 200
NOISE_SIZE = 100
# numpy's randint is exclusive, therefore we need to add one.
NOISE_SIZE_NP = NOISE_SIZE + 1
# generate spike every N points
if MODE == 0:
GENERATE_SPIKE_EVERY_N_POINTS = 10000
elif MODE == 1:
GENERATE_SPIKE_EVERY_N_POINTS = 128
# where to put the 0/0 point of the data points.
X_OFFSET_PANEL = 20
Y_OFFSET_PANEL = 200
# update counter used to determine when to generate a new segment of data.
update_counter = 1;
SHIFT_VIEW = False
# enable debug 'print' statements?
DEBUG = 0
# number of independent data streams?
# e.g., 'StimOMatic' feeds in one spike and one LFP channel
NBR_INDEPENDENT_CHANNELS = 2
# should we use multiprocessing if possible? this might speed things up.
USE_MULTIPROCESSING = False
MULTIPROCESSING_NBR_PROCESSES = 12
DO_PROFILE = False
PLUGIN_NAME = 'pCtrlLFP'
# where's your temporary directory? mmap will write into it.
TMP_DIR = '/tmp'
if os.name == 'nt': # windows systems
# make sure you use double '\\' to separate directories
TMP_DIR = 'c:\\temp'
else: # unix systems
TMP_DIR = '/tmp'
TMP_DIR = TMP_DIR + os.sep + PLUGIN_NAME
# should we use mmap to receive data from matlab?
USE_MMAP = 1
MMAP_BYTES_PER_FLOAT = 8
MMAP_stats_file = TMP_DIR + os.sep + 'bla_stats'
# location of shared file(s)
MMAP_FILENAME = []
for j in range(NBR_INDEPENDENT_CHANNELS):
MMAP_FILENAME.append(TMP_DIR + os.sep + 'bla' + str(j+1))
# number of elements to store in memory
MMAP_STORE_LENGTH = MMAP_BYTES_PER_FLOAT * int(NBR_DATA_POINTS_PER_BUFFER)
# null string used to initalize memory
MMAP_NULL_HEX = '\x00'
################## function needed to calculate dependent parameters
def calc_VOB_numbers(NBR_DATA_POINTS_PER_VBO, NBR_DATA_POINTS_PER_BUFFER, SECONDS_TO_VISUALIZE_PER_PANEL):
NBR_DATA_POINTS_PER_VBO = ceil(NBR_DATA_POINTS_PER_VBO / NBR_DATA_POINTS_PER_BUFFER) * NBR_DATA_POINTS_PER_BUFFER
# calculate the number of VBOs that are need to display all data
NBR_VBOS_PER_PANEL = ceil(SECONDS_TO_VISUALIZE_PER_PANEL * SCANRATE / NBR_DATA_POINTS_PER_VBO)
# how many buffers of size 'NBR_DATA_POINTS_PER_BUFFER' does each panel hold?
# NBR_BUFFERS_PER_PANEL = NBR_VBOS_PER_PANEL * NBR_DATA_POINTS_PER_VBO / NBR_DATA_POINTS_PER_BUFFER
# update 'SECONDS_TO_VISUALIZE_PER_PANEL' to its true value
SECONDS_TO_VISUALIZE_PER_PANEL = NBR_VBOS_PER_PANEL * NBR_DATA_POINTS_PER_VBO / SCANRATE
# add one VBO to each panel since we want to smoothly add new data points.
NBR_VBOS_PER_PANEL += 1
return int(NBR_DATA_POINTS_PER_VBO), int(NBR_VBOS_PER_PANEL), SECONDS_TO_VISUALIZE_PER_PANEL
################## dependent parameters / settings
output = calc_VOB_numbers(NBR_DATA_POINTS_PER_VBO, NBR_DATA_POINTS_PER_BUFFER, SECONDS_TO_VISUALIZE_PER_PANEL)
NBR_DATA_POINTS_PER_VBO, NBR_VBOS_PER_PANEL, SECONDS_TO_VISUALIZE_PER_PANEL = output
# default X values
X_MIN = 0
X_MAX = float(WIN_WIDTH_DEFAULT) - X_OFFSET_PANEL
# shift each VBO by how much in X & Y direction, relative to the previous VBO?
SHIFT_Y_BY = 0
SHIFT_X_BY = abs(X_MIN) + abs(X_MAX)
# while generating the fake data, what is the stepsize between individual x data
# points?
STEPSIZE_X = float(SHIFT_X_BY) / NBR_DATA_POINTS_PER_VBO
# how much distance do 'NBR_DATA_POINTS_PER_BUFFER' points cover in x direction?
SHIFT_X_SINGLE_BUFFER = STEPSIZE_X * NBR_DATA_POINTS_PER_BUFFER
# Definitions for 'glColorPointer' and 'glVertexPointer'
n_COORDINATES_PER_VERTEX = 2
BYTES_PER_POINT = 8
# indicator values used to confirm that data is received.
DATA_RECEIVED_ACK_NUM = 3.14159265
DATA_RECEIVED_ACK_STR = pack('d', DATA_RECEIVED_ACK_NUM)
NBR_BUFFERS_ZERO_STR = pack('d', 0)
##################
# default window dimensions
WIN_HEIGHT_current = WIN_HEIGHT_DEFAULT
WIN_WIDTH_current = WIN_WIDTH_DEFAULT
''' decorator to quickly switch between profiling and no profiling '''
def do_profile(cond):
def resdec(f):
if not cond:
return f
return profile(f)
return resdec
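# Note: 'profile' is not defined in this module; it is the built-in that kernprof.py
# injects when the script is run under the line profiler (see the profiling notes in
# the module docstring above). With DO_PROFILE = False the decorator simply returns
# the undecorated function.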
@do_profile(DO_PROFILE)
def generate_line_segment_zeros(x_shift=SHIFT_X_BY, min_x=X_MIN, max_x=X_MAX, step_size=STEPSIZE_X):
''' same as 'generate_line_segment' but will generate zero y-values '''
zeros = True
x, y = generate_points(min_x, max_x, x_shift, step_size, zeros)
return create_2dim_list_from_arrays(x, y)
@do_profile(DO_PROFILE)
def generate_line_segment(x_shift=SHIFT_X_BY, min_x=X_MIN, max_x=X_MAX, step_size=STEPSIZE_X):
# ~ 1ms
x, y = generate_points(min_x, max_x, x_shift, step_size)
return create_2dim_list_from_arrays(x, y)
@do_profile(DO_PROFILE)
def generate_numbers_for_x_vector(x, zeros = False):
nbr_elements = len(x)
if zeros: # generate zeros
# TODO: check whether we need to add offset (Y_OFFSET_PANEL + 1)
y = np.zeros(nbr_elements)# + Y_OFFSET_PANEL + 1
else: # generate random values.
# generate a vector of random numbers in range [0, 1]
# y = [random.random() for i in range(nbr_elements)]
y = np.random.random(nbr_elements)
# generate a scaling vector of random numbers in range [1, NOISE_SIZE]
# this vector will scale each data point
# y_scale = [random.randint(1, NOISE_SIZE) for i in range(nbr_elements)]
y_scale = np.random.randint(1, NOISE_SIZE_NP, nbr_elements)
# generate a spike every 'GENERATE_SPIKE_EVERY_N_POINTS' data points
# generate an intial offset so that spikes don't occur at same position.
y_scale_offset = np.random.randint(1, GENERATE_SPIKE_EVERY_N_POINTS)
y_scale[GENERATE_SPIKE_EVERY_N_POINTS - 1 + y_scale_offset::GENERATE_SPIKE_EVERY_N_POINTS] = SPIKE_SIZE
# rescale each data point accordingly
y = (y * y_scale) + SHIFT_Y_BY + Y_OFFSET_PANEL
return y
@do_profile(DO_PROFILE)
def generate_points(min_x=X_MIN, max_x=X_MAX, x_shift=SHIFT_X_BY, step_size = STEPSIZE_X, zeros = False):
# < 0.1ms
# 'range' can only generate integer arrays
# x = np.array(range(min_x, max_x), int)
# use 'arrange' from numpy to generate a float array
x = np.arange(min_x, max_x, step_size)
x = x + x_shift
y = generate_numbers_for_x_vector(x, zeros)
return x, y
@do_profile(DO_PROFILE)
def create_2dim_list_from_arrays(x, y):
data = []
for i, j in zip(x, y):
data.extend([i, j])
return data
@do_profile(DO_PROFILE)
def transform_line_points_to_data_format_for_GPU(line_points):
# ~ 0.2ms
#print "nbr data points generated: " + str(len(line_points) / 2)
return (GLfloat*len(line_points))(*line_points)
@do_profile(DO_PROFILE)
def generate_color_for_segment():
# < 0.1ms
# generate well visible (not too dark) colors
if not USE_UNIFORM_COLOR:
while True:
color = [random.random() for j in xrange(0, 3)]
if sum(color) > 0.5:
break
else:
color = [1, 0, 0]
return color
@do_profile(DO_PROFILE)
def create_VBO():
# < 0.1ms
vbo_id = GLuint()
# generates 1 buffer object names, which are stored in pointer(vbo_id)
glGenBuffers(1, pointer(vbo_id))
return vbo_id
@do_profile(DO_PROFILE)
def create_VBO_send_data_to_VBO(data):
# < 0.1ms
vbo_id = create_VBO()
send_data_to_VBO(vbo_id, data)
return vbo_id
@do_profile(DO_PROFILE)
def send_data_to_VBO(vbo_id, data):
# < 0.1ms
# binds the named buffer object
glBindBuffer(GL_ARRAY_BUFFER, vbo_id)
# creates and initializes a buffer object's data store -> transfers data
# from the CPU to the GPU.
# TODO: check whether GL_DYNAMIC_DRAW or GL_STREAM_DRAW is faster.
# GL_STREAM_DRAW should be faster when updating the buffer @ every frame?
# see redbook page 95 & 96.
glBufferData(GL_ARRAY_BUFFER, sizeof(data), data, GL_DYNAMIC_DRAW)
@do_profile(DO_PROFILE)
def overwrite_line_segment_on_GPU(x_shift=SHIFT_X_BY, line_points=False, vbo_to_update=False):
# ~ 0.3ms
if not vbo_to_update:
print "!! no vbo pointer found - aborting !!"
print "update_counter: %d " % update_counter
return
if not line_points:
if DEBUG:
print "overwrite_line_segment_on_GPU: need to generate points"
line_points = generate_line_segment(x_shift)
data = transform_line_points_to_data_format_for_GPU(line_points)
color = generate_color_for_segment()
nbr_points = len(line_points)/2
# update data on VBO
send_data_to_VBO(vbo_to_update, data)
return nbr_points, color
@do_profile(DO_PROFILE)
def create_vbos(NBR_PANELS, NBR_VBOS_PER_PANEL):
vbos = [ [None] * int(NBR_VBOS_PER_PANEL) for i in xrange(NBR_PANELS) ]
for panel in range(NBR_PANELS):
for vbo in range(NBR_VBOS_PER_PANEL):
vbos[panel][vbo] = create_VBO()
return vbos
@do_profile(DO_PROFILE)
def create_initial_data(nPanels, nVbosPerPanel, nDataPointsPerVbo):
data = [ [None] * int(nVbosPerPanel) for i in xrange(nPanels) ]
for panel in range(nPanels):
for vbo in range(nVbosPerPanel):
curr_x_offset = (vbo * SHIFT_X_BY) + X_OFFSET_PANEL
#print "vbo %d, offset %d " % (vbo, curr_x_offset)
if (vbo + 1) == nVbosPerPanel:
tmp = generate_line_segment_zeros(x_shift=curr_x_offset)
else:
tmp = generate_line_segment(x_shift=curr_x_offset)
data[panel][vbo] = transform_line_points_to_data_format_for_GPU(tmp)
return data, curr_x_offset
@do_profile(DO_PROFILE)
def create_initial_colors(nPanels, nVbosPerPanel):
colors = [ [None] * int(nVbosPerPanel) for i in xrange(nPanels) ]
for panel in range(nPanels):
for vbo in range(nVbosPerPanel):
colors[panel][vbo] = generate_color_for_segment()
return colors
@do_profile(DO_PROFILE)
def initialize_vbos_with_start_data(NBR_PANELS, NBR_VBOS_PER_PANEL, vbos, data):
for panel in range(NBR_PANELS):
for vbo in range(NBR_VBOS_PER_PANEL):
send_data_to_VBO(vbos[panel][vbo], data[panel][vbo])
@do_profile(DO_PROFILE)
def setup_vbo_stuff(NBR_PANELS, NBR_VBOS_PER_PANEL, NBR_DATA_POINTS_PER_VBO):
t0 = time()
vbos = create_vbos(NBR_PANELS, NBR_VBOS_PER_PANEL)
data, curr_x_offset = create_initial_data(NBR_PANELS, NBR_VBOS_PER_PANEL, NBR_DATA_POINTS_PER_VBO)
initialize_vbos_with_start_data(NBR_PANELS, NBR_VBOS_PER_PANEL, vbos, data)
colors = create_initial_colors(NBR_PANELS, NBR_VBOS_PER_PANEL)
print 'initial setup time was %f seconds.' %(time() - t0)
return vbos, colors, curr_x_offset
def setup_plotting_queue():
# setup plotting queue
import collections
max_nbr_buffers = 20000
plot_queue = collections.deque([], max_nbr_buffers)
return plot_queue
@do_profile(DO_PROFILE)
def update_line_segment_on_GPU(vbo_id, pointer_offset, data):
# bind buffer and overwrite position with offset 'pos_to_overwrite*BYTES_PER_POINT'
#try:
glBindBuffer(GL_ARRAY_BUFFER, vbo_id)
glBufferSubData(GL_ARRAY_BUFFER, pointer_offset, sizeof(data), data)
#except:
#print "pointer_offset: ", pointer_offset
#print "sizeof(data): ", sizeof(data)
#pass
@do_profile(DO_PROFILE)
def calc_x_values_single_buffer():
x_values = np.arange(0, SHIFT_X_SINGLE_BUFFER, STEPSIZE_X)
return x_values
@do_profile(DO_PROFILE)
def append_data_to_plot_queue(new_data, nbr_buffers_per_mmap_file):
# reformat data so that the buffers from 'j' mmap files
# are paired together.
for j in range(int(min(nbr_buffers_per_mmap_file))):
data_to_add = []
for k in range(len(new_data)):
data_to_add.append(new_data[k][j])
# append 'data_to_add' to end (right side) of queue
plot_queue.append(data_to_add)
@do_profile(DO_PROFILE)
def get_data_from_plot_queue():
# remove & return left most element from queue
data = []
if len(plot_queue) > 0:
data = plot_queue.popleft()
return data
@do_profile(DO_PROFILE)
def request_new_data():
''' generates new raw data or grabs new data from MMAP '''
if USE_MMAP == 1:
new_data = get_data_from_mmap()
#update_data_stream_status(new_data)
#print new_data
else:
new_data = []
# get the x-spacing right
x_values = calc_x_values_single_buffer()
for j in xrange(NBR_INDEPENDENT_CHANNELS):
# put data into zero-th buffer
new_data.append([generate_numbers_for_x_vector(x_values)])
nbr_mmap_files = len(new_data)
nbr_buffers_per_mmap_file = np.zeros(nbr_mmap_files)
empty_data = np.zeros(nbr_mmap_files)
for j in range(nbr_mmap_files):
# update number of buffers in this 'file'. Will fail
# if len(new_data) != NBR_INDEPENDENT_CHANNELS
try:
nbr_buffers_per_mmap_file[j] = len(new_data[j])
except:
continue
# check whether the first buffer of the current mmap file is empty
sum_data = sum(new_data[j][0])
if sum_data == 0 or sum_data == DATA_RECEIVED_ACK_NUM:
empty_data[j] = 1
# print empty_data
return new_data, empty_data, nbr_buffers_per_mmap_file
def transform_vector_of_buffers_to_GPU_format(raw_data, x_shift_single_buffer_current):
# calc correct x_value
x_values = calc_x_values_single_buffer() + x_shift_single_buffer_current
nbr_mmap_files = len(raw_data)
data = []
for j in range(nbr_mmap_files):
line_points = create_2dim_list_from_arrays(x_values, raw_data[j])
data.append(transform_line_points_to_data_format_for_GPU(line_points))
return data
def mmap_stats_go_to_nbr_received_buffers_pos():
# go to 2nd position relative to 0.
mmap_stats.seek(MMAP_BYTES_PER_FLOAT * 2, 0)
@do_profile(DO_PROFILE)
def get_nbr_received_buffers_from_mmap():
# go to position where 'number of new buffers' is stored
mmap_stats_go_to_nbr_received_buffers_pos()
# read-in the string value
nbr_buffers_received = mmap_stats.read(MMAP_BYTES_PER_FLOAT)
# convert into decimal value
nbr_buffers_received = unpack('d', nbr_buffers_received)[0]
# debugging:
#print str(nbr_buffers_received) + ' number buffers received'
return nbr_buffers_received
def create_empty_data_buffer(nbr_mmap_files, zeros, nbr_buffers = 1):
# pre-allocate each buffer
buffers = []
for buffer_index in xrange(nbr_buffers):
# create deep copy of zeros, otherwise we create multiple references to
# the same object.
zeros_copy = zeros.copy()
        buffers.append(zeros_copy)
data = []
for mmap_file_index in xrange(nbr_mmap_files):
# put data into zero-th buffer
data.append(buffers)
return data
@do_profile(DO_PROFILE)
def splitIterator(text, size):
# assert size > 0, "size should be > 0"
for start in range(0, len(text), size):
yield text[start:start + size]
prev_sum = 0
MMAP_NO_DATA_INDICATE_ZERO = False
MMAP_NO_DATA_INDICATE_NON_ZERO = True
@do_profile(DO_PROFILE)
def get_data_from_mmap():
#
#t0 = time()
nbr_buffers_received = get_nbr_received_buffers_from_mmap()
nbr_mmap_files = len(mmap_data)
zeros = np.zeros(NBR_DATA_POINTS_PER_BUFFER_INT)
''' no new buffers - generate one empty dummy buffer and return '''
if nbr_buffers_received == 0 or nbr_buffers_received == -1:
return create_empty_data_buffer(nbr_mmap_files, zeros)
nbr_buffers_received = int(nbr_buffers_received)
nbr_elements = nbr_buffers_received * NBR_DATA_POINTS_PER_BUFFER_INT
range_nbr_mmap_files = range(nbr_mmap_files)
# check if there's any data that's ready for pickup.
new_data_found = np.zeros(nbr_mmap_files)
for mmap_file_index in range_nbr_mmap_files:
# go to beginning of memory mapped area
mmap_data[mmap_file_index].seek(0)
# quit right away if no new data has been written yet.
this_element = mmap_data[mmap_file_index].read(MMAP_BYTES_PER_FLOAT)
this_element = unpack('d', this_element)[0]
if round(this_element, 8) != DATA_RECEIVED_ACK_NUM:
new_data_found[mmap_file_index] = 1
# none of the files contain new data
if sum(new_data_found) == 0:
return create_empty_data_buffer(nbr_mmap_files, zeros, nbr_buffers_received)
''' read out transferred data '''
data = []
# this is ~ 10ms slower.
#data = np.zeros((nbr_mmap_files, nbr_buffers_received, NBR_DATA_POINTS_PER_BUFFER_INT))
# at least one new buffer has arrived.
for mmap_file_index in range_nbr_mmap_files:
#'''
# pre-allocate each buffer
buffers = []
for buffer_index in xrange(nbr_buffers_received):
# DONE: find out what the problem here is:
# there seems to be a bug in python on windows, or I don't understand the way things work:
# if I create 'zeros' outside this loop, the second time that 'zeros' gets called,
# it will contain all values found in data[mmap_file_index][buffer][j]. Therefore I have to re-generate
# the 'zeros' for each mmap_file_index'th loop.
# SOLUTION:
# We need to make a 'deep-copy' of zeros, otherwise we are just
# passing a reference to the same object (which is a np.array object).
zero_copy = zeros.copy()
buffers.append(zero_copy)
# add all buffers to mmap_file_index'th data stream.
data.append(buffers)
#'''
# go to beginning of memory mapped area & read out all elements
mmap_data[mmap_file_index].seek(0)
all_values_string = mmap_data[mmap_file_index].read(nbr_elements * MMAP_BYTES_PER_FLOAT)
# 0.1632 per call in debugger
# grab sub-list so we avoid having to call this list by its index.
this_data = data[mmap_file_index]
# unpack all values at once
unpacked_values = unpack("d" * nbr_elements, all_values_string)
# using list comprehension is better than a regular loop with random array access
this_data = [unpacked_values[i:i+NBR_DATA_POINTS_PER_BUFFER_INT] for i in xrange(0, nbr_elements, NBR_DATA_POINTS_PER_BUFFER_INT)]
# slower version of above line.
#for abs_idx in range(nbr_elements):
# this_data[abs_idx / NBR_DATA_POINTS_PER_BUFFER_INT][abs_idx % NBR_DATA_POINTS_PER_BUFFER_INT] = unpacked_values[abs_idx]
# write-back sub-list
data[mmap_file_index] = this_data
''' original version.
# these next few lines are responsible for 90% of the time spent in this function.
# 0.4974s per call in debugger
element_values_list = list(splitIterator(all_values_string, MMAP_BYTES_PER_FLOAT))
for abs_element_index in range(nbr_elements):
this_element = element_values_list[abs_element_index]
this_element = unpack('d', this_element)[0]
buffer_nbr = abs_element_index / NBR_DATA_POINTS_PER_BUFFER_INT
index_in_buffer = abs_element_index % NBR_DATA_POINTS_PER_BUFFER_INT
data[mmap_file_index][buffer_nbr][index_in_buffer] = this_element
'''
''' useless alternatives
# even worse: -> ~ 0.0063 secs per call
unpacked_values = [unpack('d', element_values_list[j])[0] for j in range(nbr_elements)]
# worst: ~0.0160 secs per call
buffer_ids = np.arange(nbr_elements) / NBR_DATA_POINTS_PER_BUFFER_INT
index_in_buffer_id = np.arange(nbr_elements) % NBR_DATA_POINTS_PER_BUFFER_INT
for abs_element_index in range(nbr_elements):
data[mmap_file_index][buffer_ids[abs_element_index]][index_in_buffer_id[abs_element_index]] = unpacked_values[abs_element_index]
'''
#t1 = time()
#print 'get_data_from_mmap() takes %f seconds' %(t1-t0)
# go to beginning of memory mapped area and overwrite first value with
# ACK string so that the sender knows that it is safe to overwrite the
# previous data (== send new data).
for mmap_file_index in range_nbr_mmap_files:
mmap_data[mmap_file_index].seek(0)
mmap_data[mmap_file_index].write(DATA_RECEIVED_ACK_STR)
# overwrite the 'number of buffers received' field with zero, so that we don't
# keep reading in this very same data.
mmap_stats_go_to_nbr_received_buffers_pos()
mmap_stats.write(NBR_BUFFERS_ZERO_STR)
return data
@do_profile(DO_PROFILE)
def update_vbo_with_data_from_plot_queue():
global x_shift_current, x_shift_single_buffer_current
global pointer_shift
global vbos, colors
global c_vbo # counter needed for VBO positioning
global pointer_offset, nbr_points_rendered_in_last_vbo
for j in xrange(NBR_BUFFERS_TO_UPDATE):
# grab 'raw_data' from beginning of plot queue.
raw_data = get_data_from_plot_queue()
data = transform_vector_of_buffers_to_GPU_format(raw_data, x_shift_single_buffer_current)
### VBO POSITIONING
pos_to_overwrite = c_vbo % (NBR_DATA_POINTS_PER_VBO / NBR_DATA_POINTS_PER_BUFFER)
nbr_points_rendered_in_last_vbo = int(NBR_DATA_POINTS_PER_BUFFER * pos_to_overwrite)
# at which location in the memory (in bytes) of the VBO should we replace the data?
# also needed for plotting.
pointer_offset = nbr_points_rendered_in_last_vbo * BYTES_PER_POINT
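        # Worked example (using the 512-points-per-buffer figure mentioned at the top of
        # this file rather than the current test settings): when the third buffer of a
        # VBO is written, pos_to_overwrite is 2, so the write starts 2 * 512 points into
        # the VBO, i.e. at byte offset 2 * 512 * 8 = 8192.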
nbr_data_streams = len(data)
for panel in range(NBR_PANELS):
update_line_segment_on_GPU(vbos[panel][-1], pointer_offset, data[panel % nbr_data_streams])
c_vbo += 1
x_shift_single_buffer_current += SHIFT_X_SINGLE_BUFFER
pointer_shift += NBR_DATA_POINTS_PER_BUFFER
# check whether we reached the end of the VBO and thus need to rotate it.
if pointer_shift == NBR_DATA_POINTS_PER_VBO:
pointer_shift, pointer_offset, x_shift_current, vbos, colors, c_vbo = rotate_vbos_clear_last_vbo(pointer_shift, pointer_offset, x_shift_current, vbos, colors, c_vbo)
@do_profile(DO_PROFILE)
def rotate_vbos_clear_last_vbo(pointer_shift, pointer_offset, x_shift_current, vbos, colors, c_vbo):
# reset pointer offsets / shifts
# TODO: clean up and clarify 'pointer_shift' vs 'pointer_offset'!
pointer_shift = 0
pointer_offset = 0
c_vbo = 0
x_shift_current += SHIFT_X_BY
''' this is not fast enough and will lead to jitter effects
# generate new data set for each panel
tmp_points = [ [None] for j in range(NBR_PANELS)]
for panel in range(NBR_PANELS):
tmp_points_panel = generate_line_segment_zeros(x_shift=x_shift_current)
tmp_points[panel] = transform_line_points_to_data_format_for_GPU(tmp_points_panel)
'''
for panel in range(NBR_PANELS):
this_vbo = vbos[panel][0]
this_color = colors[panel][0]
# Delete current vbo and replace with new one.
# We could just re-use the current vbo, however this might lead to 'blinking' artifacts
# with the first VBO (probably because of incorrect referencing).
# By deleting the VBO, we make sure that this VBO is not being used for plotting.
glDeleteBuffers(1, pointer(this_vbo))
this_vbo = create_VBO()
# bind VBO and allocate memory.
glBindBuffer(GL_ARRAY_BUFFER, this_vbo)
glBufferData(GL_ARRAY_BUFFER, n_COORDINATES_PER_VERTEX * NBR_DATA_POINTS_PER_VBO * BYTES_PER_POINT, None, GL_DYNAMIC_DRAW)
# vbo pointer & color from arrays
vbos[panel] = vbos[panel][1:]
colors[panel] = colors[panel][1:]
# add color and pointer to VBO
vbos[panel].append(this_vbo)
colors[panel].append(this_color)
return pointer_shift, pointer_offset, x_shift_current, vbos, colors, c_vbo
@do_profile(DO_PROFILE)
def update_data_stream_status(data):
global prev_sum, MMAP_NO_DATA_INDICATE_ZERO, MMAP_NO_DATA_INDICATE_NON_ZERO
# check if new data has arrived and tell user
# we only check for the first data stream - I'm assuming here that either
# all channels or no channels with fail.
nbr_mmap_files = 0
buffer_to_check = 0
current_sum = sum(data[nbr_mmap_files][buffer_to_check])
if current_sum == prev_sum:
if prev_sum == 0:
# indicate zero state only once
if not MMAP_NO_DATA_INDICATE_ZERO:
print datetime.now(), ' - No new data received (sum(data) == zero)'
MMAP_NO_DATA_INDICATE_ZERO = True
else:
if not MMAP_NO_DATA_INDICATE_NON_ZERO:
print datetime.now(), ' - No new data received (sum(data) != zero)'
MMAP_NO_DATA_INDICATE_NON_ZERO = True
else:
if MMAP_NO_DATA_INDICATE_ZERO:
MMAP_NO_DATA_INDICATE_ZERO = False
print datetime.now(), ' - New data received!'
if MMAP_NO_DATA_INDICATE_NON_ZERO:
MMAP_NO_DATA_INDICATE_NON_ZERO = False
print datetime.now(), ' - New data received!'
prev_sum = current_sum
# t1 = time()
# print 'get_data_from_mmap() takes %f seconds' %(t1-t0)
@do_profile(DO_PROFILE)
def create_mmap_file_on_disk(fname):
# (over-) write file
fd = os.open(fname, os.O_CREAT | os.O_TRUNC | os.O_RDWR)
assert os.write(fd, MMAP_NULL_HEX * MMAP_STORE_LENGTH)
os.close(fd)
@do_profile(DO_PROFILE)
def setup_mmap(filenames):
# matlab:
# m = memmapfile('/tmp/bla', 'Format', 'double', 'Writable', true)
# m.Data = sin(linspace(200, 203, 512))*100
# m.Data = linspace(200, 300, 512);
# t = timer('TimerFcn', 'm.Data=sin(linspace(200, 203, 512)) * rand(1)*512;', 'Period', 0.015, 'ExecutionMode', 'fixedRate');
# start(t)
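    # A rough Python-side equivalent (not part of the original code) for feeding test
    # data without MATLAB. It assumes the same layout as the MATLAB snippet above: one
    # buffer of plain doubles written from offset 0 of the shared file; a real sender
    # must also write the number of new buffers into the stats file (see
    # get_nbr_received_buffers_from_mmap).
    #
    #   import mmap, struct
    #   import numpy as np
    #   with open(MMAP_FILENAME[0], "r+b") as f:
    #       m = mmap.mmap(f.fileno(), 0)
    #       samples = np.sin(np.linspace(200, 203, NBR_DATA_POINTS_PER_BUFFER_INT)) * 100
    #       m.seek(0)
    #       m.write(struct.pack("d" * len(samples), *samples))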
mmap_false = False
mmap_data = []
for i in range(len(filenames)):
fname = filenames[i]
# check if file exists
if not os.path.isfile(fname):
# check if directory exists
path_to_file = os.path.dirname(fname)
if not os.path.isdir(path_to_file):
print "Directory '" + path_to_file + "' not found - creating it."
os.makedirs(path_to_file)
create_mmap_file_on_disk(fname)
# initialize the memory map
f = open(fname, "r+b")
mmap_data.append(mmap.mmap(f.fileno(), 0))
# initialize memory with default value
for j in range(len(mmap_data)):
mmap_data[i][j] = MMAP_NULL_HEX
return mmap_data
##################### MAIN #####################################################
# animation is enabled by default. you can pause / resume it by pressing 'a'
DO_ANIMATE = True
DO_NEXT_STEP = False
''' BEGIN setup part 1 '''
if USE_MMAP:
# initialize MMAP
mmap_data = setup_mmap(MMAP_FILENAME)
if not mmap_data:
print "Could not read mmap-file. Aborting."
sys.exit(1)
if not os.path.isfile(MMAP_stats_file):
create_mmap_file_on_disk(MMAP_stats_file)
f = open(MMAP_stats_file, "r+b")
mmap_stats = mmap.mmap(f.fileno(), 0)
vbos, colors, x_shift_current = setup_vbo_stuff(NBR_PANELS, NBR_VBOS_PER_PANEL, NBR_DATA_POINTS_PER_VBO)
# TODO: clarify difference between 'x_shift_single_buffer_current' and 'x_shift_current'
x_shift_single_buffer_current = x_shift_current
plot_queue = setup_plotting_queue()
info_str = "%d panels; %d segments per panel; %d number of points per segment." % ( NBR_PANELS, NBR_VBOS_PER_PANEL, NBR_DATA_POINTS_PER_VBO )
print info_str
# setup window
window = pyglet.window.Window(width=WIN_WIDTH_DEFAULT, height=WIN_HEIGHT_DEFAULT, resizable=True)
window.set_caption(info_str)
# initialize FPS display
fps_display = pyglet.clock.ClockDisplay(interval=0.125, format='FPS %(fps).2f')
''' END setup part 1 '''
''' BEGIN periodic event function - check whether we need to replace a VBO '''
# variables needed while updating the VBOs
pointer_shift = 0
pointer_offset = 0
nbr_points_rendered_in_last_vbo = 0
c_vbo = 0
# definitions needed for dequeueing of plot buffers.
NBR_BUFFERS_TO_UPDATE = 1
MIN_NBR_BUFFERS_NECESSARY_FOR_UPDATE = NBR_BUFFERS_TO_UPDATE
@do_profile(DO_PROFILE)
def update(dt):
# ~ 24 ms, generating new data set for each panel
# ~ 6 ms, generating only one new data set and re-using it.
# ~ 0.4 ms, without 'generate_line_segment' and 'overwrite_line_segment_on_GPU'
if not DO_ANIMATE:
# quit right away if animation is disabled. Ideally we would want to still
# compute at least the next set of 'tmp_points', however we need to make sure that
# 'x_shift_current' doesn't get updated more than once (or 'SHIFT_X_BY' is updated
# accordingly).
return
if DO_NEXT_STEP:
raw_input('please press key to continue ')
if DEBUG:
print "update_counter in 'update()' %d " % update_counter
t0 = time()
''' START 'DATA MANAGEMENT' '''
# pick up new data from mmap or other system (i.e. generated)
new_data, new_data_is_empty, nbr_buffers_per_mmap_file = request_new_data()
# don't add empty data to the queue
# don't use 'NBR_INDEPENDENT_CHANNELS' here, because we might be skipping this channel
if sum(new_data_is_empty) != len(new_data):
append_data_to_plot_queue(new_data, nbr_buffers_per_mmap_file)
''' END 'DATA MANAGEMENT' '''
''' START 'dequeue enough buffers and prepare them for plotting' '''
# don't purge entire queue - keep at least one element in queue.
if len(plot_queue) < MIN_NBR_BUFFERS_NECESSARY_FOR_UPDATE:
return
# dequeue buffers and update VBOs
update_vbo_with_data_from_plot_queue()
''' END 'dequeue enough buffers and prepare them for plotting' '''
# indicate that view needs to be shifted
global SHIFT_VIEW
SHIFT_VIEW = True
if DEBUG:
t1 = time()
print 'update() takes %f seconds' %(t1-t0)
pyglet.clock.schedule_interval(update, 1.0/CALL_UPDATE_X_TIMES_PER_SECOND)
''' END periodic event function '''
from pyglet.window import key
KEYPRESS_STEPSIZE = 10
zoom = 0
currentScale = 1
@window.event
@do_profile(DO_PROFILE)
def on_key_press(symbol, modifiers):
global DO_ANIMATE, DO_NEXT_STEP, KEYPRESS_STEPSIZE, zoom, currentScale
    global x_shift_single_buffer_current, x_shift_current
global plot_queue
# turn animation on / off.
if symbol == key.A:
DO_ANIMATE = not DO_ANIMATE
if DO_ANIMATE:
print 'animation on'
else:
print 'animation off'
elif symbol == key.C:
plot_queue = setup_plotting_queue()
print "Cleared Plot-Queue"
elif symbol == key.Q:
print "Plot-Queue size: %d" % (len(plot_queue))
# zero the plot along the x axis. in case of drifting, this should get the
# back onto the screen.
elif symbol == key.Z:
glTranslatef(+x_shift_single_buffer_current, 0.0, 0.0)
fps_display.label.x = fps_display.label.x - x_shift_single_buffer_current
x_shift_single_buffer_current = 0
x_shift_current = 0
elif symbol == key.S:
DO_NEXT_STEP = not DO_NEXT_STEP
elif symbol == key.LEFT:
glTranslatef(-KEYPRESS_STEPSIZE, 0.0, 0.0)
elif symbol == key.RIGHT:
glTranslatef(KEYPRESS_STEPSIZE, 0.0, 0.0)
elif (symbol == key.PLUS or symbol == key.NUM_ADD):
KEYPRESS_STEPSIZE += 10
print 'step size is now %d ' % KEYPRESS_STEPSIZE
elif (symbol == key.MINUS or symbol == key.NUM_SUBTRACT):
KEYPRESS_STEPSIZE -= 10
KEYPRESS_STEPSIZE = max(10, KEYPRESS_STEPSIZE)
print 'step size is now %d ' % KEYPRESS_STEPSIZE
else:
print '%s key, %s modifier was pressed' % (symbol, modifiers)
''' zooming
elif symbol == key.Z:
if modifiers == key.MOD_ALT + 16:
#zoom -= 0.5;
#glOrtho(+1.5 + zoom, 1.0 + zoom, +2.0 + zoom, 0.5 + zoom, +1.0, -3.5)
#currentScale -= 0.1
#glScaled(currentScale, currentScale, 1);
elif modifiers == key.MOD_SHIFT + 16:
#zoom += 0.5;
#glOrtho(-1.5 + zoom, 1.0 - zoom, -2.0 + zoom, 0.5 - zoom, -1.0, 3.5)
#currentScale += 0.1
#glScaled(currentScale, currentScale, 1);
'''
''' rotations
elif symbol == key.PAGEDOWN:
# we need to move objects into center, before rotating
#glRotatef(0.5, 1, 0, 0)
# need to move object back to original position
elif symbol == key.PAGEUP:
# we need to move objects into center, before rotating
#glRotatef(-0.5, 1, 0, 0)
# need to move object back to original position
'''
'''
BEGIN 'on_resize' function - can only be defined once 'window' exists
'''
@window.event
@do_profile(DO_PROFILE)
def on_resize(width, height):
global WIN_HEIGHT_current, WIN_WIDTH_current
WIN_HEIGHT_current = height
WIN_WIDTH_current = width
# TODO: currently we only rescale the Y dimension. Add X-Scaling!
if DEBUG:
print "new height %d " %(height)
print "new width %d " %(width)
''' END 'on_resize' function - can only be defined once 'window' exists '''
'''
BEGIN 'draw' function - can only be defined once 'window' exists
The EventLoop will dispatch this event when the window should be redrawn.
This will happen during idle time after any window events and after any
scheduled functions were called.
'''
@window.event
@do_profile(DO_PROFILE)
def on_draw():
# ~ 21ms (test6 was ~260ms)
global SHIFT_VIEW
# clear buffers to preset values
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# TODO:
# maybe we should move back to the origin and translate from there?
# glLoadIdentity()
# glTranslatef(-x_shift_single_buffer_current/2, 0.0, 0.0)
if SHIFT_VIEW:
#local_shift = (SHIFT_X_BY/CALL_UPDATE_X_TIMES_PER_SECOND)
# TODO: fix 'local_shift', right now we override it to '1'
# 'SHIFT_X_BY' needs to be an integral number, otherwise we get
# artifacts of single points moving up and down between shifts.
local_shift = NBR_BUFFERS_TO_UPDATE * STEPSIZE_X * NBR_DATA_POINTS_PER_BUFFER
#local_shift = 1
glTranslatef(-local_shift, 0.0, 0.0)
# shift location of FPS display by same amount - but in opposite direction
# TODO: this must be because of a different reference point?
fps_display.label.x = fps_display.label.x + local_shift
SHIFT_VIEW = False
if USE_UNIFORM_COLOR:
glColor3f(DEFAULT_COLOR[0], DEFAULT_COLOR[1], DEFAULT_COLOR[2])
height_per_panel = (WIN_HEIGHT_current / NBR_PANELS)
for panel in range(NBR_PANELS):
#glViewport(x, y, w, h)
glViewport(0, panel * height_per_panel, WIN_WIDTH_current, height_per_panel)
# plot each VBO
for segment in range(NBR_VBOS_PER_PANEL):
if not USE_UNIFORM_COLOR:
this_color = colors[panel][segment]
glColor3f(this_color[0], this_color[1], this_color[2])
# bind the named buffer object so we can work with it.
glBindBuffer(GL_ARRAY_BUFFER, vbos[panel][segment])
## TODO!
''' hide individual buffers in first VBO so that points disappear
smoothly in the first buffer '''
this_pointer_offset = 0
nbr_points_to_draw = NBR_DATA_POINTS_PER_VBO
if segment == 0:
this_pointer_offset = pointer_offset
nbr_points_to_draw = NBR_DATA_POINTS_PER_VBO - (pointer_offset / BYTES_PER_POINT)
elif segment == NBR_VBOS_PER_PANEL - 1:
# TODO: is 'nbr_points_rendered_in_last_vbo' correct? or are we plotting too few points?
this_pointer_offset = 0
nbr_points_to_draw = nbr_points_rendered_in_last_vbo
# specifies the location and data format of an array of vertex coordinates to use when rendering
glVertexPointer(n_COORDINATES_PER_VERTEX, GL_FLOAT, 0, this_pointer_offset)
# render primitives from array data
glDrawArrays(GL_LINE_STRIP, 0, nbr_points_to_draw)
# update the FPS display.
glViewport(0, 0, WIN_WIDTH_current, WIN_HEIGHT_current)
fps_display.draw()
''' END 'draw' function - can only be defined once 'window' exists '''
''' BEGIN setup part 2 '''
glClearColor(0, 0, 0, 1.0)
# enable VERTEX_ARRAY mode.
glEnableClientState(GL_VERTEX_ARRAY)
# try to render a smooth line
glEnable(GL_LINE_SMOOTH)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)
# start application event loop
pyglet.app.run()
'''
print "quit counter " + str(on_draw_quit_counter)
print "re-draw counter " + str(on_draw_redraw_counter)
print "update counter " + str(update_counter)
'''
''' END setup part 2 '''
|
StimOMatic/StimOMatic
|
python/OpenGLPlotting/pomp/apps/deprecated/01.06.2012/pCtrlLFP_old.py
|
Python
|
bsd-2-clause
| 41,129 | 0.011598 |
# -*- coding: utf-8 -*-
import urllib
from oauth2 import Consumer
from webob import Request, Response
from wsgioauth import calls
from wsgioauth.provider import Application, Storage, Token
from wsgioauth.utils import CALLS
ROUTES = {
u'getConsumers': calls.getConsumers,
u'getRequestTokens': calls.getRequestTokens,
u'getAccessTokens': calls.getAccessTokens,
u'provisionConsumer': calls.provisionConsumer,
u'provisionRequestToken': calls.provisionRequestToken,
u'provisionAccessToken': calls.provisionAccessToken,
u'deleteConsumer': calls.deleteConsumer,
u'deleteRequestToken': calls.deleteRequestToken,
u'deleteAccessToken': calls.deleteAccessToken,
}
def getMockStorage():
from wsgioauth.provider import OAUTH_CLASSES
OAUTH_CLASSES['consumer'] = Consumer
OAUTH_CLASSES['request_token'] = Token
return Storage
def echo_app(environ, start_response):
"""Simple app that echos a POST request"""
req = Request(environ)
resp = Response(urllib.urlencode(req.params))
return resp(environ, start_response)
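# Illustrative usage (not part of the original module): the echo app can be exercised
# directly with WebOb's test helpers, e.g.
#
#   req = Request.blank('/', POST={'answer': '42'})
#   resp = req.get_response(echo_app)
#   assert resp.body == 'answer=42'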
def echo_app_factory(*global_conf, **local_conf):
return echo_app
STORAGE = None
def app_factory(*global_conf, **local_conf):
CALLS.update(ROUTES)
global STORAGE
if STORAGE is None:
storage_cls = getMockStorage()
STORAGE = storage_cls(local_conf)
def storage_lookup(environ, conf):
return STORAGE
return Application(storage_lookup, **local_conf)
def filter_factory(app, *global_conf, **local_conf):
"""This function returns a wsgioauth.provider.Filter services factory."""
from wsgioauth.mock import getMockStorage
global STORAGE
if STORAGE is None:
storage_cls = getMockStorage()
STORAGE = storage_cls(local_conf)
def storage_lookup(environ, conf):
return STORAGE
from wsgioauth.provider import Middleware
return Middleware(app, storage_lookup, **local_conf)
|
karacos/karacos-wsgi
|
lib/wsgioauth/mock.py
|
Python
|
lgpl-3.0
| 2,042 | 0.003918 |
import uuid
import xbmc
import xbmcaddon
settings = {}
addon = xbmcaddon.Addon()
settings['debug'] = addon.getSetting('debug') == "true"
settings['powersave_minutes'] = int(addon.getSetting('powersave_minutes'))
settings['version'] = addon.getAddonInfo('version')
settings['uuid'] = str(addon.getSetting('uuid')) or str(uuid.uuid4())
addon.setSetting('uuid', settings['uuid'])
|
devshans/script.service.hdmipowersave
|
resources/lib/settings.py
|
Python
|
mit
| 415 | 0.009639 |
syscall_table = {}
syscall_table[202]="accept"
syscall_table[242]="accept4"
syscall_table[1033]="access"
syscall_table[89]="acct"
syscall_table[217]="add_key"
syscall_table[171]="adjtimex"
syscall_table[1059]="alarm"
syscall_table[1075]="bdflush"
syscall_table[200]="bind"
syscall_table[214]="brk"
syscall_table[90]="capget"
syscall_table[91]="capset"
syscall_table[49]="chdir"
syscall_table[1028]="chmod"
syscall_table[1029]="chown"
syscall_table[51]="chroot"
syscall_table[114]="clock_getres"
syscall_table[113]="clock_gettime"
syscall_table[115]="clock_nanosleep"
syscall_table[112]="clock_settime"
syscall_table[220]="clone"
syscall_table[57]="close"
syscall_table[203]="connect"
syscall_table[1064]="creat"
syscall_table[106]="delete_module"
syscall_table[23]="dup"
syscall_table[1041]="dup2"
syscall_table[24]="dup3"
syscall_table[1042]="epoll_create"
syscall_table[20]="epoll_create1"
syscall_table[21]="epoll_ctl"
syscall_table[22]="epoll_pwait"
syscall_table[1069]="epoll_wait"
syscall_table[1044]="eventfd"
syscall_table[19]="eventfd2"
syscall_table[221]="execve"
syscall_table[93]="exit"
syscall_table[94]="exit_group"
syscall_table[48]="faccessat"
syscall_table[223]="fadvise64"
syscall_table[47]="fallocate"
syscall_table[262]="fanotify_init"
syscall_table[263]="fanotify_mark"
syscall_table[50]="fchdir"
syscall_table[52]="fchmod"
syscall_table[53]="fchmodat"
syscall_table[55]="fchown"
syscall_table[54]="fchownat"
syscall_table[25]="fcntl"
syscall_table[1052]="fcntl64"
syscall_table[83]="fdatasync"
syscall_table[10]="fgetxattr"
syscall_table[13]="flistxattr"
syscall_table[32]="flock"
syscall_table[1079]="fork"
syscall_table[16]="fremovexattr"
syscall_table[7]="fsetxattr"
syscall_table[80]="fstat"
syscall_table[1051]="fstat64"
syscall_table[79]="fstatat64"
syscall_table[44]="fstatfs"
syscall_table[1055]="fstatfs64"
syscall_table[82]="fsync"
syscall_table[46]="ftruncate"
syscall_table[1047]="ftruncate64"
syscall_table[98]="futex"
syscall_table[1066]="futimesat"
syscall_table[168]="getcpu"
syscall_table[17]="getcwd"
syscall_table[1065]="getdents"
syscall_table[61]="getdents64"
syscall_table[177]="getegid"
syscall_table[175]="geteuid"
syscall_table[176]="getgid"
syscall_table[158]="getgroups"
syscall_table[102]="getitimer"
syscall_table[236]="get_mempolicy"
syscall_table[205]="getpeername"
syscall_table[155]="getpgid"
syscall_table[1060]="getpgrp"
syscall_table[172]="getpid"
syscall_table[173]="getppid"
syscall_table[141]="getpriority"
syscall_table[150]="getresgid"
syscall_table[148]="getresuid"
syscall_table[163]="getrlimit"
syscall_table[100]="get_robust_list"
syscall_table[165]="getrusage"
syscall_table[156]="getsid"
syscall_table[204]="getsockname"
syscall_table[209]="getsockopt"
syscall_table[178]="gettid"
syscall_table[169]="gettimeofday"
syscall_table[174]="getuid"
syscall_table[8]="getxattr"
syscall_table[105]="init_module"
syscall_table[27]="inotify_add_watch"
syscall_table[1043]="inotify_init"
syscall_table[26]="inotify_init1"
syscall_table[28]="inotify_rm_watch"
syscall_table[3]="io_cancel"
syscall_table[29]="ioctl"
syscall_table[1]="io_destroy"
syscall_table[4]="io_getevents"
syscall_table[31]="ioprio_get"
syscall_table[30]="ioprio_set"
syscall_table[0]="io_setup"
syscall_table[2]="io_submit"
syscall_table[104]="kexec_load"
syscall_table[219]="keyctl"
syscall_table[129]="kill"
syscall_table[1032]="lchown"
syscall_table[9]="lgetxattr"
syscall_table[1025]="link"
syscall_table[37]="linkat"
syscall_table[201]="listen"
syscall_table[11]="listxattr"
syscall_table[12]="llistxattr"
syscall_table[18]="lookup_dcookie"
syscall_table[15]="lremovexattr"
syscall_table[62]="lseek"
syscall_table[6]="lsetxattr"
syscall_table[1039]="lstat"
syscall_table[1050]="lstat64"
syscall_table[233]="madvise"
syscall_table[235]="mbind"
syscall_table[238]="migrate_pages"
syscall_table[232]="mincore"
syscall_table[1030]="mkdir"
syscall_table[34]="mkdirat"
syscall_table[1027]="mknod"
syscall_table[33]="mknodat"
syscall_table[228]="mlock"
syscall_table[230]="mlockall"
syscall_table[222]="mmap"
syscall_table[40]="mount"
syscall_table[239]="move_pages"
syscall_table[226]="mprotect"
syscall_table[185]="mq_getsetattr"
syscall_table[184]="mq_notify"
syscall_table[180]="mq_open"
syscall_table[183]="mq_timedreceive"
syscall_table[182]="mq_timedsend"
syscall_table[181]="mq_unlink"
syscall_table[216]="mremap"
syscall_table[187]="msgctl"
syscall_table[186]="msgget"
syscall_table[188]="msgrcv"
syscall_table[189]="msgsnd"
syscall_table[227]="msync"
syscall_table[229]="munlock"
syscall_table[231]="munlockall"
syscall_table[215]="munmap"
syscall_table[101]="nanosleep"
syscall_table[1054]="newfstatat"
syscall_table[42]="nfsservctl"
syscall_table[1024]="open"
syscall_table[56]="openat"
syscall_table[1061]="pause"
syscall_table[241]="perf_event_open"
syscall_table[92]="personality"
syscall_table[1040]="pipe"
syscall_table[59]="pipe2"
syscall_table[41]="pivot_root"
syscall_table[1068]="poll"
syscall_table[73]="ppoll"
syscall_table[167]="prctl"
syscall_table[67]="pread64"
syscall_table[69]="preadv"
syscall_table[261]="prlimit64"
syscall_table[72]="pselect6"
syscall_table[117]="ptrace"
syscall_table[68]="pwrite64"
syscall_table[70]="pwritev"
syscall_table[60]="quotactl"
syscall_table[63]="read"
syscall_table[213]="readahead"
syscall_table[1035]="readlink"
syscall_table[78]="readlinkat"
syscall_table[65]="readv"
syscall_table[142]="reboot"
syscall_table[1073]="recv"
syscall_table[207]="recvfrom"
syscall_table[243]="recvmmsg"
syscall_table[212]="recvmsg"
syscall_table[234]="remap_file_pages"
syscall_table[14]="removexattr"
syscall_table[1034]="rename"
syscall_table[38]="renameat"
syscall_table[218]="request_key"
syscall_table[128]="restart_syscall"
syscall_table[1031]="rmdir"
syscall_table[134]="rt_sigaction"
syscall_table[136]="rt_sigpending"
syscall_table[135]="rt_sigprocmask"
syscall_table[138]="rt_sigqueueinfo"
syscall_table[139]="rt_sigreturn"
syscall_table[133]="rt_sigsuspend"
syscall_table[137]="rt_sigtimedwait"
syscall_table[240]="rt_tgsigqueueinfo"
syscall_table[123]="sched_getaffinity"
syscall_table[121]="sched_getparam"
syscall_table[125]="sched_get_priority_max"
syscall_table[126]="sched_get_priority_min"
syscall_table[120]="sched_getscheduler"
syscall_table[127]="sched_rr_get_interval"
syscall_table[122]="sched_setaffinity"
syscall_table[118]="sched_setparam"
syscall_table[119]="sched_setscheduler"
syscall_table[124]="sched_yield"
syscall_table[1067]="select"
syscall_table[191]="semctl"
syscall_table[190]="semget"
syscall_table[193]="semop"
syscall_table[192]="semtimedop"
syscall_table[1074]="send"
syscall_table[71]="sendfile"
syscall_table[1046]="sendfile64"
syscall_table[211]="sendmsg"
syscall_table[206]="sendto"
syscall_table[162]="setdomainname"
syscall_table[152]="setfsgid"
syscall_table[151]="setfsuid"
syscall_table[144]="setgid"
syscall_table[159]="setgroups"
syscall_table[161]="sethostname"
syscall_table[103]="setitimer"
syscall_table[237]="set_mempolicy"
syscall_table[154]="setpgid"
syscall_table[140]="setpriority"
syscall_table[143]="setregid"
syscall_table[149]="setresgid"
syscall_table[147]="setresuid"
syscall_table[145]="setreuid"
syscall_table[164]="setrlimit"
syscall_table[99]="set_robust_list"
syscall_table[157]="setsid"
syscall_table[208]="setsockopt"
syscall_table[96]="set_tid_address"
syscall_table[170]="settimeofday"
syscall_table[146]="setuid"
syscall_table[5]="setxattr"
syscall_table[196]="shmat"
syscall_table[195]="shmctl"
syscall_table[197]="shmdt"
syscall_table[194]="shmget"
syscall_table[210]="shutdown"
syscall_table[132]="sigaltstack"
syscall_table[1045]="signalfd"
syscall_table[74]="signalfd4"
syscall_table[1999]="sigreturn"
syscall_table[198]="socket"
syscall_table[199]="socketpair"
syscall_table[76]="splice"
syscall_table[1038]="stat"
syscall_table[1049]="stat64"
syscall_table[43]="statfs"
syscall_table[1056]="statfs64"
syscall_table[225]="swapoff"
syscall_table[224]="swapon"
syscall_table[1036]="symlink"
syscall_table[36]="symlinkat"
syscall_table[81]="sync"
syscall_table[84]="sync_file_range2"
syscall_table[1078]="_sysctl"
syscall_table[179]="sysinfo"
syscall_table[116]="syslog"
syscall_table[77]="tee"
syscall_table[131]="tgkill"
syscall_table[1062]="time"
syscall_table[107]="timer_create"
syscall_table[111]="timer_delete"
syscall_table[85]="timerfd_create"
syscall_table[87]="timerfd_gettime"
syscall_table[86]="timerfd_settime"
syscall_table[109]="timer_getoverrun"
syscall_table[108]="timer_gettime"
syscall_table[110]="timer_settime"
syscall_table[153]="times"
syscall_table[130]="tkill"
syscall_table[45]="truncate"
syscall_table[1048]="truncate64"
syscall_table[166]="umask"
syscall_table[1076]="umount"
syscall_table[39]="umount2"
syscall_table[160]="uname"
syscall_table[1026]="unlink"
syscall_table[35]="unlinkat"
syscall_table[97]="unshare"
syscall_table[1077]="uselib"
syscall_table[1070]="ustat"
syscall_table[1063]="utime"
syscall_table[88]="utimensat"
syscall_table[1037]="utimes"
syscall_table[1071]="vfork"
syscall_table[58]="vhangup"
syscall_table[75]="vmsplice"
syscall_table[260]="wait4"
syscall_table[95]="waitid"
syscall_table[64]="write"
syscall_table[66]="writev"
def get(no):
    """Return the syscall name for syscall number `no`."""
    return syscall_table[no]
| alexpark07/ARMSCGen | shellcodes/arm64/syscall.py | Python | gpl-2.0 | 9,178 | 0.032905 |
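As an aside, here is a minimal sketch of how a number-to-name table like the one above can also be used in the other direction. The `number_of` helper and the `import syscall` module name are hypothetical illustrations, not part of ARMSCGen:

# Hypothetical reverse lookup over the table defined in syscall.py above.
# Assumes that module is importable as `syscall`.
import syscall

def number_of(name):
    # Return the first syscall number whose name matches `name`.
    for no, sysname in syscall.syscall_table.items():
        if sysname == name:
            return no
    raise KeyError(name)

print(syscall.get(221))      # prints "execve"
print(number_of('execve'))   # prints 221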