text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34)
---|---|---|---|---|---|---|
import os
import battlenet
try:
import unittest2 as unittest
except ImportError:
import unittest as unittest
PUBLIC_KEY = os.environ.get('BNET_PUBLIC_KEY')
PRIVATE_KEY = os.environ.get('BNET_PRIVATE_KEY')
class RegionsTest(unittest.TestCase):
def setUp(self):
self.connection = battlenet.Connection(public_key=PUBLIC_KEY, private_key=PRIVATE_KEY)
def test_us(self):
realms = self.connection.get_all_realms(battlenet.UNITED_STATES)
self.assertTrue(len(realms) > 0)
def test_eu(self):
realms = self.connection.get_all_realms(battlenet.EUROPE)
self.assertTrue(len(realms) > 0)
def test_kr(self):
realms = self.connection.get_all_realms(battlenet.KOREA)
self.assertTrue(len(realms) > 0)
def test_tw(self):
realms = self.connection.get_all_realms(battlenet.TAIWAN)
self.assertTrue(len(realms) > 0)
def test_cn(self):
realms = self.connection.get_all_realms(battlenet.CHINA)
self.assertTrue(len(realms) > 0)
def tearDown(self):
del self.connection
if __name__ == '__main__':
unittest.main()
| vishnevskiy/battlenet | tests/test_regions.py | Python | mit | 1,132 | 0.001767 |
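A minimal usage sketch of the Connection API exercised by the tests above (assuming the same battlenet package, the BNET_* environment variables, network access, and valid API keys):

import os
import battlenet

connection = battlenet.Connection(
    public_key=os.environ.get('BNET_PUBLIC_KEY'),
    private_key=os.environ.get('BNET_PRIVATE_KEY'))
realms = connection.get_all_realms(battlenet.UNITED_STATES)
print("%d realms in the US region" % len(realms))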
"""Disassembler of Python byte code into mnemonics."""
import sys
import types
from opcode_25 import *
from opcode_25 import __all__ as _opcodes_all
__all__ = ["dis","disassemble","distb","disco"] + _opcodes_all
del _opcodes_all
def dis(x=None):
"""Disassemble classes, methods, functions, or code.
With no argument, disassemble the last traceback.
"""
if x is None:
distb()
return
if type(x) is types.InstanceType:
x = x.__class__
if hasattr(x, 'im_func'):
x = x.im_func
if hasattr(x, 'func_code'):
x = x.func_code
if hasattr(x, '__dict__'):
items = x.__dict__.items()
items.sort()
for name, x1 in items:
if type(x1) in (types.MethodType,
types.FunctionType,
types.CodeType,
types.ClassType):
print "Disassembly of %s:" % name
try:
dis(x1)
except TypeError, msg:
print "Sorry:", msg
print
elif hasattr(x, 'co_code'):
disassemble(x)
elif isinstance(x, str):
disassemble_string(x)
else:
raise TypeError, \
"don't know how to disassemble %s objects" % \
type(x).__name__
def distb(tb=None):
"""Disassemble a traceback (default: last traceback)."""
if tb is None:
try:
tb = sys.last_traceback
except AttributeError:
raise RuntimeError, "no last traceback to disassemble"
while tb.tb_next: tb = tb.tb_next
disassemble(tb.tb_frame.f_code, tb.tb_lasti)
def disassemble(co, lasti=-1):
"""Disassemble a code object."""
code = co.co_code
byte_increments = [ord(c) for c in co.co_lnotab[0::2]]
line_increments = [ord(c) for c in co.co_lnotab[1::2]]
table_length = len(byte_increments) # == len(line_increments)
lineno = co.co_firstlineno
table_index = 0
while (table_index < table_length
and byte_increments[table_index] == 0):
lineno += line_increments[table_index]
table_index += 1
addr = 0
line_incr = 0
labels = findlabels(code)
n = len(code)
i = 0
extended_arg = 0
free = None
while i < n:
c = code[i]
op = ord(c)
if i >= addr:
lineno += line_incr
while table_index < table_length:
addr += byte_increments[table_index]
line_incr = line_increments[table_index]
table_index += 1
if line_incr:
break
else:
addr = sys.maxint
if i > 0:
print
print "%3d"%lineno,
else:
print ' ',
if i == lasti: print '-->',
else: print ' ',
if i in labels: print '>>',
else: print ' ',
print `i`.rjust(4),
print opname[op].ljust(20),
i = i+1
if op >= HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
extended_arg = 0
i = i+2
if op == EXTENDED_ARG:
extended_arg = oparg*65536L
print `oparg`.rjust(5),
if op in hasconst:
print '(' + `co.co_consts[oparg]` + ')',
elif op in hasname:
print '(' + co.co_names[oparg] + ')',
elif op in hasjrel:
print '(to ' + `i + oparg` + ')',
elif op in haslocal:
print '(' + co.co_varnames[oparg] + ')',
elif op in hascompare:
print '(' + cmp_op[oparg] + ')',
elif op in hasfree:
if free is None:
free = co.co_cellvars + co.co_freevars
print '(' + free[oparg] + ')',
print
def disassemble_string(code, lasti=-1, varnames=None, names=None,
constants=None):
labels = findlabels(code)
n = len(code)
i = 0
while i < n:
c = code[i]
op = ord(c)
if op == opmap['SET_LINENO'] and i > 0:
print # Extra blank line
if i == lasti: print '-->',
else: print ' ',
if i in labels: print '>>',
else: print ' ',
print `i`.rjust(4),
print opname[op].ljust(15),
i = i+1
if op >= HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i+1])*256
i = i+2
print `oparg`.rjust(5),
if op in hasconst:
if constants:
print '(' + `constants[oparg]` + ')',
else:
print '(%d)'%oparg,
elif op in hasname:
if names is not None:
print '(' + names[oparg] + ')',
else:
print '(%d)'%oparg,
elif op in hasjrel:
print '(to ' + `i + oparg` + ')',
elif op in haslocal:
if varnames:
print '(' + varnames[oparg] + ')',
else:
print '(%d)' % oparg,
elif op in hascompare:
print '(' + cmp_op[oparg] + ')',
print
disco = disassemble # XXX For backwards compatibility
def findlabels(code):
"""Detect all offsets in a byte code which are jump targets.
Return the list of offsets.
"""
labels = []
n = len(code)
i = 0
while i < n:
c = code[i]
op = ord(c)
i = i+1
if op >= HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i+1])*256
i = i+2
label = -1
if op in hasjrel:
label = i+oparg
elif op in hasjabs:
label = oparg
if label >= 0:
if label not in labels:
labels.append(label)
return labels
def _test():
"""Simple test program to disassemble a file."""
if sys.argv[1:]:
if sys.argv[2:]:
sys.stderr.write("usage: python dis.py [-|file]\n")
sys.exit(2)
fn = sys.argv[1]
if not fn or fn == "-":
fn = None
else:
fn = None
if fn is None:
f = sys.stdin
else:
f = open(fn)
source = f.read()
if fn is not None:
f.close()
else:
fn = "<stdin>"
code = compile(source, fn, "exec")
dis(code)
if __name__ == "__main__":
_test()
| whymirror/unholy | decompyle/decompyle/dis_23.py | Python | mit | 6,551 | 0.005343 |
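A quick sketch of driving this disassembler under Python 2 (the module name dis_23 and its opcode_25 companion are assumed importable from the repository layout above):

import dis_23

def add(a, b):
    return a + b

dis_23.dis(add)  # prints one line per bytecode instruction of add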
#coding:utf-8
import sys,os
import copy
import re
import itertools
import net
"""basic from web.py: makes web apps (http://webpy.org)"""
__version__ = "0.37"
__author__ = [
"Aaron Swartz <me@aaronsw.com>",
"Anand Chitipothu <anandology@gmail.com>"
]
__license__ = "public domain"
__contributors__ = "see https://github.com/webpy/webpy"
class Storage(dict):
def __getattr__(self, key):
try:
return self[key]
except KeyError, k:
raise AttributeError, k
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError, k:
raise AttributeError, k
def __repr__(self):
return '<Storage ' + dict.__repr__(self) + '>'
storage = Storage
def attrget(obj, attr, value=None):
try:
if hasattr(obj, 'has_key') and obj.has_key(attr):
return obj[attr]
except TypeError:
# Handle the case where has_key takes different number of arguments.
# This is the case with Model objects on appengine. See #134
pass
if hasattr(obj, attr):
return getattr(obj, attr)
return value
def safeunicode(obj, encoding='utf-8'):
t = type(obj)
if t is unicode:
return obj
elif t is str:
return obj.decode(encoding)
elif t in [int, float, bool]:
return unicode(obj)
elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):
return unicode(obj)
else:
return str(obj).decode(encoding)
def safestr(obj, encoding='utf-8'):
if isinstance(obj, unicode):
return obj.encode(encoding)
elif isinstance(obj, str):
return obj
elif hasattr(obj, 'next'): # iterator
return itertools.imap(safestr, obj)
else:
return str(obj)
def autoassign(self, locals):
for (key, value) in locals.iteritems():
if key == 'self':
continue
setattr(self, key, value)
class Form(object):
def __init__(self, *inputs, **kw):
self.inputs = inputs
self.valid = True
self.note = None
self.validators = kw.pop('validators', [])
self.action = kw.pop("action","")
self.title = kw.pop("title","")
self.method = kw.pop("method","post")
self.onsubmit = kw.pop("onsubmit","")
self.error = None
def __call__(self, x=None):
o = copy.deepcopy(self)
if x: o.validates(x)
return o
def render(self):
out = ''
out += self.rendernote(self.note)
out += '<table class="formtab table table-bordered">\n'
out += '<thead ><tr class=active><th>%s</th><th class=rtd><a class="btn"\
href="javascript:history.go(-1);">%s</a></th></tr></thead>\n'%(self.title, net.websafe("返回"))
for i in self.inputs:
html = safeunicode(i.pre) + i.render() + self.rendernote(i.note) + safeunicode(i.post)
if i.is_hidden():
out += ' <tr style="display: none;"><td></td><td>%s</td></tr>\n' % (html)
else:
out += ' <tr><td>%s</td><td>%s</td></tr>\n' % ( net.websafe(i.description),html)
if self.error:
out += ' <tr><td colspan=2>%s</td></tr>\n' % ( self.rendernote(self.error))
out += "</table>"
return out
def render_css(self):
out = []
out.append(self.rendernote(self.note))
for i in self.inputs:
out.append(' <div class="form-group">\n')
if not i.is_hidden():
out.append(' <label class="col-sm-4 control-label" id="lab_%s" for="%s">%s</label>\n' % (i.id,i.id, net.websafe(i.description)))
out.append(i.pre)
out.append(' <div class="col-sm-6">\n')
out.append(" %s\n"%i.render())
out.append(' </div>\n')
if i.help:
out.append(' <a id="%s_help" href="javascript:void(0);" data-container="body" data-toggle="popover" data-trigger="focus" data-placement="top" data-content="%s">\n'%(i.id,i.help))
out.append(' <span class="input-help glyphicon glyphicon-question-sign"></span></a>\n')
out.append(self.rendernote(i.note))
out.append(i.post)
out.append(' </div>\n')
if i.hr:
out.append("<hr/>\n")
return ''.join(out)
def rendernote(self, note):
if note: return '<span class="wrong">%s</span>' % net.websafe(note)
else: return ""
def validates(self, source=None, _validate=True, **kw):
source = source or kw
out = True
for i in self.inputs:
v = attrget(source, i.name)
if _validate:
out = i.validate(v) and out
else:
i.set_value(v)
if _validate:
out = out and self._validate(source)
self.valid = out
return out
def _validate(self, value):
self.value = value
for v in self.validators:
if not v.valid(value):
self.note = v.msg
return False
return True
def fill(self, source=None, **kw):
return self.validates(source, _validate=False, **kw)
def __getitem__(self, i):
for x in self.inputs:
if x.name == i: return x
raise KeyError, i
def __getattr__(self, name):
# don't interfere with deepcopy
inputs = self.__dict__.get('inputs') or []
for x in inputs:
if x.name == name: return x
raise AttributeError, name
def get(self, i, default=None):
try:
return self[i]
except KeyError:
return default
def _get_d(self): #@@ should really be form.attr, no?
return storage([(i.name, i.get_value()) for i in self.inputs])
d = property(_get_d)
class Input(object):
def __init__(self, name, *validators, **attrs):
self.name = name
self.validators = validators
self.attrs = attrs = AttributeList(attrs)
self.description = attrs.pop('description', name)
self.help = attrs.pop("help","")
self.value = attrs.pop('value', None)
self.pre = attrs.pop('pre', "")
self.post = attrs.pop('post', "")
self.hr = attrs.pop('hr',False)
self.note = None
self.id = attrs.setdefault('id', self.get_default_id())
if 'class_' in attrs:
attrs['class'] = attrs['class_']
del attrs['class_']
attrs['placeholder'] = self.description
for vd in self.validators:
attrs['placeholder'] += ", "+vd.msg
def is_hidden(self):
return False
def get_type(self):
raise NotImplementedError
def get_default_id(self):
return self.name
def validate(self, value):
self.set_value(value)
for v in self.validators:
if not v.valid(value):
self.note = v.msg
return False
return True
def set_value(self, value):
self.value = value
def get_value(self):
return self.value
def render(self):
attrs = self.attrs.copy()
attrs['type'] = self.get_type()
if self.value is not None:
attrs['value'] = self.value
attrs['name'] = self.name
return '<input %s/>' % attrs
def rendernote(self, note):
if note: return '<strong class="wrong">%s</strong>' % net.websafe(note)
else: return ""
def addatts(self):
# add leading space for backward-compatibility
return " " + str(self.attrs)
class AttributeList(dict):
def copy(self):
return AttributeList(self)
def __str__(self):
return " ".join(['%s="%s"' % (k, net.websafe(v)) for k, v in self.items()])
def __repr__(self):
return '<attrs: %s>' % repr(str(self))
class Textbox(Input):
def get_type(self):
return 'text'
class Password(Input):
def get_type(self):
return 'password'
class Textarea(Input):
def render(self):
attrs = self.attrs.copy()
attrs['name'] = self.name
value = net.websafe(self.value or '')
return '<textarea %s>%s</textarea>' % (attrs, value)
class Dropdown(Input):
def __init__(self, name, args, *validators, **attrs):
self.args = args
super(Dropdown, self).__init__(name, *validators, **attrs)
def render(self):
attrs = self.attrs.copy()
attrs['name'] = self.name
x = '<select %s>\n' % attrs
for arg in self.args:
x += self._render_option(arg)
x += ' </select>\n'
return x
def _render_option(self, arg, indent=' '):
if isinstance(arg, (tuple, list)):
value, desc= arg
else:
value, desc = arg, arg
if self.value == value or (isinstance(self.value, list) and value in self.value):
select_p = ' selected="selected"'
else:
select_p = ''
return indent + ' <option%s value="%s">%s</option>\n' % (select_p, net.websafe(value), net.websafe(desc))
class GroupedDropdown(Dropdown):
def __init__(self, name, args, *validators, **attrs):
self.args = args
super(Dropdown, self).__init__(name, *validators, **attrs)
def render(self):
attrs = self.attrs.copy()
attrs['name'] = self.name
x = ' <select %s>\n' % attrs
for label, options in self.args:
x += ' <optgroup label="%s">\n' % net.websafe(label)
for arg in options:
x += self._render_option(arg, indent = ' ')
x += ' </optgroup>\n'
x += ' </select>\n'
return x
class Radio(Input):
def __init__(self, name, args, *validators, **attrs):
self.args = args
super(Radio, self).__init__(name, *validators, **attrs)
def render(self):
x = '<span>'
for arg in self.args:
if isinstance(arg, (tuple, list)):
value, desc= arg
else:
value, desc = arg, arg
attrs = self.attrs.copy()
attrs['name'] = self.name
attrs['type'] = 'radio'
attrs['value'] = value
if self.value == value:
attrs['checked'] = 'checked'
x += '<input %s/> %s' % (attrs, net.websafe(desc))
x += '</span>'
return x
class Checkbox(Input):
def __init__(self, name, *validators, **attrs):
self.checked = attrs.pop('checked', False)
Input.__init__(self, name, *validators, **attrs)
def get_default_id(self):
value = safestr(self.value or "")
return self.name + '_' + value.replace(' ', '_')
def render(self):
attrs = self.attrs.copy()
attrs['type'] = 'checkbox'
attrs['name'] = self.name
attrs['value'] = self.value
if self.checked:
attrs['checked'] = 'checked'
return '<input %s/>' % attrs
def set_value(self, value):
self.checked = bool(value)
def get_value(self):
return self.checked
class Button(Input):
def __init__(self, name, *validators, **attrs):
super(Button, self).__init__(name, *validators, **attrs)
self.description = ""
def render(self):
attrs = self.attrs.copy()
attrs['name'] = self.name
if self.value is not None:
attrs['value'] = self.value
html = attrs.pop('html', None) or net.websafe(self.name)
return '<button %s>%s</button>' % (attrs, html)
class Hidden(Input):
def is_hidden(self):
return True
def get_type(self):
return 'hidden'
class File(Input):
def get_type(self):
return 'file'
class Validator:
def __deepcopy__(self, memo): return copy.copy(self)
def __init__(self, msg, test, jstest=None): autoassign(self, locals())
def valid(self, value):
try: return self.test(value)
except: return False
notnull = Validator("Required", bool)
class regexp(Validator):
def __init__(self, rexp, msg):
self.rexp = re.compile(rexp)
self.msg = msg
def valid(self, value):
return bool(self.rexp.match(value))
if __name__ == "__main__":
import doctest
doctest.testmod()
| davislidaqing/Mcoderadius | toughradius/console/libs/pyforms/__init__.py | Python | agpl-3.0 | 12,706 | 0.009762 |
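A small sketch of defining and validating a form with the module above (Python 2, matching the source; the import path and the availability of its net helper module are assumptions):

from pyforms import Form, Textbox, Password, notnull

login = Form(
    Textbox('username', notnull, description='User name'),
    Password('password', notnull, description='Password'),
    title='Login')

form = login()  # __call__ returns a deep copy of the form
if form.validates({'username': 'alice', 'password': 's3cret'}):
    print form.d  # Storage mapping input names to their values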
#!/usr/bin/env python
"""
@package ion.agents.platform.platform_agent_stream_publisher
@file ion/agents/platform/platform_agent_stream_publisher.py
@author Carlos Rueda
@brief Stream publishing support for platform agents.
"""
__author__ = 'Carlos Rueda'
import logging
import uuid
from coverage_model.parameter import ParameterDictionary
import numpy
from pyon.public import log
from pyon.core.bootstrap import get_obj_registry
from pyon.core.object import IonObjectDeserializer
from pyon.ion.stream import StreamPublisher
from ion.services.dm.utility.granule.record_dictionary import RecordDictionaryTool
from interface.objects import StreamRoute
class PlatformAgentStreamPublisher(object):
"""
Stream publishing support for platform agents.
"""
def __init__(self, agent):
self._agent = agent
self._platform_id = agent._platform_id
self.resource_id = agent.resource_id
self._pp = agent._pp
self.CFG = agent.CFG
# Dictionaries used for data publishing.
self._data_streams = {}
self._param_dicts = {}
self._stream_defs = {}
self._data_publishers = {}
self._connection_ID = None
self._connection_index = {}
# Set of parameter names received in event notifications but not
# configured. Lets us log the corresponding warning only once.
self._unconfigured_params = set()
stream_info = self.CFG.get('stream_config', None)
if stream_info is None:
# should not happen: PlatformAgent._validate_configuration validates this.
log.error("%r: No stream_config given in CFG", self._platform_id)
return
for stream_name, stream_config in stream_info.iteritems():
self._construct_stream_and_publisher(stream_name, stream_config)
log.debug("%r: PlatformAgentStreamPublisher complete", self._platform_id)
def _construct_stream_and_publisher(self, stream_name, stream_config):
if log.isEnabledFor(logging.TRACE): # pragma: no cover
log.trace("%r: _construct_stream_and_publisher: "
"stream_name:%r, stream_config:\n%s",
self._platform_id, stream_name,
self._pp.pformat(stream_config))
decoder = IonObjectDeserializer(obj_registry=get_obj_registry())
if 'stream_def_dict' not in stream_config:
# should not happen: PlatformAgent._validate_configuration validates this.
log.error("'stream_def_dict' key not in configuration for stream %r" % stream_name)
return
stream_def_dict = stream_config['stream_def_dict']
stream_def_dict['type_'] = 'StreamDefinition'
stream_def_obj = decoder.deserialize(stream_def_dict)
self._stream_defs[stream_name] = stream_def_obj
routing_key = stream_config['routing_key']
stream_id = stream_config['stream_id']
exchange_point = stream_config['exchange_point']
parameter_dictionary = stream_def_dict['parameter_dictionary']
log.debug("%r: got parameter_dictionary from stream_def_dict", self._platform_id)
self._data_streams[stream_name] = stream_id
self._param_dicts[stream_name] = ParameterDictionary.load(parameter_dictionary)
stream_route = StreamRoute(exchange_point=exchange_point, routing_key=routing_key)
publisher = self._create_publisher(stream_id, stream_route)
self._data_publishers[stream_name] = publisher
log.debug("%r: created publisher for stream_name=%r", self._platform_id, stream_name)
def _create_publisher(self, stream_id, stream_route):
publisher = StreamPublisher(process=self._agent,
stream_id=stream_id,
stream_route=stream_route)
return publisher
def reset_connection(self):
self._connection_ID = uuid.uuid4()
self._connection_index = {stream_name : 0 for
stream_name in self._data_streams.keys()}
log.debug("%r: reset_connection: connection_id=%s, connection_index=%s",
self._platform_id, self._connection_ID.hex, self._connection_index)
def handle_attribute_value_event(self, driver_event):
if log.isEnabledFor(logging.TRACE): # pragma: no cover
# show driver_event as retrieved (driver_event.vals_dict might be large)
log.trace("%r: driver_event = %s", self._platform_id, driver_event)
log.trace("%r: vals_dict:\n%s",
self._platform_id, self._pp.pformat(driver_event.vals_dict))
elif log.isEnabledFor(logging.DEBUG): # pragma: no cover
log.debug("%r: driver_event = %s", self._platform_id, driver_event.brief())
stream_name = driver_event.stream_name
publisher = self._data_publishers.get(stream_name, None)
if not publisher:
log.warn('%r: no publisher configured for stream_name=%r. '
'Configured streams are: %s',
self._platform_id, stream_name, self._data_publishers.keys())
return
param_dict = self._param_dicts[stream_name]
stream_def = self._stream_defs[stream_name]
if isinstance(stream_def, str):
rdt = RecordDictionaryTool(param_dictionary=param_dict.dump(),
stream_definition_id=stream_def)
else:
rdt = RecordDictionaryTool(stream_definition=stream_def)
self._publish_granule_with_multiple_params(publisher, driver_event,
param_dict, rdt)
def _publish_granule_with_multiple_params(self, publisher, driver_event,
param_dict, rdt):
stream_name = driver_event.stream_name
pub_params = {}
selected_timestamps = None
for param_name, param_value in driver_event.vals_dict.iteritems():
param_name = param_name.lower()
if not param_name in rdt:
if param_name not in self._unconfigured_params:
# an unrecognized attribute for this platform:
self._unconfigured_params.add(param_name)
log.warn('%r: got attribute value event for unconfigured parameter %r in stream %r'
' rdt.keys=%s',
self._platform_id, param_name, stream_name, list(rdt.iterkeys()))
continue
# separate values and timestamps:
vals, timestamps = zip(*param_value)
self._agent._dispatch_value_alerts(stream_name, param_name, vals)
# Use fill_value in context to replace any None values:
param_ctx = param_dict.get_context(param_name)
if param_ctx:
fill_value = param_ctx.fill_value
log.debug("%r: param_name=%r fill_value=%s",
self._platform_id, param_name, fill_value)
# do the replacement:
vals = [fill_value if val is None else val for val in vals]
if log.isEnabledFor(logging.TRACE): # pragma: no cover
log.trace("%r: vals array after replacing None with fill_value:\n%s",
self._platform_id, self._pp.pformat(vals))
else:
log.warn("%r: unexpected: parameter context not found for %r",
self._platform_id, param_name)
# Set values in rdt:
rdt[param_name] = numpy.array(vals)
pub_params[param_name] = vals
selected_timestamps = timestamps
if selected_timestamps is None:
# that is, all param_name's were unrecognized; just return:
return
self._publish_granule(stream_name, publisher, param_dict, rdt,
pub_params, selected_timestamps)
def _publish_granule(self, stream_name, publisher, param_dict, rdt,
pub_params, timestamps):
# Set timestamp info in rdt:
if param_dict.temporal_parameter_name is not None:
temp_param_name = param_dict.temporal_parameter_name
rdt[temp_param_name] = numpy.array(timestamps)
#@TODO: Ensure that the preferred_timestamp field is correct
rdt['preferred_timestamp'] = numpy.array(['internal_timestamp'] * len(timestamps))
if log.isEnabledFor(logging.DEBUG): # pragma: no cover
log.debug('Preferred timestamp is unresolved, using "internal_timestamp"')
else:
log.warn("%r: Not including timestamp info in granule: "
"temporal_parameter_name not defined in parameter dictionary",
self._platform_id)
g = rdt.to_granule(data_producer_id=self.resource_id,
connection_id=self._connection_ID.hex,
connection_index=str(self._connection_index[stream_name]))
try:
publisher.publish(g)
if log.isEnabledFor(logging.TRACE): # pragma: no cover
log.trace("%r: Platform agent published data granule on stream %r: "
"%s timestamps: %s",
self._platform_id, stream_name,
self._pp.pformat(pub_params), self._pp.pformat(timestamps))
elif log.isEnabledFor(logging.DEBUG): # pragma: no cover
summary_params = {attr_id: "(%d vals)" % len(vals)
for attr_id, vals in pub_params.iteritems()}
summary_timestamps = "(%d vals)" % len(timestamps)
log.debug("%r: Platform agent published data granule on stream %r: "
"%s timestamps: %s",
self._platform_id, stream_name,
summary_params, summary_timestamps)
log.debug("%r: granule published with connection_id=%s, connection_index=%i",
self._platform_id,
self._connection_ID.hex,
self._connection_index[stream_name])
self._connection_index[stream_name] += 1
except Exception:
log.exception("%r: Platform agent could not publish data on stream %s.",
self._platform_id, stream_name)
def reset(self):
self._unconfigured_params.clear()
| ooici/coi-services | ion/agents/platform/platform_agent_stream_publisher.py | Python | bsd-2-clause | 10,637 | 0.003572 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from __future__ import print_function
import os
import re
import sys
import llnl.util.tty as tty
from llnl.util.lang import attr_setdefault, index_by
from llnl.util.tty.colify import colify
from llnl.util.tty.color import colorize
from llnl.util.filesystem import working_dir
import spack
import spack.config
import spack.spec
import spack.store
#
# Settings for commands that modify configuration
#
# Commands that modify configuration by default modify the *highest*
# priority scope.
default_modify_scope = spack.config.highest_precedence_scope().name
# Commands that list configuration list *all* scopes by default.
default_list_scope = None
# cmd has a submodule called "list" so preserve the python list module
python_list = list
# Patterns to ignore in the commands directory when looking for commands.
ignore_files = r'^\.|^__init__.py$|^#'
SETUP_PARSER = "setup_parser"
DESCRIPTION = "description"
command_path = os.path.join(spack.lib_path, "spack", "cmd")
commands = []
for file in os.listdir(command_path):
if file.endswith(".py") and not re.search(ignore_files, file):
cmd = re.sub(r'.py$', '', file)
commands.append(cmd)
commands.sort()
def remove_options(parser, *options):
"""Remove some options from a parser."""
for option in options:
for action in parser._actions:
if vars(action)['option_strings'][0] == option:
parser._handle_conflict_resolve(None, [(option, action)])
break
def get_python_name(name):
"""Commands can have '-' in their names, unlike Python identifiers."""
return name.replace("-", "_")
def get_module(name):
"""Imports the module for a particular command name and returns it."""
module_name = "%s.%s" % (__name__, name)
module = __import__(module_name,
fromlist=[name, SETUP_PARSER, DESCRIPTION],
level=0)
attr_setdefault(module, SETUP_PARSER, lambda *args: None) # null-op
attr_setdefault(module, DESCRIPTION, "")
fn_name = get_python_name(name)
if not hasattr(module, fn_name):
tty.die("Command module %s (%s) must define function '%s'." %
(module.__name__, module.__file__, fn_name))
return module
def get_command(name):
"""Imports the command's function from a module and returns it."""
python_name = get_python_name(name)
return getattr(get_module(python_name), python_name)
def parse_specs(args, **kwargs):
"""Convenience function for parsing arguments from specs. Handles common
exceptions and dies if there are errors.
"""
concretize = kwargs.get('concretize', False)
normalize = kwargs.get('normalize', False)
try:
specs = spack.spec.parse(args)
for spec in specs:
if concretize:
spec.concretize() # implies normalize
elif normalize:
spec.normalize()
return specs
except spack.parse.ParseError as e:
tty.error(e.message, e.string, e.pos * " " + "^")
sys.exit(1)
except spack.spec.SpecError as e:
tty.error(e.message)
sys.exit(1)
def elide_list(line_list, max_num=10):
"""Takes a long list and limits it to a smaller number of elements,
replacing intervening elements with '...'. For example::
elide_list([1,2,3,4,5,6], 4)
gives::
[1, 2, 3, '...', 6]
"""
if len(line_list) > max_num:
return line_list[:max_num - 1] + ['...'] + line_list[-1:]
else:
return line_list
def disambiguate_spec(spec):
matching_specs = spack.store.db.query(spec)
if not matching_specs:
tty.die("Spec '%s' matches no installed packages." % spec)
elif len(matching_specs) > 1:
args = ["%s matches multiple packages." % spec,
"Matching packages:"]
args += [colorize(" @K{%s} " % s.dag_hash(7)) +
s.cformat('$_$@$%@$=') for s in matching_specs]
args += ["Use a more specific spec."]
tty.die(*args)
return matching_specs[0]
def gray_hash(spec, length):
return colorize('@K{%s}' % spec.dag_hash(length))
def display_specs(specs, args=None, **kwargs):
"""Display human readable specs with customizable formatting.
Prints the supplied specs to the screen, formatted according to the
arguments provided.
Specs are grouped by architecture and compiler, and columnized if
possible. There are three possible "modes":
* ``short`` (default): short specs with name and version, columnized
* ``paths``: Two columns: one for specs, one for paths
* ``deps``: Dependency-tree style, like ``spack spec``; can get long
Options can add more information to the default display. Options can
be provided either as keyword arguments or as an argparse namespace.
Keyword arguments take precedence over settings in the argparse
namespace.
Args:
specs (list of spack.spec.Spec): the specs to display
args (optional argparse.Namespace): namespace containing
formatting arguments
Keyword Args:
mode (str): Either 'short', 'paths', or 'deps'
long (bool): Display short hashes with specs
very_long (bool): Display full hashes with specs (supersedes ``long``)
namespace (bool): Print namespaces along with names
show_flags (bool): Show compiler flags with specs
variants (bool): Show variants with specs
"""
def get_arg(name, default=None):
"""Prefer kwargs, then args, then default."""
if name in kwargs:
return kwargs.get(name)
elif args is not None:
return getattr(args, name, default)
else:
return default
mode = get_arg('mode', 'short')
hashes = get_arg('long', False)
namespace = get_arg('namespace', False)
flags = get_arg('show_flags', False)
full_compiler = get_arg('show_full_compiler', False)
variants = get_arg('variants', False)
hlen = 7
if get_arg('very_long', False):
hashes = True
hlen = None
nfmt = '.' if namespace else '_'
ffmt = ''
if full_compiler or flags:
ffmt += '$%'
if full_compiler:
ffmt += '@'
ffmt += '+'
vfmt = '$+' if variants else ''
format_string = '$%s$@%s%s' % (nfmt, ffmt, vfmt)
# Make a dict with specs keyed by architecture and compiler.
index = index_by(specs, ('architecture', 'compiler'))
# Traverse the index and print out each package
for i, (architecture, compiler) in enumerate(sorted(index)):
if i > 0:
print()
header = "%s{%s} / %s{%s}" % (spack.spec.architecture_color,
architecture, spack.spec.compiler_color,
compiler)
tty.hline(colorize(header), char='-')
specs = index[(architecture, compiler)]
specs.sort()
abbreviated = [s.cformat(format_string) for s in specs]
if mode == 'paths':
# Print one spec per line along with prefix path
width = max(len(s) for s in abbreviated)
width += 2
format = " %%-%ds%%s" % width
for abbrv, spec in zip(abbreviated, specs):
prefix = gray_hash(spec, hlen) if hashes else ''
print(prefix + (format % (abbrv, spec.prefix)))
elif mode == 'deps':
for spec in specs:
print(spec.tree(
format=format_string,
indent=4,
prefix=(lambda s: gray_hash(s, hlen)) if hashes else None))
elif mode == 'short':
# Print columns of output if not printing flags
if not flags and not full_compiler:
def fmt(s):
string = ""
if hashes:
string += gray_hash(s, hlen) + ' '
string += s.cformat('$-%s$@%s' % (nfmt, vfmt))
return string
colify(fmt(s) for s in specs)
# Print one entry per line if including flags
else:
for spec in specs:
# Print the hash if necessary
hsh = gray_hash(spec, hlen) + ' ' if hashes else ''
print(hsh + spec.cformat(format_string) + '\n')
else:
raise ValueError(
"Invalid mode for display_specs: %s. Must be one of (paths,"
"deps, short)." % mode)
def spack_is_git_repo():
"""Ensure that this instance of Spack is a git clone."""
with working_dir(spack.prefix):
return os.path.isdir('.git')
| skosukhin/spack | lib/spack/spack/cmd/__init__.py | Python | lgpl-2.1 | 9,998 | 0.0004 |
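A short check of elide_list, mirroring the behavior documented in its docstring (the spack.cmd import path is taken from the file location above and assumes a Spack checkout on sys.path):

from spack.cmd import elide_list

print(elide_list([1, 2, 3, 4, 5, 6], 4))  # -> [1, 2, 3, '...', 6]
print(elide_list([1, 2, 3], 4))           # -> [1, 2, 3]; short lists pass through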
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Remote server administration with the paramiko module,
logging in with an SSH key.
'''
import paramiko
private_key_path = r'D:\workspace\Python-oldboy\day07\zhangyage_pass'
#key = paramiko.RSAKey.from_private_key_file(filename, password)
key = paramiko.RSAKey.from_private_key_file(private_key_path,'12345678') #private_key_path is the location of the key file; '12345678' is the key's passphrase
ssh = paramiko.SSHClient() #instantiate a client
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) #automatically answer "yes": on a first connection an SSH client is asked to confirm the host key
ssh.connect('192.168.75.133', 22, username='root', pkey=key)
stdin,stdout,stderr = ssh.exec_command('ifconfig') #exec_command returns a tuple of three streams, unpacked into three variables
print stdout.read()
ssh.close()
| zhangyage/Python-oldboy | day07/ssh_client2.py | Python | apache-2.0 | 880 | 0.025074 |
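A companion sketch using password authentication instead of a private key; the paramiko calls are the same as above, and the host and credentials are placeholders:

import paramiko

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('192.168.75.133', 22, username='root', password='your-password')
stdin, stdout, stderr = ssh.exec_command('ifconfig')
print stdout.read()
ssh.close()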
import exifread
import os
import shutil
import dmcutils
from dmcutils import mylog
report = {}
trashFiles = [
'.DS_Store',
'Thumbs.db',
'.picasa.ini'
]
def processVideo(file):
vp = os.path.join(args.targetFolder,'videos')
if not os.path.exists(vp):
os.mkdir(vp)
outPath = os.path.join(vp,os.path.split(file)[1])
#check filesize ... if same then SHA -- just to save a little bit of time
while os.path.exists(outPath):
print outPath + " exists, generating a new name"
outPath = os.path.join(vp,"dif_" + os.path.split(file)[1])
move(file, outPath)
def processFile(file):
if not os.path.isfile(file):
mylog("File %s does not exist." % file)
return
if str(file).lower().endswith(('.jpg', '.jpeg')):
processImage(file)
report["processImageCount"] += 1
elif str(file).lower().endswith(('.mp4', '.mov', '.avi')):
processVideo(file)
pass
elif any(bf.lower() in str(file).lower() for bf in trashFiles):
mylog("Deleting %s because defindes as Trash" % file)
os.remove(file)
pass
else:
mylog("Unhandled %s " % file)
def scanAndProcessFolders(inputDir):
mylog("Starting in " + inputDir)
fileList = []
for root, dirs, files in os.walk(inputDir):
for file in files:
candidate = os.path.join(root, file)
fileList.append(candidate)
for candidate in fileList:
processFile(candidate)
def processImage(img):
with open(img, "rb") as f:
tags = exifread.process_file(f, stop_tag='EXIF DateTimeOriginal')
datestr = "0"
if "EXIF DateTimeOriginal" in tags:
datestr = str(tags["EXIF DateTimeOriginal"])
elif "Image DateTime" in tags:
datestr = str(tags["Image DateTime"])
if not datestr == "0" and not datestr == " ":
moveImage(img, datestr)
else:
report["processNoExif"] = report["processNoExif"] +1
if(args.requireExif):
mylog("Skip %s due missing EXIF Date" % img)
return
mylog("%s - No EXIFDate Found" % img)
ndd = os.path.join(args.targetFolder,"nodate") #maybe old directory structure could be preserved
if(not os.path.exists(ndd)):
os.mkdir(ndd)
move(img,os.path.join(ndd,os.path.split(img)[1]))
def moveImage(image,datestr):
dateList = datestr.split(':')
year, month = createDirsIfNotExist(dateList)
filename = os.path.split(image)[1]
newPath = os.path.join(args.targetFolder, year,month,filename)
if(os.path.exists(newPath)):
if(not checkForDublette(image,newPath)):
newPath = os.path.join(args.targetFolder, year, month, "dif_" + filename)
mylog("New filename for conflicting file generated %s" % newPath)
move(image,newPath)
else:
if not args.copyonly:
mylog("Deleting %s it already exists in %s" % (image,newPath))
os.remove(image)
else:
move(image,newPath)
def move(srcFile, toDir):
if args.copyonly:
mylog("copy %s to direcotry %s" % (srcFile,toDir))
shutil.copy(srcFile,toDir)
else:
mylog("move %s to direcotry %s" % (srcFile,toDir))
shutil.move(srcFile,toDir)
def checkForDublette(image,newPath):
imageHash = dmcutils.fileSha265Sum(image)
copyedHash = dmcutils.fileSha265Sum(newPath)
if(imageHash == copyedHash):
return True
else:
return False
def createDirsIfNotExist(dateList):
year = os.path.join(args.targetFolder,dateList[0].strip())
month = os.path.join(year,dateList[1].strip())
if(not os.path.exists(year)):
mylog("Create new Folder %s" % year)
os.mkdir(year)
if(not os.path.exists(month)):
mylog("Create new Folder %s" % month)
os.mkdir(month)
return year, month
def init(commandArgs):
global args
report["processImageCount"] = 0;
report["processNoExif"] = 0;
mylog("Init FileUtils")
args = commandArgs
if __name__ == '__main__':
dmcutils.init()
init(dmcutils.commandArgs)
scanAndProcessFolders(args.inputFolder)
mylog("Images processed %s" % report["processImageCount"])
mylog("Images without valid EXIF Date %s" % report["processNoExif"])
| HaBaLeS/digital-mess-cleaner | filesorter.py | Python | mit | 4,411 | 0.009522 |
# Copyright 1999-2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# Patches by Mike Poidinger to support multiple databases.
# Updated by Peter Cock in 2007 to do a better job on BLAST 2.2.15
"""Code for calling standalone BLAST and parsing plain text output (DEPRECATED).
Rather than parsing the human readable plain text BLAST output (which seems to
change with every update to BLAST), we and the NCBI recommend you parse the
XML output instead. The plain text parser in this module still works at the
time of writing, but is considered obsolete and updating it to cope with the
latest versions of BLAST is not a priority for us.
This module also provides code to work with the "legacy" standalone version of
NCBI BLAST, tools blastall, rpsblast and blastpgp via three helper functions of
the same name. These functions are very limited for dealing with the output as
files rather than handles, for which the wrappers in Bio.Blast.Applications are
preferred. Furthermore, the NCBI themselves regard these command line tools as
"legacy", and encourage using the new BLAST+ tools instead. Biopython has
wrappers for these under Bio.Blast.Applications (see the tutorial).
"""
from __future__ import print_function
from Bio import BiopythonDeprecationWarning
import warnings
warnings.warn("This module has been deprecated. Consider Bio.SearchIO for "
"parsing BLAST output instead.", BiopythonDeprecationWarning)
import os
import re
from Bio._py3k import StringIO
from Bio import File
from Bio.ParserSupport import *
from Bio.Blast import Record
from Bio.Application import _escape_filename
__docformat__ = "restructuredtext en"
_score_e_re = re.compile(r'Score +E')
class LowQualityBlastError(Exception):
"""Error caused by running a low quality sequence through BLAST.
When low quality sequences (like GenBank entries containing only
stretches of a single nucleotide) are BLASTed, they will result in
BLAST generating an error and not being able to perform the BLAST
search. This error should be raised for the BLAST reports produced
in this case.
"""
pass
class ShortQueryBlastError(Exception):
"""Error caused by running a short query sequence through BLAST.
If the query sequence is too short, BLAST outputs warnings and errors::
Searching[blastall] WARNING: [000.000] AT1G08320: SetUpBlastSearch failed.
[blastall] ERROR: [000.000] AT1G08320: Blast:
[blastall] ERROR: [000.000] AT1G08320: Blast: Query must be at least wordsize
done
This exception is raised when that condition is detected.
"""
pass
class _Scanner(object):
"""Scan BLAST output from blastall or blastpgp.
Tested with blastall and blastpgp v2.0.10, v2.0.11
Methods:
- feed Feed data into the scanner.
"""
def feed(self, handle, consumer):
"""S.feed(handle, consumer)
Feed in a BLAST report for scanning. handle is a file-like
object that contains the BLAST report. consumer is a Consumer
object that will receive events as the report is scanned.
"""
if isinstance(handle, File.UndoHandle):
uhandle = handle
else:
uhandle = File.UndoHandle(handle)
# Try to fast-forward to the beginning of the blast report.
read_and_call_until(uhandle, consumer.noevent, contains='BLAST')
# Now scan the BLAST report.
self._scan_header(uhandle, consumer)
self._scan_rounds(uhandle, consumer)
self._scan_database_report(uhandle, consumer)
self._scan_parameters(uhandle, consumer)
def _scan_header(self, uhandle, consumer):
# BLASTP 2.0.10 [Aug-26-1999]
#
#
# Reference: Altschul, Stephen F., Thomas L. Madden, Alejandro A. Schaf
# Jinghui Zhang, Zheng Zhang, Webb Miller, and David J. Lipman (1997),
# "Gapped BLAST and PSI-BLAST: a new generation of protein database sea
# programs", Nucleic Acids Res. 25:3389-3402.
#
# Query= test
# (140 letters)
#
# Database: sdqib40-1.35.seg.fa
# 1323 sequences; 223,339 total letters
#
# ========================================================
# This next example is from the online version of Blast,
# note there are TWO references, an RID line, and also
# the database is BEFORE the query line.
# Note the possible use of non-ASCII in the author names.
# ========================================================
#
# BLASTP 2.2.15 [Oct-15-2006]
# Reference: Altschul, Stephen F., Thomas L. Madden, Alejandro A. Sch??ffer,
# Jinghui Zhang, Zheng Zhang, Webb Miller, and David J. Lipman
# (1997), "Gapped BLAST and PSI-BLAST: a new generation of
# protein database search programs", Nucleic Acids Res. 25:3389-3402.
#
# Reference: Sch??ffer, Alejandro A., L. Aravind, Thomas L. Madden, Sergei
# Shavirin, John L. Spouge, Yuri I. Wolf, Eugene V. Koonin, and
# Stephen F. Altschul (2001), "Improving the accuracy of PSI-BLAST
# protein database searches with composition-based statistics
# and other refinements", Nucleic Acids Res. 29:2994-3005.
#
# RID: 1166022616-19998-65316425856.BLASTQ1
#
#
# Database: All non-redundant GenBank CDS
# translations+PDB+SwissProt+PIR+PRF excluding environmental samples
# 4,254,166 sequences; 1,462,033,012 total letters
# Query= gi:16127998
# Length=428
#
consumer.start_header()
read_and_call(uhandle, consumer.version, contains='BLAST')
read_and_call_while(uhandle, consumer.noevent, blank=1)
# There might be a <pre> line, for qblast output.
attempt_read_and_call(uhandle, consumer.noevent, start="<pre>")
# Read the reference(s)
while attempt_read_and_call(uhandle,
consumer.reference, start='Reference'):
# References are normally multiline terminated by a blank line
# (or, based on the old code, the RID line)
while True:
line = uhandle.readline()
if is_blank_line(line):
consumer.noevent(line)
break
elif line.startswith("RID"):
break
else:
# More of the reference
consumer.reference(line)
# Deal with the optional RID: ...
read_and_call_while(uhandle, consumer.noevent, blank=1)
attempt_read_and_call(uhandle, consumer.reference, start="RID:")
read_and_call_while(uhandle, consumer.noevent, blank=1)
# blastpgp may have a reference for compositional score matrix
# adjustment (see Bug 2502):
if attempt_read_and_call(
uhandle, consumer.reference, start="Reference"):
read_and_call_until(uhandle, consumer.reference, blank=1)
read_and_call_while(uhandle, consumer.noevent, blank=1)
# blastpgp has a Reference for composition-based statistics.
if attempt_read_and_call(
uhandle, consumer.reference, start="Reference"):
read_and_call_until(uhandle, consumer.reference, blank=1)
read_and_call_while(uhandle, consumer.noevent, blank=1)
line = uhandle.peekline()
assert line.strip() != ""
assert not line.startswith("RID:")
if line.startswith("Query="):
# This is an old style query then database...
# Read the Query lines and the following blank line.
read_and_call(uhandle, consumer.query_info, start='Query=')
read_and_call_until(uhandle, consumer.query_info, blank=1)
read_and_call_while(uhandle, consumer.noevent, blank=1)
# Read the database lines and the following blank line.
read_and_call_until(uhandle, consumer.database_info, end='total letters')
read_and_call(uhandle, consumer.database_info, contains='sequences')
read_and_call_while(uhandle, consumer.noevent, blank=1)
elif line.startswith("Database:"):
# This is a new style database then query...
read_and_call_until(uhandle, consumer.database_info, end='total letters')
read_and_call(uhandle, consumer.database_info, contains='sequences')
read_and_call_while(uhandle, consumer.noevent, blank=1)
# Read the Query lines and the following blank line.
# Or, on BLAST 2.2.22+ there is no blank link - need to spot
# the "... Score E" line instead.
read_and_call(uhandle, consumer.query_info, start='Query=')
# BLAST 2.2.25+ has a blank line before Length=
read_and_call_until(uhandle, consumer.query_info, start='Length=')
while True:
line = uhandle.peekline()
if not line.strip() or _score_e_re.search(line) is not None:
break
# It is more of the query (and its length)
read_and_call(uhandle, consumer.query_info)
read_and_call_while(uhandle, consumer.noevent, blank=1)
else:
raise ValueError("Invalid header?")
consumer.end_header()
def _scan_rounds(self, uhandle, consumer):
# Scan a bunch of rounds.
# Each round begins with either a "Searching......" line
# or a 'Score E' line followed by descriptions and alignments.
# The email server doesn't give the "Searching....." line.
# If there is no 'Searching.....' line then you'll first see a
# 'Results from round' line
while not self._eof(uhandle):
line = safe_peekline(uhandle)
if not line.startswith('Searching') and \
not line.startswith('Results from round') and \
_score_e_re.search(line) is None and \
'No hits found' not in line:
break
self._scan_descriptions(uhandle, consumer)
self._scan_alignments(uhandle, consumer)
def _scan_descriptions(self, uhandle, consumer):
# Searching..................................................done
# Results from round 2
#
#
# Sc
# Sequences producing significant alignments: (b
# Sequences used in model and found again:
#
# d1tde_2 3.4.1.4.4 (119-244) Thioredoxin reductase [Escherichia ...
# d1tcob_ 1.31.1.5.16 Calcineurin regulatory subunit (B-chain) [B...
# d1symb_ 1.31.1.2.2 Calcyclin (S100) [RAT (RATTUS NORVEGICUS)]
#
# Sequences not found previously or not previously below threshold:
#
# d1osa__ 1.31.1.5.11 Calmodulin [Paramecium tetraurelia]
# d1aoza3 2.5.1.3.3 (339-552) Ascorbate oxidase [zucchini (Cucurb...
#
# If PSI-BLAST, may also have:
#
# CONVERGED!
consumer.start_descriptions()
# Read 'Searching'
# This line seems to be missing in BLASTN 2.1.2 (others?)
attempt_read_and_call(uhandle, consumer.noevent, start='Searching')
# blastpgp 2.0.10 from NCBI 9/19/99 for Solaris sometimes crashes here.
# If this happens, the handle will yield no more information.
if not uhandle.peekline():
raise ValueError("Unexpected end of blast report. " +
"Looks suspiciously like a PSI-BLAST crash.")
# BLASTN 2.2.3 sometimes spews a bunch of warnings and errors here:
# Searching[blastall] WARNING: [000.000] AT1G08320: SetUpBlastSearch
# [blastall] ERROR: [000.000] AT1G08320: Blast:
# [blastall] ERROR: [000.000] AT1G08320: Blast: Query must be at leas
# done
# Reported by David Weisman.
# Check for these error lines and ignore them for now. Let
# the BlastErrorParser deal with them.
line = uhandle.peekline()
if "ERROR:" in line or line.startswith("done"):
read_and_call_while(uhandle, consumer.noevent, contains="ERROR:")
read_and_call(uhandle, consumer.noevent, start="done")
# Check to see if this is PSI-BLAST.
# If it is, the 'Searching' line will be followed by:
# (version 2.0.10)
# Searching.............................
# Results from round 2
# or (version 2.0.11)
# Searching.............................
#
#
# Results from round 2
# Skip a bunch of blank lines.
read_and_call_while(uhandle, consumer.noevent, blank=1)
# Check for the results line if it's there.
if attempt_read_and_call(uhandle, consumer.round, start='Results'):
read_and_call_while(uhandle, consumer.noevent, blank=1)
# Three things can happen here:
# 1. line contains 'Score E'
# 2. line contains "No hits found"
# 3. no descriptions
# The first one begins a bunch of descriptions. The last two
# indicates that no descriptions follow, and we should go straight
# to the alignments.
if not attempt_read_and_call(
uhandle, consumer.description_header,
has_re=_score_e_re):
# Either case 2 or 3. Look for "No hits found".
attempt_read_and_call(uhandle, consumer.no_hits,
contains='No hits found')
try:
read_and_call_while(uhandle, consumer.noevent, blank=1)
except ValueError as err:
if str(err) != "Unexpected end of stream.":
raise err
consumer.end_descriptions()
# Stop processing.
return
# Read the score header lines
read_and_call(uhandle, consumer.description_header,
start='Sequences producing')
# If PSI-BLAST, read the 'Sequences used in model' line.
attempt_read_and_call(uhandle, consumer.model_sequences,
start='Sequences used in model')
read_and_call_while(uhandle, consumer.noevent, blank=1)
# In BLAT, rather than a "No hits found" line, we just
# get no descriptions (and no alignments). This can be
# spotted because the next line is the database block:
if safe_peekline(uhandle).startswith(" Database:"):
consumer.end_descriptions()
# Stop processing.
return
# Read the descriptions and the following blank lines, making
# sure that there are descriptions.
if not uhandle.peekline().startswith('Sequences not found'):
read_and_call_until(uhandle, consumer.description, blank=1)
read_and_call_while(uhandle, consumer.noevent, blank=1)
# If PSI-BLAST, read the 'Sequences not found' line followed
# by more descriptions. However, I need to watch out for the
# case where there were no sequences not found previously, in
# which case there will be no more descriptions.
if attempt_read_and_call(uhandle, consumer.nonmodel_sequences,
start='Sequences not found'):
# Read the descriptions and the following blank lines.
read_and_call_while(uhandle, consumer.noevent, blank=1)
l = safe_peekline(uhandle)
# Brad -- added check for QUERY. On some PSI-BLAST outputs
# there will be a 'Sequences not found' line followed by no
# descriptions. Check for this case since the first thing you'll
# get is a blank line and then 'QUERY'
if not l.startswith('CONVERGED') and l[0] != '>' \
and not l.startswith('QUERY'):
read_and_call_until(uhandle, consumer.description, blank=1)
read_and_call_while(uhandle, consumer.noevent, blank=1)
attempt_read_and_call(uhandle, consumer.converged, start='CONVERGED')
read_and_call_while(uhandle, consumer.noevent, blank=1)
consumer.end_descriptions()
def _scan_alignments(self, uhandle, consumer):
if self._eof(uhandle):
return
# qblast inserts a helpful line here.
attempt_read_and_call(uhandle, consumer.noevent, start="ALIGNMENTS")
# First, check to see if I'm at the database report.
line = safe_peekline(uhandle)
if not line:
# EOF
return
elif line.startswith(' Database') or line.startswith("Lambda"):
return
elif line[0] == '>':
# XXX make a better check here between pairwise and masterslave
self._scan_pairwise_alignments(uhandle, consumer)
elif line.startswith('Effective'):
return
else:
# XXX put in a check to make sure I'm in a masterslave alignment
self._scan_masterslave_alignment(uhandle, consumer)
def _scan_pairwise_alignments(self, uhandle, consumer):
while not self._eof(uhandle):
line = safe_peekline(uhandle)
if line[0] != '>':
break
self._scan_one_pairwise_alignment(uhandle, consumer)
def _scan_one_pairwise_alignment(self, uhandle, consumer):
if self._eof(uhandle):
return
consumer.start_alignment()
self._scan_alignment_header(uhandle, consumer)
# Scan a bunch of score/alignment pairs.
while True:
if self._eof(uhandle):
# Shouldn't have issued that _scan_alignment_header event...
break
line = safe_peekline(uhandle)
if not line.startswith(' Score'):
break
self._scan_hsp(uhandle, consumer)
consumer.end_alignment()
def _scan_alignment_header(self, uhandle, consumer):
# >d1rip__ 2.24.7.1.1 Ribosomal S17 protein [Bacillus
# stearothermophilus]
# Length = 81
#
# Or, more recently with different white space:
#
# >gi|15799684|ref|NP_285696.1| threonine synthase ...
# gi|15829258|ref|NP_308031.1| threonine synthase
# ...
# Length=428
read_and_call(uhandle, consumer.title, start='>')
while True:
line = safe_readline(uhandle)
if line.lstrip().startswith('Length =') \
or line.lstrip().startswith('Length='):
consumer.length(line)
break
elif is_blank_line(line):
# Check to make sure I haven't missed the Length line
raise ValueError("I missed the Length in an alignment header")
consumer.title(line)
# Older versions of BLAST will have a line with some spaces.
# Version 2.0.14 (maybe 2.0.13?) and above print a true blank line.
if not attempt_read_and_call(uhandle, consumer.noevent,
start=' '):
read_and_call(uhandle, consumer.noevent, blank=1)
def _scan_hsp(self, uhandle, consumer):
consumer.start_hsp()
self._scan_hsp_header(uhandle, consumer)
self._scan_hsp_alignment(uhandle, consumer)
consumer.end_hsp()
def _scan_hsp_header(self, uhandle, consumer):
# Score = 22.7 bits (47), Expect = 2.5
# Identities = 10/36 (27%), Positives = 18/36 (49%)
# Strand = Plus / Plus
# Frame = +3
#
read_and_call(uhandle, consumer.score, start=' Score')
read_and_call(uhandle, consumer.identities, start=' Identities')
# BLASTN
attempt_read_and_call(uhandle, consumer.strand, start=' Strand')
# BLASTX, TBLASTN, TBLASTX
attempt_read_and_call(uhandle, consumer.frame, start=' Frame')
read_and_call(uhandle, consumer.noevent, blank=1)
def _scan_hsp_alignment(self, uhandle, consumer):
# Query: 11 GRGVSACA-------TCDGFFYRNQKVAVIGGGNTAVEEALYLSNIASEVHLIHRRDGF
# GRGVS+ TC Y + + V GGG+ + EE L + I R+
# Sbjct: 12 GRGVSSVVRRCIHKPTCKE--YAVKIIDVTGGGSFSAEEVQELREATLKEVDILRKVSG
#
# Query: 64 AEKILIKR 71
# I +K
# Sbjct: 70 PNIIQLKD 77
#
while True:
# Blastn adds an extra line filled with spaces before Query
attempt_read_and_call(uhandle, consumer.noevent, start=' ')
read_and_call(uhandle, consumer.query, start='Query')
read_and_call(uhandle, consumer.align, start=' ')
read_and_call(uhandle, consumer.sbjct, start='Sbjct')
try:
read_and_call_while(uhandle, consumer.noevent, blank=1)
except ValueError as err:
if str(err) != "Unexpected end of stream.":
raise err
# End of File (well, it looks like it with recent versions
# of BLAST for multiple queries after the Iterator class
# has broken up the whole file into chunks).
break
line = safe_peekline(uhandle)
# Alignment continues if I see a 'Query' or the spaces for Blastn.
if not (line.startswith('Query') or line.startswith(' ')):
break
def _scan_masterslave_alignment(self, uhandle, consumer):
consumer.start_alignment()
while True:
line = safe_readline(uhandle)
# Check to see whether I'm finished reading the alignment.
# This is indicated by 1) database section, 2) next psi-blast
# round, which can also be a 'Results from round' if no
# searching line is present
# patch by chapmanb
if line.startswith('Searching') or \
line.startswith('Results from round'):
uhandle.saveline(line)
break
elif line.startswith(' Database'):
uhandle.saveline(line)
break
elif is_blank_line(line):
consumer.noevent(line)
else:
consumer.multalign(line)
read_and_call_while(uhandle, consumer.noevent, blank=1)
consumer.end_alignment()
def _eof(self, uhandle):
try:
line = safe_peekline(uhandle)
except ValueError as err:
if str(err) != "Unexpected end of stream.":
raise err
line = ""
return not line
def _scan_database_report(self, uhandle, consumer):
# Database: sdqib40-1.35.seg.fa
# Posted date: Nov 1, 1999 4:25 PM
# Number of letters in database: 223,339
# Number of sequences in database: 1323
#
# Lambda K H
# 0.322 0.133 0.369
#
# Gapped
# Lambda K H
# 0.270 0.0470 0.230
#
##########################################
# Or, more recently Blast 2.2.15 gives less blank lines
##########################################
# Database: All non-redundant GenBank CDS translations+PDB+SwissProt+PIR+PRF excluding
# environmental samples
# Posted date: Dec 12, 2006 5:51 PM
# Number of letters in database: 667,088,753
# Number of sequences in database: 2,094,974
# Lambda K H
# 0.319 0.136 0.395
# Gapped
# Lambda K H
# 0.267 0.0410 0.140
if self._eof(uhandle):
return
consumer.start_database_report()
# Subset of the database(s) listed below
# Number of letters searched: 562,618,960
# Number of sequences searched: 228,924
if attempt_read_and_call(uhandle, consumer.noevent, start=" Subset"):
read_and_call(uhandle, consumer.noevent, contains="letters")
read_and_call(uhandle, consumer.noevent, contains="sequences")
read_and_call(uhandle, consumer.noevent, start=" ")
# Sameet Mehta reported seeing output from BLASTN 2.2.9 that
# was missing the "Database" stanza completely.
while attempt_read_and_call(uhandle, consumer.database,
start=' Database'):
# BLAT output ends abruptly here, without any of the other
# information. Check to see if this is the case. If so,
# then end the database report here gracefully.
if not uhandle.peekline().strip() \
or uhandle.peekline().startswith("BLAST"):
consumer.end_database_report()
return
# Database can span multiple lines.
read_and_call_until(uhandle, consumer.database, start=' Posted')
read_and_call(uhandle, consumer.posted_date, start=' Posted')
read_and_call(uhandle, consumer.num_letters_in_database,
start=' Number of letters')
read_and_call(uhandle, consumer.num_sequences_in_database,
start=' Number of sequences')
# There may not be a line starting with spaces...
attempt_read_and_call(uhandle, consumer.noevent, start=' ')
line = safe_readline(uhandle)
uhandle.saveline(line)
if 'Lambda' in line:
break
try:
read_and_call(uhandle, consumer.noevent, start='Lambda')
read_and_call(uhandle, consumer.ka_params)
        except ValueError:
pass
# This blank line is optional:
attempt_read_and_call(uhandle, consumer.noevent, blank=1)
# not BLASTP
attempt_read_and_call(uhandle, consumer.gapped, start='Gapped')
# not TBLASTX
if attempt_read_and_call(uhandle, consumer.noevent, start='Lambda'):
read_and_call(uhandle, consumer.ka_params_gap)
# Blast 2.2.4 can sometimes skip the whole parameter section.
# Thus, I need to be careful not to read past the end of the
# file.
try:
read_and_call_while(uhandle, consumer.noevent, blank=1)
except ValueError as x:
if str(x) != "Unexpected end of stream.":
raise
consumer.end_database_report()
def _scan_parameters(self, uhandle, consumer):
# Matrix: BLOSUM62
# Gap Penalties: Existence: 11, Extension: 1
# Number of Hits to DB: 50604
# Number of Sequences: 1323
# Number of extensions: 1526
# Number of successful extensions: 6
# Number of sequences better than 10.0: 5
# Number of HSP's better than 10.0 without gapping: 5
# Number of HSP's successfully gapped in prelim test: 0
# Number of HSP's that attempted gapping in prelim test: 1
# Number of HSP's gapped (non-prelim): 5
# length of query: 140
# length of database: 223,339
# effective HSP length: 39
# effective length of query: 101
# effective length of database: 171,742
# effective search space: 17345942
# effective search space used: 17345942
# T: 11
# A: 40
# X1: 16 ( 7.4 bits)
# X2: 38 (14.8 bits)
# X3: 64 (24.9 bits)
# S1: 41 (21.9 bits)
# S2: 42 (20.8 bits)
##########################################
# Or, more recently Blast(x) 2.2.15 gives
##########################################
# Matrix: BLOSUM62
# Gap Penalties: Existence: 11, Extension: 1
# Number of Sequences: 4535438
# Number of Hits to DB: 2,588,844,100
# Number of extensions: 60427286
# Number of successful extensions: 126433
# Number of sequences better than 2.0: 30
# Number of HSP's gapped: 126387
# Number of HSP's successfully gapped: 35
# Length of query: 291
# Length of database: 1,573,298,872
# Length adjustment: 130
# Effective length of query: 161
# Effective length of database: 983,691,932
# Effective search space: 158374401052
# Effective search space used: 158374401052
# Neighboring words threshold: 12
# Window for multiple hits: 40
# X1: 16 ( 7.3 bits)
# X2: 38 (14.6 bits)
# X3: 64 (24.7 bits)
# S1: 41 (21.7 bits)
# S2: 32 (16.9 bits)
# Blast 2.2.4 can sometimes skip the whole parameter section.
# BLAT also skips the whole parameter section.
# Thus, check to make sure that the parameter section really
# exists.
if not uhandle.peekline().strip():
return
# BLASTN 2.2.9 looks like it reverses the "Number of Hits" and
# "Number of Sequences" lines.
consumer.start_parameters()
# Matrix line may be missing in BLASTN 2.2.9
attempt_read_and_call(uhandle, consumer.matrix, start='Matrix')
# not TBLASTX
attempt_read_and_call(uhandle, consumer.gap_penalties, start='Gap')
attempt_read_and_call(uhandle, consumer.num_sequences,
start='Number of Sequences')
attempt_read_and_call(uhandle, consumer.num_hits,
start='Number of Hits')
attempt_read_and_call(uhandle, consumer.num_sequences,
start='Number of Sequences')
attempt_read_and_call(uhandle, consumer.num_extends,
start='Number of extensions')
attempt_read_and_call(uhandle, consumer.num_good_extends,
start='Number of successful')
attempt_read_and_call(uhandle, consumer.num_seqs_better_e,
start='Number of sequences')
# not BLASTN, TBLASTX
if attempt_read_and_call(uhandle, consumer.hsps_no_gap,
start="Number of HSP's better"):
# BLASTN 2.2.9
if attempt_read_and_call(uhandle, consumer.noevent,
start="Number of HSP's gapped:"):
read_and_call(uhandle, consumer.noevent,
start="Number of HSP's successfully")
# This is omitted in 2.2.15
attempt_read_and_call(uhandle, consumer.noevent,
start="Number of extra gapped extensions")
else:
read_and_call(uhandle, consumer.hsps_prelim_gapped,
start="Number of HSP's successfully")
read_and_call(uhandle, consumer.hsps_prelim_gap_attempted,
start="Number of HSP's that")
read_and_call(uhandle, consumer.hsps_gapped,
start="Number of HSP's gapped")
# e.g. BLASTX 2.2.15 where the "better" line is missing
elif attempt_read_and_call(uhandle, consumer.noevent,
start="Number of HSP's gapped"):
read_and_call(uhandle, consumer.noevent,
start="Number of HSP's successfully")
# not in blastx 2.2.1
attempt_read_and_call(uhandle, consumer.query_length,
has_re=re.compile(r"[Ll]ength of query"))
# Not in BLASTX 2.2.22+
attempt_read_and_call(uhandle, consumer.database_length,
has_re=re.compile(r"[Ll]ength of \s*[Dd]atabase"))
# BLASTN 2.2.9
attempt_read_and_call(uhandle, consumer.noevent,
start="Length adjustment")
attempt_read_and_call(uhandle, consumer.effective_hsp_length,
start='effective HSP')
# Not in blastx 2.2.1
attempt_read_and_call(
uhandle, consumer.effective_query_length,
has_re=re.compile(r'[Ee]ffective length of query'))
# This is not in BLASTP 2.2.15
attempt_read_and_call(
uhandle, consumer.effective_database_length,
has_re=re.compile(r'[Ee]ffective length of \s*[Dd]atabase'))
# Not in blastx 2.2.1, added a ':' to distinguish between
# this and the 'effective search space used' line
attempt_read_and_call(
uhandle, consumer.effective_search_space,
has_re=re.compile(r'[Ee]ffective search space:'))
# Does not appear in BLASTP 2.0.5
attempt_read_and_call(
uhandle, consumer.effective_search_space_used,
has_re=re.compile(r'[Ee]ffective search space used'))
# BLASTX, TBLASTN, TBLASTX
attempt_read_and_call(uhandle, consumer.frameshift, start='frameshift')
# not in BLASTN 2.2.9
attempt_read_and_call(uhandle, consumer.threshold, start='T')
# In BLASTX 2.2.15 replaced by: "Neighboring words threshold: 12"
attempt_read_and_call(uhandle, consumer.threshold, start='Neighboring words threshold')
# not in BLASTX 2.2.15
attempt_read_and_call(uhandle, consumer.window_size, start='A')
# get this instead: "Window for multiple hits: 40"
attempt_read_and_call(uhandle, consumer.window_size, start='Window for multiple hits')
# not in BLASTX 2.2.22+
attempt_read_and_call(uhandle, consumer.dropoff_1st_pass, start='X1')
# not TBLASTN
attempt_read_and_call(uhandle, consumer.gap_x_dropoff, start='X2')
# not BLASTN, TBLASTX
attempt_read_and_call(uhandle, consumer.gap_x_dropoff_final,
start='X3')
# not TBLASTN
attempt_read_and_call(uhandle, consumer.gap_trigger, start='S1')
# not in blastx 2.2.1
# first we make sure we have additional lines to work with, if
# not then the file is done and we don't have a final S2
if not is_blank_line(uhandle.peekline(), allow_spaces=1):
read_and_call(uhandle, consumer.blast_cutoff, start='S2')
consumer.end_parameters()
class BlastParser(AbstractParser):
"""Parses BLAST data into a Record.Blast object.
"""
def __init__(self):
"""__init__(self)"""
self._scanner = _Scanner()
self._consumer = _BlastConsumer()
def parse(self, handle):
"""parse(self, handle)"""
self._scanner.feed(handle, self._consumer)
return self._consumer.data
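# A minimal usage sketch for BlastParser (the filename "report.txt" is
# hypothetical and should contain a single plain-text BLAST report):
#
#     parser = BlastParser()
#     with open("report.txt") as handle:
#         record = parser.parse(handle)
#     print(record.query)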
class PSIBlastParser(AbstractParser):
"""Parses BLAST data into a Record.PSIBlast object.
"""
def __init__(self):
"""__init__(self)"""
self._scanner = _Scanner()
self._consumer = _PSIBlastConsumer()
def parse(self, handle):
"""parse(self, handle)"""
self._scanner.feed(handle, self._consumer)
return self._consumer.data
class _HeaderConsumer(object):
def start_header(self):
self._header = Record.Header()
def version(self, line):
c = line.split()
self._header.application = c[0]
self._header.version = c[1]
if len(c) > 2:
# The date is missing in the new C++ output from blastx 2.2.22+
# Just get "BLASTX 2.2.22+\n" and that's all.
self._header.date = c[2][1:-1]
def reference(self, line):
if line.startswith('Reference: '):
self._header.reference = line[11:]
else:
self._header.reference = self._header.reference + line
def query_info(self, line):
if line.startswith('Query= '):
self._header.query = line[7:].lstrip()
elif line.startswith('Length='):
# New style way to give the query length in BLAST 2.2.22+ (the C++ code)
self._header.query_letters = _safe_int(line[7:].strip())
elif not line.startswith(' '): # continuation of query_info
self._header.query = "%s%s" % (self._header.query, line)
else:
# Hope it is the old style way to give the query length:
letters, = _re_search(
r"([0-9,]+) letters", line,
"I could not find the number of letters in line\n%s" % line)
self._header.query_letters = _safe_int(letters)
def database_info(self, line):
line = line.rstrip()
if line.startswith('Database: '):
self._header.database = line[10:]
elif not line.endswith('total letters'):
if self._header.database:
                # Need to include a space when merging a multi-line database description
self._header.database = self._header.database + " " + line.strip()
else:
self._header.database = line.strip()
else:
sequences, letters = _re_search(
r"([0-9,]+) sequences; ([0-9,-]+) total letters", line,
"I could not find the sequences and letters in line\n%s" % line)
self._header.database_sequences = _safe_int(sequences)
self._header.database_letters = _safe_int(letters)
def end_header(self):
# Get rid of the trailing newlines
self._header.reference = self._header.reference.rstrip()
self._header.query = self._header.query.rstrip()
class _DescriptionConsumer(object):
def start_descriptions(self):
self._descriptions = []
self._model_sequences = []
self._nonmodel_sequences = []
self._converged = 0
self._type = None
self._roundnum = None
self.__has_n = 0 # Does the description line contain an N value?
def description_header(self, line):
if line.startswith('Sequences producing'):
cols = line.split()
if cols[-1] == 'N':
self.__has_n = 1
def description(self, line):
dh = self._parse(line)
if self._type == 'model':
self._model_sequences.append(dh)
elif self._type == 'nonmodel':
self._nonmodel_sequences.append(dh)
else:
self._descriptions.append(dh)
def model_sequences(self, line):
self._type = 'model'
def nonmodel_sequences(self, line):
self._type = 'nonmodel'
def converged(self, line):
self._converged = 1
def no_hits(self, line):
pass
def round(self, line):
if not line.startswith('Results from round'):
raise ValueError("I didn't understand the round line\n%s" % line)
self._roundnum = _safe_int(line[18:].strip())
def end_descriptions(self):
pass
def _parse(self, description_line):
line = description_line # for convenience
dh = Record.Description()
# I need to separate the score and p-value from the title.
# sp|P21297|FLBT_CAUCR FLBT PROTEIN [snip] 284 7e-77
# sp|P21297|FLBT_CAUCR FLBT PROTEIN [snip] 284 7e-77 1
# special cases to handle:
# - title must be preserved exactly (including whitespaces)
# - score could be equal to e-value (not likely, but what if??)
# - sometimes there's an "N" score of '1'.
cols = line.split()
if len(cols) < 3:
raise ValueError(
"Line does not appear to contain description:\n%s" % line)
if self.__has_n:
i = line.rfind(cols[-1]) # find start of N
i = line.rfind(cols[-2], 0, i) # find start of p-value
i = line.rfind(cols[-3], 0, i) # find start of score
else:
i = line.rfind(cols[-1]) # find start of p-value
i = line.rfind(cols[-2], 0, i) # find start of score
if self.__has_n:
dh.title, dh.score, dh.e, dh.num_alignments = \
line[:i].rstrip(), cols[-3], cols[-2], cols[-1]
else:
dh.title, dh.score, dh.e, dh.num_alignments = \
line[:i].rstrip(), cols[-2], cols[-1], 1
dh.num_alignments = _safe_int(dh.num_alignments)
dh.score = _safe_int(dh.score)
dh.e = _safe_float(dh.e)
return dh
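# For illustration, _DescriptionConsumer._parse above splits a description
# line like the (representative) example from its comment:
#     sp|P21297|FLBT_CAUCR FLBT PROTEIN    284  7e-77
# into title="sp|P21297|FLBT_CAUCR FLBT PROTEIN", score=284, e=7e-77,
# and num_alignments=1 when no trailing N column is present.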
class _AlignmentConsumer(object):
# This is a little bit tricky. An alignment can either be a
# pairwise alignment or a multiple alignment. Since it's difficult
    # to know a priori which one the blast record will contain, I'm going
# to make one class that can parse both of them.
def start_alignment(self):
self._alignment = Record.Alignment()
self._multiple_alignment = Record.MultipleAlignment()
def title(self, line):
if self._alignment.title:
self._alignment.title += " "
self._alignment.title += line.strip()
def length(self, line):
# e.g. "Length = 81" or more recently, "Length=428"
parts = line.replace(" ", "").split("=")
assert len(parts) == 2, "Unrecognised format length line"
self._alignment.length = parts[1]
self._alignment.length = _safe_int(self._alignment.length)
def multalign(self, line):
# Standalone version uses 'QUERY', while WWW version uses blast_tmp.
if line.startswith('QUERY') or line.startswith('blast_tmp'):
# If this is the first line of the multiple alignment,
# then I need to figure out how the line is formatted.
# Format of line is:
# QUERY 1 acttg...gccagaggtggtttattcagtctccataagagaggggacaaacg 60
try:
name, start, seq, end = line.split()
except ValueError:
raise ValueError("I do not understand the line\n%s" % line)
self._start_index = line.index(start, len(name))
self._seq_index = line.index(seq,
self._start_index + len(start))
# subtract 1 for the space
self._name_length = self._start_index - 1
self._start_length = self._seq_index - self._start_index - 1
self._seq_length = line.rfind(end) - self._seq_index - 1
# self._seq_index = line.index(seq)
# # subtract 1 for the space
# self._seq_length = line.rfind(end) - self._seq_index - 1
# self._start_index = line.index(start)
# self._start_length = self._seq_index - self._start_index - 1
# self._name_length = self._start_index
# Extract the information from the line
name = line[:self._name_length]
name = name.rstrip()
start = line[self._start_index:self._start_index + self._start_length]
start = start.rstrip()
if start:
start = _safe_int(start)
end = line[self._seq_index + self._seq_length:].rstrip()
if end:
end = _safe_int(end)
seq = line[self._seq_index:self._seq_index + self._seq_length].rstrip()
# right pad the sequence with spaces if necessary
if len(seq) < self._seq_length:
seq += ' ' * (self._seq_length - len(seq))
# I need to make sure the sequence is aligned correctly with the query.
# First, I will find the length of the query. Then, if necessary,
# I will pad my current sequence with spaces so that they will line
# up correctly.
# Two possible things can happen:
# QUERY
# 504
#
# QUERY
# 403
#
# Sequence 504 will need padding at the end. Since I won't know
# this until the end of the alignment, this will be handled in
# end_alignment.
# Sequence 403 will need padding before being added to the alignment.
align = self._multiple_alignment.alignment # for convenience
align.append((name, start, seq, end))
# This is old code that tried to line up all the sequences
        # in a multiple alignment by using the sequence titles as
# identifiers. The problem with this is that BLAST assigns
# different HSP's from the same sequence the same id. Thus,
# in one alignment block, there may be multiple sequences with
# the same id. I'm not sure how to handle this, so I'm not
# going to.
# # If the sequence is the query, then just add it.
# if name == 'QUERY':
# if len(align) == 0:
# align.append((name, start, seq))
# else:
# aname, astart, aseq = align[0]
# if name != aname:
# raise ValueError, "Query is not the first sequence"
# aseq = aseq + seq
# align[0] = aname, astart, aseq
# else:
# if len(align) == 0:
# raise ValueError, "I could not find the query sequence"
# qname, qstart, qseq = align[0]
#
# # Now find my sequence in the multiple alignment.
# for i in range(1, len(align)):
# aname, astart, aseq = align[i]
# if name == aname:
# index = i
# break
# else:
# # If I couldn't find it, then add a new one.
# align.append((None, None, None))
# index = len(align)-1
# # Make sure to left-pad it.
# aname, astart, aseq = name, start, ' '*(len(qseq)-len(seq))
#
# if len(qseq) != len(aseq) + len(seq):
# # If my sequences are shorter than the query sequence,
# # then I will need to pad some spaces to make them line up.
# # Since I've already right padded seq, that means aseq
# # must be too short.
# aseq = aseq + ' '*(len(qseq)-len(aseq)-len(seq))
# aseq = aseq + seq
# if astart is None:
# astart = start
# align[index] = aname, astart, aseq
def end_alignment(self):
# Remove trailing newlines
if self._alignment:
self._alignment.title = self._alignment.title.rstrip()
# This code is also obsolete. See note above.
# If there's a multiple alignment, I will need to make sure
# all the sequences are aligned. That is, I may need to
# right-pad the sequences.
# if self._multiple_alignment is not None:
# align = self._multiple_alignment.alignment
# seqlen = None
# for i in range(len(align)):
# name, start, seq = align[i]
# if seqlen is None:
# seqlen = len(seq)
# else:
# if len(seq) < seqlen:
# seq = seq + ' '*(seqlen - len(seq))
# align[i] = name, start, seq
# elif len(seq) > seqlen:
# raise ValueError, \
# "Sequence %s is longer than the query" % name
# Clean up some variables, if they exist.
try:
del self._seq_index
del self._seq_length
del self._start_index
del self._start_length
del self._name_length
except AttributeError:
pass
class _HSPConsumer(object):
def start_hsp(self):
self._hsp = Record.HSP()
def score(self, line):
self._hsp.bits, self._hsp.score = _re_search(
r"Score =\s*([0-9.e+]+) bits \(([0-9]+)\)", line,
"I could not find the score in line\n%s" % line)
self._hsp.score = _safe_float(self._hsp.score)
self._hsp.bits = _safe_float(self._hsp.bits)
x, y = _re_search(
r"Expect\(?(\d*)\)? = +([0-9.e\-|\+]+)", line,
"I could not find the expect in line\n%s" % line)
if x:
self._hsp.num_alignments = _safe_int(x)
else:
self._hsp.num_alignments = 1
self._hsp.expect = _safe_float(y)
def identities(self, line):
x, y = _re_search(
r"Identities = (\d+)\/(\d+)", line,
"I could not find the identities in line\n%s" % line)
self._hsp.identities = _safe_int(x), _safe_int(y)
self._hsp.align_length = _safe_int(y)
if 'Positives' in line:
x, y = _re_search(
r"Positives = (\d+)\/(\d+)", line,
"I could not find the positives in line\n%s" % line)
self._hsp.positives = _safe_int(x), _safe_int(y)
assert self._hsp.align_length == _safe_int(y)
if 'Gaps' in line:
x, y = _re_search(
r"Gaps = (\d+)\/(\d+)", line,
"I could not find the gaps in line\n%s" % line)
self._hsp.gaps = _safe_int(x), _safe_int(y)
assert self._hsp.align_length == _safe_int(y)
def strand(self, line):
self._hsp.strand = _re_search(
r"Strand\s?=\s?(\w+)\s?/\s?(\w+)", line,
"I could not find the strand in line\n%s" % line)
def frame(self, line):
# Frame can be in formats:
# Frame = +1
# Frame = +2 / +2
if '/' in line:
self._hsp.frame = _re_search(
r"Frame\s?=\s?([-+][123])\s?/\s?([-+][123])", line,
"I could not find the frame in line\n%s" % line)
else:
self._hsp.frame = _re_search(
r"Frame = ([-+][123])", line,
"I could not find the frame in line\n%s" % line)
# Match a space, if one is available. Masahir Ishikawa found a
# case where there's no space between the start and the sequence:
# Query: 100tt 101
# line below modified by Yair Benita, Sep 2004
# Note that the colon is not always present. 2006
_query_re = re.compile(r"Query(:?) \s*(\d+)\s*(.+) (\d+)")
def query(self, line):
m = self._query_re.search(line)
if m is None:
if line.strip() == "Query ------------------------------------------------------------":
# Special case - long gap relative to the subject,
# note there is no start/end present, cannot update those
self._hsp.query += "-" * 60
self._query_len = 60 # number of dashes
self._query_start_index = 13 # offset of first dash
return
raise ValueError("I could not find the query in line\n%s" % line)
# line below modified by Yair Benita, Sep 2004.
# added the end attribute for the query
colon, start, seq, end = m.groups()
seq = seq.strip()
self._hsp.query += seq
if self._hsp.query_start is None:
self._hsp.query_start = _safe_int(start)
# line below added by Yair Benita, Sep 2004.
# added the end attribute for the query
self._hsp.query_end = _safe_int(end)
# Get index for sequence start (regular expression element 3)
self._query_start_index = m.start(3)
self._query_len = len(seq)
def align(self, line):
seq = line[self._query_start_index:].rstrip()
if len(seq) < self._query_len:
# Make sure the alignment is the same length as the query
seq += ' ' * (self._query_len - len(seq))
        elif len(seq) > self._query_len:
raise ValueError("Match is longer than the query in line\n%s"
% line)
self._hsp.match = self._hsp.match + seq
# To match how we do the query, cache the regular expression.
# Note that the colon is not always present.
_sbjct_re = re.compile(r"Sbjct(:?) \s*(\d+)\s*(.+) (\d+)")
def sbjct(self, line):
m = self._sbjct_re.search(line)
if m is None:
raise ValueError("I could not find the sbjct in line\n%s" % line)
colon, start, seq, end = m.groups()
# mikep 26/9/00
        # On occasion, there is a blast hit with no subject match.
        # So far, it only occurs with one-line short "matches".
        # I have decided to let these pass as they appear.
if not seq.strip():
seq = ' ' * self._query_len
else:
seq = seq.strip()
self._hsp.sbjct += seq
if self._hsp.sbjct_start is None:
self._hsp.sbjct_start = _safe_int(start)
self._hsp.sbjct_end = _safe_int(end)
if len(seq) != self._query_len:
raise ValueError(
"QUERY and SBJCT sequence lengths don't match (%i %r vs %i) in line\n%s"
% (self._query_len, self._hsp.query, len(seq), line))
del self._query_start_index # clean up unused variables
del self._query_len
def end_hsp(self):
pass
class _DatabaseReportConsumer(object):
def start_database_report(self):
self._dr = Record.DatabaseReport()
def database(self, line):
m = re.search(r"Database: (.+)$", line)
if m:
self._dr.database_name.append(m.group(1))
elif self._dr.database_name:
# This must be a continuation of the previous name.
self._dr.database_name[-1] = "%s%s" % (self._dr.database_name[-1],
line.strip())
def posted_date(self, line):
self._dr.posted_date.append(_re_search(
r"Posted date:\s*(.+)$", line,
"I could not find the posted date in line\n%s" % line))
def num_letters_in_database(self, line):
letters, = _get_cols(
line, (-1,), ncols=6, expected={2: "letters", 4: "database:"})
self._dr.num_letters_in_database.append(_safe_int(letters))
def num_sequences_in_database(self, line):
sequences, = _get_cols(
line, (-1,), ncols=6, expected={2: "sequences", 4: "database:"})
self._dr.num_sequences_in_database.append(_safe_int(sequences))
def ka_params(self, line):
self._dr.ka_params = [_safe_float(x) for x in line.split()]
def gapped(self, line):
self._dr.gapped = 1
def ka_params_gap(self, line):
self._dr.ka_params_gap = [_safe_float(x) for x in line.split()]
def end_database_report(self):
pass
class _ParametersConsumer(object):
def start_parameters(self):
self._params = Record.Parameters()
def matrix(self, line):
self._params.matrix = line[8:].rstrip()
def gap_penalties(self, line):
self._params.gap_penalties = [_safe_float(x) for x in _get_cols(
line, (3, 5), ncols=6, expected={2: "Existence:", 4: "Extension:"})]
def num_hits(self, line):
if '1st pass' in line:
x, = _get_cols(line, (-4,), ncols=11, expected={2: "Hits"})
self._params.num_hits = _safe_int(x)
else:
x, = _get_cols(line, (-1,), ncols=6, expected={2: "Hits"})
self._params.num_hits = _safe_int(x)
def num_sequences(self, line):
if '1st pass' in line:
x, = _get_cols(line, (-4,), ncols=9, expected={2: "Sequences:"})
self._params.num_sequences = _safe_int(x)
else:
x, = _get_cols(line, (-1,), ncols=4, expected={2: "Sequences:"})
self._params.num_sequences = _safe_int(x)
def num_extends(self, line):
if '1st pass' in line:
x, = _get_cols(line, (-4,), ncols=9, expected={2: "extensions:"})
self._params.num_extends = _safe_int(x)
else:
x, = _get_cols(line, (-1,), ncols=4, expected={2: "extensions:"})
self._params.num_extends = _safe_int(x)
def num_good_extends(self, line):
if '1st pass' in line:
x, = _get_cols(line, (-4,), ncols=10, expected={3: "extensions:"})
self._params.num_good_extends = _safe_int(x)
else:
x, = _get_cols(line, (-1,), ncols=5, expected={3: "extensions:"})
self._params.num_good_extends = _safe_int(x)
def num_seqs_better_e(self, line):
self._params.num_seqs_better_e, = _get_cols(
line, (-1,), ncols=7, expected={2: "sequences"})
self._params.num_seqs_better_e = _safe_int(
self._params.num_seqs_better_e)
def hsps_no_gap(self, line):
self._params.hsps_no_gap, = _get_cols(
line, (-1,), ncols=9, expected={3: "better", 7: "gapping:"})
self._params.hsps_no_gap = _safe_int(self._params.hsps_no_gap)
def hsps_prelim_gapped(self, line):
self._params.hsps_prelim_gapped, = _get_cols(
line, (-1,), ncols=9, expected={4: "gapped", 6: "prelim"})
self._params.hsps_prelim_gapped = _safe_int(
self._params.hsps_prelim_gapped)
    def hsps_prelim_gap_attempted(self, line):
self._params.hsps_prelim_gapped_attempted, = _get_cols(
line, (-1,), ncols=10, expected={4: "attempted", 7: "prelim"})
self._params.hsps_prelim_gapped_attempted = _safe_int(
self._params.hsps_prelim_gapped_attempted)
def hsps_gapped(self, line):
self._params.hsps_gapped, = _get_cols(
line, (-1,), ncols=6, expected={3: "gapped"})
self._params.hsps_gapped = _safe_int(self._params.hsps_gapped)
def query_length(self, line):
self._params.query_length, = _get_cols(
line.lower(), (-1,), ncols=4, expected={0: "length", 2: "query:"})
self._params.query_length = _safe_int(self._params.query_length)
def database_length(self, line):
self._params.database_length, = _get_cols(
line.lower(), (-1,), ncols=4, expected={0: "length", 2: "database:"})
self._params.database_length = _safe_int(self._params.database_length)
def effective_hsp_length(self, line):
self._params.effective_hsp_length, = _get_cols(
line, (-1,), ncols=4, expected={1: "HSP", 2: "length:"})
self._params.effective_hsp_length = _safe_int(
self._params.effective_hsp_length)
def effective_query_length(self, line):
self._params.effective_query_length, = _get_cols(
line, (-1,), ncols=5, expected={1: "length", 3: "query:"})
self._params.effective_query_length = _safe_int(
self._params.effective_query_length)
def effective_database_length(self, line):
self._params.effective_database_length, = _get_cols(
line.lower(), (-1,), ncols=5, expected={1: "length", 3: "database:"})
self._params.effective_database_length = _safe_int(
self._params.effective_database_length)
def effective_search_space(self, line):
self._params.effective_search_space, = _get_cols(
line, (-1,), ncols=4, expected={1: "search"})
self._params.effective_search_space = _safe_int(
self._params.effective_search_space)
def effective_search_space_used(self, line):
self._params.effective_search_space_used, = _get_cols(
line, (-1,), ncols=5, expected={1: "search", 3: "used:"})
self._params.effective_search_space_used = _safe_int(
self._params.effective_search_space_used)
def frameshift(self, line):
self._params.frameshift = _get_cols(
line, (4, 5), ncols=6, expected={0: "frameshift", 2: "decay"})
def threshold(self, line):
if line[:2] == "T:":
            # Assume it's an old style line like "T: 123"
self._params.threshold, = _get_cols(
line, (1,), ncols=2, expected={0: "T:"})
elif line[:28] == "Neighboring words threshold:":
self._params.threshold, = _get_cols(
line, (3,), ncols=4, expected={0: "Neighboring", 1: "words", 2: "threshold:"})
else:
raise ValueError("Unrecognised threshold line:\n%s" % line)
self._params.threshold = _safe_int(self._params.threshold)
def window_size(self, line):
if line[:2] == "A:":
self._params.window_size, = _get_cols(
line, (1,), ncols=2, expected={0: "A:"})
elif line[:25] == "Window for multiple hits:":
self._params.window_size, = _get_cols(
line, (4,), ncols=5, expected={0: "Window", 2: "multiple", 3: "hits:"})
else:
raise ValueError("Unrecognised window size line:\n%s" % line)
self._params.window_size = _safe_int(self._params.window_size)
def dropoff_1st_pass(self, line):
score, bits = _re_search(
r"X1: (\d+) \(\s*([0-9,.]+) bits\)", line,
"I could not find the dropoff in line\n%s" % line)
self._params.dropoff_1st_pass = _safe_int(score), _safe_float(bits)
def gap_x_dropoff(self, line):
score, bits = _re_search(
r"X2: (\d+) \(\s*([0-9,.]+) bits\)", line,
"I could not find the gap dropoff in line\n%s" % line)
self._params.gap_x_dropoff = _safe_int(score), _safe_float(bits)
def gap_x_dropoff_final(self, line):
score, bits = _re_search(
r"X3: (\d+) \(\s*([0-9,.]+) bits\)", line,
"I could not find the gap dropoff final in line\n%s" % line)
self._params.gap_x_dropoff_final = _safe_int(score), _safe_float(bits)
def gap_trigger(self, line):
score, bits = _re_search(
r"S1: (\d+) \(\s*([0-9,.]+) bits\)", line,
"I could not find the gap trigger in line\n%s" % line)
self._params.gap_trigger = _safe_int(score), _safe_float(bits)
def blast_cutoff(self, line):
score, bits = _re_search(
r"S2: (\d+) \(\s*([0-9,.]+) bits\)", line,
"I could not find the blast cutoff in line\n%s" % line)
self._params.blast_cutoff = _safe_int(score), _safe_float(bits)
def end_parameters(self):
pass
class _BlastConsumer(AbstractConsumer,
_HeaderConsumer,
_DescriptionConsumer,
_AlignmentConsumer,
_HSPConsumer,
_DatabaseReportConsumer,
_ParametersConsumer
):
    # This Consumer inherits from many other consumer classes that handle
    # the actual dirty work. An alternate way to do it is to create objects
    # of those classes and then delegate the parsing tasks to them in a
    # decorator-type pattern. The disadvantage of that is that the method
    # names will need to be resolved in this class. However, using
# a decorator will retain more control in this class (which may or
# may not be a bad thing). In addition, having each sub-consumer as
# its own object prevents this object's dictionary from being cluttered
# with members and reduces the chance of member collisions.
def __init__(self):
self.data = None
def round(self, line):
# Make sure nobody's trying to pass me PSI-BLAST data!
raise ValueError("This consumer doesn't handle PSI-BLAST data")
def start_header(self):
self.data = Record.Blast()
_HeaderConsumer.start_header(self)
def end_header(self):
_HeaderConsumer.end_header(self)
self.data.__dict__.update(self._header.__dict__)
def end_descriptions(self):
self.data.descriptions = self._descriptions
def end_alignment(self):
_AlignmentConsumer.end_alignment(self)
if self._alignment.hsps:
self.data.alignments.append(self._alignment)
if self._multiple_alignment.alignment:
self.data.multiple_alignment = self._multiple_alignment
def end_hsp(self):
_HSPConsumer.end_hsp(self)
try:
self._alignment.hsps.append(self._hsp)
except AttributeError:
raise ValueError("Found an HSP before an alignment")
def end_database_report(self):
_DatabaseReportConsumer.end_database_report(self)
self.data.__dict__.update(self._dr.__dict__)
def end_parameters(self):
_ParametersConsumer.end_parameters(self)
self.data.__dict__.update(self._params.__dict__)
class _PSIBlastConsumer(AbstractConsumer,
_HeaderConsumer,
_DescriptionConsumer,
_AlignmentConsumer,
_HSPConsumer,
_DatabaseReportConsumer,
_ParametersConsumer
):
def __init__(self):
self.data = None
def start_header(self):
self.data = Record.PSIBlast()
_HeaderConsumer.start_header(self)
def end_header(self):
_HeaderConsumer.end_header(self)
self.data.__dict__.update(self._header.__dict__)
def start_descriptions(self):
self._round = Record.Round()
self.data.rounds.append(self._round)
_DescriptionConsumer.start_descriptions(self)
def end_descriptions(self):
_DescriptionConsumer.end_descriptions(self)
self._round.number = self._roundnum
if self._descriptions:
self._round.new_seqs.extend(self._descriptions)
self._round.reused_seqs.extend(self._model_sequences)
self._round.new_seqs.extend(self._nonmodel_sequences)
if self._converged:
self.data.converged = 1
def end_alignment(self):
_AlignmentConsumer.end_alignment(self)
if self._alignment.hsps:
self._round.alignments.append(self._alignment)
if self._multiple_alignment:
self._round.multiple_alignment = self._multiple_alignment
def end_hsp(self):
_HSPConsumer.end_hsp(self)
try:
self._alignment.hsps.append(self._hsp)
except AttributeError:
raise ValueError("Found an HSP before an alignment")
def end_database_report(self):
_DatabaseReportConsumer.end_database_report(self)
self.data.__dict__.update(self._dr.__dict__)
def end_parameters(self):
_ParametersConsumer.end_parameters(self)
self.data.__dict__.update(self._params.__dict__)
class Iterator(object):
"""Iterates over a file of multiple BLAST results.
Methods:
next Return the next record from the stream, or None.
"""
def __init__(self, handle, parser=None):
"""__init__(self, handle, parser=None)
Create a new iterator. handle is a file-like object. parser
is an optional Parser object to change the results into another form.
If set to None, then the raw contents of the file will be returned.
"""
try:
handle.readline
except AttributeError:
raise ValueError(
"I expected a file handle or file-like object, got %s"
% type(handle))
self._uhandle = File.UndoHandle(handle)
self._parser = parser
self._header = []
def __next__(self):
"""next(self) -> object
Return the next Blast record from the file. If no more records,
return None.
"""
lines = []
query = False
while True:
line = self._uhandle.readline()
if not line:
break
# If I've reached the next one, then put the line back and stop.
if lines and (line.startswith('BLAST')
or line.startswith('BLAST', 1)
or line.startswith('<?xml ')):
self._uhandle.saveline(line)
break
# New style files omit the BLAST line to mark a new query:
if line.startswith("Query="):
if not query:
if not self._header:
self._header = lines[:]
query = True
else:
# Start of another record
self._uhandle.saveline(line)
break
lines.append(line)
if query and "BLAST" not in lines[0]:
# Cheat and re-insert the header
# print "-"*50
# print "".join(self._header)
# print "-"*50
# print "".join(lines)
# print "-"*50
lines = self._header + lines
if not lines:
return None
data = ''.join(lines)
if self._parser is not None:
return self._parser.parse(StringIO(data))
return data
if sys.version_info[0] < 3:
def next(self):
"""Python 2 style alias for Python 3 style __next__ method."""
return self.__next__()
def __iter__(self):
return iter(self.__next__, None)
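# A minimal usage sketch for Iterator (the filename "multi_report.txt" is
# hypothetical and should hold several concatenated BLAST reports):
#
#     with open("multi_report.txt") as handle:
#         for record in Iterator(handle, parser=BlastParser()):
#             print(record.query)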
def _re_search(regex, line, error_msg):
m = re.search(regex, line)
if not m:
raise ValueError(error_msg)
return m.groups()
def _get_cols(line, cols_to_get, ncols=None, expected=None):
if expected is None:
expected = {}
cols = line.split()
# Check to make sure number of columns is correct
if ncols is not None and len(cols) != ncols:
raise ValueError("I expected %d columns (got %d) in line\n%s"
% (ncols, len(cols), line))
# Check to make sure columns contain the correct data
for k in expected:
if cols[k] != expected[k]:
raise ValueError("I expected '%s' in column %d in line\n%s"
% (expected[k], k, line))
# Construct the answer tuple
results = []
for c in cols_to_get:
results.append(cols[c])
return tuple(results)
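# A quick sketch of how _get_cols is used on a database report line:
#
#     _get_cols("Number of sequences in database: 1323",
#               (-1,), ncols=6, expected={2: "sequences", 4: "database:"})
#
# returns ('1323',) after checking the column count and the anchor words.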
def _safe_int(str):
try:
return int(str)
except ValueError:
# Something went wrong. Try to clean up the string.
# Remove all commas from the string
str = str.replace(',', '')
# try again after removing commas.
# Note int() will return a long rather than overflow
try:
return int(str)
except ValueError:
pass
# Call float to handle things like "54.3", note could lose precision, e.g.
# >>> int("5399354557888517312")
# 5399354557888517312
# >>> int(float("5399354557888517312"))
# 5399354557888517120
return int(float(str))
def _safe_float(str):
# Thomas Rosleff Soerensen (rosleff@mpiz-koeln.mpg.de) noted that
# float('e-172') does not produce an error on his platform. Thus,
# we need to check the string for this condition.
    # Sometimes BLAST leaves off the '1' in front of an exponent.
if str and str[0] in ['E', 'e']:
str = '1' + str
try:
return float(str)
except ValueError:
# Remove all commas from the string
str = str.replace(',', '')
# try again.
return float(str)
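# Quick examples of the cleanup these two helpers perform:
#     _safe_int("223,339")   -> 223339   (commas stripped before int())
#     _safe_float("e-172")   -> 1e-172   (missing leading '1' restored)
#     _safe_float("7e-77")   -> 7e-77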
class _BlastErrorConsumer(_BlastConsumer):
def __init__(self):
_BlastConsumer.__init__(self)
def noevent(self, line):
if 'Query must be at least wordsize' in line:
raise ShortQueryBlastError("Query must be at least wordsize")
# Now pass the line back up to the superclass.
method = getattr(_BlastConsumer, 'noevent',
_BlastConsumer.__getattr__(self, 'noevent'))
method(line)
class BlastErrorParser(AbstractParser):
"""Attempt to catch and diagnose BLAST errors while parsing.
This utilizes the BlastParser module but adds an additional layer
of complexity on top of it by attempting to diagnose ValueErrors
that may actually indicate problems during BLAST parsing.
Current BLAST problems this detects are:
o LowQualityBlastError - When BLASTing really low quality sequences
    (i.e. some GenBank entries which are just short stretches of a single
nucleotide), BLAST will report an error with the sequence and be
unable to search with this. This will lead to a badly formatted
BLAST report that the parsers choke on. The parser will convert the
ValueError to a LowQualityBlastError and attempt to provide useful
information.
"""
def __init__(self, bad_report_handle=None):
"""Initialize a parser that tries to catch BlastErrors.
Arguments:
o bad_report_handle - An optional argument specifying a handle
where bad reports should be sent. This would allow you to save
all of the bad reports to a file, for instance. If no handle
is specified, the bad reports will not be saved.
"""
self._bad_report_handle = bad_report_handle
# self._b_parser = BlastParser()
self._scanner = _Scanner()
self._consumer = _BlastErrorConsumer()
def parse(self, handle):
"""Parse a handle, attempting to diagnose errors.
"""
results = handle.read()
try:
self._scanner.feed(StringIO(results), self._consumer)
except ValueError:
# if we have a bad_report_file, save the info to it first
if self._bad_report_handle:
# send the info to the error handle
self._bad_report_handle.write(results)
# now we want to try and diagnose the error
self._diagnose_error(
StringIO(results), self._consumer.data)
# if we got here we can't figure out the problem
# so we should pass along the syntax error we got
raise
return self._consumer.data
def _diagnose_error(self, handle, data_record):
"""Attempt to diagnose an error in the passed handle.
Arguments:
o handle - The handle potentially containing the error
o data_record - The data record partially created by the consumer.
"""
line = handle.readline()
while line:
# 'Searchingdone' instead of 'Searching......done' seems
# to indicate a failure to perform the BLAST due to
# low quality sequence
if line.startswith('Searchingdone'):
raise LowQualityBlastError("Blast failure occurred on query: ",
data_record.query)
line = handle.readline()
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/Blast/NCBIStandalone.py
|
Python
|
apache-2.0
| 74,333 | 0.000592 |
#! /usr/bin/python
# Python ctypes bindings for VLC
#
# Copyright (C) 2009-2012 the VideoLAN team
# $Id: $
#
# Authors: Olivier Aubert <olivier.aubert at liris.cnrs.fr>
# Jean Brouwers <MrJean1 at gmail.com>
# Geoff Salmon <geoff.salmon at gmail.com>
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
"""This module provides bindings for the LibVLC public API, see
U{http://wiki.videolan.org/LibVLC}.
You can find the documentation and a README file with some examples
at U{http://www.advene.org/download/python-ctypes/}.
Basically, the most important class is L{Instance}, which is used
to create a libvlc instance. From this instance, you then create
L{MediaPlayer} and L{MediaListPlayer} instances.
Alternatively, you may create instances of the L{MediaPlayer} and
L{MediaListPlayer} class directly and an instance of L{Instance}
will be implicitly created. The latter can be obtained using the
C{get_instance} method of L{MediaPlayer} and L{MediaListPlayer}.
"""
import ctypes
from ctypes.util import find_library
import os
import sys
# Used by EventManager in override.py
from inspect import getargspec
__version__ = "N/A"
build_date = "Tue Jul 2 10:35:53 2013"
if sys.version_info[0] > 2:
str = str
unicode = str
bytes = bytes
basestring = (str, bytes)
PYTHON3 = True
def str_to_bytes(s):
"""Translate string or bytes to bytes.
"""
if isinstance(s, str):
return bytes(s, sys.getfilesystemencoding())
else:
return s
def bytes_to_str(b):
"""Translate bytes to string.
"""
if isinstance(b, bytes):
return b.decode(sys.getfilesystemencoding())
else:
return b
else:
str = str
unicode = unicode
bytes = str
basestring = basestring
PYTHON3 = False
def str_to_bytes(s):
"""Translate string or bytes to bytes.
"""
if isinstance(s, unicode):
return s.encode(sys.getfilesystemencoding())
else:
return s
def bytes_to_str(b):
"""Translate bytes to unicode string.
"""
if isinstance(b, str):
return unicode(b, sys.getfilesystemencoding())
else:
return b
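# For example, str_to_bytes(u'caf\xe9') encodes the text with the
# filesystem encoding before it is handed to libvlc, and bytes_to_str()
# reverses the conversion, so strings round-trip on both Python 2 and 3.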
# Internal guard to prevent internal classes from being directly
# instantiated.
_internal_guard = object()
def find_lib():
dll = None
plugin_path = None
if sys.platform.startswith('linux'):
p = find_library('vlc')
try:
dll = ctypes.CDLL(p)
except OSError: # may fail
dll = ctypes.CDLL('libvlc.so.5')
elif sys.platform.startswith('win'):
p = find_library('libvlc.dll')
if p is None:
try: # some registry settings
import _winreg as w # leaner than win32api, win32con
for r in w.HKEY_LOCAL_MACHINE, w.HKEY_CURRENT_USER:
try:
r = w.OpenKey(r, 'Software\\VideoLAN\\VLC')
plugin_path, _ = w.QueryValueEx(r, 'InstallDir')
w.CloseKey(r)
break
except w.error:
pass
except ImportError: # no PyWin32
pass
if plugin_path is None:
# try some standard locations.
for p in ('Program Files\\VideoLan\\', 'VideoLan\\',
'Program Files\\', ''):
p = 'C:\\' + p + 'VLC\\libvlc.dll'
if os.path.exists(p):
plugin_path = os.path.dirname(p)
break
if plugin_path is not None: # try loading
p = os.getcwd()
os.chdir(plugin_path)
# if chdir failed, this will raise an exception
dll = ctypes.CDLL('libvlc.dll')
# restore cwd after dll has been loaded
os.chdir(p)
else: # may fail
dll = ctypes.CDLL('libvlc.dll')
else:
plugin_path = os.path.dirname(p)
dll = ctypes.CDLL(p)
elif sys.platform.startswith('darwin'):
# FIXME: should find a means to configure path
d = '/Applications/VLC.app/Contents/MacOS/'
p = d + 'lib/libvlc.dylib'
if os.path.exists(p):
dll = ctypes.CDLL(p)
d += 'modules'
if os.path.isdir(d):
plugin_path = d
else: # hope, some PATH is set...
dll = ctypes.CDLL('libvlc.dylib')
else:
raise NotImplementedError('%s: %s not supported' % (sys.argv[0], sys.platform))
return (dll, plugin_path)
# plugin_path used on win32 and MacOS in override.py
dll, plugin_path = find_lib()
class VLCException(Exception):
"""Exception raised by libvlc methods.
"""
pass
try:
_Ints = (int, long)
except NameError: # no long in Python 3+
_Ints = int
_Seqs = (list, tuple)
# Default instance. It is used to instantiate classes directly in the
# OO-wrapper.
_default_instance = None
def get_default_instance():
"""Return the default VLC.Instance.
"""
global _default_instance
if _default_instance is None:
_default_instance = Instance()
return _default_instance
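# A minimal usage sketch, assuming this module is importable as vlc and
# "movie.mp4" is a placeholder media path:
#
#     import vlc
#     player = vlc.MediaPlayer("movie.mp4")
#     player.play()
#
# Creating the MediaPlayer directly implicitly creates the default
# Instance described above.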
_Cfunctions = {} # from LibVLC __version__
_Globals = globals() # sys.modules[__name__].__dict__
def _Cfunction(name, flags, errcheck, *types):
"""(INTERNAL) New ctypes function binding.
"""
if hasattr(dll, name) and name in _Globals:
p = ctypes.CFUNCTYPE(*types)
f = p((name, dll), flags)
if errcheck is not None:
f.errcheck = errcheck
# replace the Python function
# in this module, but only when
# running as python -O or -OO
if __debug__:
_Cfunctions[name] = f
else:
_Globals[name] = f
return f
raise NameError('no function %r' % (name,))
def _Cobject(cls, ctype):
"""(INTERNAL) New instance from ctypes.
"""
o = object.__new__(cls)
o._as_parameter_ = ctype
return o
def _Constructor(cls, ptr=_internal_guard):
"""(INTERNAL) New wrapper from ctypes.
"""
if ptr == _internal_guard:
raise VLCException("(INTERNAL) ctypes class. You should get references for this class through methods of the LibVLC API.")
if ptr is None or ptr == 0:
return None
return _Cobject(cls, ctypes.c_void_p(ptr))
class _Cstruct(ctypes.Structure):
"""(INTERNAL) Base class for ctypes structures.
"""
    _fields_ = [] # list of 2-tuples ('name', ctypes.<type>)
def __str__(self):
l = [' %s:\t%s' % (n, getattr(self, n)) for n, _ in self._fields_]
return '\n'.join([self.__class__.__name__] + l)
def __repr__(self):
return '%s.%s' % (self.__class__.__module__, self)
class _Ctype(object):
"""(INTERNAL) Base class for ctypes.
"""
@staticmethod
def from_param(this): # not self
"""(INTERNAL) ctypes parameter conversion method.
"""
if this is None:
return None
return this._as_parameter_
class ListPOINTER(object):
"""Just like a POINTER but accept a list of ctype as an argument.
"""
def __init__(self, etype):
self.etype = etype
def from_param(self, param):
if isinstance(param, _Seqs):
return (self.etype * len(param))(*param)
# errcheck functions for some native functions.
def string_result(result, func, arguments):
"""Errcheck function. Returns a string and frees the original pointer.
It assumes the result is a char *.
"""
if result:
# make a python string copy
s = bytes_to_str(ctypes.string_at(result))
# free original string ptr
libvlc_free(result)
return s
return None
def class_result(classname):
"""Errcheck function. Returns a function that creates the specified class.
"""
def wrap_errcheck(result, func, arguments):
if result is None:
return None
return classname(result)
return wrap_errcheck
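# For example, a binding declared with errcheck=class_result(Instance)
# returns an Instance wrapper (or None for a NULL result) instead of a
# raw pointer. (Instance is one of the wrapper classes generated later
# in this module.)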
# Wrapper for the opaque struct libvlc_log_t
class Log(ctypes.Structure):
pass
Log_ptr = ctypes.POINTER(Log)
# FILE* ctypes wrapper, copied from
# http://svn.python.org/projects/ctypes/trunk/ctypeslib/ctypeslib/contrib/pythonhdr.py
class FILE(ctypes.Structure):
pass
FILE_ptr = ctypes.POINTER(FILE)
if PYTHON3:
PyFile_FromFd = ctypes.pythonapi.PyFile_FromFd
PyFile_FromFd.restype = ctypes.py_object
PyFile_FromFd.argtypes = [ctypes.c_int,
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_int,
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_int ]
PyFile_AsFd = ctypes.pythonapi.PyObject_AsFileDescriptor
PyFile_AsFd.restype = ctypes.c_int
PyFile_AsFd.argtypes = [ctypes.py_object]
else:
PyFile_FromFile = ctypes.pythonapi.PyFile_FromFile
PyFile_FromFile.restype = ctypes.py_object
PyFile_FromFile.argtypes = [FILE_ptr,
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.CFUNCTYPE(ctypes.c_int, FILE_ptr)]
PyFile_AsFile = ctypes.pythonapi.PyFile_AsFile
PyFile_AsFile.restype = FILE_ptr
PyFile_AsFile.argtypes = [ctypes.py_object]
# Generated enum types #
class _Enum(ctypes.c_uint):
'''(INTERNAL) Base class
'''
_enum_names_ = {}
def __str__(self):
n = self._enum_names_.get(self.value, '') or ('FIXME_(%r)' % (self.value,))
return '.'.join((self.__class__.__name__, n))
def __hash__(self):
return self.value
def __repr__(self):
return '.'.join((self.__class__.__module__, self.__str__()))
def __eq__(self, other):
return ( (isinstance(other, _Enum) and self.value == other.value)
or (isinstance(other, _Ints) and self.value == other) )
def __ne__(self, other):
return not self.__eq__(other)
class LogLevel(_Enum):
'''Logging messages level.
\note future libvlc versions may define new levels.
'''
_enum_names_ = {
0: 'DEBUG',
2: 'NOTICE',
3: 'WARNING',
4: 'ERROR',
}
LogLevel.DEBUG = LogLevel(0)
LogLevel.ERROR = LogLevel(4)
LogLevel.NOTICE = LogLevel(2)
LogLevel.WARNING = LogLevel(3)
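# Because _Enum.__eq__ also accepts plain integers (see above), both of
# these comparisons hold:
#     LogLevel.WARNING == LogLevel(3)   -> True
#     LogLevel.WARNING == 3             -> True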
class EventType(_Enum):
'''Event types.
'''
_enum_names_ = {
0: 'MediaMetaChanged',
1: 'MediaSubItemAdded',
2: 'MediaDurationChanged',
3: 'MediaParsedChanged',
4: 'MediaFreed',
5: 'MediaStateChanged',
0x100: 'MediaPlayerMediaChanged',
257: 'MediaPlayerNothingSpecial',
258: 'MediaPlayerOpening',
259: 'MediaPlayerBuffering',
260: 'MediaPlayerPlaying',
261: 'MediaPlayerPaused',
262: 'MediaPlayerStopped',
263: 'MediaPlayerForward',
264: 'MediaPlayerBackward',
265: 'MediaPlayerEndReached',
266: 'MediaPlayerEncounteredError',
267: 'MediaPlayerTimeChanged',
268: 'MediaPlayerPositionChanged',
269: 'MediaPlayerSeekableChanged',
270: 'MediaPlayerPausableChanged',
271: 'MediaPlayerTitleChanged',
272: 'MediaPlayerSnapshotTaken',
273: 'MediaPlayerLengthChanged',
274: 'MediaPlayerVout',
0x200: 'MediaListItemAdded',
513: 'MediaListWillAddItem',
514: 'MediaListItemDeleted',
515: 'MediaListWillDeleteItem',
0x300: 'MediaListViewItemAdded',
769: 'MediaListViewWillAddItem',
770: 'MediaListViewItemDeleted',
771: 'MediaListViewWillDeleteItem',
0x400: 'MediaListPlayerPlayed',
1025: 'MediaListPlayerNextItemSet',
1026: 'MediaListPlayerStopped',
0x500: 'MediaDiscovererStarted',
1281: 'MediaDiscovererEnded',
0x600: 'VlmMediaAdded',
1537: 'VlmMediaRemoved',
1538: 'VlmMediaChanged',
1539: 'VlmMediaInstanceStarted',
1540: 'VlmMediaInstanceStopped',
1541: 'VlmMediaInstanceStatusInit',
1542: 'VlmMediaInstanceStatusOpening',
1543: 'VlmMediaInstanceStatusPlaying',
1544: 'VlmMediaInstanceStatusPause',
1545: 'VlmMediaInstanceStatusEnd',
1546: 'VlmMediaInstanceStatusError',
}
EventType.MediaDiscovererEnded = EventType(1281)
EventType.MediaDiscovererStarted = EventType(0x500)
EventType.MediaDurationChanged = EventType(2)
EventType.MediaFreed = EventType(4)
EventType.MediaListItemAdded = EventType(0x200)
EventType.MediaListItemDeleted = EventType(514)
EventType.MediaListPlayerNextItemSet = EventType(1025)
EventType.MediaListPlayerPlayed = EventType(0x400)
EventType.MediaListPlayerStopped = EventType(1026)
EventType.MediaListViewItemAdded = EventType(0x300)
EventType.MediaListViewItemDeleted = EventType(770)
EventType.MediaListViewWillAddItem = EventType(769)
EventType.MediaListViewWillDeleteItem = EventType(771)
EventType.MediaListWillAddItem = EventType(513)
EventType.MediaListWillDeleteItem = EventType(515)
EventType.MediaMetaChanged = EventType(0)
EventType.MediaParsedChanged = EventType(3)
EventType.MediaPlayerBackward = EventType(264)
EventType.MediaPlayerBuffering = EventType(259)
EventType.MediaPlayerEncounteredError = EventType(266)
EventType.MediaPlayerEndReached = EventType(265)
EventType.MediaPlayerForward = EventType(263)
EventType.MediaPlayerLengthChanged = EventType(273)
EventType.MediaPlayerMediaChanged = EventType(0x100)
EventType.MediaPlayerNothingSpecial = EventType(257)
EventType.MediaPlayerOpening = EventType(258)
EventType.MediaPlayerPausableChanged = EventType(270)
EventType.MediaPlayerPaused = EventType(261)
EventType.MediaPlayerPlaying = EventType(260)
EventType.MediaPlayerPositionChanged = EventType(268)
EventType.MediaPlayerSeekableChanged = EventType(269)
EventType.MediaPlayerSnapshotTaken = EventType(272)
EventType.MediaPlayerStopped = EventType(262)
EventType.MediaPlayerTimeChanged = EventType(267)
EventType.MediaPlayerTitleChanged = EventType(271)
EventType.MediaPlayerVout = EventType(274)
EventType.MediaStateChanged = EventType(5)
EventType.MediaSubItemAdded = EventType(1)
EventType.VlmMediaAdded = EventType(0x600)
EventType.VlmMediaChanged = EventType(1538)
EventType.VlmMediaInstanceStarted = EventType(1539)
EventType.VlmMediaInstanceStatusEnd = EventType(1545)
EventType.VlmMediaInstanceStatusError = EventType(1546)
EventType.VlmMediaInstanceStatusInit = EventType(1541)
EventType.VlmMediaInstanceStatusOpening = EventType(1542)
EventType.VlmMediaInstanceStatusPause = EventType(1544)
EventType.VlmMediaInstanceStatusPlaying = EventType(1543)
EventType.VlmMediaInstanceStopped = EventType(1540)
EventType.VlmMediaRemoved = EventType(1537)
class Meta(_Enum):
'''Meta data types.
'''
_enum_names_ = {
0: 'Title',
1: 'Artist',
2: 'Genre',
3: 'Copyright',
4: 'Album',
5: 'TrackNumber',
6: 'Description',
7: 'Rating',
8: 'Date',
9: 'Setting',
10: 'URL',
11: 'Language',
12: 'NowPlaying',
13: 'Publisher',
14: 'EncodedBy',
15: 'ArtworkURL',
16: 'TrackID',
}
Meta.Album = Meta(4)
Meta.Artist = Meta(1)
Meta.ArtworkURL = Meta(15)
Meta.Copyright = Meta(3)
Meta.Date = Meta(8)
Meta.Description = Meta(6)
Meta.EncodedBy = Meta(14)
Meta.Genre = Meta(2)
Meta.Language = Meta(11)
Meta.NowPlaying = Meta(12)
Meta.Publisher = Meta(13)
Meta.Rating = Meta(7)
Meta.Setting = Meta(9)
Meta.Title = Meta(0)
Meta.TrackID = Meta(16)
Meta.TrackNumber = Meta(5)
Meta.URL = Meta(10)
class State(_Enum):
    '''Note the order of the libvlc_state_t enum must match exactly the
    order of the mediacontrol_playerstatus and input_state_e enums,
    and videolan.libvlc.state (at bindings/cil/src/media.cs).
expected states by web plugins are:
idle/close=0, opening=1, buffering=2, playing=3, paused=4,
stopping=5, ended=6, error=7.
'''
_enum_names_ = {
0: 'NothingSpecial',
1: 'Opening',
2: 'Buffering',
3: 'Playing',
4: 'Paused',
5: 'Stopped',
6: 'Ended',
7: 'Error',
}
State.Buffering = State(2)
State.Ended = State(6)
State.Error = State(7)
State.NothingSpecial = State(0)
State.Opening = State(1)
State.Paused = State(4)
State.Playing = State(3)
State.Stopped = State(5)
class TrackType(_Enum):
'''N/A
'''
_enum_names_ = {
-1: 'unknown',
0: 'audio',
1: 'video',
2: 'text',
}
TrackType.audio = TrackType(0)
TrackType.text = TrackType(2)
TrackType.unknown = TrackType(-1)
TrackType.video = TrackType(1)
class PlaybackMode(_Enum):
'''Defines playback modes for playlist.
'''
_enum_names_ = {
0: 'default',
1: 'loop',
2: 'repeat',
}
PlaybackMode.default = PlaybackMode(0)
PlaybackMode.loop = PlaybackMode(1)
PlaybackMode.repeat = PlaybackMode(2)
class VideoMarqueeOption(_Enum):
'''Marq options definition.
'''
_enum_names_ = {
0: 'Enable',
1: 'Text',
2: 'Color',
3: 'Opacity',
4: 'Position',
5: 'Refresh',
6: 'Size',
7: 'Timeout',
8: 'marquee_X',
9: 'marquee_Y',
}
VideoMarqueeOption.Color = VideoMarqueeOption(2)
VideoMarqueeOption.Enable = VideoMarqueeOption(0)
VideoMarqueeOption.Opacity = VideoMarqueeOption(3)
VideoMarqueeOption.Position = VideoMarqueeOption(4)
VideoMarqueeOption.Refresh = VideoMarqueeOption(5)
VideoMarqueeOption.Size = VideoMarqueeOption(6)
VideoMarqueeOption.Text = VideoMarqueeOption(1)
VideoMarqueeOption.Timeout = VideoMarqueeOption(7)
VideoMarqueeOption.marquee_X = VideoMarqueeOption(8)
VideoMarqueeOption.marquee_Y = VideoMarqueeOption(9)
class NavigateMode(_Enum):
'''Navigation mode.
'''
_enum_names_ = {
0: 'activate',
1: 'up',
2: 'down',
3: 'left',
4: 'right',
}
NavigateMode.activate = NavigateMode(0)
NavigateMode.down = NavigateMode(2)
NavigateMode.left = NavigateMode(3)
NavigateMode.right = NavigateMode(4)
NavigateMode.up = NavigateMode(1)
class VideoLogoOption(_Enum):
'''Option values for libvlc_video_{get,set}_logo_{int,string}.
'''
_enum_names_ = {
0: 'enable',
1: 'file',
2: 'logo_x',
3: 'logo_y',
4: 'delay',
5: 'repeat',
6: 'opacity',
7: 'position',
}
VideoLogoOption.delay = VideoLogoOption(4)
VideoLogoOption.enable = VideoLogoOption(0)
VideoLogoOption.file = VideoLogoOption(1)
VideoLogoOption.logo_x = VideoLogoOption(2)
VideoLogoOption.logo_y = VideoLogoOption(3)
VideoLogoOption.opacity = VideoLogoOption(6)
VideoLogoOption.position = VideoLogoOption(7)
VideoLogoOption.repeat = VideoLogoOption(5)
class VideoAdjustOption(_Enum):
'''Option values for libvlc_video_{get,set}_adjust_{int,float,bool}.
'''
_enum_names_ = {
0: 'Enable',
1: 'Contrast',
2: 'Brightness',
3: 'Hue',
4: 'Saturation',
5: 'Gamma',
}
VideoAdjustOption.Brightness = VideoAdjustOption(2)
VideoAdjustOption.Contrast = VideoAdjustOption(1)
VideoAdjustOption.Enable = VideoAdjustOption(0)
VideoAdjustOption.Gamma = VideoAdjustOption(5)
VideoAdjustOption.Hue = VideoAdjustOption(3)
VideoAdjustOption.Saturation = VideoAdjustOption(4)
class AudioOutputDeviceTypes(_Enum):
'''Audio device types.
'''
_enum_names_ = {
-1: 'Error',
1: 'Mono',
2: 'Stereo',
4: '_2F2R',
5: '_3F2R',
6: '_5_1',
7: '_6_1',
8: '_7_1',
10: 'SPDIF',
}
AudioOutputDeviceTypes.Error = AudioOutputDeviceTypes(-1)
AudioOutputDeviceTypes.Mono = AudioOutputDeviceTypes(1)
AudioOutputDeviceTypes.SPDIF = AudioOutputDeviceTypes(10)
AudioOutputDeviceTypes.Stereo = AudioOutputDeviceTypes(2)
AudioOutputDeviceTypes._2F2R = AudioOutputDeviceTypes(4)
AudioOutputDeviceTypes._3F2R = AudioOutputDeviceTypes(5)
AudioOutputDeviceTypes._5_1 = AudioOutputDeviceTypes(6)
AudioOutputDeviceTypes._6_1 = AudioOutputDeviceTypes(7)
AudioOutputDeviceTypes._7_1 = AudioOutputDeviceTypes(8)
class AudioOutputChannel(_Enum):
'''Audio channels.
'''
_enum_names_ = {
-1: 'Error',
1: 'Stereo',
2: 'RStereo',
3: 'Left',
4: 'Right',
5: 'Dolbys',
}
AudioOutputChannel.Dolbys = AudioOutputChannel(5)
AudioOutputChannel.Error = AudioOutputChannel(-1)
AudioOutputChannel.Left = AudioOutputChannel(3)
AudioOutputChannel.RStereo = AudioOutputChannel(2)
AudioOutputChannel.Right = AudioOutputChannel(4)
AudioOutputChannel.Stereo = AudioOutputChannel(1)
class Callback(ctypes.c_void_p):
"""Callback function notification
\param p_event the event triggering the callback
"""
pass
class LogCb(ctypes.c_void_p):
"""Callback prototype for LibVLC log message handler.
\param data data pointer as given to L{libvlc_log_set}()
\param level message level (@ref enum libvlc_log_level)
    \param ctx message context (meta-information about the message)
\param fmt printf() format string (as defined by ISO C11)
\param args variable argument list for the format
\note Log message handlers <b>must</b> be thread-safe.
\warning The message context pointer, the format string parameters and the
variable arguments are only valid until the callback returns.
"""
pass
class VideoLockCb(ctypes.c_void_p):
"""Callback prototype to allocate and lock a picture buffer.
Whenever a new video frame needs to be decoded, the lock callback is
invoked. Depending on the video chroma, one or three pixel planes of
adequate dimensions must be returned via the second parameter. Those
planes must be aligned on 32-bytes boundaries.
\param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN]
\param planes start address of the pixel planes (LibVLC allocates the array
of void pointers, this callback must initialize the array) [OUT]
\return a private pointer for the display and unlock callbacks to identify
the picture buffers
"""
pass
class VideoUnlockCb(ctypes.c_void_p):
"""Callback prototype to unlock a picture buffer.
When the video frame decoding is complete, the unlock callback is invoked.
This callback might not be needed at all. It is only an indication that the
application can now read the pixel values if it needs to.
\warning A picture buffer is unlocked after the picture is decoded,
but before the picture is displayed.
\param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN]
\param picture private pointer returned from the @ref libvlc_video_lock_cb
callback [IN]
\param planes pixel planes as defined by the @ref libvlc_video_lock_cb
callback (this parameter is only for convenience) [IN]
"""
pass
class VideoDisplayCb(ctypes.c_void_p):
"""Callback prototype to display a picture.
When the video frame needs to be shown, as determined by the media playback
clock, the display callback is invoked.
\param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN]
\param picture private pointer returned from the @ref libvlc_video_lock_cb
callback [IN]
"""
pass
class VideoFormatCb(ctypes.c_void_p):
"""Callback prototype to configure picture buffers format.
This callback gets the format of the video as output by the video decoder
and the chain of video filters (if any). It can opt to change any parameter
as it needs. In that case, LibVLC will attempt to convert the video format
(rescaling and chroma conversion) but these operations can be CPU intensive.
\param opaque pointer to the private pointer passed to
L{libvlc_video_set_callbacks}() [IN/OUT]
\param chroma pointer to the 4 bytes video format identifier [IN/OUT]
\param width pointer to the pixel width [IN/OUT]
\param height pointer to the pixel height [IN/OUT]
\param pitches table of scanline pitches in bytes for each pixel plane
(the table is allocated by LibVLC) [OUT]
\param lines table of scanlines count for each plane [OUT]
\return the number of picture buffers allocated, 0 indicates failure
\note
    For each pixel plane, the scanline pitch must be bigger than or equal to
    the number of bytes per pixel multiplied by the pixel width.
    Similarly, the number of scanlines must be bigger than or equal to
    the pixel height.
    Furthermore, we recommend that pitches and lines be multiples of 32
    so as not to break assumptions that might be made by various optimizations
    in the video decoders, video filters and/or video converters.
"""
pass
class VideoCleanupCb(ctypes.c_void_p):
"""Callback prototype to configure picture buffers format.
\param opaque private pointer as passed to L{libvlc_video_set_callbacks}()
(and possibly modified by @ref libvlc_video_format_cb) [IN]
"""
pass
class AudioPlayCb(ctypes.c_void_p):
"""Callback prototype for audio playback.
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
\param samples pointer to the first audio sample to play back [IN]
\param count number of audio samples to play back
\param pts expected play time stamp (see libvlc_delay())
"""
pass
class AudioPauseCb(ctypes.c_void_p):
"""Callback prototype for audio pause.
\note The pause callback is never called if the audio is already paused.
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
\param pts time stamp of the pause request (should be elapsed already)
"""
pass
class AudioResumeCb(ctypes.c_void_p):
"""Callback prototype for audio resumption (i.e. restart from pause).
\note The resume callback is never called if the audio is not paused.
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
\param pts time stamp of the resumption request (should be elapsed already)
"""
pass
class AudioFlushCb(ctypes.c_void_p):
"""Callback prototype for audio buffer flush
(i.e. discard all pending buffers and stop playback as soon as possible).
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
"""
pass
class AudioDrainCb(ctypes.c_void_p):
"""Callback prototype for audio buffer drain
(i.e. wait for pending buffers to be played).
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
"""
pass
class AudioSetVolumeCb(ctypes.c_void_p):
"""Callback prototype for audio volume change.
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
\param volume software volume (1. = nominal, 0. = mute)
\param mute muted flag
"""
pass
class AudioSetupCb(ctypes.c_void_p):
"""Callback prototype to setup the audio playback.
This is called when the media player needs to create a new audio output.
\param opaque pointer to the data pointer passed to
L{libvlc_audio_set_callbacks}() [IN/OUT]
\param format 4 bytes sample format [IN/OUT]
\param rate sample rate [IN/OUT]
\param channels channels count [IN/OUT]
\return 0 on success, anything else to skip audio playback
"""
pass
class AudioCleanupCb(ctypes.c_void_p):
"""Callback prototype for audio playback cleanup.
This is called when the media player no longer needs an audio output.
\param opaque data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
"""
pass
class CallbackDecorators(object):
"Class holding various method decorators for callback functions."
Callback = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
Callback.__doc__ = '''Callback function notification
\param p_event the event triggering the callback
'''
LogCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, Log_ptr, ctypes.c_char_p, ctypes.c_void_p)
LogCb.__doc__ = '''Callback prototype for LibVLC log message handler.
\param data data pointer as given to L{libvlc_log_set}()
\param level message level (@ref enum libvlc_log_level)
    \param ctx message context (meta-information about the message)
\param fmt printf() format string (as defined by ISO C11)
\param args variable argument list for the format
\note Log message handlers <b>must</b> be thread-safe.
\warning The message context pointer, the format string parameters and the
variable arguments are only valid until the callback returns.
'''
VideoLockCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ListPOINTER(ctypes.c_void_p))
VideoLockCb.__doc__ = '''Callback prototype to allocate and lock a picture buffer.
Whenever a new video frame needs to be decoded, the lock callback is
invoked. Depending on the video chroma, one or three pixel planes of
adequate dimensions must be returned via the second parameter. Those
planes must be aligned on 32-bytes boundaries.
\param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN]
\param planes start address of the pixel planes (LibVLC allocates the array
of void pointers, this callback must initialize the array) [OUT]
\return a private pointer for the display and unlock callbacks to identify
the picture buffers
'''
VideoUnlockCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ListPOINTER(ctypes.c_void_p))
VideoUnlockCb.__doc__ = '''Callback prototype to unlock a picture buffer.
When the video frame decoding is complete, the unlock callback is invoked.
This callback might not be needed at all. It is only an indication that the
application can now read the pixel values if it needs to.
\warning A picture buffer is unlocked after the picture is decoded,
but before the picture is displayed.
\param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN]
\param picture private pointer returned from the @ref libvlc_video_lock_cb
callback [IN]
\param planes pixel planes as defined by the @ref libvlc_video_lock_cb
callback (this parameter is only for convenience) [IN]
'''
VideoDisplayCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
VideoDisplayCb.__doc__ = '''Callback prototype to display a picture.
When the video frame needs to be shown, as determined by the media playback
clock, the display callback is invoked.
\param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN]
\param picture private pointer returned from the @ref libvlc_video_lock_cb
callback [IN]
'''
VideoFormatCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_uint), ListPOINTER(ctypes.c_void_p), ctypes.c_char_p, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
VideoFormatCb.__doc__ = '''Callback prototype to configure picture buffers format.
This callback gets the format of the video as output by the video decoder
and the chain of video filters (if any). It can opt to change any parameter
as it needs. In that case, LibVLC will attempt to convert the video format
(rescaling and chroma conversion) but these operations can be CPU intensive.
\param opaque pointer to the private pointer passed to
L{libvlc_video_set_callbacks}() [IN/OUT]
\param chroma pointer to the 4 bytes video format identifier [IN/OUT]
\param width pointer to the pixel width [IN/OUT]
\param height pointer to the pixel height [IN/OUT]
\param pitches table of scanline pitches in bytes for each pixel plane
(the table is allocated by LibVLC) [OUT]
\param lines table of scanlines count for each plane [OUT]
\return the number of picture buffers allocated, 0 indicates failure
\note
    For each pixel plane, the scanline pitch must be bigger than or equal to
    the number of bytes per pixel multiplied by the pixel width.
    Similarly, the number of scanlines must be bigger than or equal to
    the pixel height.
    Furthermore, we recommend that pitches and lines be multiples of 32
    so as not to break assumptions that might be made by various optimizations
    in the video decoders, video filters and/or video converters.
'''
VideoCleanupCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)
VideoCleanupCb.__doc__ = '''Callback prototype to configure picture buffers format.
\param opaque private pointer as passed to L{libvlc_video_set_callbacks}()
(and possibly modified by @ref libvlc_video_format_cb) [IN]
'''
AudioPlayCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint, ctypes.c_int64)
AudioPlayCb.__doc__ = '''Callback prototype for audio playback.
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
\param samples pointer to the first audio sample to play back [IN]
\param count number of audio samples to play back
\param pts expected play time stamp (see libvlc_delay())
'''
AudioPauseCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64)
AudioPauseCb.__doc__ = '''Callback prototype for audio pause.
\note The pause callback is never called if the audio is already paused.
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
\param pts time stamp of the pause request (should be elapsed already)
'''
AudioResumeCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64)
AudioResumeCb.__doc__ = '''Callback prototype for audio resumption (i.e. restart from pause).
\note The resume callback is never called if the audio is not paused.
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
\param pts time stamp of the resumption request (should be elapsed already)
'''
AudioFlushCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64)
AudioFlushCb.__doc__ = '''Callback prototype for audio buffer flush
(i.e. discard all pending buffers and stop playback as soon as possible).
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
'''
AudioDrainCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)
AudioDrainCb.__doc__ = '''Callback prototype for audio buffer drain
(i.e. wait for pending buffers to be played).
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
'''
AudioSetVolumeCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_float, ctypes.c_bool)
AudioSetVolumeCb.__doc__ = '''Callback prototype for audio volume change.
\param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
\param volume software volume (1. = nominal, 0. = mute)
\param mute muted flag
'''
AudioSetupCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_int), ListPOINTER(ctypes.c_void_p), ctypes.c_char_p, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
AudioSetupCb.__doc__ = '''Callback prototype to setup the audio playback.
This is called when the media player needs to create a new audio output.
\param opaque pointer to the data pointer passed to
L{libvlc_audio_set_callbacks}() [IN/OUT]
\param format 4 bytes sample format [IN/OUT]
\param rate sample rate [IN/OUT]
\param channels channels count [IN/OUT]
\return 0 on success, anything else to skip audio playback
'''
AudioCleanupCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)
AudioCleanupCb.__doc__ = '''Callback prototype for audio playback cleanup.
This is called when the media player no longer needs an audio output.
\param opaque data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
'''
cb = CallbackDecorators
# End of generated enum types #
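# A minimal usage sketch for the callback decorators above, shown commented
# out. Hypothetical: it assumes a LibVLC 2.1+ build so that libvlc_log_set()
# is available, and argument order per the underlying C API
# libvlc_log_set(instance, callback, data). Wrap a plain Python function with
# the matching CFUNCTYPE decorator and keep a reference to the wrapped
# function so it is not garbage collected while LibVLC may still invoke it:
#
#   @cb.LogCb
#   def log_handler(data, level, ctx, fmt, args):
#       # fmt is a printf()-style format (bytes); expanding it against args
#       # needs C varargs handling, so simple handlers often log level only.
#       print('libvlc log, level %d' % level)
#
#   instance = Instance()
#   libvlc_log_set(instance, log_handler, None)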
# From libvlc_structures.h
class AudioOutput(_Cstruct):
def __str__(self):
return '%s(%s:%s)' % (self.__class__.__name__, self.name, self.description)
AudioOutput._fields_ = [ # recursive struct
('name', ctypes.c_char_p),
('description', ctypes.c_char_p),
('next', ctypes.POINTER(AudioOutput)),
]
class LogMessage(_Cstruct):
_fields_ = [
('size', ctypes.c_uint ),
('severity', ctypes.c_int ),
('type', ctypes.c_char_p),
('name', ctypes.c_char_p),
('header', ctypes.c_char_p),
('message', ctypes.c_char_p),
]
def __init__(self):
super(LogMessage, self).__init__()
self.size = ctypes.sizeof(self)
def __str__(self):
return '%s(%d:%s): %s' % (self.__class__.__name__, self.severity, self.type, self.message)
class MediaEvent(_Cstruct):
_fields_ = [
('media_name', ctypes.c_char_p),
('instance_name', ctypes.c_char_p),
]
class MediaStats(_Cstruct):
_fields_ = [
('read_bytes', ctypes.c_int ),
('input_bitrate', ctypes.c_float),
('demux_read_bytes', ctypes.c_int ),
('demux_bitrate', ctypes.c_float),
('demux_corrupted', ctypes.c_int ),
('demux_discontinuity', ctypes.c_int ),
('decoded_video', ctypes.c_int ),
('decoded_audio', ctypes.c_int ),
('displayed_pictures', ctypes.c_int ),
('lost_pictures', ctypes.c_int ),
('played_abuffers', ctypes.c_int ),
('lost_abuffers', ctypes.c_int ),
('sent_packets', ctypes.c_int ),
('sent_bytes', ctypes.c_int ),
('send_bitrate', ctypes.c_float),
]
class MediaTrackInfo(_Cstruct):
_fields_ = [
('codec', ctypes.c_uint32),
('id', ctypes.c_int ),
('type', TrackType ),
('profile', ctypes.c_int ),
('level', ctypes.c_int ),
('channels_or_height', ctypes.c_uint ),
('rate_or_width', ctypes.c_uint ),
]
class AudioTrack(_Cstruct):
_fields_ = [
('channels', ctypes.c_uint),
('rate', ctypes.c_uint),
]
class VideoTrack(_Cstruct):
_fields_ = [
('height', ctypes.c_uint),
('width', ctypes.c_uint),
('sar_num', ctypes.c_uint),
('sar_den', ctypes.c_uint),
('frame_rate_num', ctypes.c_uint),
('frame_rate_den', ctypes.c_uint),
]
class SubtitleTrack(_Cstruct):
_fields_ = [
('encoding', ctypes.c_char_p),
]
class MediaTrackTracks(ctypes.Union):
_fields_ = [
('audio', ctypes.POINTER(AudioTrack)),
('video', ctypes.POINTER(VideoTrack)),
('subtitle', ctypes.POINTER(SubtitleTrack)),
]
class MediaTrack(_Cstruct):
_anonymous_ = ("u",)
_fields_ = [
('codec', ctypes.c_uint32),
('original_fourcc', ctypes.c_uint32),
('id', ctypes.c_int ),
('type', TrackType ),
('profile', ctypes.c_int ),
('level', ctypes.c_int ),
('u', MediaTrackTracks),
('bitrate', ctypes.c_uint),
('language', ctypes.c_char_p),
('description', ctypes.c_char_p),
]
class PlaylistItem(_Cstruct):
_fields_ = [
('id', ctypes.c_int ),
('uri', ctypes.c_char_p),
('name', ctypes.c_char_p),
]
def __str__(self):
return '%s #%d %s (uri %s)' % (self.__class__.__name__, self.id, self.name, self.uri)
class Position(object):
"""Enum-like, immutable window position constants.
See e.g. VideoMarqueeOption.Position.
"""
Center = 0
Left = 1
CenterLeft = 1
Right = 2
CenterRight = 2
Top = 4
TopCenter = 4
TopLeft = 5
TopRight = 6
Bottom = 8
BottomCenter = 8
BottomLeft = 9
BottomRight = 10
def __init__(self, *unused):
raise TypeError('constants only')
def __setattr__(self, *unused): #PYCHOK expected
raise TypeError('immutable constants')
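# Usage sketch for the Position constants (hypothetical player variable,
# assuming the video_set_marquee_int() wrapper generated later in this
# module): pass a Position value wherever LibVLC expects a position, e.g.
# together with VideoMarqueeOption.Position:
#
#   player.video_set_marquee_int(VideoMarqueeOption.Enable, 1)
#   player.video_set_marquee_int(VideoMarqueeOption.Position, Position.TopRight)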
class Rectangle(_Cstruct):
_fields_ = [
('top', ctypes.c_int),
('left', ctypes.c_int),
('bottom', ctypes.c_int),
('right', ctypes.c_int),
]
class TrackDescription(_Cstruct):
def __str__(self):
return '%s(%d:%s)' % (self.__class__.__name__, self.id, self.name)
TrackDescription._fields_ = [ # recursive struct
('id', ctypes.c_int ),
('name', ctypes.c_char_p),
('next', ctypes.POINTER(TrackDescription)),
]
def track_description_list(head):
"""Convert a TrackDescription linked list to a Python list (and release the former).
"""
r = []
if head:
item = head
while item:
item = item.contents
r.append((item.id, item.name))
item = item.next
try:
libvlc_track_description_release(head)
except NameError:
libvlc_track_description_list_release(head)
return r
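# Sketch: this helper backs the MediaPlayer *_description() methods further
# below, which therefore yield plain (id, name) tuples (hypothetical player
# variable):
#
#   for track_id, name in player.audio_get_track_description():
#       print(track_id, name)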
class EventUnion(ctypes.Union):
_fields_ = [
('meta_type', ctypes.c_uint ),
('new_child', ctypes.c_uint ),
('new_duration', ctypes.c_longlong),
('new_status', ctypes.c_int ),
('media', ctypes.c_void_p ),
('new_state', ctypes.c_uint ),
# Media instance
('new_position', ctypes.c_float ),
('new_time', ctypes.c_longlong),
('new_title', ctypes.c_int ),
('new_seekable', ctypes.c_longlong),
('new_pausable', ctypes.c_longlong),
# FIXME: Skipped MediaList and MediaListView...
('filename', ctypes.c_char_p ),
('new_length', ctypes.c_longlong),
('media_event', MediaEvent ),
]
class Event(_Cstruct):
_fields_ = [
('type', EventType ),
('object', ctypes.c_void_p),
('u', EventUnion ),
]
class ModuleDescription(_Cstruct):
def __str__(self):
return '%s %s (%s)' % (self.__class__.__name__, self.shortname, self.name)
ModuleDescription._fields_ = [ # recursive struct
('name', ctypes.c_char_p),
('shortname', ctypes.c_char_p),
('longname', ctypes.c_char_p),
('help', ctypes.c_char_p),
('next', ctypes.POINTER(ModuleDescription)),
]
def module_description_list(head):
"""Convert a ModuleDescription linked list to a Python list (and release the former).
"""
r = []
if head:
item = head
while item:
item = item.contents
r.append((item.name, item.shortname, item.longname, item.help))
item = item.next
libvlc_module_description_list_release(head)
return r
class AudioOutputDevice(_Cstruct):
def __str__(self):
return '%s(%d:%s)' % (self.__class__.__name__, self.id, self.name)
AudioOutputDevice._fields_ = [ # recursive struct
('next', ctypes.POINTER(AudioOutputDevice)),
('device', ctypes.c_char_p ),
('description', ctypes.c_char_p),
]
# End of header.py #
class EventManager(_Ctype):
'''Create an event manager with callback handler.
This class interposes the registration and handling of
event notifications in order to (a) remove the need for
    decorating each callback function with the decorator
    '@callbackmethod', (b) allow any number of positional
    and/or keyword arguments to the callback (in addition
    to the Event instance) and (c) preserve the Python
objects such that the callback and argument objects
remain alive (i.e. are not garbage collected) until
B{after} the notification has been unregistered.
@note: Only a single notification can be registered
for each event type in an EventManager instance.
'''
_callback_handler = None
_callbacks = {}
def __new__(cls, ptr=_internal_guard):
if ptr == _internal_guard:
raise VLCException("(INTERNAL) ctypes class.\nYou should get a reference to EventManager through the MediaPlayer.event_manager() method.")
return _Constructor(cls, ptr)
def event_attach(self, eventtype, callback, *args, **kwds):
"""Register an event notification.
@param eventtype: the desired event type to be notified about.
@param callback: the function to call when the event occurs.
@param args: optional positional arguments for the callback.
@param kwds: optional keyword arguments for the callback.
@return: 0 on success, ENOMEM on error.
@note: The callback function must have at least one argument,
an Event instance. Any other, optional positional and keyword
arguments are in B{addition} to the first one.
"""
if not isinstance(eventtype, EventType):
raise VLCException("%s required: %r" % ('EventType', eventtype))
if not hasattr(callback, '__call__'): # callable()
raise VLCException("%s required: %r" % ('callable', callback))
# check that the callback expects arguments
if not any(getargspec(callback)[:2]): # list(...)
raise VLCException("%s required: %r" % ('argument', callback))
if self._callback_handler is None:
_called_from_ctypes = ctypes.CFUNCTYPE(None, ctypes.POINTER(Event), ctypes.c_void_p)
@_called_from_ctypes
def _callback_handler(event, k):
"""(INTERNAL) handle callback call from ctypes.
@note: We cannot simply make this an EventManager
method since ctypes does not prepend self as the
first parameter, hence this closure.
"""
try: # retrieve Python callback and arguments
call, args, kwds = self._callbacks[k]
# deref event.contents to simplify callback code
call(event.contents, *args, **kwds)
except KeyError: # detached?
pass
self._callback_handler = _callback_handler
self._callbacks = {}
k = eventtype.value
r = libvlc_event_attach(self, k, self._callback_handler, k)
if not r:
self._callbacks[k] = (callback, args, kwds)
return r
def event_detach(self, eventtype):
"""Unregister an event notification.
@param eventtype: the event type notification to be removed.
"""
if not isinstance(eventtype, EventType):
raise VLCException("%s required: %r" % ('EventType', eventtype))
k = eventtype.value
if k in self._callbacks:
del self._callbacks[k] # remove, regardless of libvlc return value
libvlc_event_detach(self, k, self._callback_handler, k)
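# EventManager usage sketch (hypothetical MRL; assuming the EventType enum
# generated earlier in this module provides MediaPlayerEndReached): the
# callback receives the Event instance first, followed by any extra
# arguments passed to event_attach():
#
#   def on_end(event, who):
#       print('%s reached end of media' % who)
#
#   player = MediaPlayer('file:///tmp/example.avi')
#   em = player.event_manager()
#   em.event_attach(EventType.MediaPlayerEndReached, on_end, 'player 1')
#   # ... later:
#   em.event_detach(EventType.MediaPlayerEndReached)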
class Instance(_Ctype):
'''Create a new Instance instance.
It may take as parameter either:
- a string
- a list of strings as first parameters
- the parameters given as the constructor parameters (must be strings)
'''
def __new__(cls, *args):
if len(args) == 1:
# Only 1 arg. It is either a C pointer, or an arg string,
# or a tuple.
i = args[0]
if isinstance(i, _Ints):
return _Constructor(cls, i)
elif isinstance(i, basestring):
args = i.strip().split()
elif isinstance(i, _Seqs):
args = i
else:
raise VLCException('Instance %r' % (args,))
if not args and plugin_path is not None:
# no parameters passed, for win32 and MacOS,
# specify the plugin_path if detected earlier
args = ['vlc', '--plugin-path=' + plugin_path]
if PYTHON3:
args = [ str_to_bytes(a) for a in args ]
return libvlc_new(len(args), args)
def media_player_new(self, uri=None):
"""Create a new MediaPlayer instance.
@param uri: an optional URI to play in the player.
"""
p = libvlc_media_player_new(self)
if uri:
p.set_media(self.media_new(uri))
p._instance = self
return p
def media_list_player_new(self):
"""Create a new MediaListPlayer instance.
"""
p = libvlc_media_list_player_new(self)
p._instance = self
return p
def media_new(self, mrl, *options):
"""Create a new Media instance.
If mrl contains a colon (:) preceded by more than 1 letter, it
        will be treated as a URL. Otherwise, it will be considered a
        local path. If you need more control, use the
        media_new_location/media_new_path methods directly.
Options can be specified as supplementary string parameters, e.g.
C{m = i.media_new('foo.avi', 'sub-filter=marq{marquee=Hello}', 'vout-filter=invert')}
        Alternatively, the options can be added to the media using the Media.add_options method:
        C{m.add_options('sub-filter=marq@test{marquee=Hello}', 'video-filter=invert')}
@param options: optional media option=value strings
"""
if ':' in mrl and mrl.index(':') > 1:
# Assume it is a URL
m = libvlc_media_new_location(self, str_to_bytes(mrl))
else:
# Else it should be a local path.
m = libvlc_media_new_path(self, str_to_bytes(os.path.normpath(mrl)))
for o in options:
libvlc_media_add_option(m, str_to_bytes(o))
m._instance = self
return m
def media_list_new(self, mrls=None):
"""Create a new MediaList instance.
@param mrls: optional list of MRL strings
"""
l = libvlc_media_list_new(self)
# We should take the lock, but since we did not leak the
# reference, nobody else can access it.
if mrls:
for m in mrls:
l.add_media(m)
l._instance = self
return l
def audio_output_enumerate_devices(self):
"""Enumerate the defined audio output devices.
@return: list of dicts {name:, description:, devices:}
"""
r = []
head = libvlc_audio_output_list_get(self)
if head:
i = head
while i:
i = i.contents
                devices = [{'id': libvlc_audio_output_device_id (self, i.name, d),
                            'longname': libvlc_audio_output_device_longname(self, i.name, d)}
                           for d in range(libvlc_audio_output_device_count (self, i.name))]
                r.append({'name': i.name, 'description': i.description, 'devices': devices})
i = i.next
libvlc_audio_output_list_release(head)
return r
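    # Enumeration sketch: each dict produced above describes one audio output
    # module together with its devices, e.g.
    #
    #   i = Instance()
    #   for out in i.audio_output_enumerate_devices():
    #       print(out['name'], out['description'], len(out['devices']))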
def audio_filter_list_get(self):
"""Returns a list of available audio filters.
"""
return module_description_list(libvlc_audio_filter_list_get(self))
def video_filter_list_get(self):
"""Returns a list of available video filters.
"""
return module_description_list(libvlc_video_filter_list_get(self))
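    # Filter listing sketch: both methods above return the (name, shortname,
    # longname, help) tuples built by module_description_list(), e.g.
    #
    #   for name, shortname, longname, help_text in i.video_filter_list_get():
    #       print(name, shortname)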
def release(self):
'''Decrement the reference count of a libvlc instance, and destroy it
if it reaches zero.
'''
return libvlc_release(self)
def retain(self):
'''Increments the reference count of a libvlc instance.
The initial reference count is 1 after L{new}() returns.
'''
return libvlc_retain(self)
def add_intf(self, name):
'''Try to start a user interface for the libvlc instance.
@param name: interface name, or NULL for default.
@return: 0 on success, -1 on error.
'''
return libvlc_add_intf(self, str_to_bytes(name))
def set_user_agent(self, name, http):
'''Sets the application name. LibVLC passes this as the user agent string
when a protocol requires it.
@param name: human-readable application name, e.g. "FooBar player 1.2.3".
@param http: HTTP User Agent, e.g. "FooBar/1.2.3 Python/2.6.0".
@version: LibVLC 1.1.1 or later.
'''
return libvlc_set_user_agent(self, str_to_bytes(name), str_to_bytes(http))
def log_unset(self):
'''Unsets the logging callback for a LibVLC instance. This is rarely needed:
the callback is implicitly unset when the instance is destroyed.
This function will wait for any pending callbacks invocation to complete
(causing a deadlock if called from within the callback).
@version: LibVLC 2.1.0 or later.
'''
return libvlc_log_unset(self)
def log_set(self, data, p_instance):
'''Sets the logging callback for a LibVLC instance.
This function is thread-safe: it will wait for any pending callbacks
invocation to complete.
        @param data: opaque data pointer for the callback function.
        @param p_instance: libvlc instance.
        @note: Some log messages (especially debug) are emitted by LibVLC while it is being initialized. These messages cannot be captured with this interface.
        @warning: A deadlock may occur if this function is called from the callback.
@version: LibVLC 2.1.0 or later.
'''
return libvlc_log_set(self, data, p_instance)
def log_set_file(self, stream):
'''Sets up logging to a file.
@param stream: FILE pointer opened for writing (the FILE pointer must remain valid until L{log_unset}()).
@version: LibVLC 2.1.0 or later.
'''
return libvlc_log_set_file(self, stream)
def media_new_location(self, psz_mrl):
        '''Create a media with a given media resource location,
for instance a valid URL.
@note: To refer to a local file with this function,
the file://... URI syntax B{must} be used (see IETF RFC3986).
We recommend using L{media_new_path}() instead when dealing with
local files.
See L{media_release}.
@param psz_mrl: the media location.
@return: the newly created media or NULL on error.
'''
return libvlc_media_new_location(self, str_to_bytes(psz_mrl))
def media_new_path(self, path):
'''Create a media for a certain file path.
See L{media_release}.
@param path: local filesystem path.
@return: the newly created media or NULL on error.
'''
return libvlc_media_new_path(self, str_to_bytes(path))
def media_new_fd(self, fd):
'''Create a media for an already open file descriptor.
The file descriptor shall be open for reading (or reading and writing).
Regular file descriptors, pipe read descriptors and character device
descriptors (including TTYs) are supported on all platforms.
Block device descriptors are supported where available.
Directory descriptors are supported on systems that provide fdopendir().
Sockets are supported on all platforms where they are file descriptors,
i.e. all except Windows.
@note: This library will B{not} automatically close the file descriptor
under any circumstance. Nevertheless, a file descriptor can usually only be
rendered once in a media player. To render it a second time, the file
descriptor should probably be rewound to the beginning with lseek().
See L{media_release}.
@param fd: open file descriptor.
@return: the newly created media or NULL on error.
@version: LibVLC 1.1.5 and later.
'''
return libvlc_media_new_fd(self, fd)
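    # File-descriptor sketch (POSIX-style example, LibVLC 1.1.5+; hypothetical
    # Instance variable i): open the file yourself and hand the descriptor
    # over; as noted above, LibVLC will not close it for you.
    #
    #   import os
    #   fd = os.open('/tmp/example.avi', os.O_RDONLY)
    #   m = i.media_new_fd(fd)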
def media_new_as_node(self, psz_name):
'''Create a media as an empty node with a given name.
See L{media_release}.
@param psz_name: the name of the node.
@return: the new empty media or NULL on error.
'''
return libvlc_media_new_as_node(self, str_to_bytes(psz_name))
def media_discoverer_new_from_name(self, psz_name):
'''Discover media service by name.
@param psz_name: service name.
@return: media discover object or NULL in case of error.
'''
return libvlc_media_discoverer_new_from_name(self, str_to_bytes(psz_name))
def media_library_new(self):
        '''Create a new Media Library object.
@return: a new object or NULL on error.
'''
return libvlc_media_library_new(self)
def audio_output_list_get(self):
'''Gets the list of available audio outputs.
        @return: list of available audio outputs. It must be freed with libvlc_audio_output_list_release(). In case of error, NULL is returned.
'''
return libvlc_audio_output_list_get(self)
def audio_output_device_list_get(self, aout):
'''Gets a list of audio output devices for a given audio output.
See L{audio_output_device_set}().
@note: Not all audio outputs support this. In particular, an empty (NULL)
list of devices does B{not} imply that the specified audio output does
not work.
@note: The list might not be exhaustive.
@warning: Some audio output devices in the list might not actually work in
some circumstances. By default, it is recommended to not specify any
explicit audio device.
        @param aout: audio output name (as returned by L{audio_output_list_get}()).
        @return: A NULL-terminated linked list of potential audio output devices. It must be freed with L{audio_output_device_list_release}().
@version: LibVLC 2.1.0 or later.
'''
return libvlc_audio_output_device_list_get(self, str_to_bytes(aout))
def vlm_release(self):
'''Release the vlm instance related to the given L{Instance}.
'''
return libvlc_vlm_release(self)
def vlm_add_broadcast(self, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop):
'''Add a broadcast, with one input.
@param psz_name: the name of the new broadcast.
@param psz_input: the input MRL.
@param psz_output: the output MRL (the parameter to the "sout" variable).
@param i_options: number of additional options.
@param ppsz_options: additional options.
@param b_enabled: boolean for enabling the new broadcast.
        @param b_loop: Should this broadcast be played in a loop?
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_add_broadcast(self, str_to_bytes(psz_name), str_to_bytes(psz_input), str_to_bytes(psz_output), i_options, ppsz_options, b_enabled, b_loop)
def vlm_add_vod(self, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux):
'''Add a vod, with one input.
@param psz_name: the name of the new vod media.
@param psz_input: the input MRL.
@param i_options: number of additional options.
@param ppsz_options: additional options.
@param b_enabled: boolean for enabling the new vod.
@param psz_mux: the muxer of the vod media.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_add_vod(self, str_to_bytes(psz_name), str_to_bytes(psz_input), i_options, ppsz_options, b_enabled, str_to_bytes(psz_mux))
def vlm_del_media(self, psz_name):
'''Delete a media (VOD or broadcast).
@param psz_name: the media to delete.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_del_media(self, str_to_bytes(psz_name))
def vlm_set_enabled(self, psz_name, b_enabled):
'''Enable or disable a media (VOD or broadcast).
@param psz_name: the media to work on.
@param b_enabled: the new status.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_set_enabled(self, str_to_bytes(psz_name), b_enabled)
def vlm_set_output(self, psz_name, psz_output):
'''Set the output for a media.
@param psz_name: the media to work on.
@param psz_output: the output MRL (the parameter to the "sout" variable).
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_set_output(self, str_to_bytes(psz_name), str_to_bytes(psz_output))
def vlm_set_input(self, psz_name, psz_input):
'''Set a media's input MRL. This will delete all existing inputs and
add the specified one.
@param psz_name: the media to work on.
@param psz_input: the input MRL.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_set_input(self, str_to_bytes(psz_name), str_to_bytes(psz_input))
def vlm_add_input(self, psz_name, psz_input):
        '''Add an input MRL to a media. Unlike L{vlm_set_input}, this keeps any
        existing inputs and adds the specified one.
@param psz_name: the media to work on.
@param psz_input: the input MRL.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_add_input(self, str_to_bytes(psz_name), str_to_bytes(psz_input))
def vlm_set_loop(self, psz_name, b_loop):
'''Set a media's loop status.
@param psz_name: the media to work on.
@param b_loop: the new status.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_set_loop(self, str_to_bytes(psz_name), b_loop)
def vlm_set_mux(self, psz_name, psz_mux):
'''Set a media's vod muxer.
@param psz_name: the media to work on.
@param psz_mux: the new muxer.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_set_mux(self, str_to_bytes(psz_name), str_to_bytes(psz_mux))
def vlm_change_media(self, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop):
'''Edit the parameters of a media. This will delete all existing inputs and
add the specified one.
@param psz_name: the name of the new broadcast.
@param psz_input: the input MRL.
@param psz_output: the output MRL (the parameter to the "sout" variable).
@param i_options: number of additional options.
@param ppsz_options: additional options.
@param b_enabled: boolean for enabling the new broadcast.
        @param b_loop: Should this broadcast be played in a loop?
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_change_media(self, str_to_bytes(psz_name), str_to_bytes(psz_input), str_to_bytes(psz_output), i_options, ppsz_options, b_enabled, b_loop)
def vlm_play_media(self, psz_name):
'''Play the named broadcast.
@param psz_name: the name of the broadcast.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_play_media(self, str_to_bytes(psz_name))
def vlm_stop_media(self, psz_name):
'''Stop the named broadcast.
@param psz_name: the name of the broadcast.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_stop_media(self, str_to_bytes(psz_name))
def vlm_pause_media(self, psz_name):
'''Pause the named broadcast.
@param psz_name: the name of the broadcast.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_pause_media(self, str_to_bytes(psz_name))
def vlm_seek_media(self, psz_name, f_percentage):
'''Seek in the named broadcast.
@param psz_name: the name of the broadcast.
@param f_percentage: the percentage to seek to.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_seek_media(self, str_to_bytes(psz_name), f_percentage)
def vlm_show_media(self, psz_name):
'''Return information about the named media as a JSON
string representation.
        This function is mainly intended for debugging use; if you want
        programmatic access to the state of a vlm_media_instance_t, please use
        the corresponding libvlc_vlm_get_media_instance_xxx functions.
Currently there are no such functions available for
vlm_media_t though.
        @param psz_name: the name of the media; if the name is an empty string, all media is described.
@return: string with information about named media, or NULL on error.
'''
return libvlc_vlm_show_media(self, str_to_bytes(psz_name))
def vlm_get_media_instance_position(self, psz_name, i_instance):
'''Get vlm_media instance position by name or instance id.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: position as float or -1. on error.
'''
return libvlc_vlm_get_media_instance_position(self, str_to_bytes(psz_name), i_instance)
def vlm_get_media_instance_time(self, psz_name, i_instance):
'''Get vlm_media instance time by name or instance id.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: time as integer or -1 on error.
'''
return libvlc_vlm_get_media_instance_time(self, str_to_bytes(psz_name), i_instance)
def vlm_get_media_instance_length(self, psz_name, i_instance):
'''Get vlm_media instance length by name or instance id.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: length of media item or -1 on error.
'''
return libvlc_vlm_get_media_instance_length(self, str_to_bytes(psz_name), i_instance)
def vlm_get_media_instance_rate(self, psz_name, i_instance):
'''Get vlm_media instance playback rate by name or instance id.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: playback rate or -1 on error.
'''
return libvlc_vlm_get_media_instance_rate(self, str_to_bytes(psz_name), i_instance)
def vlm_get_media_instance_title(self, psz_name, i_instance):
'''Get vlm_media instance title number by name or instance id.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: title as number or -1 on error.
@bug: will always return 0.
'''
return libvlc_vlm_get_media_instance_title(self, str_to_bytes(psz_name), i_instance)
def vlm_get_media_instance_chapter(self, psz_name, i_instance):
'''Get vlm_media instance chapter number by name or instance id.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: chapter as number or -1 on error.
@bug: will always return 0.
'''
return libvlc_vlm_get_media_instance_chapter(self, str_to_bytes(psz_name), i_instance)
def vlm_get_media_instance_seekable(self, psz_name, i_instance):
        '''Is the named media instance seekable?
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: 1 if seekable, 0 if not, -1 if media does not exist.
@bug: will always return 0.
'''
return libvlc_vlm_get_media_instance_seekable(self, str_to_bytes(psz_name), i_instance)
def vlm_get_event_manager(self):
'''Get libvlc_event_manager from a vlm media.
The p_event_manager is immutable, so you don't have to hold the lock.
@return: libvlc_event_manager.
'''
return libvlc_vlm_get_event_manager(self)
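    # VLM usage sketch (hypothetical names, MRLs and sout chain): set up a
    # broadcast, start it, then poll it through the instance-id accessors
    # documented above.
    #
    #   i = Instance()
    #   i.vlm_add_broadcast('chan1', 'file:///tmp/in.avi',
    #                       '#standard{access=http,mux=ts,dst=:8080}',
    #                       0, None, True, False)
    #   i.vlm_play_media('chan1')
    #   pos = i.vlm_get_media_instance_position('chan1', 0)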
class Media(_Ctype):
'''Create a new Media instance.
Usage: Media(MRL, *options)
See vlc.Instance.media_new documentation for details.
'''
def __new__(cls, *args):
if args:
i = args[0]
if isinstance(i, _Ints):
return _Constructor(cls, i)
if isinstance(i, Instance):
return i.media_new(*args[1:])
o = get_default_instance().media_new(*args)
return o
def get_instance(self):
return getattr(self, '_instance', None)
def add_options(self, *options):
"""Add a list of options to the media.
Options must be written without the double-dash, e.g.:
C{m.add_options('sub-filter=marq@test{marquee=Hello}', 'video-filter=invert')}
Alternatively, the options can directly be passed in the Instance.media_new method:
C{m = instance.media_new('foo.avi', 'sub-filter=marq@test{marquee=Hello}', 'video-filter=invert')}
@param options: optional media option=value strings
"""
for o in options:
self.add_option(o)
def add_option(self, psz_options):
'''Add an option to the media.
This option will be used to determine how the media_player will
        read the media. This allows the use of VLC's advanced
        reading/streaming options on a per-media basis.
        @note: The options are listed in 'vlc --long-help' from the command line,
        e.g. "--sout-all". Keep in mind that available options and their semantics
        vary across LibVLC versions and builds.
        @warning: Not all options affect L{Media} objects:
        Specifically, due to architectural issues most audio and video options,
        such as text renderer options, have no effect on an individual media.
These options must be set through L{new}() instead.
@param psz_options: the options (as a string).
'''
return libvlc_media_add_option(self, str_to_bytes(psz_options))
def add_option_flag(self, psz_options, i_flags):
'''Add an option to the media with configurable flags.
This option will be used to determine how the media_player will
        read the media. This allows the use of VLC's advanced
        reading/streaming options on a per-media basis.
        The options are detailed in vlc --long-help, for instance
        "--sout-all". Note that not all options are usable on media:
specifically, due to architectural issues, video-related options
such as text renderer options cannot be set on a single media. They
must be set on the whole libvlc instance instead.
@param psz_options: the options (as a string).
@param i_flags: the flags for this option.
'''
return libvlc_media_add_option_flag(self, str_to_bytes(psz_options), i_flags)
def retain(self):
'''Retain a reference to a media descriptor object (libvlc_media_t). Use
L{release}() to decrement the reference count of a
media descriptor object.
'''
return libvlc_media_retain(self)
def release(self):
'''Decrement the reference count of a media descriptor object. If the
reference count is 0, then L{release}() will release the
media descriptor object. It will send out an libvlc_MediaFreed event
to all listeners. If the media descriptor object has been released it
should not be used again.
'''
return libvlc_media_release(self)
def get_mrl(self):
'''Get the media resource locator (mrl) from a media descriptor object.
@return: string with mrl of media descriptor object.
'''
return libvlc_media_get_mrl(self)
def duplicate(self):
'''Duplicate a media descriptor object.
'''
return libvlc_media_duplicate(self)
def get_meta(self, e_meta):
'''Read the meta of the media.
If the media has not yet been parsed this will return NULL.
        This method automatically calls L{parse_async}(), so after calling
        it you may receive a libvlc_MediaMetaChanged event. If you prefer a synchronous
        version, ensure that you call L{parse}() before get_meta().
See L{parse}
See L{parse_async}
See libvlc_MediaMetaChanged.
@param e_meta: the meta to read.
@return: the media's meta.
'''
return libvlc_media_get_meta(self, e_meta)
def set_meta(self, e_meta, psz_value):
'''Set the meta of the media (this function will not save the meta, call
L{save_meta} in order to save the meta).
@param e_meta: the meta to write.
@param psz_value: the media's meta.
'''
return libvlc_media_set_meta(self, e_meta, str_to_bytes(psz_value))
def save_meta(self):
'''Save the meta previously set.
@return: true if the write operation was successful.
'''
return libvlc_media_save_meta(self)
def get_state(self):
'''Get current state of media descriptor object. Possible media states
are defined in libvlc_structures.c ( libvlc_NothingSpecial=0,
libvlc_Opening, libvlc_Buffering, libvlc_Playing, libvlc_Paused,
libvlc_Stopped, libvlc_Ended,
libvlc_Error).
See libvlc_state_t.
@return: state of media descriptor object.
'''
return libvlc_media_get_state(self)
def get_stats(self, p_stats):
'''Get the current statistics about the media.
        @param p_stats: structure that contains the statistics about the media (this structure must be allocated by the caller).
        @return: true if the statistics are available, false otherwise.
'''
return libvlc_media_get_stats(self, p_stats)
def subitems(self):
        '''Get subitems of media descriptor object. This will increment
        the reference count of the supplied media descriptor object. Use
L{list_release}() to decrement the reference counting.
@return: list of media descriptor subitems or NULL.
'''
return libvlc_media_subitems(self)
def event_manager(self):
'''Get event manager from media descriptor object.
NOTE: this function doesn't increment reference counting.
@return: event manager object.
'''
return libvlc_media_event_manager(self)
def get_duration(self):
'''Get duration (in ms) of media descriptor object item.
@return: duration of media item or -1 on error.
'''
return libvlc_media_get_duration(self)
def parse(self):
'''Parse a media.
This fetches (local) meta data and tracks information.
The method is synchronous.
See L{parse_async}
See L{get_meta}
See libvlc_media_get_tracks_info.
'''
return libvlc_media_parse(self)
def parse_async(self):
'''Parse a media.
This fetches (local) meta data and tracks information.
        This is the asynchronous variant of L{parse}().
To track when this is over you can listen to libvlc_MediaParsedChanged
event. However if the media was already parsed you will not receive this
event.
See L{parse}
See libvlc_MediaParsedChanged
See L{get_meta}
See libvlc_media_get_tracks_info.
'''
return libvlc_media_parse_async(self)
def is_parsed(self):
'''Get Parsed status for media descriptor object.
See libvlc_MediaParsedChanged.
        @return: true if the media object has been parsed, false otherwise.
'''
return libvlc_media_is_parsed(self)
def set_user_data(self, p_new_user_data):
        '''Sets the media descriptor's user_data. user_data is specialized data
        accessed by the host application; VLC.framework uses it as a pointer to
        a native object that references a L{Media} pointer.
@param p_new_user_data: pointer to user data.
'''
return libvlc_media_set_user_data(self, p_new_user_data)
def get_user_data(self):
        '''Get the media descriptor's user_data. user_data is specialized data
        accessed by the host application; VLC.framework uses it as a pointer to
        a native object that references a L{Media} pointer.
'''
return libvlc_media_get_user_data(self)
def tracks_get(self, tracks):
        '''Get media descriptor's elementary streams description.
        Note: you need to call L{parse}() or play the media at least once
        before calling this function; not doing so will result in an empty array.
        @param tracks: address to store an allocated array of Elementary Streams descriptions (must be freed with L{tracks_release}).
@return: the number of Elementary Streams (zero on error).
@version: LibVLC 2.1.0 and later.
'''
return libvlc_media_tracks_get(self, tracks)
def player_new_from_media(self):
'''Create a Media Player object from a Media.
@return: a new media player object, or NULL on error.
'''
return libvlc_media_player_new_from_media(self)
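# Media usage sketch (hypothetical path; assuming the Meta enum generated
# earlier in this module): parse synchronously, then read meta data and the
# duration through the methods above.
#
#   m = Media('/tmp/example.avi')
#   m.parse()                      # synchronous; see parse_async() for async
#   title = m.get_meta(Meta.Title)
#   ms = m.get_duration()          # milliseconds, -1 on error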
class MediaDiscoverer(_Ctype):
'''N/A
'''
def __new__(cls, ptr=_internal_guard):
'''(INTERNAL) ctypes wrapper constructor.
'''
return _Constructor(cls, ptr)
def release(self):
'''Release media discover object. If the reference count reaches 0, then
the object will be released.
'''
return libvlc_media_discoverer_release(self)
def localized_name(self):
        '''Get the localized name of the media service discover object.
@return: localized name.
'''
return libvlc_media_discoverer_localized_name(self)
def media_list(self):
'''Get media service discover media list.
@return: list of media items.
'''
return libvlc_media_discoverer_media_list(self)
def event_manager(self):
'''Get event manager from media service discover object.
@return: event manager object.
'''
return libvlc_media_discoverer_event_manager(self)
def is_running(self):
'''Query if media service discover object is running.
        @return: true if running, false if not.
'''
return libvlc_media_discoverer_is_running(self)
class MediaLibrary(_Ctype):
'''N/A
'''
def __new__(cls, ptr=_internal_guard):
'''(INTERNAL) ctypes wrapper constructor.
'''
return _Constructor(cls, ptr)
def release(self):
        '''Release media library object. This function decrements the
reference count of the media library object. If it reaches 0,
then the object will be released.
'''
return libvlc_media_library_release(self)
def retain(self):
'''Retain a reference to a media library object. This function will
increment the reference counting for this object. Use
L{release}() to decrement the reference count.
'''
return libvlc_media_library_retain(self)
def load(self):
'''Load media library.
@return: 0 on success, -1 on error.
'''
return libvlc_media_library_load(self)
def media_list(self):
'''Get media library subitems.
@return: media list subitems.
'''
return libvlc_media_library_media_list(self)
class MediaList(_Ctype):
'''Create a new MediaList instance.
Usage: MediaList(list_of_MRLs)
See vlc.Instance.media_list_new documentation for details.
'''
def __new__(cls, *args):
if args:
i = args[0]
if isinstance(i, _Ints):
return _Constructor(cls, i)
if isinstance(i, Instance):
return i.media_list_new(*args[1:])
o = get_default_instance().media_list_new(*args)
return o
def get_instance(self):
return getattr(self, '_instance', None)
def add_media(self, mrl):
"""Add media instance to media list.
The L{lock} should be held upon entering this function.
@param mrl: a media instance or a MRL.
@return: 0 on success, -1 if the media list is read-only.
"""
if isinstance(mrl, basestring):
mrl = (self.get_instance() or get_default_instance()).media_new(mrl)
return libvlc_media_list_add_media(self, mrl)
def release(self):
'''Release media list created with L{new}().
'''
return libvlc_media_list_release(self)
def retain(self):
'''Retain reference to a media list.
'''
return libvlc_media_list_retain(self)
def set_media(self, p_md):
'''Associate media instance with this media list instance.
If another media instance was present it will be released.
The L{lock} should NOT be held upon entering this function.
@param p_md: media instance to add.
'''
return libvlc_media_list_set_media(self, p_md)
def media(self):
'''Get media instance from this media list instance. This action will increase
the refcount on the media instance.
The L{lock} should NOT be held upon entering this function.
@return: media instance.
'''
return libvlc_media_list_media(self)
def insert_media(self, p_md, i_pos):
        '''Insert media instance in media list at a position.
        The L{lock} should be held upon entering this function.
@param p_md: a media instance.
@param i_pos: position in array where to insert.
@return: 0 on success, -1 if the media list is read-only.
'''
return libvlc_media_list_insert_media(self, p_md, i_pos)
def remove_index(self, i_pos):
        '''Remove media instance from media list at a position.
        The L{lock} should be held upon entering this function.
        @param i_pos: position in array of the item to remove.
@return: 0 on success, -1 if the list is read-only or the item was not found.
'''
return libvlc_media_list_remove_index(self, i_pos)
def count(self):
        '''Get count of media list items.
        The L{lock} should be held upon entering this function.
@return: number of items in media list.
'''
return libvlc_media_list_count(self)
def __len__(self):
return libvlc_media_list_count(self)
def item_at_index(self, i_pos):
        '''Get media instance from media list at a position.
        The L{lock} should be held upon entering this function.
        @param i_pos: position in array of the item to return.
@return: media instance at position i_pos, or NULL if not found. In case of success, L{media_retain}() is called to increase the refcount on the media.
'''
return libvlc_media_list_item_at_index(self, i_pos)
def __getitem__(self, i):
return libvlc_media_list_item_at_index(self, i)
def __iter__(self):
for i in range(len(self)):
yield self[i]
def index_of_item(self, p_md):
        '''Find index position of a media instance in the media list.
        Warning: the function will return the first matched position.
The L{lock} should be held upon entering this function.
@param p_md: media instance.
@return: position of media instance or -1 if media not found.
'''
return libvlc_media_list_index_of_item(self, p_md)
def is_readonly(self):
        '''Indicates whether this media list is read-only from a user point of view.
        @return: 1 if read-only, 0 if read-write.
'''
return libvlc_media_list_is_readonly(self)
def lock(self):
'''Get lock on media list items.
'''
return libvlc_media_list_lock(self)
def unlock(self):
        '''Release lock on media list items.
        The L{lock} should be held upon entering this function.
'''
return libvlc_media_list_unlock(self)
def event_manager(self):
'''Get libvlc_event_manager from this media list instance.
The p_event_manager is immutable, so you don't have to hold the lock.
@return: libvlc_event_manager.
'''
return libvlc_media_list_event_manager(self)
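# MediaList usage sketch (hypothetical MRLs): strings passed to add_media()
# are converted to Media objects via the associated (or default) Instance,
# so a playlist can be built directly from MRLs.
#
#   ml = MediaList(['file:///tmp/a.avi', 'file:///tmp/b.avi'])
#   ml.lock()
#   ml.add_media('file:///tmp/c.avi')
#   ml.unlock()
#   print(len(ml))                 # __len__ maps to libvlc_media_list_count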
class MediaListPlayer(_Ctype):
'''Create a new MediaListPlayer instance.
It may take as parameter either:
- a vlc.Instance
- nothing
'''
def __new__(cls, arg=None):
if arg is None:
i = get_default_instance()
elif isinstance(arg, Instance):
i = arg
elif isinstance(arg, _Ints):
return _Constructor(cls, arg)
else:
raise TypeError('MediaListPlayer %r' % (arg,))
return i.media_list_player_new()
def get_instance(self):
"""Return the associated Instance.
"""
return self._instance #PYCHOK expected
def release(self):
        '''Release a media_list_player after use.
        Decrement the reference count of a media player object. If the
reference count is 0, then L{release}() will
release the media player object. If the media player object
has been released, then it should not be used again.
'''
return libvlc_media_list_player_release(self)
def retain(self):
'''Retain a reference to a media player list object. Use
L{release}() to decrement reference count.
'''
return libvlc_media_list_player_retain(self)
def event_manager(self):
'''Return the event manager of this media_list_player.
@return: the event manager.
'''
return libvlc_media_list_player_event_manager(self)
def set_media_player(self, p_mi):
'''Replace media player in media_list_player with this instance.
@param p_mi: media player instance.
'''
return libvlc_media_list_player_set_media_player(self, p_mi)
def set_media_list(self, p_mlist):
'''Set the media list associated with the player.
@param p_mlist: list of media.
'''
return libvlc_media_list_player_set_media_list(self, p_mlist)
def play(self):
'''Play media list.
'''
return libvlc_media_list_player_play(self)
def pause(self):
'''Toggle pause (or resume) media list.
'''
return libvlc_media_list_player_pause(self)
def is_playing(self):
'''Is media list playing?
        @return: true if playing, false if not.
'''
return libvlc_media_list_player_is_playing(self)
def get_state(self):
'''Get current libvlc_state of media list player.
@return: libvlc_state_t for media list player.
'''
return libvlc_media_list_player_get_state(self)
def play_item_at_index(self, i_index):
'''Play media list item at position index.
@param i_index: index in media list to play.
        @return: 0 upon success, -1 if the item wasn't found.
'''
return libvlc_media_list_player_play_item_at_index(self, i_index)
def __getitem__(self, i):
return libvlc_media_list_player_play_item_at_index(self, i)
    def __iter__(self):
        # Note: MediaListPlayer does not define __len__, so this generic
        # iteration pattern only works if a length for the playlist is
        # otherwise available (e.g. via the associated MediaList).
        for i in range(len(self)):
            yield self[i]
def play_item(self, p_md):
'''Play the given media item.
@param p_md: the media instance.
@return: 0 upon success, -1 if the media is not part of the media list.
'''
return libvlc_media_list_player_play_item(self, p_md)
def stop(self):
'''Stop playing media list.
'''
return libvlc_media_list_player_stop(self)
def next(self):
'''Play next item from media list.
        @return: 0 upon success, -1 if there is no next item.
'''
return libvlc_media_list_player_next(self)
def previous(self):
'''Play previous item from media list.
@return: 0 upon success, -1 if there is no previous item.
'''
return libvlc_media_list_player_previous(self)
def set_playback_mode(self, e_mode):
'''Sets the playback mode for the playlist.
@param e_mode: playback mode specification.
'''
return libvlc_media_list_player_set_playback_mode(self, e_mode)
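# A minimal usage sketch for the MediaListPlayer wrapper above (not part of
# the generated bindings; file names are illustrative):
#
#   mlp = MediaListPlayer()
#   mlp.set_media_list(MediaList(['a.mp3', 'b.mp3']))
#   mlp.play()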
class MediaPlayer(_Ctype):
'''Create a new MediaPlayer instance.
It may take as parameter either:
- a string (media URI), options... In this case, a vlc.Instance will be created.
- a vlc.Instance, a string (media URI), options...
'''
def __new__(cls, *args):
if len(args) == 1 and isinstance(args[0], _Ints):
return _Constructor(cls, args[0])
if args and isinstance(args[0], Instance):
instance = args[0]
args = args[1:]
else:
instance = get_default_instance()
o = instance.media_player_new()
if args:
o.set_media(instance.media_new(*args))
return o
def get_instance(self):
"""Return the associated Instance.
"""
return self._instance #PYCHOK expected
def set_mrl(self, mrl, *options):
"""Set the MRL to play.
@param mrl: The MRL
@param options: optional media option=value strings
@return: the Media object
"""
m = self.get_instance().media_new(mrl, *options)
self.set_media(m)
return m
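# Usage sketch for set_mrl() (not part of the bindings; the MRL and the
# per-media option shown are illustrative):
#
#   p = MediaPlayer()
#   p.set_mrl('file:///tmp/a.mp3', 'no-video')
#   p.play()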
def video_get_spu_description(self):
"""Get the description of available video subtitles.
"""
return track_description_list(libvlc_video_get_spu_description(self))
def video_get_title_description(self):
"""Get the description of available titles.
"""
return track_description_list(libvlc_video_get_title_description(self))
def video_get_chapter_description(self, title):
"""Get the description of available chapters for specific title.
@param title: selected title (int)
"""
return track_description_list(libvlc_video_get_chapter_description(self, title))
def video_get_track_description(self):
"""Get the description of available video tracks.
"""
return track_description_list(libvlc_video_get_track_description(self))
def audio_get_track_description(self):
"""Get the description of available audio tracks.
"""
return track_description_list(libvlc_audio_get_track_description(self))
def video_get_size(self, num=0):
"""Get the video size in pixels as 2-tuple (width, height).
@param num: video number (default 0).
"""
r = libvlc_video_get_size(self, num)
if isinstance(r, tuple) and len(r) == 2:
return r
else:
raise VLCException('invalid video number (%s)' % (num,))
def set_hwnd(self, drawable):
"""Set a Win32/Win64 API window handle (HWND).
Specify where the media player should render its video
output. If LibVLC was built without Win32/Win64 API output
support, then this has no effects.
@param drawable: windows handle of the drawable.
"""
if not isinstance(drawable, ctypes.c_void_p):
drawable = ctypes.c_void_p(int(drawable))
libvlc_media_player_set_hwnd(self, drawable)
def video_get_width(self, num=0):
"""Get the width of a video in pixels.
@param num: video number (default 0).
"""
return self.video_get_size(num)[0]
def video_get_height(self, num=0):
"""Get the height of a video in pixels.
@param num: video number (default 0).
"""
return self.video_get_size(num)[1]
def video_get_cursor(self, num=0):
"""Get the mouse pointer coordinates over a video as 2-tuple (x, y).
Coordinates are expressed in terms of the decoded video resolution,
B{not} in terms of pixels on the screen/viewport. To get the
latter, you must query your windowing system directly.
Either coordinate may be negative or larger than the corresponding
size of the video, if the cursor is outside the rendering area.
@warning: The coordinates may be out-of-date if the pointer is not
located on the video rendering area. LibVLC does not track the
mouse pointer if the latter is outside the video widget.
@note: LibVLC does not support multiple mouse pointers (but does
support multiple input devices sharing the same pointer).
@param num: video number (default 0).
"""
r = libvlc_video_get_cursor(self, num)
if isinstance(r, tuple) and len(r) == 2:
return r
raise VLCException('invalid video number (%s)' % (num,))
def release(self):
'''Release a media_player after use.
Decrement the reference count of a media player object. If the
reference count is 0, then L{release}() will
release the media player object. If the media player object
has been released, then it should not be used again.
'''
return libvlc_media_player_release(self)
def retain(self):
'''Retain a reference to a media player object. Use
L{release}() to decrement reference count.
'''
return libvlc_media_player_retain(self)
def set_media(self, p_md):
'''Set the media that will be used by the media_player. If any,
previous md will be released.
@param p_md: the Media. Afterwards the p_md can be safely destroyed.
'''
return libvlc_media_player_set_media(self, p_md)
def get_media(self):
'''Get the media used by the media_player.
@return: the media associated with p_mi, or NULL if no media is associated.
'''
return libvlc_media_player_get_media(self)
def event_manager(self):
'''Get the Event Manager from which the media player send event.
@return: the event manager associated with p_mi.
'''
return libvlc_media_player_event_manager(self)
def is_playing(self):
'''is_playing.
@return: 1 if the media player is playing, 0 otherwise.
'''
return libvlc_media_player_is_playing(self)
def play(self):
'''Play.
@return: 0 if playback started (and was already started), or -1 on error.
'''
return libvlc_media_player_play(self)
def set_pause(self, do_pause):
'''Pause or resume (no effect if there is no media).
@param do_pause: play/resume if zero, pause if non-zero.
@version: LibVLC 1.1.1 or later.
'''
return libvlc_media_player_set_pause(self, do_pause)
def pause(self):
'''Toggle pause (no effect if there is no media).
'''
return libvlc_media_player_pause(self)
def stop(self):
'''Stop (no effect if there is no media).
'''
return libvlc_media_player_stop(self)
def video_set_callbacks(self, lock, unlock, display, opaque):
'''Set callbacks and private data to render decoded video to a custom area
in memory.
Use L{video_set_format}() or L{video_set_format_callbacks}()
to configure the decoded format.
@param lock: callback to lock video memory (must not be NULL).
@param unlock: callback to unlock video memory (or NULL if not needed).
@param display: callback to display video (or NULL if not needed).
@param opaque: private pointer for the three callbacks (as first parameter).
@version: LibVLC 1.1.1 or later.
'''
return libvlc_video_set_callbacks(self, lock, unlock, display, opaque)
def video_set_format(self, chroma, width, height, pitch):
'''Set decoded video chroma and dimensions.
This only works in combination with L{video_set_callbacks}(),
and is mutually exclusive with L{video_set_format_callbacks}().
@param chroma: a four-characters string identifying the chroma (e.g. "RV32" or "YUYV").
@param width: pixel width.
@param height: pixel height.
@param pitch: line pitch (in bytes).
@version: LibVLC 1.1.1 or later.
@bug: All pixel planes are expected to have the same pitch. To use the YCbCr color space with chrominance subsampling, consider using L{video_set_format_callbacks}() instead.
'''
return libvlc_video_set_format(self, str_to_bytes(chroma), width, height, pitch)
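# Rough sketch of rendering decoded frames into a caller-owned buffer by
# combining video_set_callbacks() with video_set_format().  The ctypes
# prototype name (CallbackDecorators.VideoLockCb) is assumed to be the one
# defined elsewhere in this module; the buffer handling is illustrative only:
#
#   import ctypes
#   w, h = 320, 240
#   buf = ctypes.create_string_buffer(w * h * 4)   # one RV32 frame
#
#   @CallbackDecorators.VideoLockCb
#   def _lock(opaque, planes):
#       planes[0] = ctypes.cast(buf, ctypes.c_void_p)
#       # returning None yields a NULL picture identifier
#
#   p.video_set_callbacks(_lock, None, None, None)
#   p.video_set_format('RV32', w, h, w * 4)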
def video_set_format_callbacks(self, setup, cleanup):
'''Set decoded video chroma and dimensions. This only works in combination with
L{video_set_callbacks}().
@param setup: callback to select the video format (cannot be NULL).
@param cleanup: callback to release any allocated resources (or NULL).
@version: LibVLC 2.0.0 or later.
'''
return libvlc_video_set_format_callbacks(self, setup, cleanup)
def set_nsobject(self, drawable):
'''Set the NSView handler where the media player should render its video output.
Use the vout called "macosx".
The drawable is an NSObject that follow the VLCOpenGLVideoViewEmbedding
protocol:
@begincode
\@protocol VLCOpenGLVideoViewEmbedding <NSObject>
- (void)addVoutSubview:(NSView *)view;
- (void)removeVoutSubview:(NSView *)view;
\@end
@endcode
Or it can be an NSView object.
If you want to use it along with Qt4 see the QMacCocoaViewContainer. Then
the following code should work:
@begincode
NSView *video = [[NSView alloc] init];
QMacCocoaViewContainer *container = new QMacCocoaViewContainer(video, parent);
L{set_nsobject}(mp, video);
[video release];
@endcode
You can find a live example in VLCVideoView in VLCKit.framework.
@param drawable: the drawable that is either an NSView or an object following the VLCOpenGLVideoViewEmbedding protocol.
'''
return libvlc_media_player_set_nsobject(self, drawable)
def get_nsobject(self):
'''Get the NSView handler previously set with L{set_nsobject}().
@return: the NSView handler or 0 if none were set.
'''
return libvlc_media_player_get_nsobject(self)
def set_agl(self, drawable):
'''Set the agl handler where the media player should render its video output.
@param drawable: the agl handler.
'''
return libvlc_media_player_set_agl(self, drawable)
def get_agl(self):
'''Get the agl handler previously set with L{set_agl}().
@return: the agl handler or 0 if none were set.
'''
return libvlc_media_player_get_agl(self)
def set_xwindow(self, drawable):
'''Set an X Window System drawable where the media player should render its
video output. If LibVLC was built without X11 output support, then this has
no effects.
The specified identifier must correspond to an existing Input/Output class
X11 window. Pixmaps are B{not} supported. The caller shall ensure that
the X11 server is the same as the one the VLC instance has been configured
with. This function must be called before video playback is started;
otherwise it will only take effect after playback stop and restart.
@param drawable: the ID of the X window.
'''
return libvlc_media_player_set_xwindow(self, drawable)
def get_xwindow(self):
'''Get the X Window System window identifier previously set with
L{set_xwindow}(). Note that this will return the identifier
even if VLC is not currently using it (for instance if it is playing an
audio-only input).
@return: an X window ID, or 0 if none were set.
'''
return libvlc_media_player_get_xwindow(self)
def get_hwnd(self):
'''Get the Windows API window handle (HWND) previously set with
L{set_hwnd}(). The handle will be returned even if LibVLC
is not currently outputting any video to it.
@return: a window handle or NULL if there are none.
'''
return libvlc_media_player_get_hwnd(self)
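# Platform-dependent embedding sketch: hand the video output a toolkit
# widget handle (the Tkinter frame and the platform dispatch below are
# illustrative, not part of this module):
#
#   import sys
#   wid = frame.winfo_id()          # frame: an existing Tkinter widget
#   if sys.platform.startswith('win'):
#       p.set_hwnd(wid)
#   elif sys.platform.startswith('linux'):
#       p.set_xwindow(wid)
#   else:                           # macOS expects an NSView/NSObject
#       p.set_nsobject(wid)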
def audio_set_callbacks(self, play, pause, resume, flush, drain, opaque):
'''Set callbacks and private data for decoded audio.
Use L{audio_set_format}() or L{audio_set_format_callbacks}()
to configure the decoded audio format.
@param play: callback to play audio samples (must not be NULL).
@param pause: callback to pause playback (or NULL to ignore).
@param resume: callback to resume playback (or NULL to ignore).
@param flush: callback to flush audio buffers (or NULL to ignore).
@param drain: callback to drain audio buffers (or NULL to ignore).
@param opaque: private pointer for the audio callbacks (as first parameter).
@version: LibVLC 2.0.0 or later.
'''
return libvlc_audio_set_callbacks(self, play, pause, resume, flush, drain, opaque)
def audio_set_volume_callback(self, set_volume):
'''Set callbacks and private data for decoded audio.
Use L{audio_set_format}() or L{audio_set_format_callbacks}()
to configure the decoded audio format.
@param set_volume: callback to apply audio volume, or NULL to apply volume in software.
@version: LibVLC 2.0.0 or later.
'''
return libvlc_audio_set_volume_callback(self, set_volume)
def audio_set_format_callbacks(self, setup, cleanup):
'''Set decoded audio format. This only works in combination with
L{audio_set_callbacks}().
@param setup: callback to select the audio format (cannot be NULL).
@param cleanup: callback to release any allocated resources (or NULL).
@version: LibVLC 2.0.0 or later.
'''
return libvlc_audio_set_format_callbacks(self, setup, cleanup)
def audio_set_format(self, format, rate, channels):
'''Set decoded audio format.
This only works in combination with L{audio_set_callbacks}(),
and is mutually exclusive with L{audio_set_format_callbacks}().
@param format: a four-characters string identifying the sample format (e.g. "S16N" or "FL32").
@param rate: sample rate (expressed in Hz).
@param channels: channels count.
@version: LibVLC 2.0.0 or later.
'''
return libvlc_audio_set_format(self, str_to_bytes(format), rate, channels)
def get_length(self):
'''Get the current movie length (in ms).
@return: the movie length (in ms), or -1 if there is no media.
'''
return libvlc_media_player_get_length(self)
def get_time(self):
'''Get the current movie time (in ms).
@return: the movie time (in ms), or -1 if there is no media.
'''
return libvlc_media_player_get_time(self)
def set_time(self, i_time):
'''Set the movie time (in ms). This has no effect if no media is being played.
Not all formats and protocols support this.
@param i_time: the movie time (in ms).
'''
return libvlc_media_player_set_time(self, i_time)
def get_position(self):
'''Get movie position as percentage between 0.0 and 1.0.
@return: movie position, or -1.0 in case of error.
'''
return libvlc_media_player_get_position(self)
def set_position(self, f_pos):
'''Set movie position as percentage between 0.0 and 1.0.
This has no effect if playback is not enabled.
This might not work depending on the underlying input format and protocol.
@param f_pos: the position.
'''
return libvlc_media_player_set_position(self, f_pos)
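# Seeking sketch (illustrative): relative jumps combine get_time() and
# set_time(), absolute jumps use set_position() with a value in [0.0, 1.0]:
#
#   if p.is_seekable():
#       p.set_position(0.5)                  # jump to the middle
#       p.set_time(p.get_time() + 10000)     # then 10 seconds forward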
def set_chapter(self, i_chapter):
'''Set movie chapter (if applicable).
@param i_chapter: chapter number to play.
'''
return libvlc_media_player_set_chapter(self, i_chapter)
def get_chapter(self):
'''Get movie chapter.
@return: chapter number currently playing, or -1 if there is no media.
'''
return libvlc_media_player_get_chapter(self)
def get_chapter_count(self):
'''Get movie chapter count.
@return: number of chapters in movie, or -1.
'''
return libvlc_media_player_get_chapter_count(self)
def will_play(self):
'''Is the player able to play.
@return: true if the player is able to play, false otherwise.
'''
return libvlc_media_player_will_play(self)
def get_chapter_count_for_title(self, i_title):
'''Get title chapter count.
@param i_title: title.
@return: number of chapters in title, or -1.
'''
return libvlc_media_player_get_chapter_count_for_title(self, i_title)
def set_title(self, i_title):
'''Set movie title.
@param i_title: title number to play.
'''
return libvlc_media_player_set_title(self, i_title)
def get_title(self):
'''Get movie title.
@return: title number currently playing, or -1.
'''
return libvlc_media_player_get_title(self)
def get_title_count(self):
'''Get movie title count.
@return: title number count, or -1.
'''
return libvlc_media_player_get_title_count(self)
def previous_chapter(self):
'''Set previous chapter (if applicable).
'''
return libvlc_media_player_previous_chapter(self)
def next_chapter(self):
'''Set next chapter (if applicable).
'''
return libvlc_media_player_next_chapter(self)
def get_rate(self):
'''Get the requested movie play rate.
@warning: Depending on the underlying media, the requested rate may be
different from the real playback rate.
@return: movie play rate.
'''
return libvlc_media_player_get_rate(self)
def set_rate(self, rate):
'''Set movie play rate.
@param rate: movie play rate to set.
@return: -1 if an error was detected, 0 otherwise (but even then, it might not actually work depending on the underlying media protocol).
'''
return libvlc_media_player_set_rate(self, rate)
def get_state(self):
'''Get current movie state.
@return: the current state of the media player (playing, paused, ...) See libvlc_state_t.
'''
return libvlc_media_player_get_state(self)
def get_fps(self):
'''Get movie fps rate.
@return: frames per second (fps) for this playing movie, or 0 if unspecified.
'''
return libvlc_media_player_get_fps(self)
def has_vout(self):
'''How many video outputs does this media player have?
@return: the number of video outputs.
'''
return libvlc_media_player_has_vout(self)
def is_seekable(self):
'''Is this media player seekable?
@return: true if the media player can seek.
'''
return libvlc_media_player_is_seekable(self)
def can_pause(self):
'''Can this media player be paused?
@return: true if the media player can pause.
'''
return libvlc_media_player_can_pause(self)
def next_frame(self):
'''Display the next frame (if supported).
'''
return libvlc_media_player_next_frame(self)
def navigate(self, navigate):
'''Navigate through DVD Menu.
@param navigate: the Navigation mode.
@version: libVLC 2.0.0 or later.
'''
return libvlc_media_player_navigate(self, navigate)
def toggle_fullscreen(self):
'''Toggle fullscreen status on non-embedded video outputs.
@warning: The same limitations applies to this function
as to L{set_fullscreen}().
'''
return libvlc_toggle_fullscreen(self)
def set_fullscreen(self, b_fullscreen):
'''Enable or disable fullscreen.
@warning: With most window managers, only a top-level window can be in
full-screen mode. Hence, this function will not operate properly if
L{set_xwindow}() was used to embed the video in a
non-top-level window. In that case, the embedding window must be reparented
to the root window B{before} fullscreen mode is enabled. You will want
to reparent it back to its normal parent when disabling fullscreen.
@param b_fullscreen: boolean for fullscreen status.
'''
return libvlc_set_fullscreen(self, b_fullscreen)
def get_fullscreen(self):
'''Get current fullscreen status.
@return: the fullscreen status (boolean).
'''
return libvlc_get_fullscreen(self)
def video_set_key_input(self, on):
'''Enable or disable key press events handling, according to the LibVLC hotkeys
configuration. By default and for historical reasons, keyboard events are
handled by the LibVLC video widget.
@note: On X11, there can be only one subscriber for key press and mouse
click events per window. If your application has subscribed to those events
for the X window ID of the video widget, then LibVLC will not be able to
handle key presses and mouse clicks in any case.
@warning: This function is only implemented for X11 and Win32 at the moment.
@param on: true to handle key press events, false to ignore them.
'''
return libvlc_video_set_key_input(self, on)
def video_set_mouse_input(self, on):
'''Enable or disable mouse click events handling. By default, those events are
handled. This is needed for DVD menus to work, as well as a few video
filters such as "puzzle".
See L{video_set_key_input}().
@warning: This function is only implemented for X11 and Win32 at the moment.
@param on: true to handle mouse click events, false to ignore them.
'''
return libvlc_video_set_mouse_input(self, on)
def video_get_scale(self):
'''Get the current video scaling factor.
See also L{video_set_scale}().
@return: the currently configured zoom factor, or 0.0 if the video is set to fit to the output window/drawable automatically.
'''
return libvlc_video_get_scale(self)
def video_set_scale(self, f_factor):
'''Set the video scaling factor. That is the ratio of the number of pixels on
screen to the number of pixels in the original decoded video in each
dimension. Zero is a special value; it will adjust the video to the output
window/drawable (in windowed mode) or the entire screen.
Note that not all video outputs support scaling.
@param f_factor: the scaling factor, or zero.
'''
return libvlc_video_set_scale(self, f_factor)
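# Scaling sketch (illustrative): 0.0 means fit the output window, any other
# factor is an absolute zoom:
#
#   p.video_set_scale(0.0 if p.video_get_scale() else 2.0)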
def video_get_aspect_ratio(self):
'''Get current video aspect ratio.
@return: the video aspect ratio or NULL if unspecified (the result must be released with free() or L{free}()).
'''
return libvlc_video_get_aspect_ratio(self)
def video_set_aspect_ratio(self, psz_aspect):
'''Set new video aspect ratio.
@param psz_aspect: new video aspect-ratio or NULL to reset to default.
@note: Invalid aspect ratios are ignored.
'''
return libvlc_video_set_aspect_ratio(self, str_to_bytes(psz_aspect))
def video_get_spu(self):
'''Get current video subtitle.
@return: the video subtitle selected, or -1 if none.
'''
return libvlc_video_get_spu(self)
def video_get_spu_count(self):
'''Get the number of available video subtitles.
@return: the number of available video subtitles.
'''
return libvlc_video_get_spu_count(self)
def video_set_spu(self, i_spu):
'''Set new video subtitle.
@param i_spu: video subtitle track to select (i_id from track description).
@return: 0 on success, -1 if out of range.
'''
return libvlc_video_set_spu(self, i_spu)
def video_set_subtitle_file(self, psz_subtitle):
'''Set new video subtitle file.
@param psz_subtitle: new video subtitle file.
@return: the success status (boolean).
'''
return libvlc_video_set_subtitle_file(self, str_to_bytes(psz_subtitle))
def video_get_spu_delay(self):
'''Get the current subtitle delay. Positive values mean subtitles are being
displayed later, negative values earlier.
@return: time (in microseconds) the display of subtitles is being delayed.
@version: LibVLC 2.0.0 or later.
'''
return libvlc_video_get_spu_delay(self)
def video_set_spu_delay(self, i_delay):
'''Set the subtitle delay. This affects the timing of when the subtitle will
be displayed. Positive values result in subtitles being displayed later,
while negative values will result in subtitles being displayed earlier.
The subtitle delay will be reset to zero each time the media changes.
@param i_delay: time (in microseconds) the display of subtitles should be delayed.
@return: 0 on success, -1 on error.
@version: LibVLC 2.0.0 or later.
'''
return libvlc_video_set_spu_delay(self, i_delay)
def video_get_crop_geometry(self):
'''Get current crop filter geometry.
@return: the crop filter geometry or NULL if unset.
'''
return libvlc_video_get_crop_geometry(self)
def video_set_crop_geometry(self, psz_geometry):
'''Set new crop filter geometry.
@param psz_geometry: new crop filter geometry (NULL to unset).
'''
return libvlc_video_set_crop_geometry(self, str_to_bytes(psz_geometry))
def video_get_teletext(self):
'''Get current teletext page requested.
@return: the current teletext page requested.
'''
return libvlc_video_get_teletext(self)
def video_set_teletext(self, i_page):
'''Set new teletext page to retrieve.
@param i_page: teletext page number requested.
'''
return libvlc_video_set_teletext(self, i_page)
def toggle_teletext(self):
'''Toggle teletext transparent status on video output.
'''
return libvlc_toggle_teletext(self)
def video_get_track_count(self):
'''Get number of available video tracks.
@return: the number of available video tracks (int).
'''
return libvlc_video_get_track_count(self)
def video_get_track(self):
'''Get current video track.
@return: the video track ID (int) or -1 if no active input.
'''
return libvlc_video_get_track(self)
def video_set_track(self, i_track):
'''Set video track.
@param i_track: the track ID (i_id field from track description).
@return: 0 on success, -1 if out of range.
'''
return libvlc_video_set_track(self, i_track)
def video_take_snapshot(self, num, psz_filepath, i_width, i_height):
'''Take a snapshot of the current video window.
If i_width AND i_height is 0, original size is used.
If i_width XOR i_height is 0, original aspect-ratio is preserved.
@param num: number of video output (typically 0 for the first/only one).
@param psz_filepath: the path where to save the screenshot to.
@param i_width: the snapshot's width.
@param i_height: the snapshot's height.
@return: 0 on success, -1 if the video was not found.
'''
return libvlc_video_take_snapshot(self, num, str_to_bytes(psz_filepath), i_width, i_height)
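# Snapshot sketch (the output path is illustrative): passing 0 for both
# width and height keeps the native video size:
#
#   p.video_take_snapshot(0, '/tmp/frame.png', 0, 0)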
def video_set_deinterlace(self, psz_mode):
'''Enable or disable deinterlace filter.
@param psz_mode: type of deinterlace filter, NULL to disable.
'''
return libvlc_video_set_deinterlace(self, str_to_bytes(psz_mode))
def video_get_marquee_int(self, option):
'''Get an integer marquee option value.
@param option: marq option to get See libvlc_video_marquee_int_option_t.
'''
return libvlc_video_get_marquee_int(self, option)
def video_get_marquee_string(self, option):
'''Get a string marquee option value.
@param option: marq option to get See libvlc_video_marquee_string_option_t.
'''
return libvlc_video_get_marquee_string(self, option)
def video_set_marquee_int(self, option, i_val):
'''Enable, disable or set an integer marquee option
Setting libvlc_marquee_Enable has the side effect of enabling (arg !0)
or disabling (arg 0) the marq filter.
@param option: marq option to set See libvlc_video_marquee_int_option_t.
@param i_val: marq option value.
'''
return libvlc_video_set_marquee_int(self, option, i_val)
def video_set_marquee_string(self, option, psz_text):
'''Set a marquee string option.
@param option: marq option to set See libvlc_video_marquee_string_option_t.
@param psz_text: marq option value.
'''
return libvlc_video_set_marquee_string(self, option, str_to_bytes(psz_text))
def video_get_logo_int(self, option):
'''Get integer logo option.
@param option: logo option to get, values of libvlc_video_logo_option_t.
'''
return libvlc_video_get_logo_int(self, option)
def video_set_logo_int(self, option, value):
'''Set logo option as integer. Options that take a different type value
are ignored.
Passing libvlc_logo_enable as option value has the side effect of
starting (arg !0) or stopping (arg 0) the logo filter.
@param option: logo option to set, values of libvlc_video_logo_option_t.
@param value: logo option value.
'''
return libvlc_video_set_logo_int(self, option, value)
def video_set_logo_string(self, option, psz_value):
'''Set logo option as string. Options that take a different type value
are ignored.
@param option: logo option to set, values of libvlc_video_logo_option_t.
@param psz_value: logo option value.
'''
return libvlc_video_set_logo_string(self, option, str_to_bytes(psz_value))
def video_get_adjust_int(self, option):
'''Get integer adjust option.
@param option: adjust option to get, values of libvlc_video_adjust_option_t.
@version: LibVLC 1.1.1 and later.
'''
return libvlc_video_get_adjust_int(self, option)
def video_set_adjust_int(self, option, value):
'''Set adjust option as integer. Options that take a different type value
are ignored.
Passing libvlc_adjust_enable as option value has the side effect of
starting (arg !0) or stopping (arg 0) the adjust filter.
@param option: adjust option to set, values of libvlc_video_adjust_option_t.
@param value: adjust option value.
@version: LibVLC 1.1.1 and later.
'''
return libvlc_video_set_adjust_int(self, option, value)
def video_get_adjust_float(self, option):
'''Get float adjust option.
@param option: adjust option to get, values of libvlc_video_adjust_option_t.
@version: LibVLC 1.1.1 and later.
'''
return libvlc_video_get_adjust_float(self, option)
def video_set_adjust_float(self, option, value):
'''Set adjust option as float. Options that take a different type value
are ignored.
@param option: adjust option to set, values of libvlc_video_adjust_option_t.
@param value: adjust option value.
@version: LibVLC 1.1.1 and later.
'''
return libvlc_video_set_adjust_float(self, option, value)
def audio_output_set(self, psz_name):
'''Sets the audio output.
@note: Any change will take effect only after playback is stopped and
restarted. Audio output cannot be changed while playing.
@param psz_name: name of audio output, use psz_name of See L{AudioOutput}.
@return: 0 if the function succeeded, -1 on error.
'''
return libvlc_audio_output_set(self, str_to_bytes(psz_name))
def audio_output_device_set(self, psz_audio_output, psz_device_id):
'''Configures an explicit audio output device for a given audio output plugin.
A list of possible devices can be obtained with
L{audio_output_device_list_get}().
@note: This function does not select the specified audio output plugin.
L{audio_output_set}() is used for that purpose.
@warning: The syntax for the device parameter depends on the audio output.
This is not portable. Only use this function if you know what you are doing.
Some audio outputs do not support this function (e.g. PulseAudio, WASAPI).
Some audio outputs require further parameters (e.g. ALSA: channels map).
@param psz_audio_output: name of audio output, See L{AudioOutput}.
@param psz_device_id: device.
@return: Nothing. Errors are ignored.
'''
return libvlc_audio_output_device_set(self, str_to_bytes(psz_audio_output), str_to_bytes(psz_device_id))
def audio_toggle_mute(self):
'''Toggle mute status.
'''
return libvlc_audio_toggle_mute(self)
def audio_get_mute(self):
'''Get current mute status.
@return: the mute status (boolean) if defined, -1 if undefined/unapplicable.
'''
return libvlc_audio_get_mute(self)
def audio_set_mute(self, status):
'''Set mute status.
@param status: if status is true then mute, otherwise unmute.
@warning: This function does not always work. If there is no active audio playback stream, the mute status might not be available. If digital pass-through (S/PDIF, HDMI...) is in use, muting may be inapplicable. Also some audio output plugins do not support muting at all.
@note: To force silent playback, disable all audio tracks. This is more efficient and reliable than mute.
'''
return libvlc_audio_set_mute(self, status)
def audio_get_volume(self):
'''Get current software audio volume.
@return: the software volume in percents (0 = mute, 100 = nominal / 0dB).
'''
return libvlc_audio_get_volume(self)
def audio_set_volume(self, i_volume):
'''Set current software audio volume.
@param i_volume: the volume in percents (0 = mute, 100 = 0dB).
@return: 0 if the volume was set, -1 if it was out of range.
'''
return libvlc_audio_set_volume(self, i_volume)
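# Volume sketch (illustrative): nudge the software volume, clamped to the
# 0..100 range that audio_set_volume() accepts:
#
#   vol = max(0, min(100, p.audio_get_volume() + 10))
#   p.audio_set_volume(vol)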
def audio_get_track_count(self):
'''Get number of available audio tracks.
@return: the number of available audio tracks (int), or -1 if unavailable.
'''
return libvlc_audio_get_track_count(self)
def audio_get_track(self):
'''Get current audio track.
@return: the audio track ID or -1 if no active input.
'''
return libvlc_audio_get_track(self)
def audio_set_track(self, i_track):
'''Set current audio track.
@param i_track: the track ID (i_id field from track description).
@return: 0 on success, -1 on error.
'''
return libvlc_audio_set_track(self, i_track)
def audio_get_channel(self):
'''Get current audio channel.
@return: the audio channel See libvlc_audio_output_channel_t.
'''
return libvlc_audio_get_channel(self)
def audio_set_channel(self, channel):
'''Set current audio channel.
@param channel: the audio channel, See libvlc_audio_output_channel_t.
@return: 0 on success, -1 on error.
'''
return libvlc_audio_set_channel(self, channel)
def audio_get_delay(self):
'''Get current audio delay.
@return: the audio delay (microseconds).
@version: LibVLC 1.1.1 or later.
'''
return libvlc_audio_get_delay(self)
def audio_set_delay(self, i_delay):
'''Set current audio delay. The audio delay will be reset to zero each time the media changes.
@param i_delay: the audio delay (microseconds).
@return: 0 on success, -1 on error.
@version: LibVLC 1.1.1 or later.
'''
return libvlc_audio_set_delay(self, i_delay)
# LibVLC __version__ functions #
def libvlc_errmsg():
'''A human-readable error message for the last LibVLC error in the calling
thread. The resulting string is valid until another error occurs (at least
until the next LibVLC call).
@warning: This will be NULL if there was no error.
'''
f = _Cfunctions.get('libvlc_errmsg', None) or \
_Cfunction('libvlc_errmsg', (), None,
ctypes.c_char_p)
return f()
def libvlc_clearerr():
'''Clears the LibVLC error status for the current thread. This is optional.
By default, the error status is automatically overridden when a new error
occurs, and destroyed when the thread exits.
'''
f = _Cfunctions.get('libvlc_clearerr', None) or \
_Cfunction('libvlc_clearerr', (), None,
None)
return f()
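# Error-handling sketch (illustrative): query the per-thread message after a
# call reports failure, then optionally clear the error state:
#
#   if p.play() == -1:
#       print('libVLC error: %s' % libvlc_errmsg())
#       libvlc_clearerr()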
def libvlc_vprinterr(fmt, ap):
'''Sets the LibVLC error status and message for the current thread.
Any previous error is overridden.
@param fmt: the format string.
@param ap: the arguments.
@return: a nul terminated string in any case.
'''
f = _Cfunctions.get('libvlc_vprinterr', None) or \
_Cfunction('libvlc_vprinterr', ((1,), (1,),), None,
ctypes.c_char_p, ctypes.c_char_p, ctypes.c_void_p)
return f(fmt, ap)
def libvlc_new(argc, argv):
'''Create and initialize a libvlc instance.
This function accepts a list of "command line" arguments similar to
main(). These arguments affect the LibVLC instance default configuration.
@param argc: the number of arguments (should be 0).
@param argv: list of arguments (should be NULL).
@return: the libvlc instance or NULL in case of error.
@version: Arguments are meant to be passed from the command line to LibVLC, just like VLC media player does. The list of valid arguments depends on the LibVLC version, the operating system and platform, and the set of available LibVLC plugins. Invalid or unsupported arguments will cause the function to fail (i.e. return NULL). Also, some arguments may alter the behaviour or otherwise interfere with other LibVLC functions.
@warning: There is absolutely no warranty or promise of forward, backward and cross-platform compatibility with regards to L{libvlc_new}() arguments. We recommend that you do not use them, other than when debugging.
'''
f = _Cfunctions.get('libvlc_new', None) or \
_Cfunction('libvlc_new', ((1,), (1,),), class_result(Instance),
ctypes.c_void_p, ctypes.c_int, ListPOINTER(ctypes.c_char_p))
return f(argc, argv)
def libvlc_release(p_instance):
'''Decrement the reference count of a libvlc instance, and destroy it
if it reaches zero.
@param p_instance: the instance to destroy.
'''
f = _Cfunctions.get('libvlc_release', None) or \
_Cfunction('libvlc_release', ((1,),), None,
None, Instance)
return f(p_instance)
def libvlc_retain(p_instance):
'''Increments the reference count of a libvlc instance.
The initial reference count is 1 after L{libvlc_new}() returns.
@param p_instance: the instance to reference.
'''
f = _Cfunctions.get('libvlc_retain', None) or \
_Cfunction('libvlc_retain', ((1,),), None,
None, Instance)
return f(p_instance)
def libvlc_add_intf(p_instance, name):
'''Try to start a user interface for the libvlc instance.
@param p_instance: the instance.
@param name: interface name, or NULL for default.
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_add_intf', None) or \
_Cfunction('libvlc_add_intf', ((1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p)
return f(p_instance, name)
def libvlc_set_user_agent(p_instance, name, http):
'''Sets the application name. LibVLC passes this as the user agent string
when a protocol requires it.
@param p_instance: LibVLC instance.
@param name: human-readable application name, e.g. "FooBar player 1.2.3".
@param http: HTTP User Agent, e.g. "FooBar/1.2.3 Python/2.6.0".
@version: LibVLC 1.1.1 or later.
'''
f = _Cfunctions.get('libvlc_set_user_agent', None) or \
_Cfunction('libvlc_set_user_agent', ((1,), (1,), (1,),), None,
None, Instance, ctypes.c_char_p, ctypes.c_char_p)
return f(p_instance, name, http)
def libvlc_get_version():
'''Retrieve libvlc version.
Example: "1.1.0-git The Luggage".
@return: a string containing the libvlc version.
'''
f = _Cfunctions.get('libvlc_get_version', None) or \
_Cfunction('libvlc_get_version', (), None,
ctypes.c_char_p)
return f()
def libvlc_get_compiler():
'''Retrieve libvlc compiler version.
Example: "gcc version 4.2.3 (Ubuntu 4.2.3-2ubuntu6)".
@return: a string containing the libvlc compiler version.
'''
f = _Cfunctions.get('libvlc_get_compiler', None) or \
_Cfunction('libvlc_get_compiler', (), None,
ctypes.c_char_p)
return f()
def libvlc_get_changeset():
'''Retrieve libvlc changeset.
Example: "aa9bce0bc4".
@return: a string containing the libvlc changeset.
'''
f = _Cfunctions.get('libvlc_get_changeset', None) or \
_Cfunction('libvlc_get_changeset', (), None,
ctypes.c_char_p)
return f()
def libvlc_free(ptr):
'''Frees a heap allocation returned by a LibVLC function.
If you know you're using the same underlying C run-time as the LibVLC
implementation, then you can call ANSI C free() directly instead.
@param ptr: the pointer.
'''
f = _Cfunctions.get('libvlc_free', None) or \
_Cfunction('libvlc_free', ((1,),), None,
None, ctypes.c_void_p)
return f(ptr)
def libvlc_event_attach(p_event_manager, i_event_type, f_callback, user_data):
'''Register for an event notification.
@param p_event_manager: the event manager to which you want to attach to. Generally it is obtained by vlc_my_object_event_manager() where my_object is the object you want to listen to.
@param i_event_type: the desired event to which we want to listen.
@param f_callback: the function to call when i_event_type occurs.
@param user_data: user provided data to carry with the event.
@return: 0 on success, ENOMEM on error.
'''
f = _Cfunctions.get('libvlc_event_attach', None) or \
_Cfunction('libvlc_event_attach', ((1,), (1,), (1,), (1,),), None,
ctypes.c_int, EventManager, ctypes.c_uint, Callback, ctypes.c_void_p)
return f(p_event_manager, i_event_type, f_callback, user_data)
def libvlc_event_detach(p_event_manager, i_event_type, f_callback, p_user_data):
'''Unregister an event notification.
@param p_event_manager: the event manager.
@param i_event_type: the desired event to which we want to unregister.
@param f_callback: the function to call when i_event_type occurs.
@param p_user_data: user provided data to carry with the event.
'''
f = _Cfunctions.get('libvlc_event_detach', None) or \
_Cfunction('libvlc_event_detach', ((1,), (1,), (1,), (1,),), None,
None, EventManager, ctypes.c_uint, Callback, ctypes.c_void_p)
return f(p_event_manager, i_event_type, f_callback, p_user_data)
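# Event sketch: attach a callback through the OO EventManager wrapper above
# (EventType.MediaPlayerEndReached is defined elsewhere in this module; the
# callback body is illustrative):
#
#   def on_end(event):
#       print('playback finished')
#
#   p.event_manager().event_attach(EventType.MediaPlayerEndReached, on_end)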
def libvlc_event_type_name(event_type):
'''Get an event's type name.
@param event_type: the desired event.
'''
f = _Cfunctions.get('libvlc_event_type_name', None) or \
_Cfunction('libvlc_event_type_name', ((1,),), None,
ctypes.c_char_p, ctypes.c_uint)
return f(event_type)
def libvlc_log_get_context(ctx):
'''Gets debugging information about a log message: the name of the VLC module
emitting the message and the message location within the source code.
The returned module name and file name will be NULL if unknown.
The returned line number will similarly be zero if unknown.
@param ctx: message context (as passed to the @ref libvlc_log_cb callback).
@return: the module name (or NULL), the source code file name (or NULL) and the source code line number (or 0).
@version: LibVLC 2.1.0 or later.
'''
f = _Cfunctions.get('libvlc_log_get_context', None) or \
_Cfunction('libvlc_log_get_context', ((1,), (2,), (2,), (2,),), None,
None, Log_ptr, ListPOINTER(ctypes.c_char_p), ListPOINTER(ctypes.c_char_p), ctypes.POINTER(ctypes.c_uint))
return f(ctx)
def libvlc_log_get_object(ctx, id):
'''Gets VLC object information about a log message: the type name of the VLC
object emitting the message, the object header if any, and a temporally-unique
object identifier. This information is mainly meant for B{manual}
troubleshooting.
The returned type name may be "generic" if unknown, but it cannot be NULL.
The returned header will be NULL if unset; in current versions, the header
is used to distinguish VLM inputs.
The returned object ID will be zero if the message is not associated with
any VLC object.
@param ctx: message context (as passed to the @ref libvlc_log_cb callback).
@return: the object type name (or NULL) and the object header (or NULL).
@version: LibVLC 2.1.0 or later.
'''
f = _Cfunctions.get('libvlc_log_get_object', None) or \
_Cfunction('libvlc_log_get_object', ((1,), (2,), (2,), (1,),), None,
None, Log_ptr, ListPOINTER(ctypes.c_char_p), ListPOINTER(ctypes.c_char_p), ctypes.POINTER(ctypes.c_uint))
return f(ctx, id)
def libvlc_log_unset(p_instance):
'''Unsets the logging callback for a LibVLC instance. This is rarely needed:
the callback is implicitly unset when the instance is destroyed.
This function will wait for any pending callbacks invocation to complete
(causing a deadlock if called from within the callback).
@param p_instance: libvlc instance.
@version: LibVLC 2.1.0 or later.
'''
f = _Cfunctions.get('libvlc_log_unset', None) or \
_Cfunction('libvlc_log_unset', ((1,),), None,
None, Instance)
return f(p_instance)
def libvlc_log_set(cb, data, p_instance):
'''Sets the logging callback for a LibVLC instance.
This function is thread-safe: it will wait for any pending callbacks
invocation to complete.
@param cb: callback function pointer.
@param data: opaque data pointer for the callback function.
@note: Some log messages (especially debug) are emitted by LibVLC while it is being initialized. These messages cannot be captured with this interface.
@warning: A deadlock may occur if this function is called from the callback.
@param p_instance: libvlc instance.
@version: LibVLC 2.1.0 or later.
'''
f = _Cfunctions.get('libvlc_log_set', None) or \
_Cfunction('libvlc_log_set', ((1,), (1,), (1,),), None,
None, Instance, LogCb, ctypes.c_void_p)
return f(cb, data, p_instance)
def libvlc_log_set_file(p_instance, stream):
'''Sets up logging to a file.
@param p_instance: libvlc instance.
@param stream: FILE pointer opened for writing (the FILE pointer must remain valid until L{libvlc_log_unset}()).
@version: LibVLC 2.1.0 or later.
'''
f = _Cfunctions.get('libvlc_log_set_file', None) or \
_Cfunction('libvlc_log_set_file', ((1,), (1,),), None,
None, Instance, FILE_ptr)
return f(p_instance, stream)
def libvlc_module_description_list_release(p_list):
'''Release a list of module descriptions.
@param p_list: the list to be released.
'''
f = _Cfunctions.get('libvlc_module_description_list_release', None) or \
_Cfunction('libvlc_module_description_list_release', ((1,),), None,
None, ctypes.POINTER(ModuleDescription))
return f(p_list)
def libvlc_audio_filter_list_get(p_instance):
'''Returns a list of audio filters that are available.
@param p_instance: libvlc instance.
@return: a list of module descriptions. It should be freed with L{libvlc_module_description_list_release}(). In case of an error, NULL is returned. See L{ModuleDescription} See L{libvlc_module_description_list_release}.
'''
f = _Cfunctions.get('libvlc_audio_filter_list_get', None) or \
_Cfunction('libvlc_audio_filter_list_get', ((1,),), None,
ctypes.POINTER(ModuleDescription), Instance)
return f(p_instance)
def libvlc_video_filter_list_get(p_instance):
'''Returns a list of video filters that are available.
@param p_instance: libvlc instance.
@return: a list of module descriptions. It should be freed with L{libvlc_module_description_list_release}(). In case of an error, NULL is returned. See L{ModuleDescription} See L{libvlc_module_description_list_release}.
'''
f = _Cfunctions.get('libvlc_video_filter_list_get', None) or \
_Cfunction('libvlc_video_filter_list_get', ((1,),), None,
ctypes.POINTER(ModuleDescription), Instance)
return f(p_instance)
def libvlc_clock():
'''Return the current time as defined by LibVLC. The unit is the microsecond.
Time increases monotonically (regardless of time zone changes and RTC
adjustments).
The origin is arbitrary but consistent across the whole system
(e.g. the system uptime, the time since the system was booted).
@note: On systems that support it, the POSIX monotonic clock is used.
'''
f = _Cfunctions.get('libvlc_clock', None) or \
_Cfunction('libvlc_clock', (), None,
ctypes.c_int64)
return f()
def libvlc_media_new_location(p_instance, psz_mrl):
'''Create a media with a certain given media resource location,
for instance a valid URL.
@note: To refer to a local file with this function,
the file://... URI syntax B{must} be used (see IETF RFC3986).
We recommend using L{libvlc_media_new_path}() instead when dealing with
local files.
See L{libvlc_media_release}.
@param p_instance: the instance.
@param psz_mrl: the media location.
@return: the newly created media or NULL on error.
'''
f = _Cfunctions.get('libvlc_media_new_location', None) or \
_Cfunction('libvlc_media_new_location', ((1,), (1,),), class_result(Media),
ctypes.c_void_p, Instance, ctypes.c_char_p)
return f(p_instance, psz_mrl)
def libvlc_media_new_path(p_instance, path):
'''Create a media for a certain file path.
See L{libvlc_media_release}.
@param p_instance: the instance.
@param path: local filesystem path.
@return: the newly created media or NULL on error.
'''
f = _Cfunctions.get('libvlc_media_new_path', None) or \
_Cfunction('libvlc_media_new_path', ((1,), (1,),), class_result(Media),
ctypes.c_void_p, Instance, ctypes.c_char_p)
return f(p_instance, path)
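# Media-creation sketch (file names illustrative): URLs go through
# libvlc_media_new_location(), local files through libvlc_media_new_path():
#
#   m1 = libvlc_media_new_location(inst, str_to_bytes('http://example.com/s.mp3'))
#   m2 = libvlc_media_new_path(inst, str_to_bytes('/tmp/a.mp3'))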
def libvlc_media_new_fd(p_instance, fd):
'''Create a media for an already open file descriptor.
The file descriptor shall be open for reading (or reading and writing).
Regular file descriptors, pipe read descriptors and character device
descriptors (including TTYs) are supported on all platforms.
Block device descriptors are supported where available.
Directory descriptors are supported on systems that provide fdopendir().
Sockets are supported on all platforms where they are file descriptors,
i.e. all except Windows.
@note: This library will B{not} automatically close the file descriptor
under any circumstance. Nevertheless, a file descriptor can usually only be
rendered once in a media player. To render it a second time, the file
descriptor should probably be rewound to the beginning with lseek().
See L{libvlc_media_release}.
@param p_instance: the instance.
@param fd: open file descriptor.
@return: the newly created media or NULL on error.
@version: LibVLC 1.1.5 and later.
'''
f = _Cfunctions.get('libvlc_media_new_fd', None) or \
_Cfunction('libvlc_media_new_fd', ((1,), (1,),), class_result(Media),
ctypes.c_void_p, Instance, ctypes.c_int)
return f(p_instance, fd)
def libvlc_media_new_as_node(p_instance, psz_name):
'''Create a media as an empty node with a given name.
See L{libvlc_media_release}.
@param p_instance: the instance.
@param psz_name: the name of the node.
@return: the new empty media or NULL on error.
'''
f = _Cfunctions.get('libvlc_media_new_as_node', None) or \
_Cfunction('libvlc_media_new_as_node', ((1,), (1,),), class_result(Media),
ctypes.c_void_p, Instance, ctypes.c_char_p)
return f(p_instance, psz_name)
def libvlc_media_add_option(p_md, psz_options):
'''Add an option to the media.
This option will be used to determine how the media_player will
read the media. This makes it possible to use VLC's advanced
reading/streaming options on a per-media basis.
@note: The options are listed in 'vlc --long-help' from the command line,
e.g. "-sout-all". Keep in mind that available options and their semantics
vary across LibVLC versions and builds.
@warning: Not all options affect L{Media} objects:
specifically, due to architectural issues most audio and video options,
such as text renderer options, have no effect on an individual media.
These options must be set through L{libvlc_new}() instead.
@param p_md: the media descriptor.
@param psz_options: the options (as a string).
'''
f = _Cfunctions.get('libvlc_media_add_option', None) or \
_Cfunction('libvlc_media_add_option', ((1,), (1,),), None,
None, Media, ctypes.c_char_p)
return f(p_md, psz_options)
def libvlc_media_add_option_flag(p_md, psz_options, i_flags):
'''Add an option to the media with configurable flags.
This option will be used to determine how the media_player will
read the media. This makes it possible to use VLC's advanced
reading/streaming options on a per-media basis.
The options are detailed in vlc --long-help, for instance
"--sout-all". Note that all options are not usable on medias:
specifically, due to architectural issues, video-related options
such as text renderer options cannot be set on a single media. They
must be set on the whole libvlc instance instead.
@param p_md: the media descriptor.
@param psz_options: the options (as a string).
@param i_flags: the flags for this option.
'''
f = _Cfunctions.get('libvlc_media_add_option_flag', None) or \
_Cfunction('libvlc_media_add_option_flag', ((1,), (1,), (1,),), None,
None, Media, ctypes.c_char_p, ctypes.c_uint)
return f(p_md, psz_options, i_flags)
def libvlc_media_retain(p_md):
'''Retain a reference to a media descriptor object (libvlc_media_t). Use
L{libvlc_media_release}() to decrement the reference count of a
media descriptor object.
@param p_md: the media descriptor.
'''
f = _Cfunctions.get('libvlc_media_retain', None) or \
_Cfunction('libvlc_media_retain', ((1,),), None,
None, Media)
return f(p_md)
def libvlc_media_release(p_md):
'''Decrement the reference count of a media descriptor object. If the
reference count is 0, then L{libvlc_media_release}() will release the
media descriptor object. It will send out an libvlc_MediaFreed event
to all listeners. If the media descriptor object has been released it
should not be used again.
@param p_md: the media descriptor.
'''
f = _Cfunctions.get('libvlc_media_release', None) or \
_Cfunction('libvlc_media_release', ((1,),), None,
None, Media)
return f(p_md)
def libvlc_media_get_mrl(p_md):
'''Get the media resource locator (mrl) from a media descriptor object.
@param p_md: a media descriptor object.
@return: string with mrl of media descriptor object.
'''
f = _Cfunctions.get('libvlc_media_get_mrl', None) or \
_Cfunction('libvlc_media_get_mrl', ((1,),), string_result,
ctypes.c_void_p, Media)
return f(p_md)
def libvlc_media_duplicate(p_md):
'''Duplicate a media descriptor object.
@param p_md: a media descriptor object.
'''
f = _Cfunctions.get('libvlc_media_duplicate', None) or \
_Cfunction('libvlc_media_duplicate', ((1,),), class_result(Media),
ctypes.c_void_p, Media)
return f(p_md)
def libvlc_media_get_meta(p_md, e_meta):
'''Read the meta of the media.
If the media has not yet been parsed this will return NULL.
This method automatically calls L{libvlc_media_parse_async}(), so after calling
it you may receive a libvlc_MediaMetaChanged event. If you prefer a synchronous
version ensure that you call L{libvlc_media_parse}() before get_meta().
See L{libvlc_media_parse}
See L{libvlc_media_parse_async}
See libvlc_MediaMetaChanged.
@param p_md: the media descriptor.
@param e_meta: the meta to read.
@return: the media's meta.
'''
f = _Cfunctions.get('libvlc_media_get_meta', None) or \
_Cfunction('libvlc_media_get_meta', ((1,), (1,),), string_result,
ctypes.c_void_p, Media, Meta)
return f(p_md, e_meta)
def libvlc_media_set_meta(p_md, e_meta, psz_value):
'''Set the meta of the media (this function will not save the meta, call
L{libvlc_media_save_meta} in order to save the meta).
@param p_md: the media descriptor.
@param e_meta: the meta to write.
@param psz_value: the media's meta.
'''
f = _Cfunctions.get('libvlc_media_set_meta', None) or \
_Cfunction('libvlc_media_set_meta', ((1,), (1,), (1,),), None,
None, Media, Meta, ctypes.c_char_p)
return f(p_md, e_meta, psz_value)
def libvlc_media_save_meta(p_md):
'''Save the meta previously set.
@param p_md: the media descriptor.
@return: true if the write operation was successful.
'''
f = _Cfunctions.get('libvlc_media_save_meta', None) or \
_Cfunction('libvlc_media_save_meta', ((1,),), None,
ctypes.c_int, Media)
return f(p_md)
def libvlc_media_get_state(p_md):
'''Get current state of media descriptor object. Possible media states
are defined in libvlc_structures.c ( libvlc_NothingSpecial=0,
libvlc_Opening, libvlc_Buffering, libvlc_Playing, libvlc_Paused,
libvlc_Stopped, libvlc_Ended,
libvlc_Error).
See libvlc_state_t.
@param p_md: a media descriptor object.
@return: state of media descriptor object.
'''
f = _Cfunctions.get('libvlc_media_get_state', None) or \
_Cfunction('libvlc_media_get_state', ((1,),), None,
State, Media)
return f(p_md)
def libvlc_media_get_stats(p_md, p_stats):
'''Get the current statistics about the media.
@param p_md: media descriptor object.
@param p_stats: structure that contains the statistics about the media (this structure must be allocated by the caller).
@return: true if the statistics are available, false otherwise.
'''
f = _Cfunctions.get('libvlc_media_get_stats', None) or \
_Cfunction('libvlc_media_get_stats', ((1,), (1,),), None,
ctypes.c_int, Media, ctypes.POINTER(MediaStats))
return f(p_md, p_stats)
def libvlc_media_subitems(p_md):
'''Get subitems of media descriptor object. This will increment
the reference count of supplied media descriptor object. Use
L{libvlc_media_list_release}() to decrement the reference count.
@param p_md: media descriptor object.
@return: list of media descriptor subitems or NULL.
'''
f = _Cfunctions.get('libvlc_media_subitems', None) or \
_Cfunction('libvlc_media_subitems', ((1,),), class_result(MediaList),
ctypes.c_void_p, Media)
return f(p_md)
def libvlc_media_event_manager(p_md):
'''Get event manager from media descriptor object.
NOTE: this function doesn't increment reference counting.
@param p_md: a media descriptor object.
@return: event manager object.
'''
f = _Cfunctions.get('libvlc_media_event_manager', None) or \
_Cfunction('libvlc_media_event_manager', ((1,),), class_result(EventManager),
ctypes.c_void_p, Media)
return f(p_md)
def libvlc_media_get_duration(p_md):
'''Get duration (in ms) of media descriptor object item.
@param p_md: media descriptor object.
@return: duration of media item or -1 on error.
'''
f = _Cfunctions.get('libvlc_media_get_duration', None) or \
_Cfunction('libvlc_media_get_duration', ((1,),), None,
ctypes.c_longlong, Media)
return f(p_md)
def libvlc_media_parse(p_md):
'''Parse a media.
This fetches (local) meta data and tracks information.
The method is synchronous.
See L{libvlc_media_parse_async}
See L{libvlc_media_get_meta}
See libvlc_media_get_tracks_info.
@param p_md: media descriptor object.
'''
f = _Cfunctions.get('libvlc_media_parse', None) or \
_Cfunction('libvlc_media_parse', ((1,),), None,
None, Media)
return f(p_md)
def libvlc_media_parse_async(p_md):
'''Parse a media.
This fetches (local) meta data and tracks information.
This method is the asynchronous version of L{libvlc_media_parse}().
To track when this is over you can listen to libvlc_MediaParsedChanged
event. However if the media was already parsed you will not receive this
event.
See L{libvlc_media_parse}
See libvlc_MediaParsedChanged
See L{libvlc_media_get_meta}
See libvlc_media_get_tracks_info.
@param p_md: media descriptor object.
'''
f = _Cfunctions.get('libvlc_media_parse_async', None) or \
_Cfunction('libvlc_media_parse_async', ((1,),), None,
None, Media)
return f(p_md)
def libvlc_media_is_parsed(p_md):
'''Get Parsed status for media descriptor object.
See libvlc_MediaParsedChanged.
@param p_md: media descriptor object.
@return: true if the media object has been parsed, false otherwise.
'''
f = _Cfunctions.get('libvlc_media_is_parsed', None) or \
_Cfunction('libvlc_media_is_parsed', ((1,),), None,
ctypes.c_int, Media)
return f(p_md)
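# Synchronous meta-data sketch (Meta.Title is defined elsewhere in this
# module; the media object m is assumed to exist):
#
#   if not libvlc_media_is_parsed(m):
#       libvlc_media_parse(m)
#   print(libvlc_media_get_meta(m, Meta.Title))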
def libvlc_media_set_user_data(p_md, p_new_user_data):
'''Sets media descriptor's user_data. user_data is specialized data
accessed by the host application, VLC.framework uses it as a pointer to
a native object that references a L{Media} pointer.
@param p_md: media descriptor object.
@param p_new_user_data: pointer to user data.
'''
f = _Cfunctions.get('libvlc_media_set_user_data', None) or \
_Cfunction('libvlc_media_set_user_data', ((1,), (1,),), None,
None, Media, ctypes.c_void_p)
return f(p_md, p_new_user_data)
def libvlc_media_get_user_data(p_md):
'''Get media descriptor's user_data. user_data is specialized data
accessed by the host application, VLC.framework uses it as a pointer to
a native object that references a L{Media} pointer.
@param p_md: media descriptor object.
'''
f = _Cfunctions.get('libvlc_media_get_user_data', None) or \
_Cfunction('libvlc_media_get_user_data', ((1,),), None,
ctypes.c_void_p, Media)
return f(p_md)
def libvlc_media_tracks_get(p_md, tracks):
'''Get media descriptor's elementary streams description.
Note that you need to call L{libvlc_media_parse}() or play the media at least once
before calling this function.
Not doing this will result in an empty array.
@param p_md: media descriptor object.
@param tracks: address to store an allocated array of Elementary Streams descriptions (must be freed with L{libvlc_media_tracks_release}()).
@return: the number of Elementary Streams (zero on error).
@version: LibVLC 2.1.0 and later.
'''
f = _Cfunctions.get('libvlc_media_tracks_get', None) or \
_Cfunction('libvlc_media_tracks_get', ((1,), (1,),), None,
ctypes.c_uint, Media, ctypes.POINTER(ctypes.POINTER(MediaTrack)))
return f(p_md, tracks)
def libvlc_media_tracks_release(p_tracks, i_count):
'''Release media descriptor's elementary streams description array.
@param p_tracks: tracks info array to release.
@param i_count: number of elements in the array.
@version: LibVLC 2.1.0 and later.
'''
f = _Cfunctions.get('libvlc_media_tracks_release', None) or \
_Cfunction('libvlc_media_tracks_release', ((1,), (1,),), None,
None, ctypes.POINTER(MediaTrack), ctypes.c_uint)
return f(p_tracks, i_count)
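# Elementary-stream enumeration sketch for the 2.1+ API (the i_type and
# i_codec field names are assumed to match the MediaTrack structure defined
# elsewhere in this module):
#
#   tracks = ctypes.POINTER(MediaTrack)()
#   n = libvlc_media_tracks_get(m, ctypes.byref(tracks))
#   for i in range(n):
#       print('%s %s' % (tracks[i].i_type, tracks[i].i_codec))
#   libvlc_media_tracks_release(tracks, n)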
def libvlc_media_discoverer_new_from_name(p_inst, psz_name):
'''Discover media service by name.
@param p_inst: libvlc instance.
@param psz_name: service name.
@return: media discover object or NULL in case of error.
'''
f = _Cfunctions.get('libvlc_media_discoverer_new_from_name', None) or \
_Cfunction('libvlc_media_discoverer_new_from_name', ((1,), (1,),), class_result(MediaDiscoverer),
ctypes.c_void_p, Instance, ctypes.c_char_p)
return f(p_inst, psz_name)
def libvlc_media_discoverer_release(p_mdis):
'''Release media discover object. If the reference count reaches 0, then
the object will be released.
@param p_mdis: media service discover object.
'''
f = _Cfunctions.get('libvlc_media_discoverer_release', None) or \
_Cfunction('libvlc_media_discoverer_release', ((1,),), None,
None, MediaDiscoverer)
return f(p_mdis)
def libvlc_media_discoverer_localized_name(p_mdis):
    '''Get the localized name of a media service discover object.
@param p_mdis: media discover object.
@return: localized name.
'''
f = _Cfunctions.get('libvlc_media_discoverer_localized_name', None) or \
_Cfunction('libvlc_media_discoverer_localized_name', ((1,),), string_result,
ctypes.c_void_p, MediaDiscoverer)
return f(p_mdis)
def libvlc_media_discoverer_media_list(p_mdis):
'''Get media service discover media list.
@param p_mdis: media service discover object.
@return: list of media items.
'''
f = _Cfunctions.get('libvlc_media_discoverer_media_list', None) or \
_Cfunction('libvlc_media_discoverer_media_list', ((1,),), class_result(MediaList),
ctypes.c_void_p, MediaDiscoverer)
return f(p_mdis)
def libvlc_media_discoverer_event_manager(p_mdis):
'''Get event manager from media service discover object.
@param p_mdis: media service discover object.
@return: event manager object.
'''
f = _Cfunctions.get('libvlc_media_discoverer_event_manager', None) or \
_Cfunction('libvlc_media_discoverer_event_manager', ((1,),), class_result(EventManager),
ctypes.c_void_p, MediaDiscoverer)
return f(p_mdis)
def libvlc_media_discoverer_is_running(p_mdis):
'''Query if media service discover object is running.
@param p_mdis: media service discover object.
@return: true if running, false if not \libvlc_return_bool.
'''
f = _Cfunctions.get('libvlc_media_discoverer_is_running', None) or \
_Cfunction('libvlc_media_discoverer_is_running', ((1,),), None,
ctypes.c_int, MediaDiscoverer)
return f(p_mdis)
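# A hedged sketch (not part of the generated bindings) tying the media
# discoverer calls together: create a discoverer by service name, read its
# localized name, and release it when done.  The service name "upnp" is only
# illustrative; valid names depend on the installed VLC modules.
def _example_discoverer_name(p_inst, psz_name='upnp'):
    '''Hypothetical helper: return the localized name of a discoverer service.'''
    mdis = libvlc_media_discoverer_new_from_name(p_inst, psz_name)
    if mdis is None:
        return None
    try:
        return libvlc_media_discoverer_localized_name(mdis)
    finally:
        libvlc_media_discoverer_release(mdis)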
def libvlc_media_library_new(p_instance):
    '''Create a new Media Library object.
@param p_instance: the libvlc instance.
@return: a new object or NULL on error.
'''
f = _Cfunctions.get('libvlc_media_library_new', None) or \
_Cfunction('libvlc_media_library_new', ((1,),), class_result(MediaLibrary),
ctypes.c_void_p, Instance)
return f(p_instance)
def libvlc_media_library_release(p_mlib):
    '''Release media library object. This function decrements the
reference count of the media library object. If it reaches 0,
then the object will be released.
@param p_mlib: media library object.
'''
f = _Cfunctions.get('libvlc_media_library_release', None) or \
_Cfunction('libvlc_media_library_release', ((1,),), None,
None, MediaLibrary)
return f(p_mlib)
def libvlc_media_library_retain(p_mlib):
'''Retain a reference to a media library object. This function will
increment the reference counting for this object. Use
L{libvlc_media_library_release}() to decrement the reference count.
@param p_mlib: media library object.
'''
f = _Cfunctions.get('libvlc_media_library_retain', None) or \
_Cfunction('libvlc_media_library_retain', ((1,),), None,
None, MediaLibrary)
return f(p_mlib)
def libvlc_media_library_load(p_mlib):
'''Load media library.
@param p_mlib: media library object.
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_media_library_load', None) or \
_Cfunction('libvlc_media_library_load', ((1,),), None,
ctypes.c_int, MediaLibrary)
return f(p_mlib)
def libvlc_media_library_media_list(p_mlib):
'''Get media library subitems.
@param p_mlib: media library object.
@return: media list subitems.
'''
f = _Cfunctions.get('libvlc_media_library_media_list', None) or \
_Cfunction('libvlc_media_library_media_list', ((1,),), class_result(MediaList),
ctypes.c_void_p, MediaLibrary)
return f(p_mlib)
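# A hedged sketch of the media library life cycle above: create the library,
# load it, and release it; subitems would be read in between with
# L{libvlc_media_library_media_list}().
def _example_load_library(p_instance):
    '''Hypothetical helper: report whether the media library loads.'''
    mlib = libvlc_media_library_new(p_instance)
    if mlib is None:
        return False
    try:
        return libvlc_media_library_load(mlib) == 0
    finally:
        libvlc_media_library_release(mlib)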
def libvlc_media_list_new(p_instance):
'''Create an empty media list.
@param p_instance: libvlc instance.
@return: empty media list, or NULL on error.
'''
f = _Cfunctions.get('libvlc_media_list_new', None) or \
_Cfunction('libvlc_media_list_new', ((1,),), class_result(MediaList),
ctypes.c_void_p, Instance)
return f(p_instance)
def libvlc_media_list_release(p_ml):
'''Release media list created with L{libvlc_media_list_new}().
@param p_ml: a media list created with L{libvlc_media_list_new}().
'''
f = _Cfunctions.get('libvlc_media_list_release', None) or \
_Cfunction('libvlc_media_list_release', ((1,),), None,
None, MediaList)
return f(p_ml)
def libvlc_media_list_retain(p_ml):
'''Retain reference to a media list.
@param p_ml: a media list created with L{libvlc_media_list_new}().
'''
f = _Cfunctions.get('libvlc_media_list_retain', None) or \
_Cfunction('libvlc_media_list_retain', ((1,),), None,
None, MediaList)
return f(p_ml)
def libvlc_media_list_set_media(p_ml, p_md):
'''Associate media instance with this media list instance.
If another media instance was present it will be released.
The L{libvlc_media_list_lock} should NOT be held upon entering this function.
@param p_ml: a media list instance.
@param p_md: media instance to add.
'''
f = _Cfunctions.get('libvlc_media_list_set_media', None) or \
_Cfunction('libvlc_media_list_set_media', ((1,), (1,),), None,
None, MediaList, Media)
return f(p_ml, p_md)
def libvlc_media_list_media(p_ml):
'''Get media instance from this media list instance. This action will increase
the refcount on the media instance.
The L{libvlc_media_list_lock} should NOT be held upon entering this function.
@param p_ml: a media list instance.
@return: media instance.
'''
f = _Cfunctions.get('libvlc_media_list_media', None) or \
_Cfunction('libvlc_media_list_media', ((1,),), class_result(Media),
ctypes.c_void_p, MediaList)
return f(p_ml)
def libvlc_media_list_add_media(p_ml, p_md):
    '''Add a media instance to the media list.
The L{libvlc_media_list_lock} should be held upon entering this function.
@param p_ml: a media list instance.
@param p_md: a media instance.
@return: 0 on success, -1 if the media list is read-only.
'''
f = _Cfunctions.get('libvlc_media_list_add_media', None) or \
_Cfunction('libvlc_media_list_add_media', ((1,), (1,),), None,
ctypes.c_int, MediaList, Media)
return f(p_ml, p_md)
def libvlc_media_list_insert_media(p_ml, p_md, i_pos):
    '''Insert a media instance into the media list at a position.
The L{libvlc_media_list_lock} should be held upon entering this function.
@param p_ml: a media list instance.
@param p_md: a media instance.
@param i_pos: position in array where to insert.
@return: 0 on success, -1 if the media list is read-only.
'''
f = _Cfunctions.get('libvlc_media_list_insert_media', None) or \
_Cfunction('libvlc_media_list_insert_media', ((1,), (1,), (1,),), None,
ctypes.c_int, MediaList, Media, ctypes.c_int)
return f(p_ml, p_md, i_pos)
def libvlc_media_list_remove_index(p_ml, i_pos):
    '''Remove a media instance from the media list at a position.
The L{libvlc_media_list_lock} should be held upon entering this function.
@param p_ml: a media list instance.
    @param i_pos: position in the array of the item to remove.
@return: 0 on success, -1 if the list is read-only or the item was not found.
'''
f = _Cfunctions.get('libvlc_media_list_remove_index', None) or \
_Cfunction('libvlc_media_list_remove_index', ((1,), (1,),), None,
ctypes.c_int, MediaList, ctypes.c_int)
return f(p_ml, i_pos)
def libvlc_media_list_count(p_ml):
    '''Get the number of items in the media list.
The L{libvlc_media_list_lock} should be held upon entering this function.
@param p_ml: a media list instance.
@return: number of items in media list.
'''
f = _Cfunctions.get('libvlc_media_list_count', None) or \
_Cfunction('libvlc_media_list_count', ((1,),), None,
ctypes.c_int, MediaList)
return f(p_ml)
def libvlc_media_list_item_at_index(p_ml, i_pos):
    '''Get the media instance at a position in the media list.
The L{libvlc_media_list_lock} should be held upon entering this function.
@param p_ml: a media list instance.
    @param i_pos: position in the array of the item to return.
@return: media instance at position i_pos, or NULL if not found. In case of success, L{libvlc_media_retain}() is called to increase the refcount on the media.
'''
f = _Cfunctions.get('libvlc_media_list_item_at_index', None) or \
_Cfunction('libvlc_media_list_item_at_index', ((1,), (1,),), class_result(Media),
ctypes.c_void_p, MediaList, ctypes.c_int)
return f(p_ml, i_pos)
def libvlc_media_list_index_of_item(p_ml, p_md):
    '''Find the index position of a media instance in the media list.
Warning: the function will return the first matched position.
The L{libvlc_media_list_lock} should be held upon entering this function.
@param p_ml: a media list instance.
@param p_md: media instance.
@return: position of media instance or -1 if media not found.
'''
f = _Cfunctions.get('libvlc_media_list_index_of_item', None) or \
_Cfunction('libvlc_media_list_index_of_item', ((1,), (1,),), None,
ctypes.c_int, MediaList, Media)
return f(p_ml, p_md)
def libvlc_media_list_is_readonly(p_ml):
'''This indicates if this media list is read-only from a user point of view.
@param p_ml: media list instance.
@return: 1 on readonly, 0 on readwrite \libvlc_return_bool.
'''
f = _Cfunctions.get('libvlc_media_list_is_readonly', None) or \
_Cfunction('libvlc_media_list_is_readonly', ((1,),), None,
ctypes.c_int, MediaList)
return f(p_ml)
def libvlc_media_list_lock(p_ml):
'''Get lock on media list items.
@param p_ml: a media list instance.
'''
f = _Cfunctions.get('libvlc_media_list_lock', None) or \
_Cfunction('libvlc_media_list_lock', ((1,),), None,
None, MediaList)
return f(p_ml)
def libvlc_media_list_unlock(p_ml):
'''Release lock on media list items
The L{libvlc_media_list_lock} should be held upon entering this function.
@param p_ml: a media list instance.
'''
f = _Cfunctions.get('libvlc_media_list_unlock', None) or \
_Cfunction('libvlc_media_list_unlock', ((1,),), None,
None, MediaList)
return f(p_ml)
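# A hedged sketch of the locking convention spelled out in the docstrings
# above: item-wise reads and writes happen between L{libvlc_media_list_lock}
# and L{libvlc_media_list_unlock}.
def _example_media_list_items(p_ml):
    '''Hypothetical helper: snapshot the items of a media list.'''
    libvlc_media_list_lock(p_ml)
    try:
        # Each item comes back retained (see libvlc_media_list_item_at_index),
        # so the caller owns a reference to every returned Media.
        return [libvlc_media_list_item_at_index(p_ml, i)
                for i in range(libvlc_media_list_count(p_ml))]
    finally:
        libvlc_media_list_unlock(p_ml)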
def libvlc_media_list_event_manager(p_ml):
'''Get libvlc_event_manager from this media list instance.
The p_event_manager is immutable, so you don't have to hold the lock.
@param p_ml: a media list instance.
@return: libvlc_event_manager.
'''
f = _Cfunctions.get('libvlc_media_list_event_manager', None) or \
_Cfunction('libvlc_media_list_event_manager', ((1,),), class_result(EventManager),
ctypes.c_void_p, MediaList)
return f(p_ml)
def libvlc_media_list_player_new(p_instance):
'''Create new media_list_player.
@param p_instance: libvlc instance.
@return: media list player instance or NULL on error.
'''
f = _Cfunctions.get('libvlc_media_list_player_new', None) or \
_Cfunction('libvlc_media_list_player_new', ((1,),), class_result(MediaListPlayer),
ctypes.c_void_p, Instance)
return f(p_instance)
def libvlc_media_list_player_release(p_mlp):
    '''Release a media_list_player after use.
    Decrement the reference count of a media list player object. If the
    reference count is 0, then L{libvlc_media_list_player_release}() will
    release the media list player object. If the media list player object
    has been released, then it should not be used again.
@param p_mlp: media list player instance.
'''
f = _Cfunctions.get('libvlc_media_list_player_release', None) or \
_Cfunction('libvlc_media_list_player_release', ((1,),), None,
None, MediaListPlayer)
return f(p_mlp)
def libvlc_media_list_player_retain(p_mlp):
    '''Retain a reference to a media list player object. Use
    L{libvlc_media_list_player_release}() to decrement the reference count.
    @param p_mlp: media list player object.
'''
f = _Cfunctions.get('libvlc_media_list_player_retain', None) or \
_Cfunction('libvlc_media_list_player_retain', ((1,),), None,
None, MediaListPlayer)
return f(p_mlp)
def libvlc_media_list_player_event_manager(p_mlp):
'''Return the event manager of this media_list_player.
@param p_mlp: media list player instance.
@return: the event manager.
'''
f = _Cfunctions.get('libvlc_media_list_player_event_manager', None) or \
_Cfunction('libvlc_media_list_player_event_manager', ((1,),), class_result(EventManager),
ctypes.c_void_p, MediaListPlayer)
return f(p_mlp)
def libvlc_media_list_player_set_media_player(p_mlp, p_mi):
'''Replace media player in media_list_player with this instance.
@param p_mlp: media list player instance.
@param p_mi: media player instance.
'''
f = _Cfunctions.get('libvlc_media_list_player_set_media_player', None) or \
_Cfunction('libvlc_media_list_player_set_media_player', ((1,), (1,),), None,
None, MediaListPlayer, MediaPlayer)
return f(p_mlp, p_mi)
def libvlc_media_list_player_set_media_list(p_mlp, p_mlist):
'''Set the media list associated with the player.
@param p_mlp: media list player instance.
@param p_mlist: list of media.
'''
f = _Cfunctions.get('libvlc_media_list_player_set_media_list', None) or \
_Cfunction('libvlc_media_list_player_set_media_list', ((1,), (1,),), None,
None, MediaListPlayer, MediaList)
return f(p_mlp, p_mlist)
def libvlc_media_list_player_play(p_mlp):
'''Play media list.
@param p_mlp: media list player instance.
'''
f = _Cfunctions.get('libvlc_media_list_player_play', None) or \
_Cfunction('libvlc_media_list_player_play', ((1,),), None,
None, MediaListPlayer)
return f(p_mlp)
def libvlc_media_list_player_pause(p_mlp):
'''Toggle pause (or resume) media list.
@param p_mlp: media list player instance.
'''
f = _Cfunctions.get('libvlc_media_list_player_pause', None) or \
_Cfunction('libvlc_media_list_player_pause', ((1,),), None,
None, MediaListPlayer)
return f(p_mlp)
def libvlc_media_list_player_is_playing(p_mlp):
'''Is media list playing?
@param p_mlp: media list player instance.
@return: true for playing and false for not playing \libvlc_return_bool.
'''
f = _Cfunctions.get('libvlc_media_list_player_is_playing', None) or \
_Cfunction('libvlc_media_list_player_is_playing', ((1,),), None,
ctypes.c_int, MediaListPlayer)
return f(p_mlp)
def libvlc_media_list_player_get_state(p_mlp):
'''Get current libvlc_state of media list player.
@param p_mlp: media list player instance.
@return: libvlc_state_t for media list player.
'''
f = _Cfunctions.get('libvlc_media_list_player_get_state', None) or \
_Cfunction('libvlc_media_list_player_get_state', ((1,),), None,
State, MediaListPlayer)
return f(p_mlp)
def libvlc_media_list_player_play_item_at_index(p_mlp, i_index):
'''Play media list item at position index.
@param p_mlp: media list player instance.
@param i_index: index in media list to play.
    @return: 0 upon success, -1 if the item wasn't found.
'''
f = _Cfunctions.get('libvlc_media_list_player_play_item_at_index', None) or \
_Cfunction('libvlc_media_list_player_play_item_at_index', ((1,), (1,),), None,
ctypes.c_int, MediaListPlayer, ctypes.c_int)
return f(p_mlp, i_index)
def libvlc_media_list_player_play_item(p_mlp, p_md):
'''Play the given media item.
@param p_mlp: media list player instance.
@param p_md: the media instance.
@return: 0 upon success, -1 if the media is not part of the media list.
'''
f = _Cfunctions.get('libvlc_media_list_player_play_item', None) or \
_Cfunction('libvlc_media_list_player_play_item', ((1,), (1,),), None,
ctypes.c_int, MediaListPlayer, Media)
return f(p_mlp, p_md)
def libvlc_media_list_player_stop(p_mlp):
'''Stop playing media list.
@param p_mlp: media list player instance.
'''
f = _Cfunctions.get('libvlc_media_list_player_stop', None) or \
_Cfunction('libvlc_media_list_player_stop', ((1,),), None,
None, MediaListPlayer)
return f(p_mlp)
def libvlc_media_list_player_next(p_mlp):
'''Play next item from media list.
@param p_mlp: media list player instance.
    @return: 0 upon success, -1 if there is no next item.
'''
f = _Cfunctions.get('libvlc_media_list_player_next', None) or \
_Cfunction('libvlc_media_list_player_next', ((1,),), None,
ctypes.c_int, MediaListPlayer)
return f(p_mlp)
def libvlc_media_list_player_previous(p_mlp):
'''Play previous item from media list.
@param p_mlp: media list player instance.
    @return: 0 upon success, -1 if there is no previous item.
'''
f = _Cfunctions.get('libvlc_media_list_player_previous', None) or \
_Cfunction('libvlc_media_list_player_previous', ((1,),), None,
ctypes.c_int, MediaListPlayer)
return f(p_mlp)
def libvlc_media_list_player_set_playback_mode(p_mlp, e_mode):
'''Sets the playback mode for the playlist.
@param p_mlp: media list player instance.
@param e_mode: playback mode specification.
'''
f = _Cfunctions.get('libvlc_media_list_player_set_playback_mode', None) or \
_Cfunction('libvlc_media_list_player_set_playback_mode', ((1,), (1,),), None,
None, MediaListPlayer, PlaybackMode)
return f(p_mlp, e_mode)
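# A hedged sketch assembling the media list player calls above: attach a
# list, pick a playback mode and start playing.  PlaybackMode.loop is assumed
# to be one of the enum values defined earlier in this module.
def _example_play_list(p_instance, p_mlist):
    '''Hypothetical helper: play a media list in a loop.'''
    mlp = libvlc_media_list_player_new(p_instance)
    if mlp is None:
        return None
    libvlc_media_list_player_set_media_list(mlp, p_mlist)
    libvlc_media_list_player_set_playback_mode(mlp, PlaybackMode.loop)
    libvlc_media_list_player_play(mlp)
    return mlp  # the caller releases it with libvlc_media_list_player_release()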
def libvlc_media_player_new(p_libvlc_instance):
'''Create an empty Media Player object.
@param p_libvlc_instance: the libvlc instance in which the Media Player should be created.
@return: a new media player object, or NULL on error.
'''
f = _Cfunctions.get('libvlc_media_player_new', None) or \
_Cfunction('libvlc_media_player_new', ((1,),), class_result(MediaPlayer),
ctypes.c_void_p, Instance)
return f(p_libvlc_instance)
def libvlc_media_player_new_from_media(p_md):
'''Create a Media Player object from a Media.
@param p_md: the media. Afterwards the p_md can be safely destroyed.
@return: a new media player object, or NULL on error.
'''
f = _Cfunctions.get('libvlc_media_player_new_from_media', None) or \
_Cfunction('libvlc_media_player_new_from_media', ((1,),), class_result(MediaPlayer),
ctypes.c_void_p, Media)
return f(p_md)
def libvlc_media_player_release(p_mi):
    '''Release a media_player after use.
Decrement the reference count of a media player object. If the
reference count is 0, then L{libvlc_media_player_release}() will
release the media player object. If the media player object
has been released, then it should not be used again.
@param p_mi: the Media Player to free.
'''
f = _Cfunctions.get('libvlc_media_player_release', None) or \
_Cfunction('libvlc_media_player_release', ((1,),), None,
None, MediaPlayer)
return f(p_mi)
def libvlc_media_player_retain(p_mi):
'''Retain a reference to a media player object. Use
L{libvlc_media_player_release}() to decrement reference count.
@param p_mi: media player object.
'''
f = _Cfunctions.get('libvlc_media_player_retain', None) or \
_Cfunction('libvlc_media_player_retain', ((1,),), None,
None, MediaPlayer)
return f(p_mi)
def libvlc_media_player_set_media(p_mi, p_md):
    '''Set the media that will be used by the media_player. Any
    previous media will be released.
@param p_mi: the Media Player.
@param p_md: the Media. Afterwards the p_md can be safely destroyed.
'''
f = _Cfunctions.get('libvlc_media_player_set_media', None) or \
_Cfunction('libvlc_media_player_set_media', ((1,), (1,),), None,
None, MediaPlayer, Media)
return f(p_mi, p_md)
def libvlc_media_player_get_media(p_mi):
'''Get the media used by the media_player.
@param p_mi: the Media Player.
@return: the media associated with p_mi, or NULL if no media is associated.
'''
f = _Cfunctions.get('libvlc_media_player_get_media', None) or \
_Cfunction('libvlc_media_player_get_media', ((1,),), class_result(Media),
ctypes.c_void_p, MediaPlayer)
return f(p_mi)
def libvlc_media_player_event_manager(p_mi):
    '''Get the Event Manager from which the media player sends events.
@param p_mi: the Media Player.
@return: the event manager associated with p_mi.
'''
f = _Cfunctions.get('libvlc_media_player_event_manager', None) or \
_Cfunction('libvlc_media_player_event_manager', ((1,),), class_result(EventManager),
ctypes.c_void_p, MediaPlayer)
return f(p_mi)
def libvlc_media_player_is_playing(p_mi):
    '''Check whether the media player is playing.
@param p_mi: the Media Player.
@return: 1 if the media player is playing, 0 otherwise \libvlc_return_bool.
'''
f = _Cfunctions.get('libvlc_media_player_is_playing', None) or \
_Cfunction('libvlc_media_player_is_playing', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_media_player_play(p_mi):
'''Play.
@param p_mi: the Media Player.
@return: 0 if playback started (and was already started), or -1 on error.
'''
f = _Cfunctions.get('libvlc_media_player_play', None) or \
_Cfunction('libvlc_media_player_play', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_media_player_set_pause(mp, do_pause):
'''Pause or resume (no effect if there is no media).
@param mp: the Media Player.
@param do_pause: play/resume if zero, pause if non-zero.
@version: LibVLC 1.1.1 or later.
'''
f = _Cfunctions.get('libvlc_media_player_set_pause', None) or \
_Cfunction('libvlc_media_player_set_pause', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_int)
return f(mp, do_pause)
def libvlc_media_player_pause(p_mi):
'''Toggle pause (no effect if there is no media).
@param p_mi: the Media Player.
'''
f = _Cfunctions.get('libvlc_media_player_pause', None) or \
_Cfunction('libvlc_media_player_pause', ((1,),), None,
None, MediaPlayer)
return f(p_mi)
def libvlc_media_player_stop(p_mi):
'''Stop (no effect if there is no media).
@param p_mi: the Media Player.
'''
f = _Cfunctions.get('libvlc_media_player_stop', None) or \
_Cfunction('libvlc_media_player_stop', ((1,),), None,
None, MediaPlayer)
return f(p_mi)
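# A hedged sketch of the basic play/stop cycle using only the calls above;
# p_md is assumed to be a Media created elsewhere, and the fixed sleep stands
# in for real event handling.
def _example_play_media(p_md, duration=5.0):
    '''Hypothetical helper: play a media for a few seconds, then stop.'''
    import time
    mp = libvlc_media_player_new_from_media(p_md)
    if mp is None:
        return False
    try:
        if libvlc_media_player_play(mp) != 0:
            return False
        time.sleep(duration)
        libvlc_media_player_stop(mp)
        return True
    finally:
        libvlc_media_player_release(mp)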
def libvlc_video_set_callbacks(mp, lock, unlock, display, opaque):
'''Set callbacks and private data to render decoded video to a custom area
in memory.
Use L{libvlc_video_set_format}() or L{libvlc_video_set_format_callbacks}()
to configure the decoded format.
@param mp: the media player.
@param lock: callback to lock video memory (must not be NULL).
@param unlock: callback to unlock video memory (or NULL if not needed).
@param display: callback to display video (or NULL if not needed).
@param opaque: private pointer for the three callbacks (as first parameter).
@version: LibVLC 1.1.1 or later.
'''
f = _Cfunctions.get('libvlc_video_set_callbacks', None) or \
_Cfunction('libvlc_video_set_callbacks', ((1,), (1,), (1,), (1,), (1,),), None,
None, MediaPlayer, VideoLockCb, VideoUnlockCb, VideoDisplayCb, ctypes.c_void_p)
return f(mp, lock, unlock, display, opaque)
def libvlc_video_set_format(mp, chroma, width, height, pitch):
'''Set decoded video chroma and dimensions.
This only works in combination with L{libvlc_video_set_callbacks}(),
and is mutually exclusive with L{libvlc_video_set_format_callbacks}().
@param mp: the media player.
@param chroma: a four-characters string identifying the chroma (e.g. "RV32" or "YUYV").
@param width: pixel width.
@param height: pixel height.
@param pitch: line pitch (in bytes).
@version: LibVLC 1.1.1 or later.
@bug: All pixel planes are expected to have the same pitch. To use the YCbCr color space with chrominance subsampling, consider using L{libvlc_video_set_format_callbacks}() instead.
'''
f = _Cfunctions.get('libvlc_video_set_format', None) or \
_Cfunction('libvlc_video_set_format', ((1,), (1,), (1,), (1,), (1,),), None,
None, MediaPlayer, ctypes.c_char_p, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint)
return f(mp, chroma, width, height, pitch)
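# A hedged sketch of the set_callbacks/set_format pairing for the "RV32"
# chroma: each pixel is 4 bytes, so the pitch is width * 4.  The lock, unlock
# and display callbacks are assumed to have been created with the VideoLockCb,
# VideoUnlockCb and VideoDisplayCb ctypes prototypes defined earlier.
def _example_video_memory(mp, lock_cb, unlock_cb, display_cb, opaque,
                          width=640, height=480):
    '''Hypothetical helper: render decoded 640x480 RV32 video to memory.'''
    libvlc_video_set_callbacks(mp, lock_cb, unlock_cb, display_cb, opaque)
    libvlc_video_set_format(mp, 'RV32', width, height, width * 4)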
def libvlc_video_set_format_callbacks(mp, setup, cleanup):
'''Set decoded video chroma and dimensions. This only works in combination with
L{libvlc_video_set_callbacks}().
@param mp: the media player.
@param setup: callback to select the video format (cannot be NULL).
@param cleanup: callback to release any allocated resources (or NULL).
@version: LibVLC 2.0.0 or later.
'''
f = _Cfunctions.get('libvlc_video_set_format_callbacks', None) or \
_Cfunction('libvlc_video_set_format_callbacks', ((1,), (1,), (1,),), None,
None, MediaPlayer, VideoFormatCb, VideoCleanupCb)
return f(mp, setup, cleanup)
def libvlc_media_player_set_nsobject(p_mi, drawable):
'''Set the NSView handler where the media player should render its video output.
Use the vout called "macosx".
    The drawable is an NSObject that follows the VLCOpenGLVideoViewEmbedding
protocol:
@begincode
\@protocol VLCOpenGLVideoViewEmbedding <NSObject>
- (void)addVoutSubview:(NSView *)view;
- (void)removeVoutSubview:(NSView *)view;
\@end
@endcode
Or it can be an NSView object.
If you want to use it along with Qt4 see the QMacCocoaViewContainer. Then
the following code should work:
@begincode
NSView *video = [[NSView alloc] init];
QMacCocoaViewContainer *container = new QMacCocoaViewContainer(video, parent);
L{libvlc_media_player_set_nsobject}(mp, video);
[video release];
@endcode
You can find a live example in VLCVideoView in VLCKit.framework.
@param p_mi: the Media Player.
@param drawable: the drawable that is either an NSView or an object following the VLCOpenGLVideoViewEmbedding protocol.
'''
f = _Cfunctions.get('libvlc_media_player_set_nsobject', None) or \
_Cfunction('libvlc_media_player_set_nsobject', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_void_p)
return f(p_mi, drawable)
def libvlc_media_player_get_nsobject(p_mi):
'''Get the NSView handler previously set with L{libvlc_media_player_set_nsobject}().
@param p_mi: the Media Player.
    @return: the NSView handler or 0 if none was set.
'''
f = _Cfunctions.get('libvlc_media_player_get_nsobject', None) or \
_Cfunction('libvlc_media_player_get_nsobject', ((1,),), None,
ctypes.c_void_p, MediaPlayer)
return f(p_mi)
def libvlc_media_player_set_agl(p_mi, drawable):
'''Set the agl handler where the media player should render its video output.
@param p_mi: the Media Player.
@param drawable: the agl handler.
'''
f = _Cfunctions.get('libvlc_media_player_set_agl', None) or \
_Cfunction('libvlc_media_player_set_agl', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_uint32)
return f(p_mi, drawable)
def libvlc_media_player_get_agl(p_mi):
'''Get the agl handler previously set with L{libvlc_media_player_set_agl}().
@param p_mi: the Media Player.
    @return: the agl handler or 0 if none was set.
'''
f = _Cfunctions.get('libvlc_media_player_get_agl', None) or \
_Cfunction('libvlc_media_player_get_agl', ((1,),), None,
ctypes.c_uint32, MediaPlayer)
return f(p_mi)
def libvlc_media_player_set_xwindow(p_mi, drawable):
'''Set an X Window System drawable where the media player should render its
    video output. If LibVLC was built without X11 output support, then this has
    no effect.
The specified identifier must correspond to an existing Input/Output class
X11 window. Pixmaps are B{not} supported. The caller shall ensure that
the X11 server is the same as the one the VLC instance has been configured
with. This function must be called before video playback is started;
    otherwise it will only take effect after playback stops and restarts.
@param p_mi: the Media Player.
@param drawable: the ID of the X window.
'''
f = _Cfunctions.get('libvlc_media_player_set_xwindow', None) or \
_Cfunction('libvlc_media_player_set_xwindow', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_uint32)
return f(p_mi, drawable)
def libvlc_media_player_get_xwindow(p_mi):
'''Get the X Window System window identifier previously set with
L{libvlc_media_player_set_xwindow}(). Note that this will return the identifier
even if VLC is not currently using it (for instance if it is playing an
audio-only input).
@param p_mi: the Media Player.
    @return: an X window ID, or 0 if none was set.
'''
f = _Cfunctions.get('libvlc_media_player_get_xwindow', None) or \
_Cfunction('libvlc_media_player_get_xwindow', ((1,),), None,
ctypes.c_uint32, MediaPlayer)
return f(p_mi)
def libvlc_media_player_set_hwnd(p_mi, drawable):
'''Set a Win32/Win64 API window handle (HWND) where the media player should
render its video output. If LibVLC was built without Win32/Win64 API output
    support, then this has no effect.
@param p_mi: the Media Player.
    @param drawable: Windows handle of the drawable.
'''
f = _Cfunctions.get('libvlc_media_player_set_hwnd', None) or \
_Cfunction('libvlc_media_player_set_hwnd', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_void_p)
return f(p_mi, drawable)
def libvlc_media_player_get_hwnd(p_mi):
'''Get the Windows API window handle (HWND) previously set with
L{libvlc_media_player_set_hwnd}(). The handle will be returned even if LibVLC
is not currently outputting any video to it.
@param p_mi: the Media Player.
@return: a window handle or NULL if there are none.
'''
f = _Cfunctions.get('libvlc_media_player_get_hwnd', None) or \
_Cfunction('libvlc_media_player_get_hwnd', ((1,),), None,
ctypes.c_void_p, MediaPlayer)
return f(p_mi)
def libvlc_audio_set_callbacks(mp, play, pause, resume, flush, drain, opaque):
'''Set callbacks and private data for decoded audio.
Use L{libvlc_audio_set_format}() or L{libvlc_audio_set_format_callbacks}()
to configure the decoded audio format.
@param mp: the media player.
@param play: callback to play audio samples (must not be NULL).
@param pause: callback to pause playback (or NULL to ignore).
@param resume: callback to resume playback (or NULL to ignore).
@param flush: callback to flush audio buffers (or NULL to ignore).
@param drain: callback to drain audio buffers (or NULL to ignore).
@param opaque: private pointer for the audio callbacks (as first parameter).
@version: LibVLC 2.0.0 or later.
'''
f = _Cfunctions.get('libvlc_audio_set_callbacks', None) or \
_Cfunction('libvlc_audio_set_callbacks', ((1,), (1,), (1,), (1,), (1,), (1,), (1,),), None,
None, MediaPlayer, AudioPlayCb, AudioPauseCb, AudioResumeCb, AudioFlushCb, AudioDrainCb, ctypes.c_void_p)
return f(mp, play, pause, resume, flush, drain, opaque)
def libvlc_audio_set_volume_callback(mp, set_volume):
'''Set callbacks and private data for decoded audio.
Use L{libvlc_audio_set_format}() or L{libvlc_audio_set_format_callbacks}()
to configure the decoded audio format.
@param mp: the media player.
@param set_volume: callback to apply audio volume, or NULL to apply volume in software.
@version: LibVLC 2.0.0 or later.
'''
f = _Cfunctions.get('libvlc_audio_set_volume_callback', None) or \
_Cfunction('libvlc_audio_set_volume_callback', ((1,), (1,),), None,
None, MediaPlayer, AudioSetVolumeCb)
return f(mp, set_volume)
def libvlc_audio_set_format_callbacks(mp, setup, cleanup):
'''Set decoded audio format. This only works in combination with
L{libvlc_audio_set_callbacks}().
@param mp: the media player.
@param setup: callback to select the audio format (cannot be NULL).
@param cleanup: callback to release any allocated resources (or NULL).
@version: LibVLC 2.0.0 or later.
'''
f = _Cfunctions.get('libvlc_audio_set_format_callbacks', None) or \
_Cfunction('libvlc_audio_set_format_callbacks', ((1,), (1,), (1,),), None,
None, MediaPlayer, AudioSetupCb, AudioCleanupCb)
return f(mp, setup, cleanup)
def libvlc_audio_set_format(mp, format, rate, channels):
'''Set decoded audio format.
This only works in combination with L{libvlc_audio_set_callbacks}(),
and is mutually exclusive with L{libvlc_audio_set_format_callbacks}().
@param mp: the media player.
@param format: a four-characters string identifying the sample format (e.g. "S16N" or "FL32").
@param rate: sample rate (expressed in Hz).
@param channels: channels count.
@version: LibVLC 2.0.0 or later.
'''
f = _Cfunctions.get('libvlc_audio_set_format', None) or \
_Cfunction('libvlc_audio_set_format', ((1,), (1,), (1,), (1,),), None,
None, MediaPlayer, ctypes.c_char_p, ctypes.c_uint, ctypes.c_uint)
return f(mp, format, rate, channels)
def libvlc_media_player_get_length(p_mi):
'''Get the current movie length (in ms).
@param p_mi: the Media Player.
@return: the movie length (in ms), or -1 if there is no media.
'''
f = _Cfunctions.get('libvlc_media_player_get_length', None) or \
_Cfunction('libvlc_media_player_get_length', ((1,),), None,
ctypes.c_longlong, MediaPlayer)
return f(p_mi)
def libvlc_media_player_get_time(p_mi):
'''Get the current movie time (in ms).
@param p_mi: the Media Player.
@return: the movie time (in ms), or -1 if there is no media.
'''
f = _Cfunctions.get('libvlc_media_player_get_time', None) or \
_Cfunction('libvlc_media_player_get_time', ((1,),), None,
ctypes.c_longlong, MediaPlayer)
return f(p_mi)
def libvlc_media_player_set_time(p_mi, i_time):
'''Set the movie time (in ms). This has no effect if no media is being played.
Not all formats and protocols support this.
@param p_mi: the Media Player.
@param i_time: the movie time (in ms).
'''
f = _Cfunctions.get('libvlc_media_player_set_time', None) or \
_Cfunction('libvlc_media_player_set_time', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_longlong)
return f(p_mi, i_time)
def libvlc_media_player_get_position(p_mi):
'''Get movie position as percentage between 0.0 and 1.0.
@param p_mi: the Media Player.
    @return: movie position, or -1.0 in case of error.
'''
f = _Cfunctions.get('libvlc_media_player_get_position', None) or \
_Cfunction('libvlc_media_player_get_position', ((1,),), None,
ctypes.c_float, MediaPlayer)
return f(p_mi)
def libvlc_media_player_set_position(p_mi, f_pos):
'''Set movie position as percentage between 0.0 and 1.0.
This has no effect if playback is not enabled.
This might not work depending on the underlying input format and protocol.
@param p_mi: the Media Player.
@param f_pos: the position.
'''
f = _Cfunctions.get('libvlc_media_player_set_position', None) or \
_Cfunction('libvlc_media_player_set_position', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_float)
return f(p_mi, f_pos)
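# A hedged sketch contrasting the two seeking styles above: an absolute time
# in milliseconds versus a fractional position between 0.0 and 1.0.
def _example_seek_halfway(p_mi):
    '''Hypothetical helper: jump to the middle of the current media.'''
    length = libvlc_media_player_get_length(p_mi)  # in ms, -1 if no media
    if length > 0:
        libvlc_media_player_set_time(p_mi, length // 2)
    else:
        # Fall back to the fractional API when the length is unknown.
        libvlc_media_player_set_position(p_mi, 0.5)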
def libvlc_media_player_set_chapter(p_mi, i_chapter):
'''Set movie chapter (if applicable).
@param p_mi: the Media Player.
@param i_chapter: chapter number to play.
'''
f = _Cfunctions.get('libvlc_media_player_set_chapter', None) or \
_Cfunction('libvlc_media_player_set_chapter', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_int)
return f(p_mi, i_chapter)
def libvlc_media_player_get_chapter(p_mi):
'''Get movie chapter.
@param p_mi: the Media Player.
@return: chapter number currently playing, or -1 if there is no media.
'''
f = _Cfunctions.get('libvlc_media_player_get_chapter', None) or \
_Cfunction('libvlc_media_player_get_chapter', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_media_player_get_chapter_count(p_mi):
'''Get movie chapter count.
@param p_mi: the Media Player.
@return: number of chapters in movie, or -1.
'''
f = _Cfunctions.get('libvlc_media_player_get_chapter_count', None) or \
_Cfunction('libvlc_media_player_get_chapter_count', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_media_player_will_play(p_mi):
    '''Is the player able to play?
@param p_mi: the Media Player.
@return: boolean \libvlc_return_bool.
'''
f = _Cfunctions.get('libvlc_media_player_will_play', None) or \
_Cfunction('libvlc_media_player_will_play', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_media_player_get_chapter_count_for_title(p_mi, i_title):
'''Get title chapter count.
@param p_mi: the Media Player.
@param i_title: title.
@return: number of chapters in title, or -1.
'''
f = _Cfunctions.get('libvlc_media_player_get_chapter_count_for_title', None) or \
_Cfunction('libvlc_media_player_get_chapter_count_for_title', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_int)
return f(p_mi, i_title)
def libvlc_media_player_set_title(p_mi, i_title):
'''Set movie title.
@param p_mi: the Media Player.
@param i_title: title number to play.
'''
f = _Cfunctions.get('libvlc_media_player_set_title', None) or \
_Cfunction('libvlc_media_player_set_title', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_int)
return f(p_mi, i_title)
def libvlc_media_player_get_title(p_mi):
'''Get movie title.
@param p_mi: the Media Player.
@return: title number currently playing, or -1.
'''
f = _Cfunctions.get('libvlc_media_player_get_title', None) or \
_Cfunction('libvlc_media_player_get_title', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_media_player_get_title_count(p_mi):
'''Get movie title count.
@param p_mi: the Media Player.
@return: title number count, or -1.
'''
f = _Cfunctions.get('libvlc_media_player_get_title_count', None) or \
_Cfunction('libvlc_media_player_get_title_count', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_media_player_previous_chapter(p_mi):
'''Set previous chapter (if applicable).
@param p_mi: the Media Player.
'''
f = _Cfunctions.get('libvlc_media_player_previous_chapter', None) or \
_Cfunction('libvlc_media_player_previous_chapter', ((1,),), None,
None, MediaPlayer)
return f(p_mi)
def libvlc_media_player_next_chapter(p_mi):
'''Set next chapter (if applicable).
@param p_mi: the Media Player.
'''
f = _Cfunctions.get('libvlc_media_player_next_chapter', None) or \
_Cfunction('libvlc_media_player_next_chapter', ((1,),), None,
None, MediaPlayer)
return f(p_mi)
def libvlc_media_player_get_rate(p_mi):
'''Get the requested movie play rate.
@warning: Depending on the underlying media, the requested rate may be
different from the real playback rate.
@param p_mi: the Media Player.
@return: movie play rate.
'''
f = _Cfunctions.get('libvlc_media_player_get_rate', None) or \
_Cfunction('libvlc_media_player_get_rate', ((1,),), None,
ctypes.c_float, MediaPlayer)
return f(p_mi)
def libvlc_media_player_set_rate(p_mi, rate):
'''Set movie play rate.
@param p_mi: the Media Player.
@param rate: movie play rate to set.
@return: -1 if an error was detected, 0 otherwise (but even then, it might not actually work depending on the underlying media protocol).
'''
f = _Cfunctions.get('libvlc_media_player_set_rate', None) or \
_Cfunction('libvlc_media_player_set_rate', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_float)
return f(p_mi, rate)
def libvlc_media_player_get_state(p_mi):
'''Get current movie state.
@param p_mi: the Media Player.
@return: the current state of the media player (playing, paused, ...) See libvlc_state_t.
'''
f = _Cfunctions.get('libvlc_media_player_get_state', None) or \
_Cfunction('libvlc_media_player_get_state', ((1,),), None,
State, MediaPlayer)
return f(p_mi)
def libvlc_media_player_get_fps(p_mi):
'''Get movie fps rate.
@param p_mi: the Media Player.
@return: frames per second (fps) for this playing movie, or 0 if unspecified.
'''
f = _Cfunctions.get('libvlc_media_player_get_fps', None) or \
_Cfunction('libvlc_media_player_get_fps', ((1,),), None,
ctypes.c_float, MediaPlayer)
return f(p_mi)
def libvlc_media_player_has_vout(p_mi):
'''How many video outputs does this media player have?
@param p_mi: the media player.
@return: the number of video outputs.
'''
f = _Cfunctions.get('libvlc_media_player_has_vout', None) or \
_Cfunction('libvlc_media_player_has_vout', ((1,),), None,
ctypes.c_uint, MediaPlayer)
return f(p_mi)
def libvlc_media_player_is_seekable(p_mi):
'''Is this media player seekable?
@param p_mi: the media player.
@return: true if the media player can seek \libvlc_return_bool.
'''
f = _Cfunctions.get('libvlc_media_player_is_seekable', None) or \
_Cfunction('libvlc_media_player_is_seekable', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_media_player_can_pause(p_mi):
'''Can this media player be paused?
@param p_mi: the media player.
@return: true if the media player can pause \libvlc_return_bool.
'''
f = _Cfunctions.get('libvlc_media_player_can_pause', None) or \
_Cfunction('libvlc_media_player_can_pause', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_media_player_next_frame(p_mi):
'''Display the next frame (if supported).
@param p_mi: the media player.
'''
f = _Cfunctions.get('libvlc_media_player_next_frame', None) or \
_Cfunction('libvlc_media_player_next_frame', ((1,),), None,
None, MediaPlayer)
return f(p_mi)
def libvlc_media_player_navigate(p_mi, navigate):
'''Navigate through DVD Menu.
@param p_mi: the Media Player.
@param navigate: the Navigation mode.
@version: libVLC 2.0.0 or later.
'''
f = _Cfunctions.get('libvlc_media_player_navigate', None) or \
_Cfunction('libvlc_media_player_navigate', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_uint)
return f(p_mi, navigate)
def libvlc_track_description_list_release(p_track_description):
'''Release (free) L{TrackDescription}.
@param p_track_description: the structure to release.
'''
f = _Cfunctions.get('libvlc_track_description_list_release', None) or \
_Cfunction('libvlc_track_description_list_release', ((1,),), None,
None, ctypes.POINTER(TrackDescription))
return f(p_track_description)
def libvlc_toggle_fullscreen(p_mi):
'''Toggle fullscreen status on non-embedded video outputs.
@warning: The same limitations applies to this function
as to L{libvlc_set_fullscreen}().
@param p_mi: the media player.
'''
f = _Cfunctions.get('libvlc_toggle_fullscreen', None) or \
_Cfunction('libvlc_toggle_fullscreen', ((1,),), None,
None, MediaPlayer)
return f(p_mi)
def libvlc_set_fullscreen(p_mi, b_fullscreen):
'''Enable or disable fullscreen.
    @warning: With most window managers, only a top-level window can be in
full-screen mode. Hence, this function will not operate properly if
L{libvlc_media_player_set_xwindow}() was used to embed the video in a
non-top-level window. In that case, the embedding window must be reparented
to the root window B{before} fullscreen mode is enabled. You will want
to reparent it back to its normal parent when disabling fullscreen.
@param p_mi: the media player.
@param b_fullscreen: boolean for fullscreen status.
'''
f = _Cfunctions.get('libvlc_set_fullscreen', None) or \
_Cfunction('libvlc_set_fullscreen', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_int)
return f(p_mi, b_fullscreen)
def libvlc_get_fullscreen(p_mi):
'''Get current fullscreen status.
@param p_mi: the media player.
@return: the fullscreen status (boolean) \libvlc_return_bool.
'''
f = _Cfunctions.get('libvlc_get_fullscreen', None) or \
_Cfunction('libvlc_get_fullscreen', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_video_set_key_input(p_mi, on):
'''Enable or disable key press events handling, according to the LibVLC hotkeys
configuration. By default and for historical reasons, keyboard events are
handled by the LibVLC video widget.
@note: On X11, there can be only one subscriber for key press and mouse
click events per window. If your application has subscribed to those events
for the X window ID of the video widget, then LibVLC will not be able to
handle key presses and mouse clicks in any case.
@warning: This function is only implemented for X11 and Win32 at the moment.
@param p_mi: the media player.
@param on: true to handle key press events, false to ignore them.
'''
f = _Cfunctions.get('libvlc_video_set_key_input', None) or \
_Cfunction('libvlc_video_set_key_input', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_uint)
return f(p_mi, on)
def libvlc_video_set_mouse_input(p_mi, on):
'''Enable or disable mouse click events handling. By default, those events are
handled. This is needed for DVD menus to work, as well as a few video
filters such as "puzzle".
See L{libvlc_video_set_key_input}().
@warning: This function is only implemented for X11 and Win32 at the moment.
@param p_mi: the media player.
@param on: true to handle mouse click events, false to ignore them.
'''
f = _Cfunctions.get('libvlc_video_set_mouse_input', None) or \
_Cfunction('libvlc_video_set_mouse_input', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_uint)
return f(p_mi, on)
def libvlc_video_get_size(p_mi, num):
'''Get the pixel dimensions of a video.
@param p_mi: media player.
    @param num: number of the video (starting from 0, and most commonly 0).
@return: px pixel width, py pixel height.
'''
f = _Cfunctions.get('libvlc_video_get_size', None) or \
_Cfunction('libvlc_video_get_size', ((1,), (1,), (2,), (2,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_uint, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
return f(p_mi, num)
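# A hedged note on the wrapper above: the (2,) flags mark px and py as ctypes
# output parameters, so the Python-level call returns them as a tuple instead
# of taking pointer arguments.
def _example_video_size(p_mi):
    '''Hypothetical helper: return the pixel size of the first video.'''
    width, height = libvlc_video_get_size(p_mi, 0)
    return width, height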
def libvlc_video_get_cursor(p_mi, num):
'''Get the mouse pointer coordinates over a video.
Coordinates are expressed in terms of the decoded video resolution,
B{not} in terms of pixels on the screen/viewport (to get the latter,
you can query your windowing system directly).
Either of the coordinates may be negative or larger than the corresponding
dimension of the video, if the cursor is outside the rendering area.
@warning: The coordinates may be out-of-date if the pointer is not located
on the video rendering area. LibVLC does not track the pointer if it is
outside of the video widget.
@note: LibVLC does not support multiple pointers (it does of course support
multiple input devices sharing the same pointer) at the moment.
@param p_mi: media player.
    @param num: number of the video (starting from 0, and most commonly 0).
@return: px abscissa, py ordinate.
'''
f = _Cfunctions.get('libvlc_video_get_cursor', None) or \
_Cfunction('libvlc_video_get_cursor', ((1,), (1,), (2,), (2,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_uint, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))
return f(p_mi, num)
def libvlc_video_get_scale(p_mi):
'''Get the current video scaling factor.
See also L{libvlc_video_set_scale}().
@param p_mi: the media player.
    @return: the currently configured zoom factor, or 0.0 if the video is set to fit to the output window/drawable automatically.
'''
f = _Cfunctions.get('libvlc_video_get_scale', None) or \
_Cfunction('libvlc_video_get_scale', ((1,),), None,
ctypes.c_float, MediaPlayer)
return f(p_mi)
def libvlc_video_set_scale(p_mi, f_factor):
'''Set the video scaling factor. That is the ratio of the number of pixels on
screen to the number of pixels in the original decoded video in each
dimension. Zero is a special value; it will adjust the video to the output
window/drawable (in windowed mode) or the entire screen.
Note that not all video outputs support scaling.
@param p_mi: the media player.
@param f_factor: the scaling factor, or zero.
'''
f = _Cfunctions.get('libvlc_video_set_scale', None) or \
_Cfunction('libvlc_video_set_scale', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_float)
return f(p_mi, f_factor)
def libvlc_video_get_aspect_ratio(p_mi):
'''Get current video aspect ratio.
@param p_mi: the media player.
@return: the video aspect ratio or NULL if unspecified (the result must be released with free() or L{libvlc_free}()).
'''
f = _Cfunctions.get('libvlc_video_get_aspect_ratio', None) or \
_Cfunction('libvlc_video_get_aspect_ratio', ((1,),), string_result,
ctypes.c_void_p, MediaPlayer)
return f(p_mi)
def libvlc_video_set_aspect_ratio(p_mi, psz_aspect):
'''Set new video aspect ratio.
@param p_mi: the media player.
    @param psz_aspect: new video aspect-ratio, or NULL to reset to default. Note: invalid aspect ratios are ignored.
'''
f = _Cfunctions.get('libvlc_video_set_aspect_ratio', None) or \
_Cfunction('libvlc_video_set_aspect_ratio', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_char_p)
return f(p_mi, psz_aspect)
def libvlc_video_get_spu(p_mi):
'''Get current video subtitle.
@param p_mi: the media player.
@return: the video subtitle selected, or -1 if none.
'''
f = _Cfunctions.get('libvlc_video_get_spu', None) or \
_Cfunction('libvlc_video_get_spu', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_video_get_spu_count(p_mi):
'''Get the number of available video subtitles.
@param p_mi: the media player.
@return: the number of available video subtitles.
'''
f = _Cfunctions.get('libvlc_video_get_spu_count', None) or \
_Cfunction('libvlc_video_get_spu_count', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_video_get_spu_description(p_mi):
'''Get the description of available video subtitles.
@param p_mi: the media player.
@return: list containing description of available video subtitles.
'''
f = _Cfunctions.get('libvlc_video_get_spu_description', None) or \
_Cfunction('libvlc_video_get_spu_description', ((1,),), None,
ctypes.POINTER(TrackDescription), MediaPlayer)
return f(p_mi)
def libvlc_video_set_spu(p_mi, i_spu):
'''Set new video subtitle.
@param p_mi: the media player.
@param i_spu: video subtitle track to select (i_id from track description).
@return: 0 on success, -1 if out of range.
'''
f = _Cfunctions.get('libvlc_video_set_spu', None) or \
_Cfunction('libvlc_video_set_spu', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_int)
return f(p_mi, i_spu)
def libvlc_video_set_subtitle_file(p_mi, psz_subtitle):
'''Set new video subtitle file.
@param p_mi: the media player.
@param psz_subtitle: new video subtitle file.
@return: the success status (boolean).
'''
f = _Cfunctions.get('libvlc_video_set_subtitle_file', None) or \
_Cfunction('libvlc_video_set_subtitle_file', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_char_p)
return f(p_mi, psz_subtitle)
def libvlc_video_get_spu_delay(p_mi):
    '''Get the current subtitle delay. Positive values mean subtitles are being
    displayed later, negative values earlier.
@param p_mi: media player.
@return: time (in microseconds) the display of subtitles is being delayed.
@version: LibVLC 2.0.0 or later.
'''
f = _Cfunctions.get('libvlc_video_get_spu_delay', None) or \
_Cfunction('libvlc_video_get_spu_delay', ((1,),), None,
ctypes.c_int64, MediaPlayer)
return f(p_mi)
def libvlc_video_set_spu_delay(p_mi, i_delay):
'''Set the subtitle delay. This affects the timing of when the subtitle will
be displayed. Positive values result in subtitles being displayed later,
while negative values will result in subtitles being displayed earlier.
The subtitle delay will be reset to zero each time the media changes.
@param p_mi: media player.
@param i_delay: time (in microseconds) the display of subtitles should be delayed.
@return: 0 on success, -1 on error.
@version: LibVLC 2.0.0 or later.
'''
f = _Cfunctions.get('libvlc_video_set_spu_delay', None) or \
_Cfunction('libvlc_video_set_spu_delay', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_int64)
return f(p_mi, i_delay)
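# A hedged sketch of the unit conversion implied above: the subtitle delay is
# expressed in microseconds, so shifting subtitles half a second later means
# adding 500 * 1000 to the current delay.
def _example_shift_subtitles(p_mi, ms=500):
    '''Hypothetical helper: delay subtitles by ms milliseconds.'''
    delay = libvlc_video_get_spu_delay(p_mi)
    return libvlc_video_set_spu_delay(p_mi, delay + ms * 1000)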
def libvlc_video_get_title_description(p_mi):
'''Get the description of available titles.
@param p_mi: the media player.
@return: list containing description of available titles.
'''
f = _Cfunctions.get('libvlc_video_get_title_description', None) or \
_Cfunction('libvlc_video_get_title_description', ((1,),), None,
ctypes.POINTER(TrackDescription), MediaPlayer)
return f(p_mi)
def libvlc_video_get_chapter_description(p_mi, i_title):
'''Get the description of available chapters for specific title.
@param p_mi: the media player.
@param i_title: selected title.
@return: list containing description of available chapter for title i_title.
'''
f = _Cfunctions.get('libvlc_video_get_chapter_description', None) or \
_Cfunction('libvlc_video_get_chapter_description', ((1,), (1,),), None,
ctypes.POINTER(TrackDescription), MediaPlayer, ctypes.c_int)
return f(p_mi, i_title)
def libvlc_video_get_crop_geometry(p_mi):
'''Get current crop filter geometry.
@param p_mi: the media player.
@return: the crop filter geometry or NULL if unset.
'''
f = _Cfunctions.get('libvlc_video_get_crop_geometry', None) or \
_Cfunction('libvlc_video_get_crop_geometry', ((1,),), string_result,
ctypes.c_void_p, MediaPlayer)
return f(p_mi)
def libvlc_video_set_crop_geometry(p_mi, psz_geometry):
'''Set new crop filter geometry.
@param p_mi: the media player.
@param psz_geometry: new crop filter geometry (NULL to unset).
'''
f = _Cfunctions.get('libvlc_video_set_crop_geometry', None) or \
_Cfunction('libvlc_video_set_crop_geometry', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_char_p)
return f(p_mi, psz_geometry)
def libvlc_video_get_teletext(p_mi):
'''Get current teletext page requested.
@param p_mi: the media player.
@return: the current teletext page requested.
'''
f = _Cfunctions.get('libvlc_video_get_teletext', None) or \
_Cfunction('libvlc_video_get_teletext', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_video_set_teletext(p_mi, i_page):
'''Set new teletext page to retrieve.
@param p_mi: the media player.
    @param i_page: teletext page number requested.
'''
f = _Cfunctions.get('libvlc_video_set_teletext', None) or \
_Cfunction('libvlc_video_set_teletext', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_int)
return f(p_mi, i_page)
def libvlc_toggle_teletext(p_mi):
'''Toggle teletext transparent status on video output.
@param p_mi: the media player.
'''
f = _Cfunctions.get('libvlc_toggle_teletext', None) or \
_Cfunction('libvlc_toggle_teletext', ((1,),), None,
None, MediaPlayer)
return f(p_mi)
def libvlc_video_get_track_count(p_mi):
'''Get number of available video tracks.
@param p_mi: media player.
@return: the number of available video tracks (int).
'''
f = _Cfunctions.get('libvlc_video_get_track_count', None) or \
_Cfunction('libvlc_video_get_track_count', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_video_get_track_description(p_mi):
'''Get the description of available video tracks.
@param p_mi: media player.
@return: list with description of available video tracks, or NULL on error.
'''
f = _Cfunctions.get('libvlc_video_get_track_description', None) or \
_Cfunction('libvlc_video_get_track_description', ((1,),), None,
ctypes.POINTER(TrackDescription), MediaPlayer)
return f(p_mi)
def libvlc_video_get_track(p_mi):
'''Get current video track.
@param p_mi: media player.
@return: the video track ID (int) or -1 if no active input.
'''
f = _Cfunctions.get('libvlc_video_get_track', None) or \
_Cfunction('libvlc_video_get_track', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_video_set_track(p_mi, i_track):
'''Set video track.
@param p_mi: media player.
@param i_track: the track ID (i_id field from track description).
@return: 0 on success, -1 if out of range.
'''
f = _Cfunctions.get('libvlc_video_set_track', None) or \
_Cfunction('libvlc_video_set_track', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_int)
return f(p_mi, i_track)
def libvlc_video_take_snapshot(p_mi, num, psz_filepath, i_width, i_height):
'''Take a snapshot of the current video window.
If i_width AND i_height is 0, original size is used.
If i_width XOR i_height is 0, original aspect-ratio is preserved.
@param p_mi: media player instance.
@param num: number of video output (typically 0 for the first/only one).
@param psz_filepath: the path where to save the screenshot to.
@param i_width: the snapshot's width.
@param i_height: the snapshot's height.
@return: 0 on success, -1 if the video was not found.
'''
f = _Cfunctions.get('libvlc_video_take_snapshot', None) or \
_Cfunction('libvlc_video_take_snapshot', ((1,), (1,), (1,), (1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_uint, ctypes.c_char_p, ctypes.c_int, ctypes.c_int)
return f(p_mi, num, psz_filepath, i_width, i_height)
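# A hedged sketch of the width/height rules above: passing 0 for both keeps
# the original size, while passing exactly one 0 preserves the aspect ratio.
def _example_snapshot(p_mi, psz_filepath):
    '''Hypothetical helper: snapshot the first video at its original size.'''
    return libvlc_video_take_snapshot(p_mi, 0, psz_filepath, 0, 0)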
def libvlc_video_set_deinterlace(p_mi, psz_mode):
'''Enable or disable deinterlace filter.
@param p_mi: libvlc media player.
@param psz_mode: type of deinterlace filter, NULL to disable.
'''
f = _Cfunctions.get('libvlc_video_set_deinterlace', None) or \
_Cfunction('libvlc_video_set_deinterlace', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_char_p)
return f(p_mi, psz_mode)
def libvlc_video_get_marquee_int(p_mi, option):
'''Get an integer marquee option value.
@param p_mi: libvlc media player.
    @param option: marq option to get, see libvlc_video_marquee_int_option_t.
'''
f = _Cfunctions.get('libvlc_video_get_marquee_int', None) or \
_Cfunction('libvlc_video_get_marquee_int', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_uint)
return f(p_mi, option)
def libvlc_video_get_marquee_string(p_mi, option):
'''Get a string marquee option value.
@param p_mi: libvlc media player.
    @param option: marq option to get, see libvlc_video_marquee_string_option_t.
'''
f = _Cfunctions.get('libvlc_video_get_marquee_string', None) or \
_Cfunction('libvlc_video_get_marquee_string', ((1,), (1,),), string_result,
ctypes.c_void_p, MediaPlayer, ctypes.c_uint)
return f(p_mi, option)
def libvlc_video_set_marquee_int(p_mi, option, i_val):
'''Enable, disable or set an integer marquee option
Setting libvlc_marquee_Enable has the side effect of enabling (arg !0)
or disabling (arg 0) the marq filter.
@param p_mi: libvlc media player.
    @param option: marq option to set, see libvlc_video_marquee_int_option_t.
@param i_val: marq option value.
'''
f = _Cfunctions.get('libvlc_video_set_marquee_int', None) or \
_Cfunction('libvlc_video_set_marquee_int', ((1,), (1,), (1,),), None,
None, MediaPlayer, ctypes.c_uint, ctypes.c_int)
return f(p_mi, option, i_val)
def libvlc_video_set_marquee_string(p_mi, option, psz_text):
'''Set a marquee string option.
@param p_mi: libvlc media player.
    @param option: marq option to set, see libvlc_video_marquee_string_option_t.
@param psz_text: marq option value.
'''
f = _Cfunctions.get('libvlc_video_set_marquee_string', None) or \
_Cfunction('libvlc_video_set_marquee_string', ((1,), (1,), (1,),), None,
None, MediaPlayer, ctypes.c_uint, ctypes.c_char_p)
return f(p_mi, option, psz_text)
def libvlc_video_get_logo_int(p_mi, option):
'''Get integer logo option.
@param p_mi: libvlc media player instance.
@param option: logo option to get, values of libvlc_video_logo_option_t.
'''
f = _Cfunctions.get('libvlc_video_get_logo_int', None) or \
_Cfunction('libvlc_video_get_logo_int', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_uint)
return f(p_mi, option)
def libvlc_video_set_logo_int(p_mi, option, value):
'''Set logo option as integer. Options that take a different type value
are ignored.
Passing libvlc_logo_enable as option value has the side effect of
starting (arg !0) or stopping (arg 0) the logo filter.
@param p_mi: libvlc media player instance.
@param option: logo option to set, values of libvlc_video_logo_option_t.
@param value: logo option value.
'''
f = _Cfunctions.get('libvlc_video_set_logo_int', None) or \
_Cfunction('libvlc_video_set_logo_int', ((1,), (1,), (1,),), None,
None, MediaPlayer, ctypes.c_uint, ctypes.c_int)
return f(p_mi, option, value)
def libvlc_video_set_logo_string(p_mi, option, psz_value):
'''Set logo option as string. Options that take a different type value
are ignored.
@param p_mi: libvlc media player instance.
@param option: logo option to set, values of libvlc_video_logo_option_t.
@param psz_value: logo option value.
'''
f = _Cfunctions.get('libvlc_video_set_logo_string', None) or \
_Cfunction('libvlc_video_set_logo_string', ((1,), (1,), (1,),), None,
None, MediaPlayer, ctypes.c_uint, ctypes.c_char_p)
return f(p_mi, option, psz_value)
def libvlc_video_get_adjust_int(p_mi, option):
'''Get integer adjust option.
@param p_mi: libvlc media player instance.
@param option: adjust option to get, values of libvlc_video_adjust_option_t.
@version: LibVLC 1.1.1 and later.
'''
f = _Cfunctions.get('libvlc_video_get_adjust_int', None) or \
_Cfunction('libvlc_video_get_adjust_int', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_uint)
return f(p_mi, option)
def libvlc_video_set_adjust_int(p_mi, option, value):
'''Set adjust option as integer. Options that take a different type value
are ignored.
Passing libvlc_adjust_enable as option value has the side effect of
starting (arg !0) or stopping (arg 0) the adjust filter.
@param p_mi: libvlc media player instance.
@param option: adjust option to set, values of libvlc_video_adjust_option_t.
@param value: adjust option value.
@version: LibVLC 1.1.1 and later.
'''
f = _Cfunctions.get('libvlc_video_set_adjust_int', None) or \
_Cfunction('libvlc_video_set_adjust_int', ((1,), (1,), (1,),), None,
None, MediaPlayer, ctypes.c_uint, ctypes.c_int)
return f(p_mi, option, value)
def libvlc_video_get_adjust_float(p_mi, option):
'''Get float adjust option.
@param p_mi: libvlc media player instance.
@param option: adjust option to get, values of libvlc_video_adjust_option_t.
@version: LibVLC 1.1.1 and later.
'''
f = _Cfunctions.get('libvlc_video_get_adjust_float', None) or \
_Cfunction('libvlc_video_get_adjust_float', ((1,), (1,),), None,
ctypes.c_float, MediaPlayer, ctypes.c_uint)
return f(p_mi, option)
def libvlc_video_set_adjust_float(p_mi, option, value):
'''Set adjust option as float. Options that take a different type value
are ignored.
@param p_mi: libvlc media player instance.
@param option: adjust option to set, values of libvlc_video_adjust_option_t.
@param value: adjust option value.
@version: LibVLC 1.1.1 and later.
'''
f = _Cfunctions.get('libvlc_video_set_adjust_float', None) or \
_Cfunction('libvlc_video_set_adjust_float', ((1,), (1,), (1,),), None,
None, MediaPlayer, ctypes.c_uint, ctypes.c_float)
return f(p_mi, option, value)
def libvlc_audio_output_list_get(p_instance):
'''Gets the list of available audio outputs.
@param p_instance: libvlc instance.
@return: list of available audio outputs. It must be freed with L{libvlc_audio_output_list_release}(). In case of error, NULL is returned.
'''
f = _Cfunctions.get('libvlc_audio_output_list_get', None) or \
_Cfunction('libvlc_audio_output_list_get', ((1,),), None,
ctypes.POINTER(AudioOutput), Instance)
return f(p_instance)
def libvlc_audio_output_list_release(p_list):
'''Frees the list of available audio outputs.
@param p_list: list with audio outputs for release.
'''
f = _Cfunctions.get('libvlc_audio_output_list_release', None) or \
_Cfunction('libvlc_audio_output_list_release', ((1,),), None,
None, ctypes.POINTER(AudioOutput))
return f(p_list)
def libvlc_audio_output_set(p_mi, psz_name):
'''Sets the audio output.
@note: Any change will take effect only after playback is stopped and
restarted. Audio output cannot be changed while playing.
@param p_mi: media player.
@param psz_name: name of audio output, use psz_name of See L{AudioOutput}.
@return: 0 if the function succeeded, -1 on error.
'''
f = _Cfunctions.get('libvlc_audio_output_set', None) or \
_Cfunction('libvlc_audio_output_set', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_char_p)
return f(p_mi, psz_name)
def libvlc_audio_output_device_list_get(p_instance, aout):
'''Gets a list of audio output devices for a given audio output.
See L{libvlc_audio_output_device_set}().
@note: Not all audio outputs support this. In particular, an empty (NULL)
list of devices does B{not} imply that the specified audio output does
not work.
@note: The list might not be exhaustive.
@warning: Some audio output devices in the list might not actually work in
some circumstances. By default, it is recommended to not specify any
explicit audio device.
@param p_instance: libvlc instance.
@param psz_aout: audio output name (as returned by L{libvlc_audio_output_list_get}()).
@return: A NULL-terminated linked list of potential audio output devices. It must be freed with L{libvlc_audio_output_device_list_release}().
@version: LibVLC 2.1.0 or later.
'''
f = _Cfunctions.get('libvlc_audio_output_device_list_get', None) or \
_Cfunction('libvlc_audio_output_device_list_get', ((1,), (1,),), None,
ctypes.POINTER(AudioOutputDevice), Instance, ctypes.c_char_p)
return f(p_instance, aout)
def libvlc_audio_output_device_list_release(p_list):
'''Frees a list of available audio output devices.
@param p_list: list with audio outputs for release.
@version: LibVLC 2.1.0 or later.
'''
f = _Cfunctions.get('libvlc_audio_output_device_list_release', None) or \
_Cfunction('libvlc_audio_output_device_list_release', ((1,),), None,
None, ctypes.POINTER(AudioOutputDevice))
return f(p_list)
def libvlc_audio_output_device_set(p_mi, psz_audio_output, psz_device_id):
'''Configures an explicit audio output device for a given audio output plugin.
A list of possible devices can be obtained with
L{libvlc_audio_output_device_list_get}().
@note: This function does not select the specified audio output plugin.
L{libvlc_audio_output_set}() is used for that purpose.
@warning: The syntax for the device parameter depends on the audio output.
This is not portable. Only use this function if you know what you are doing.
Some audio outputs do not support this function (e.g. PulseAudio, WASAPI).
Some audio outputs require further parameters (e.g. ALSA: channels map).
@param p_mi: media player.
@param psz_audio_output: - name of audio output, See L{AudioOutput}.
@param psz_device_id: device.
@return: Nothing. Errors are ignored.
'''
f = _Cfunctions.get('libvlc_audio_output_device_set', None) or \
_Cfunction('libvlc_audio_output_device_set', ((1,), (1,), (1,),), None,
None, MediaPlayer, ctypes.c_char_p, ctypes.c_char_p)
return f(p_mi, psz_audio_output, psz_device_id)
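# Illustrative sketch, not part of the generated bindings: pick an output
# plugin by name, then an explicit device for it. 'alsa' and 'hw:0' are
# example values; real names come from libvlc_audio_output_list_get() and
# libvlc_audio_output_device_list_get().
#   libvlc_audio_output_set(p_mi, 'alsa')                 # p_mi: a MediaPlayer
#   libvlc_audio_output_device_set(p_mi, 'alsa', 'hw:0')  # errors are ignored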
def libvlc_audio_toggle_mute(p_mi):
'''Toggle mute status.
@param p_mi: media player.
@warning: Toggling mute atomically is not always possible: on some platforms, other processes can mute the VLC audio playback stream asynchronously. Thus, there is a small race condition where toggling will not work. See also the limitations of L{libvlc_audio_set_mute}().
'''
f = _Cfunctions.get('libvlc_audio_toggle_mute', None) or \
_Cfunction('libvlc_audio_toggle_mute', ((1,),), None,
None, MediaPlayer)
return f(p_mi)
def libvlc_audio_get_mute(p_mi):
'''Get current mute status.
@param p_mi: media player.
@return: the mute status (boolean) if defined, -1 if undefined or not applicable.
'''
f = _Cfunctions.get('libvlc_audio_get_mute', None) or \
_Cfunction('libvlc_audio_get_mute', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_audio_set_mute(p_mi, status):
'''Set mute status.
@param p_mi: media player.
@param status: If status is true then mute, otherwise unmute.
@warning: This function does not always work. If there is no active audio playback stream, the mute status might not be available. If digital pass-through (S/PDIF, HDMI...) is in use, muting may be inapplicable. Also some audio output plugins do not support muting at all.
@note: To force silent playback, disable all audio tracks. This is more efficient and reliable than mute.
'''
f = _Cfunctions.get('libvlc_audio_set_mute', None) or \
_Cfunction('libvlc_audio_set_mute', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_int)
return f(p_mi, status)
def libvlc_audio_get_volume(p_mi):
'''Get current software audio volume.
@param p_mi: media player.
@return: the software volume in percents (0 = mute, 100 = nominal / 0dB).
'''
f = _Cfunctions.get('libvlc_audio_get_volume', None) or \
_Cfunction('libvlc_audio_get_volume', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_audio_set_volume(p_mi, i_volume):
'''Set current software audio volume.
@param p_mi: media player.
@param i_volume: the volume in percents (0 = mute, 100 = 0dB).
@return: 0 if the volume was set, -1 if it was out of range.
'''
f = _Cfunctions.get('libvlc_audio_set_volume', None) or \
_Cfunction('libvlc_audio_set_volume', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_int)
return f(p_mi, i_volume)
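# Illustrative sketch, not part of the generated bindings: restore the
# software volume to nominal (0dB) if it was lowered; assumes p_mi is an
# existing MediaPlayer.
#   if 0 <= libvlc_audio_get_volume(p_mi) < 100:
#       libvlc_audio_set_volume(p_mi, 100)  # returns 0 on success, -1 if out of range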
def libvlc_audio_get_track_count(p_mi):
'''Get number of available audio tracks.
@param p_mi: media player.
@return: the number of available audio tracks (int), or -1 if unavailable.
'''
f = _Cfunctions.get('libvlc_audio_get_track_count', None) or \
_Cfunction('libvlc_audio_get_track_count', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_audio_get_track_description(p_mi):
'''Get the description of available audio tracks.
@param p_mi: media player.
@return: list with description of available audio tracks, or NULL.
'''
f = _Cfunctions.get('libvlc_audio_get_track_description', None) or \
_Cfunction('libvlc_audio_get_track_description', ((1,),), None,
ctypes.POINTER(TrackDescription), MediaPlayer)
return f(p_mi)
def libvlc_audio_get_track(p_mi):
'''Get current audio track.
@param p_mi: media player.
@return: the audio track ID or -1 if no active input.
'''
f = _Cfunctions.get('libvlc_audio_get_track', None) or \
_Cfunction('libvlc_audio_get_track', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_audio_set_track(p_mi, i_track):
'''Set current audio track.
@param p_mi: media player.
@param i_track: the track ID (i_id field from track description).
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_audio_set_track', None) or \
_Cfunction('libvlc_audio_set_track', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_int)
return f(p_mi, i_track)
def libvlc_audio_get_channel(p_mi):
'''Get current audio channel.
@param p_mi: media player.
@return: the audio channel, See libvlc_audio_output_channel_t.
'''
f = _Cfunctions.get('libvlc_audio_get_channel', None) or \
_Cfunction('libvlc_audio_get_channel', ((1,),), None,
ctypes.c_int, MediaPlayer)
return f(p_mi)
def libvlc_audio_set_channel(p_mi, channel):
'''Set current audio channel.
@param p_mi: media player.
@param channel: the audio channel, See libvlc_audio_output_channel_t.
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_audio_set_channel', None) or \
_Cfunction('libvlc_audio_set_channel', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_int)
return f(p_mi, channel)
def libvlc_audio_get_delay(p_mi):
'''Get current audio delay.
@param p_mi: media player.
@return: the audio delay (microseconds).
@version: LibVLC 1.1.1 or later.
'''
f = _Cfunctions.get('libvlc_audio_get_delay', None) or \
_Cfunction('libvlc_audio_get_delay', ((1,),), None,
ctypes.c_int64, MediaPlayer)
return f(p_mi)
def libvlc_audio_set_delay(p_mi, i_delay):
'''Set current audio delay. The audio delay will be reset to zero each time the media changes.
@param p_mi: media player.
@param i_delay: the audio delay (microseconds).
@return: 0 on success, -1 on error.
@version: LibVLC 1.1.1 or later.
'''
f = _Cfunctions.get('libvlc_audio_set_delay', None) or \
_Cfunction('libvlc_audio_set_delay', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_int64)
return f(p_mi, i_delay)
def libvlc_vlm_release(p_instance):
'''Release the vlm instance related to the given L{Instance}.
@param p_instance: the instance.
'''
f = _Cfunctions.get('libvlc_vlm_release', None) or \
_Cfunction('libvlc_vlm_release', ((1,),), None,
None, Instance)
return f(p_instance)
def libvlc_vlm_add_broadcast(p_instance, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop):
'''Add a broadcast, with one input.
@param p_instance: the instance.
@param psz_name: the name of the new broadcast.
@param psz_input: the input MRL.
@param psz_output: the output MRL (the parameter to the "sout" variable).
@param i_options: number of additional options.
@param ppsz_options: additional options.
@param b_enabled: boolean for enabling the new broadcast.
@param b_loop: Should this broadcast be played in a loop?
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_add_broadcast', None) or \
_Cfunction('libvlc_vlm_add_broadcast', ((1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int, ListPOINTER(ctypes.c_char_p), ctypes.c_int, ctypes.c_int)
return f(p_instance, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop)
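# Illustrative sketch, not part of the generated bindings: create and start
# an HTTP/TS broadcast. The MRL and sout chain are example values and inst
# is an existing Instance; libvlc_vlm_play_media() is defined further below.
#   libvlc_vlm_add_broadcast(inst, 'bcast', 'file:///tmp/movie.mp4',
#                            '#standard{access=http,mux=ts,dst=:8080}',
#                            0, None, 1, 0)
#   libvlc_vlm_play_media(inst, 'bcast')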
def libvlc_vlm_add_vod(p_instance, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux):
'''Add a vod, with one input.
@param p_instance: the instance.
@param psz_name: the name of the new vod media.
@param psz_input: the input MRL.
@param i_options: number of additional options.
@param ppsz_options: additional options.
@param b_enabled: boolean for enabling the new vod.
@param psz_mux: the muxer of the vod media.
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_add_vod', None) or \
_Cfunction('libvlc_vlm_add_vod', ((1,), (1,), (1,), (1,), (1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int, ListPOINTER(ctypes.c_char_p), ctypes.c_int, ctypes.c_char_p)
return f(p_instance, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux)
def libvlc_vlm_del_media(p_instance, psz_name):
'''Delete a media (VOD or broadcast).
@param p_instance: the instance.
@param psz_name: the media to delete.
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_del_media', None) or \
_Cfunction('libvlc_vlm_del_media', ((1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p)
return f(p_instance, psz_name)
def libvlc_vlm_set_enabled(p_instance, psz_name, b_enabled):
'''Enable or disable a media (VOD or broadcast).
@param p_instance: the instance.
@param psz_name: the media to work on.
@param b_enabled: the new status.
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_set_enabled', None) or \
_Cfunction('libvlc_vlm_set_enabled', ((1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
return f(p_instance, psz_name, b_enabled)
def libvlc_vlm_set_output(p_instance, psz_name, psz_output):
'''Set the output for a media.
@param p_instance: the instance.
@param psz_name: the media to work on.
@param psz_output: the output MRL (the parameter to the "sout" variable).
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_set_output', None) or \
_Cfunction('libvlc_vlm_set_output', ((1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p)
return f(p_instance, psz_name, psz_output)
def libvlc_vlm_set_input(p_instance, psz_name, psz_input):
'''Set a media's input MRL. This will delete all existing inputs and
add the specified one.
@param p_instance: the instance.
@param psz_name: the media to work on.
@param psz_input: the input MRL.
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_set_input', None) or \
_Cfunction('libvlc_vlm_set_input', ((1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p)
return f(p_instance, psz_name, psz_input)
def libvlc_vlm_add_input(p_instance, psz_name, psz_input):
'''Add a media's input MRL. This will add the specified one.
@param p_instance: the instance.
@param psz_name: the media to work on.
@param psz_input: the input MRL.
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_add_input', None) or \
_Cfunction('libvlc_vlm_add_input', ((1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p)
return f(p_instance, psz_name, psz_input)
def libvlc_vlm_set_loop(p_instance, psz_name, b_loop):
'''Set a media's loop status.
@param p_instance: the instance.
@param psz_name: the media to work on.
@param b_loop: the new status.
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_set_loop', None) or \
_Cfunction('libvlc_vlm_set_loop', ((1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
return f(p_instance, psz_name, b_loop)
def libvlc_vlm_set_mux(p_instance, psz_name, psz_mux):
'''Set a media's vod muxer.
@param p_instance: the instance.
@param psz_name: the media to work on.
@param psz_mux: the new muxer.
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_set_mux', None) or \
_Cfunction('libvlc_vlm_set_mux', ((1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p)
return f(p_instance, psz_name, psz_mux)
def libvlc_vlm_change_media(p_instance, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop):
'''Edit the parameters of a media. This will delete all existing inputs and
add the specified one.
@param p_instance: the instance.
@param psz_name: the name of the new broadcast.
@param psz_input: the input MRL.
@param psz_output: the output MRL (the parameter to the "sout" variable).
@param i_options: number of additional options.
@param ppsz_options: additional options.
@param b_enabled: boolean for enabling the new broadcast.
@param b_loop: Should this broadcast be played in a loop?
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_change_media', None) or \
_Cfunction('libvlc_vlm_change_media', ((1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int, ListPOINTER(ctypes.c_char_p), ctypes.c_int, ctypes.c_int)
return f(p_instance, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop)
def libvlc_vlm_play_media(p_instance, psz_name):
'''Play the named broadcast.
@param p_instance: the instance.
@param psz_name: the name of the broadcast.
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_play_media', None) or \
_Cfunction('libvlc_vlm_play_media', ((1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p)
return f(p_instance, psz_name)
def libvlc_vlm_stop_media(p_instance, psz_name):
'''Stop the named broadcast.
@param p_instance: the instance.
@param psz_name: the name of the broadcast.
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_stop_media', None) or \
_Cfunction('libvlc_vlm_stop_media', ((1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p)
return f(p_instance, psz_name)
def libvlc_vlm_pause_media(p_instance, psz_name):
'''Pause the named broadcast.
@param p_instance: the instance.
@param psz_name: the name of the broadcast.
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_pause_media', None) or \
_Cfunction('libvlc_vlm_pause_media', ((1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p)
return f(p_instance, psz_name)
def libvlc_vlm_seek_media(p_instance, psz_name, f_percentage):
'''Seek in the named broadcast.
@param p_instance: the instance.
@param psz_name: the name of the broadcast.
@param f_percentage: the percentage to seek to.
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_seek_media', None) or \
_Cfunction('libvlc_vlm_seek_media', ((1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_float)
return f(p_instance, psz_name, f_percentage)
def libvlc_vlm_show_media(p_instance, psz_name):
'''Return information about the named media as a JSON
string representation.
This function is mainly intended for debugging use,
if you want programmatic access to the state of
a vlm_media_instance_t, please use the corresponding
libvlc_vlm_get_media_instance_xxx functions.
Currently there are no such functions available for
vlm_media_t though.
@param p_instance: the instance.
@param psz_name: the name of the media, if the name is an empty string, all media is described.
@return: string with information about named media, or NULL on error.
'''
f = _Cfunctions.get('libvlc_vlm_show_media', None) or \
_Cfunction('libvlc_vlm_show_media', ((1,), (1,),), string_result,
ctypes.c_void_p, Instance, ctypes.c_char_p)
return f(p_instance, psz_name)
def libvlc_vlm_get_media_instance_position(p_instance, psz_name, i_instance):
'''Get vlm_media instance position by name or instance id.
@param p_instance: a libvlc instance.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: position as float, or -1.0 on error.
'''
f = _Cfunctions.get('libvlc_vlm_get_media_instance_position', None) or \
_Cfunction('libvlc_vlm_get_media_instance_position', ((1,), (1,), (1,),), None,
ctypes.c_float, Instance, ctypes.c_char_p, ctypes.c_int)
return f(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_time(p_instance, psz_name, i_instance):
'''Get vlm_media instance time by name or instance id.
@param p_instance: a libvlc instance.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: time as integer or -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_get_media_instance_time', None) or \
_Cfunction('libvlc_vlm_get_media_instance_time', ((1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
return f(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_length(p_instance, psz_name, i_instance):
'''Get vlm_media instance length by name or instance id.
@param p_instance: a libvlc instance.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: length of media item or -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_get_media_instance_length', None) or \
_Cfunction('libvlc_vlm_get_media_instance_length', ((1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
return f(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_rate(p_instance, psz_name, i_instance):
'''Get vlm_media instance playback rate by name or instance id.
@param p_instance: a libvlc instance.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: playback rate or -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_get_media_instance_rate', None) or \
_Cfunction('libvlc_vlm_get_media_instance_rate', ((1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
return f(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_title(p_instance, psz_name, i_instance):
'''Get vlm_media instance title number by name or instance id.
@param p_instance: a libvlc instance.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: title as number or -1 on error.
@bug: will always return 0.
'''
f = _Cfunctions.get('libvlc_vlm_get_media_instance_title', None) or \
_Cfunction('libvlc_vlm_get_media_instance_title', ((1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
return f(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_chapter(p_instance, psz_name, i_instance):
'''Get vlm_media instance chapter number by name or instance id.
@param p_instance: a libvlc instance.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: chapter as number or -1 on error.
@bug: will always return 0.
'''
f = _Cfunctions.get('libvlc_vlm_get_media_instance_chapter', None) or \
_Cfunction('libvlc_vlm_get_media_instance_chapter', ((1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
return f(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_seekable(p_instance, psz_name, i_instance):
'''Is the libvlc media instance seekable?
@param p_instance: a libvlc instance.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: 1 if seekable, 0 if not, -1 if media does not exist.
@bug: will always return 0.
'''
f = _Cfunctions.get('libvlc_vlm_get_media_instance_seekable', None) or \
_Cfunction('libvlc_vlm_get_media_instance_seekable', ((1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
return f(p_instance, psz_name, i_instance)
def libvlc_vlm_get_event_manager(p_instance):
'''Get libvlc_event_manager from a vlm media.
The p_event_manager is immutable, so you don't have to hold the lock.
@param p_instance: a libvlc instance.
@return: libvlc_event_manager.
'''
f = _Cfunctions.get('libvlc_vlm_get_event_manager', None) or \
_Cfunction('libvlc_vlm_get_event_manager', ((1,),), class_result(EventManager),
ctypes.c_void_p, Instance)
return f(p_instance)
# 4 function(s) blacklisted:
# libvlc_audio_output_get_device_type
# libvlc_audio_output_set_device_type
# libvlc_printerr
# libvlc_set_exit_handler
# 17 function(s) not wrapped as methods:
# libvlc_audio_output_device_list_release
# libvlc_audio_output_list_release
# libvlc_clearerr
# libvlc_clock
# libvlc_errmsg
# libvlc_event_type_name
# libvlc_free
# libvlc_get_changeset
# libvlc_get_compiler
# libvlc_get_version
# libvlc_log_get_context
# libvlc_log_get_object
# libvlc_media_tracks_release
# libvlc_module_description_list_release
# libvlc_new
# libvlc_track_description_list_release
# libvlc_vprinterr
# Start of footer.py #
# Backward compatibility
def callbackmethod(callback):
"""Now obsolete @callbackmethod decorator."""
return callback
# libvlc_free is not present in some versions of libvlc. If it is not
# in the library, then emulate it by calling libc.free
if not hasattr(dll, 'libvlc_free'):
# need to find the free function in the C runtime. This is
# platform specific.
# For Linux and MacOSX
libc_path = find_library('c')
if libc_path:
libc = ctypes.CDLL(libc_path)
libvlc_free = libc.free
else:
# On win32, it is impossible to guess the proper lib to call
# (msvcrt, mingw...). Just ignore the call: it will leak memory,
# but will not prevent the application from running.
def libvlc_free(p):
pass
# ensure argtypes is right, because default type of int won't work
# on 64-bit systems
libvlc_free.argtypes = [ ctypes.c_void_p ]
# Version functions
def _dot2int(v):
'''(INTERNAL) Convert 'i.i.i[.i]' str to int.
'''
t = [int(i) for i in v.split('.')]
if len(t) == 3:
t.append(0)
elif len(t) != 4:
raise ValueError('"i.i.i[.i]": %r' % (v,))
if min(t) < 0 or max(t) > 255:
raise ValueError('[0..255]: %r' % (v,))
i = t.pop(0)
while t:
i = (i << 8) + t.pop(0)
return i
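# Worked example (illustrative): '2.1.0' becomes [2, 1, 0, 0] and folds to
# (((2 << 8 | 1) << 8 | 0) << 8 | 0) == 0x02010000, i.e. one byte per part.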
def hex_version():
"""Return the version of these bindings in hex or 0 if unavailable.
"""
try:
return _dot2int(__version__.split('-')[-1])
except (NameError, ValueError):
return 0
def libvlc_hex_version():
"""Return the libvlc version in hex or 0 if unavailable.
"""
try:
return _dot2int(bytes_to_str(libvlc_get_version()).split()[0])
except ValueError:
return 0
def debug_callback(event, *args, **kwds):
'''Example callback, useful for debugging.
'''
l = ['event %s' % (event.type,)]
if args:
l.extend(map(str, args))
if kwds:
l.extend(sorted('%s=%s' % t for t in kwds.items()))
print('Debug callback (%s)' % ', '.join(l))
if __name__ == '__main__':
try:
from msvcrt import getch
except ImportError:
import termios
import tty
def getch(): # getchar(), getc(stdin) #PYCHOK flake
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
try:
tty.setraw(fd)
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old)
return ch
def end_callback(event):
print('End of media stream (event %s)' % event.type)
sys.exit(0)
echo_position = False
def pos_callback(event, player):
if echo_position:
sys.stdout.write('\r%s to %.2f%% (%.2f%%)' % (event.type,
event.u.new_position * 100,
player.get_position() * 100))
sys.stdout.flush()
def print_version():
"""Print libvlc version"""
try:
print('Build date: %s (%#x)' % (build_date, hex_version()))
print('LibVLC version: %s (%#x)' % (bytes_to_str(libvlc_get_version()), libvlc_hex_version()))
print('LibVLC compiler: %s' % bytes_to_str(libvlc_get_compiler()))
if plugin_path:
print('Plugin path: %s' % plugin_path)
except Exception:
print('Error: %s' % sys.exc_info()[1])
if sys.argv[1:] and sys.argv[1] not in ('-h', '--help'):
movie = os.path.expanduser(sys.argv[1])
if not os.access(movie, os.R_OK):
print('Error: %s file not readable' % movie)
sys.exit(1)
instance = Instance("--sub-source marq")
try:
media = instance.media_new(movie)
except NameError:
print('NameError: %s (%s vs LibVLC %s)' % (sys.exc_info()[1],
__version__,
libvlc_get_version()))
sys.exit(1)
player = instance.media_player_new()
player.set_media(media)
player.play()
# Some marquee examples. Marquee requires '--sub-source marq' in the
# Instance() call above. See <http://www.videolan.org/doc/play-howto/en/ch04.html>
player.video_set_marquee_int(VideoMarqueeOption.Enable, 1)
player.video_set_marquee_int(VideoMarqueeOption.Size, 24) # pixels
player.video_set_marquee_int(VideoMarqueeOption.Position, Position.Bottom)
if False: # only one marquee can be specified
player.video_set_marquee_int(VideoMarqueeOption.Timeout, 5000) # millisec, 0==forever
t = media.get_mrl() # movie
else: # update marquee text periodically
player.video_set_marquee_int(VideoMarqueeOption.Timeout, 0) # millisec, 0==forever
player.video_set_marquee_int(VideoMarqueeOption.Refresh, 1000) # millisec (or sec?)
##t = '$L / $D or $P at $T'
t = '%Y-%m-%d %H:%M:%S'
player.video_set_marquee_string(VideoMarqueeOption.Text, str_to_bytes(t))
# Some event manager examples. Note, the callback can be any Python
# callable and does not need to be decorated. Optionally, specify
# any number of positional and/or keyword arguments to be passed
# to the callback (in addition to the first one, an Event instance).
event_manager = player.event_manager()
event_manager.event_attach(EventType.MediaPlayerEndReached, end_callback)
event_manager.event_attach(EventType.MediaPlayerPositionChanged, pos_callback, player)
def mspf():
"""Milliseconds per frame."""
return int(1000 // (player.get_fps() or 25))
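# Worked example (illustrative): at 25 fps this is int(1000 // 25) == 40 ms;
# a zero/None fps also falls back to 25, giving the same 40 ms per frame.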
def print_info():
"""Print information about the media"""
try:
print_version()
media = player.get_media()
print('State: %s' % player.get_state())
print('Media: %s' % bytes_to_str(media.get_mrl()))
print('Track: %s/%s' % (player.video_get_track(), player.video_get_track_count()))
print('Current time: %s/%s' % (player.get_time(), media.get_duration()))
print('Position: %s' % player.get_position())
print('FPS: %s (%d ms)' % (player.get_fps(), mspf()))
print('Rate: %s' % player.get_rate())
print('Video size: %s' % str(player.video_get_size(0))) # num=0
print('Scale: %s' % player.video_get_scale())
print('Aspect ratio: %s' % player.video_get_aspect_ratio())
# print('Window: %s' % player.get_hwnd())
except Exception:
print('Error: %s' % sys.exc_info()[1])
def sec_forward():
"""Go forward one sec"""
player.set_time(player.get_time() + 1000)
def sec_backward():
"""Go backward one sec"""
player.set_time(player.get_time() - 1000)
def frame_forward():
"""Go forward one frame"""
player.set_time(player.get_time() + mspf())
def frame_backward():
"""Go backward one frame"""
player.set_time(player.get_time() - mspf())
def print_help():
"""Print help"""
print('Single-character commands:')
for k, m in sorted(keybindings.items()):
m = (m.__doc__ or m.__name__).splitlines()[0]
print(' %s: %s.' % (k, m.rstrip('.')))
print('0-9: go to that fraction of the movie')
def quit_app():
"""Stop and exit"""
sys.exit(0)
def toggle_echo_position():
"""Toggle echoing of media position"""
global echo_position
echo_position = not echo_position
keybindings = {
' ': player.pause,
'+': sec_forward,
'-': sec_backward,
'.': frame_forward,
',': frame_backward,
'f': player.toggle_fullscreen,
'i': print_info,
'p': toggle_echo_position,
'q': quit_app,
'?': print_help,
}
print('Press q to quit, ? to get help.%s' % os.linesep)
while True:
k = getch()
print('> %s' % k)
if k in keybindings:
keybindings[k]()
elif k.isdigit():
# jump to fraction of the movie.
player.set_position(float('0.'+k))
else:
print('Usage: %s <movie_filename>' % sys.argv[0])
print('Once launched, type ? for help.')
print('')
print_version()
|
vr2262/framer
|
vlc/vlc.py
|
Python
|
mit
| 256,336 | 0.004837 |
#!/usr/bin/env python
#
# Jetduino Example for using the Grove Thumb Joystick (http://www.seeedstudio.com/wiki/Grove_-_Thumb_Joystick)
#
# The Jetduino connects the Jetson and Grove sensors. You can learn more about the Jetduino here: http://www.NeuroRoboticTech.com/Projects/Jetduino
#
# Have a question about this example? Ask on the forums here: http://www.NeuroRoboticTech.com/Forum
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Jetduino for the Jetson TK1/TX1: an open source platform for connecting
Grove Sensors to the Jetson embedded supercomputers.
Copyright (C) 2016 NeuroRobotic Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import jetduino
from jetduino_pins import *
# Connect the Grove Thumb Joystick to analog port A0
# GrovePi Port A0 uses Arduino pins 0 and 1
# GrovePi Port A1 uses Arduino pins 1 and 2
# Don't plug anything into port A1 that uses pin 1
# Most Grove sensors only use 3 of their 4 pins, which is why the GrovePi shares Arduino pins between adjacent ports
# If the sensor has a pin definition SIG,NC,VCC,GND, the second (white) pin is not connected to anything
# If you wish to connect two joysticks, use ports A0 and A2 (skip A1)
# Uses two pins - one for the X axis and one for the Y axis
# This configuration means you are using port A0
xPin = ARD_A0
yPin = ARD_A2
jetduino.pinMode(xPin, INPUT_PIN)
jetduino.pinMode(yPin, INPUT_PIN)
# The Grove Thumb Joystick is an analog device that outputs analog signal ranging from 0 to 1023
# The X and Y axes are two ~10k potentiometers and a momentary push button which shorts the x axis
# My joystick produces slightly different results to the specifications found on the url above
# I've listed both here:
# Specifications
# Min Typ Max Click
# X 206 516 798 1023
# Y 203 507 797
# My Joystick
# Min Typ Max Click
# X 253 513 766 1020-1023
# Y 250 505 769
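# Worked example (illustrative): with the stick at its typical centre
# reading x = 513, Rx = (1023 - 513) * 10 / 513.0 ~= 9.94, i.e. the two
# halves of the ~10k potentiometer are nearly equal.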
while True:
try:
# Get X/Y coordinates
x = jetduino.analogRead(xPin)
y = jetduino.analogRead(yPin)
# Calculate X/Y resistance
Rx = (1023 - x) * 10.0 / x
Ry = (1023 - y) * 10.0 / y
# Was a click detected on the X axis?
click = 1 if x >= 1020 else 0
print ("x =", x, " y =", y, " Rx =", Rx, " Ry =", Ry, " click =", click)
time.sleep(.5)
except (IOError, ZeroDivisionError):
# an analogRead failure or a zero reading would otherwise crash the loop
print ("Error")
|
NeuroRoboticTech/Jetduino
|
Software/Python/grove_thumb_joystick.py
|
Python
|
mit
| 3,602 | 0.003609 |
# -*- coding: utf-8 -*-
"""
.. module:: MyCapytain.errors
:synopsis: MyCapytain errors
.. moduleauthor:: Thibault Clérice <leponteineptique@gmail.com>
"""
class MyCapytainException(BaseException):
""" Namespacing errors
"""
class JsonLdCollectionMissing(MyCapytainException):
""" Error thrown when a JSON LD contains no principle collection
Raised when a json supposed to contain collection is parsed
but nothing is found
"""
class DuplicateReference(SyntaxWarning, MyCapytainException):
""" Error generated when a duplicate is found in CtsReference
"""
class RefsDeclError(Exception, MyCapytainException):
""" Error issued when an the refsDecl does not succeed in xpath (no results)
"""
pass
class InvalidSiblingRequest(Exception, MyCapytainException):
""" This error is thrown when one attempts to get previous or next passage on a passage with a range of different
depth, ex. : 1-2.25
"""
pass
class InvalidURN(Exception, MyCapytainException):
""" This error is thrown when URN are not valid
"""
class MissingAttribute(Exception, MyCapytainException):
""" This error is thrown when an attribute is not present in the Object (missing at startup)
"""
class UnknownObjectError(ValueError, MyCapytainException):
""" This error is thrown when an object does not exist in an inventory or in an API
"""
class UnknownNamespace(ValueError, MyCapytainException):
""" This error is thrown when a namespace is unknown
"""
class UndispatchedTextError(Exception, MyCapytainException):
""" This error is thrown when a text has not been dispatched by a dispatcher
"""
class UnknownCollection(KeyError, MyCapytainException):
""" A collection is unknown to its ancestor
"""
class EmptyReference(SyntaxWarning, MyCapytainException):
""" Error generated when a CtsReference does not exist or is invalid
"""
class CitationDepthError(UnknownObjectError, MyCapytainException):
""" Error generated when the depth of a requested citation is deeper than the citation scheme of the text
"""
class MissingRefsDecl(Exception, MyCapytainException):
""" A text has no properly encoded refsDecl
"""
class PaginationBrowsingError(MyCapytainException):
""" When contacting a remote service and some part of the pages where not reachable or parsable
"""
class CapitainsXPathError(Exception):
def __init__(self, message):
super(CapitainsXPathError, self).__init__()
self.message = message
def __repr__(self):
return "CapitainsXPathError("+self.message+")"
|
Capitains/MyCapytain
|
MyCapytain/errors.py
|
Python
|
mpl-2.0
| 2,639 | 0.003412 |
# Copyright 2016 Internap
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask
from ubersmith_remote_module_server import api, router
class Server(object):
def __init__(self, modules):
self.router = router.Router()
self.app = Flask(__name__)
self.api = api.Api(modules, self.app, self.router)
def run(self, *args, **kwargs):
self.app.run(*args, **kwargs)
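# Usage sketch (illustrative): `modules` is whatever collection
# ubersmith_remote_module_server.api.Api expects; host/port are forwarded
# verbatim to Flask's app.run().
#
#   server = Server(modules)
#   server.run(host='0.0.0.0', port=5000)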
|
mat128/python-ubersmith-remote-module-server
|
ubersmith_remote_module_server/server.py
|
Python
|
apache-2.0
| 915 | 0.002186 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Pure-Python application server for testing applications locally.
Given a port and the paths to a valid application directory (with an 'app.yaml'
file), the external library directory, and a relative URL to use for logins,
creates an HTTP server that can be used to test an application locally. Uses
stubs instead of actual APIs when SetupStubs() is called first.
Example:
root_path = '/path/to/application/directory'
login_url = '/login'
port = 8080
server = dev_appserver.CreateServer(root_path, login_url, port)
server.serve_forever()
"""
from __future__ import with_statement
from google.appengine.tools import os_compat
import __builtin__
import BaseHTTPServer
import base64
import binascii
import calendar
import cStringIO
import cgi
import cgitb
import email.Utils
import errno
import hashlib
import heapq
import httplib
import imp
import inspect
import logging
import mimetools
import mimetypes
import os
import select
import shutil
import simplejson
import StringIO
import struct
import tempfile
import wsgiref.headers
import yaml
import re
import sre_compile
import sre_constants
import sre_parse
import socket
import sys
import time
import types
import urlparse
import urllib
import zlib
import google
try:
from google.third_party.apphosting.python.webapp2 import v2_3 as tmp
sys.path.append(os.path.dirname(tmp.__file__))
del tmp
except ImportError:
pass
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import appinfo
from google.appengine.api import appinfo_includes
from google.appengine.api import app_logging
from google.appengine.api import blobstore
from google.appengine.api import croninfo
from google.appengine.api import datastore
from google.appengine.api import datastore_file_stub
from google.appengine.api import lib_config
from google.appengine.api import mail
from google.appengine.api import mail_stub
from google.appengine.api import namespace_manager
from google.appengine.api import request_info
from google.appengine.api import urlfetch_stub
from google.appengine.api import user_service_stub
from google.appengine.api import yaml_errors
from google.appengine.api.app_identity import app_identity_stub
from google.appengine.api.blobstore import blobstore_stub
from google.appengine.api.blobstore import file_blob_storage
from google.appengine.api.capabilities import capability_stub
from google.appengine.api.channel import channel_service_stub
from google.appengine.api.files import file_service_stub
from google.appengine.api.logservice import logservice
from google.appengine.api.logservice import logservice_stub
from google.appengine.api.search import simple_search_stub
from google.appengine.api.taskqueue import taskqueue_stub
from google.appengine.api.prospective_search import prospective_search_stub
from google.appengine.api.remote_socket import _remote_socket_stub
from google.appengine.api.memcache import memcache_stub
from google.appengine.api import rdbms_mysqldb
from google.appengine.api.system import system_stub
from google.appengine.api.xmpp import xmpp_service_stub
from google.appengine.datastore import datastore_sqlite_stub
from google.appengine.datastore import datastore_stub_util
from google.appengine.datastore import datastore_v4_stub
from google.appengine import dist
try:
from google.appengine.runtime import request_environment
from google.appengine.runtime import runtime
except:
request_environment = None
runtime = None
from google.appengine.tools import dev_appserver_apiserver
from google.appengine.tools import dev_appserver_blobimage
from google.appengine.tools import dev_appserver_blobstore
from google.appengine.tools import dev_appserver_channel
from google.appengine.tools import dev_appserver_import_hook
from google.appengine.tools import dev_appserver_login
from google.appengine.tools import dev_appserver_multiprocess as multiprocess
from google.appengine.tools import dev_appserver_oauth
from google.appengine.tools import dev_appserver_upload
from google.storage.speckle.python.api import rdbms
CouldNotFindModuleError = dev_appserver_import_hook.CouldNotFindModuleError
FakeAccess = dev_appserver_import_hook.FakeAccess
FakeFile = dev_appserver_import_hook.FakeFile
FakeReadlink = dev_appserver_import_hook.FakeReadlink
FakeSetLocale = dev_appserver_import_hook.FakeSetLocale
FakeUnlink = dev_appserver_import_hook.FakeUnlink
GetSubmoduleName = dev_appserver_import_hook.GetSubmoduleName
HardenedModulesHook = dev_appserver_import_hook.HardenedModulesHook
SDK_ROOT = dev_appserver_import_hook.SDK_ROOT
PYTHON_LIB_VAR = '$PYTHON_LIB'
DEVEL_CONSOLE_PATH = PYTHON_LIB_VAR + '/google/appengine/ext/admin'
REMOTE_API_PATH = (PYTHON_LIB_VAR +
'/google/appengine/ext/remote_api/handler.py')
FILE_MISSING_EXCEPTIONS = frozenset([errno.ENOENT, errno.ENOTDIR])
MAX_URL_LENGTH = 2047
DEFAULT_ENV = {
'GATEWAY_INTERFACE': 'CGI/1.1',
'AUTH_DOMAIN': 'gmail.com',
'USER_ORGANIZATION': '',
'TZ': 'UTC',
}
DEFAULT_SELECT_DELAY = 30.0
for ext, mime_type in mail.EXTENSION_MIME_MAP.iteritems():
mimetypes.add_type(mime_type, '.' + ext)
MAX_RUNTIME_RESPONSE_SIZE = 32 << 20
MAX_REQUEST_SIZE = 32 * 1024 * 1024
COPY_BLOCK_SIZE = 1 << 20
API_VERSION = '1'
VERSION_FILE = '../../VERSION'
DEVEL_PAYLOAD_HEADER = 'HTTP_X_APPENGINE_DEVELOPMENT_PAYLOAD'
DEVEL_PAYLOAD_RAW_HEADER = 'X-AppEngine-Development-Payload'
DEVEL_FAKE_IS_ADMIN_HEADER = 'HTTP_X_APPENGINE_FAKE_IS_ADMIN'
DEVEL_FAKE_IS_ADMIN_RAW_HEADER = 'X-AppEngine-Fake-Is-Admin'
FILE_STUB_DEPRECATION_MESSAGE = (
"""The datastore file stub is deprecated, and
will stop being the default in a future release.
Append the --use_sqlite flag to use the new SQLite stub.
You can port your existing data using the --port_sqlite_data flag or
purge your previous test data with --clear_datastore.
""")
NON_PUBLIC_CACHE_CONTROLS = frozenset(['private', 'no-cache', 'no-store'])
class Error(Exception):
"""Base-class for exceptions in this module."""
class InvalidAppConfigError(Error):
"""The supplied application configuration file is invalid."""
class AppConfigNotFoundError(Error):
"""Application configuration file not found."""
class CompileError(Error):
"""Application could not be compiled."""
def __init__(self, text):
self.text = text
class ExecuteError(Error):
"""Application could not be executed."""
def __init__(self, text, log):
self.text = text
self.log = log
def MonkeyPatchPdb(pdb):
"""Given a reference to the pdb module, fix its set_trace function.
This will allow the standard trick of setting a breakpoint in your
code by inserting a call to pdb.set_trace() to work properly, as
long as the original stdin and stdout of dev_appserver.py are
connected to a console or shell window.
"""
def NewSetTrace():
"""Replacement for set_trace() that uses the original i/o streams.
This is necessary because by the time the user code that might
invoke pdb.set_trace() runs, the default sys.stdin and sys.stdout
are redirected to the HTTP request and response streams instead,
so that pdb will encounter garbage (or EOF) in its input, and its
output will garble the HTTP response. Fortunately, sys.__stdin__
and sys.__stderr__ retain references to the original streams --
this is a standard Python feature. Also, fortunately, as of
Python 2.5, the Pdb class lets you easily override stdin and
stdout. The original set_trace() function does essentially the
same thing as the code here except it instantiates Pdb() without
arguments.
"""
p = pdb.Pdb(stdin=sys.__stdin__, stdout=sys.__stdout__)
p.set_trace(sys._getframe().f_back)
pdb.set_trace = NewSetTrace
def MonkeyPatchThreadingLocal(_threading_local):
"""Given a reference to the _threading_local module, fix _localbase.__new__.
This ensures that using dev_appserver with a Python interpreter older than
2.7 will include the fix to the _threading_local._localbase.__new__ method
which was introduced in Python 2.7 (http://bugs.python.org/issue1522237).
"""
@staticmethod
def New(cls, *args, **kw):
self = object.__new__(cls)
key = '_local__key', 'thread.local.' + str(id(self))
object.__setattr__(self, '_local__key', key)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__lock', _threading_local.RLock())
if (args or kw) and (cls.__init__ is object.__init__):
raise TypeError('Initialization arguments are not supported')
dict = object.__getattribute__(self, '__dict__')
_threading_local.current_thread().__dict__[key] = dict
return self
_threading_local._localbase.__new__ = New
def SplitURL(relative_url):
"""Splits a relative URL into its path and query-string components.
Args:
relative_url: String containing the relative URL (often starting with '/')
to split. Should be properly escaped as www-form-urlencoded data.
Returns:
Tuple (script_name, query_string) where:
script_name: Relative URL of the script that was accessed.
query_string: String containing everything after the '?' character.
"""
(unused_scheme, unused_netloc, path, query,
unused_fragment) = urlparse.urlsplit(relative_url)
return path, query
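# Worked example (illustrative):
#   SplitURL('/foo/bar?a=1&b=2') == ('/foo/bar', 'a=1&b=2')
#   SplitURL('/foo/bar') == ('/foo/bar', '')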
def GetFullURL(server_name, server_port, relative_url):
"""Returns the full, original URL used to access the relative URL.
Args:
server_name: Name of the local host, or the value of the 'host' header
from the request.
server_port: Port on which the request was served (string or int).
relative_url: Relative URL that was accessed, including query string.
Returns:
String containing the original URL.
"""
if str(server_port) != '80':
netloc = '%s:%s' % (server_name, server_port)
else:
netloc = server_name
return 'http://%s%s' % (netloc, relative_url)
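# Worked example (illustrative): the port is included unless it is 80.
#   GetFullURL('localhost', 8080, '/a?b=1') == 'http://localhost:8080/a?b=1'
#   GetFullURL('localhost', 80, '/a?b=1') == 'http://localhost/a?b=1'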
def CopyStreamPart(source, destination, content_size):
"""Copy a portion of a stream from one file-like object to another.
Args:
source: Source stream to copy from.
destination: Destination stream to copy to.
content_size: Maximum bytes to copy.
Returns:
Number of bytes actually copied.
"""
bytes_copied = 0
bytes_left = content_size
while bytes_left > 0:
bytes = source.read(min(bytes_left, COPY_BLOCK_SIZE))
bytes_read = len(bytes)
if bytes_read == 0:
break
destination.write(bytes)
bytes_copied += bytes_read
bytes_left -= bytes_read
return bytes_copied
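# Usage sketch (illustrative): copying a 100-byte stream in a 40-byte slice.
#   src = cStringIO.StringIO('x' * 100)
#   dst = cStringIO.StringIO()
#   CopyStreamPart(src, dst, 40)  # returns 40; dst.getvalue() is 40 bytes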
def AppIdWithDefaultPartition(app_id, default_partition):
"""Add a partition to an application id if necessary."""
if not default_partition:
return app_id
if '~' in app_id:
return app_id
return default_partition + '~' + app_id
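# Worked example (illustrative):
#   AppIdWithDefaultPartition('myapp', 'dev~') == 'dev~myapp'
#   AppIdWithDefaultPartition('s~myapp', 'dev~') == 's~myapp'  # already partitioned
#   AppIdWithDefaultPartition('myapp', '') == 'myapp'          # no default partition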
class AppServerRequest(object):
"""Encapsulates app-server request.
Object used to hold a full appserver request. Used as a container that is
passed through the request forward chain and ultimately sent to the
URLDispatcher instances.
Attributes:
relative_url: String containing the URL accessed.
path: Local path of the resource that was matched; back-references will be
replaced by values matched in the relative_url. Path may be relative
or absolute, depending on the resource being served (e.g., static files
will have an absolute path; scripts will be relative).
headers: Instance of mimetools.Message with headers from the request.
infile: File-like object with input data from the request.
force_admin: Allow request admin-only URLs to proceed regardless of whether
user is logged in or is an admin.
"""
ATTRIBUTES = ['relative_url',
'path',
'headers',
'infile',
'force_admin',
]
def __init__(self,
relative_url,
path,
headers,
infile,
force_admin=False):
"""Constructor.
Args:
relative_url: Mapped directly to attribute.
path: Mapped directly to attribute.
headers: Mapped directly to attribute.
infile: Mapped directly to attribute.
force_admin: Mapped directly to attribute.
"""
self.relative_url = relative_url
self.path = path
self.headers = headers
self.infile = infile
self.force_admin = force_admin
if (DEVEL_PAYLOAD_RAW_HEADER in self.headers or
DEVEL_FAKE_IS_ADMIN_RAW_HEADER in self.headers):
self.force_admin = True
def __eq__(self, other):
"""Used mainly for testing.
Returns:
True if all fields of both requests are equal, else False.
"""
if type(self) == type(other):
for attribute in self.ATTRIBUTES:
if getattr(self, attribute) != getattr(other, attribute):
return False
return True
def __repr__(self):
"""String representation of request.
Used mainly for testing.
Returns:
String representation of AppServerRequest. Strings of different
request objects that have the same values for all fields compare
as equal.
"""
results = []
for attribute in self.ATTRIBUTES:
results.append('%s: %s' % (attribute, getattr(self, attribute)))
return '<AppServerRequest %s>' % ' '.join(results)
class URLDispatcher(object):
"""Base-class for handling HTTP requests."""
def Dispatch(self,
request,
outfile,
base_env_dict=None):
"""Dispatch and handle an HTTP request.
base_env_dict should contain at least these CGI variables:
REQUEST_METHOD, REMOTE_ADDR, SERVER_SOFTWARE, SERVER_NAME,
SERVER_PROTOCOL, SERVER_PORT
Args:
request: AppServerRequest instance.
outfile: File-like object where output data should be written.
base_env_dict: Dictionary of CGI environment parameters if available.
Defaults to None.
Returns:
None if request handling is complete.
A new AppServerRequest instance if internal redirect is required.
"""
raise NotImplementedError
def EndRedirect(self, dispatched_output, original_output):
"""Process the end of an internal redirect.
This method is called after all subsequent dispatch requests have finished.
By default the output from the dispatched process is copied to the original.
This will not be called on dispatchers that do not return an internal
redirect.
Args:
dispatched_output: StringIO buffer containing the results from the
dispatched
original_output: The original output file.
Returns:
None if request handling is complete.
A new AppServerRequest instance if internal redirect is required.
"""
original_output.write(dispatched_output.read())
class URLMatcher(object):
"""Matches an arbitrary URL using a list of URL patterns from an application.
Each URL pattern has an associated URLDispatcher instance and path to the
resource's location on disk. See AddURL for more details. The first pattern
that matches an inputted URL will have its associated values returned by
Match().
"""
def __init__(self):
"""Initializer."""
self._url_patterns = []
def AddURL(self, regex, dispatcher, path, requires_login, admin_only,
auth_fail_action):
"""Adds a URL pattern to the list of patterns.
If the supplied regex starts with a '^' or ends with a '$' an
InvalidAppConfigError exception will be raised. Start and end symbols
and implicitly added to all regexes, meaning we assume that all regexes
consume all input from a URL.
Args:
regex: String containing the regular expression pattern.
dispatcher: Instance of URLDispatcher that should handle requests that
match this regex.
path: Path on disk for the resource. May contain back-references like
r'\1', r'\2', etc, which will be replaced by the corresponding groups
matched by the regex if present.
requires_login: True if the user must be logged-in before accessing this
URL; False if anyone can access this URL.
admin_only: True if the user must be a logged-in administrator to
access the URL; False if anyone can access the URL.
auth_fail_action: either appinfo.AUTH_FAIL_ACTION_REDIRECT (default)
which indicates that the server should redirect to the login page when
an authentication is needed, or appinfo.AUTH_FAIL_ACTION_UNAUTHORIZED
which indicates that the server should just return a 401 Unauthorized
message immediately.
Raises:
TypeError: if dispatcher is not a URLDispatcher sub-class instance.
InvalidAppConfigError: if regex isn't valid.
"""
if not isinstance(dispatcher, URLDispatcher):
raise TypeError('dispatcher must be a URLDispatcher sub-class')
if regex.startswith('^') or regex.endswith('$'):
raise InvalidAppConfigError('regex starts with "^" or ends with "$"')
adjusted_regex = '^%s$' % regex
try:
url_re = re.compile(adjusted_regex)
except re.error, e:
raise InvalidAppConfigError('regex invalid: %s' % e)
match_tuple = (url_re, dispatcher, path, requires_login, admin_only,
auth_fail_action)
self._url_patterns.append(match_tuple)
def Match(self,
relative_url,
split_url=SplitURL):
"""Matches a URL from a request against the list of URL patterns.
The supplied relative_url may include the query string (i.e., the '?'
character and everything following).
Args:
relative_url: Relative URL being accessed in a request.
split_url: Used for dependency injection.
Returns:
Tuple (dispatcher, matched_path, requires_login, admin_only,
auth_fail_action), which are the corresponding values passed to
AddURL when the matching URL pattern was added to this matcher.
The matched_path will have back-references replaced using values
matched by the URL pattern. If no match was found, dispatcher will
be None.
"""
adjusted_url, unused_query_string = split_url(relative_url)
for url_tuple in self._url_patterns:
url_re, dispatcher, path, requires_login, admin_only, auth_fail_action = url_tuple
the_match = url_re.match(adjusted_url)
if the_match:
adjusted_path = the_match.expand(path)
return (dispatcher, adjusted_path, requires_login, admin_only,
auth_fail_action)
return None, None, None, None, None
def GetDispatchers(self):
"""Retrieves the URLDispatcher objects that could be matched.
Should only be used in tests.
Returns:
A set of URLDispatcher objects.
"""
return set([url_tuple[1] for url_tuple in self._url_patterns])
class MatcherDispatcher(URLDispatcher):
"""Dispatcher across multiple URLMatcher instances."""
def __init__(self,
config,
login_url,
module_manager,
url_matchers,
get_user_info=dev_appserver_login.GetUserInfo,
login_redirect=dev_appserver_login.LoginRedirect):
"""Initializer.
Args:
config: AppInfoExternal instance representing the parsed app.yaml file.
login_url: Relative URL which should be used for handling user logins.
module_manager: ModuleManager instance that is used to detect and reload
modules if the matched Dispatcher is dynamic.
url_matchers: Sequence of URLMatcher objects.
get_user_info: Used for dependency injection.
login_redirect: Used for dependency injection.
"""
self._config = config
self._login_url = login_url
self._module_manager = module_manager
self._url_matchers = tuple(url_matchers)
self._get_user_info = get_user_info
self._login_redirect = login_redirect
def Dispatch(self,
request,
outfile,
base_env_dict=None):
"""Dispatches a request to the first matching dispatcher.
Matchers are checked in the order they were supplied to the constructor.
If no matcher matches, a 404 error will be written to the outfile. The
value of request.path is ignored.
"""
cookies = ', '.join(request.headers.getheaders('cookie'))
email_addr, admin, user_id = self._get_user_info(cookies)
for matcher in self._url_matchers:
dispatcher, matched_path, requires_login, admin_only, auth_fail_action = matcher.Match(request.relative_url)
if dispatcher is None:
continue
logging.debug('Matched "%s" to %s with path %s',
request.relative_url, dispatcher, matched_path)
if ((requires_login or admin_only) and
not email_addr and
not request.force_admin):
logging.debug('Login required, redirecting user')
if auth_fail_action == appinfo.AUTH_FAIL_ACTION_REDIRECT:
self._login_redirect(self._login_url,
base_env_dict['SERVER_NAME'],
base_env_dict['SERVER_PORT'],
request.relative_url,
outfile)
elif auth_fail_action == appinfo.AUTH_FAIL_ACTION_UNAUTHORIZED:
outfile.write('Status: %d Not authorized\r\n'
'\r\n'
'Login required to view page.'
% (httplib.UNAUTHORIZED))
elif admin_only and not admin and not request.force_admin:
outfile.write('Status: %d Not authorized\r\n'
'\r\n'
'Current logged in user %s is not '
'authorized to view this page.'
% (httplib.FORBIDDEN, email_addr))
else:
request.path = matched_path
if (not isinstance(dispatcher, FileDispatcher) and
self._module_manager.AreModuleFilesModified()):
self._module_manager.ResetModules()
forward_request = dispatcher.Dispatch(request,
outfile,
base_env_dict=base_env_dict)
while forward_request:
logging.info('Internal redirection to %s',
forward_request.relative_url)
new_outfile = cStringIO.StringIO()
self.Dispatch(forward_request,
new_outfile,
dict(base_env_dict))
new_outfile.seek(0)
forward_request = dispatcher.EndRedirect(new_outfile, outfile)
return
outfile.write('Status: %d URL did not match\r\n'
'\r\n'
'Not found error: %s did not match any patterns '
'in application configuration.'
% (httplib.NOT_FOUND, request.relative_url))
_IGNORE_REQUEST_HEADERS = frozenset([
'accept-encoding',
'connection',
'keep-alive',
'proxy-authorization',
'te',
'trailer',
'transfer-encoding',
'content-type',
'content-length',
])
_request_id = 0
_request_time = 0
def _generate_request_id_hash():
"""Generates a hash of the current request id."""
return hashlib.sha1(str(_request_id)).hexdigest()[:8].upper()
def _GenerateRequestLogId():
"""Generates the request log id for the current request."""
sec = int(_request_time)
usec = int(1000000 * (_request_time - sec))
h = hashlib.sha1(str(_request_id)).digest()[:4]
packed = struct.Struct('> L L').pack(sec, usec)
return binascii.b2a_hex(packed + h)
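# A sketch of the log id layout produced above (values are illustrative):
# the first 8 hex digits encode the whole seconds, the next 8 the
# microseconds, and the last 8 come from the SHA-1 of the request id,
# giving a 24-digit hex string such as '4f2b18a3' + '000f4240' + '1a2b3c4d'.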
def GetGoogleSqlOAuth2RefreshToken(oauth_file_path):
"""Reads the user's Google Cloud SQL OAuth2.0 token from disk."""
if not os.path.exists(oauth_file_path):
return None
try:
with open(oauth_file_path) as oauth_file:
token = simplejson.load(oauth_file)
return token['refresh_token']
except (IOError, KeyError, simplejson.decoder.JSONDecodeError):
logging.exception(
'Could not read OAuth2.0 token from %s', oauth_file_path)
return None
def SetupEnvironment(cgi_path,
relative_url,
headers,
infile,
split_url=SplitURL,
get_user_info=dev_appserver_login.GetUserInfo):
"""Sets up environment variables for a CGI.
Args:
cgi_path: Full file-system path to the CGI being executed.
relative_url: Relative URL used to access the CGI.
headers: Instance of mimetools.Message containing request headers.
infile: File-like object with input data from the request.
split_url, get_user_info: Used for dependency injection.
Returns:
Dictionary containing CGI environment variables.
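Example of selected keys in the returned dictionary for a request to
'/foo?bar=1' (illustrative values only):
  {'PATH_INFO': '/foo', 'QUERY_STRING': 'bar=1',
   'CONTENT_TYPE': 'application/x-www-form-urlencoded', ...}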
"""
env = DEFAULT_ENV.copy()
script_name, query_string = split_url(relative_url)
env['_AH_ENCODED_SCRIPT_NAME'] = script_name
env['SCRIPT_NAME'] = ''
env['QUERY_STRING'] = query_string
env['PATH_INFO'] = urllib.unquote(script_name)
env['PATH_TRANSLATED'] = cgi_path
env['CONTENT_TYPE'] = headers.getheader('content-type',
'application/x-www-form-urlencoded')
env['CONTENT_LENGTH'] = headers.getheader('content-length', '')
cookies = ', '.join(headers.getheaders('cookie'))
email_addr, admin, user_id = get_user_info(cookies)
env['USER_EMAIL'] = email_addr
env['USER_ID'] = user_id
if admin:
env['USER_IS_ADMIN'] = '1'
if env['AUTH_DOMAIN'] == '*':
auth_domain = 'gmail.com'
parts = email_addr.split('@')
if len(parts) == 2 and parts[1]:
auth_domain = parts[1]
env['AUTH_DOMAIN'] = auth_domain
env['REQUEST_LOG_ID'] = _GenerateRequestLogId()
env['REQUEST_ID_HASH'] = _generate_request_id_hash()
for key in headers:
if key in _IGNORE_REQUEST_HEADERS:
continue
adjusted_name = key.replace('-', '_').upper()
env['HTTP_' + adjusted_name] = ', '.join(headers.getheaders(key))
if DEVEL_PAYLOAD_HEADER in env:
del env[DEVEL_PAYLOAD_HEADER]
new_data = base64.standard_b64decode(infile.getvalue())
infile.seek(0)
infile.truncate()
infile.write(new_data)
infile.seek(0)
env['CONTENT_LENGTH'] = str(len(new_data))
if DEVEL_FAKE_IS_ADMIN_HEADER in env:
del env[DEVEL_FAKE_IS_ADMIN_HEADER]
token = GetGoogleSqlOAuth2RefreshToken(os.path.expanduser(
rdbms.OAUTH_CREDENTIALS_PATH))
if token:
env['GOOGLE_SQL_OAUTH2_REFRESH_TOKEN'] = token
return env
def NotImplementedFake(*args, **kwargs):
"""Fake for methods/functions that are not implemented in the production
environment.
"""
raise NotImplementedError('This class/method is not available.')
class NotImplementedFakeClass(object):
"""Fake class for classes that are not implemented in the production env.
"""
__init__ = NotImplementedFake
def IsEncodingsModule(module_name):
"""Determines if the supplied module is related to encodings in any way.
Encodings-related modules cannot be reloaded, so they need to be treated
specially when sys.modules is modified in any way.
Args:
module_name: Absolute name of the module regardless of how it is imported
into the local namespace (e.g., foo.bar.baz).
Returns:
True if it's an encodings-related module; False otherwise.
"""
if (module_name in ('codecs', 'encodings') or
module_name.startswith('encodings.')):
return True
return False
def ClearAllButEncodingsModules(module_dict):
"""Clear all modules in a module dictionary except for those modules that
are in any way related to encodings.
Args:
module_dict: Dictionary in the form used by sys.modules.
"""
for module_name in module_dict.keys():
if not IsEncodingsModule(module_name) and module_name != 'sys':
del module_dict[module_name]
def ConnectAndDisconnectChildModules(old_module_dict, new_module_dict):
"""Prepares for switching from old_module_dict to new_module_dict.
Disconnects child modules going away from parents that remain, and reconnects
child modules that are being added back in to old parents. This is needed to
satisfy code that follows the getattr() descendant chain rather than looking
up the desired module directly in the module dict.
Args:
old_module_dict: The module dict being replaced, looks like sys.modules.
new_module_dict: The module dict taking its place, looks like sys.modules.
"""
old_keys = set(old_module_dict.keys())
new_keys = set(new_module_dict.keys())
for deleted_module_name in old_keys - new_keys:
if old_module_dict[deleted_module_name] is None:
continue
segments = deleted_module_name.rsplit('.', 1)
if len(segments) == 2:
parent_module = new_module_dict.get(segments[0])
if parent_module and hasattr(parent_module, segments[1]):
delattr(parent_module, segments[1])
for added_module_name in new_keys - old_keys:
if new_module_dict[added_module_name] is None:
continue
segments = added_module_name.rsplit('.', 1)
if len(segments) == 2:
parent_module = old_module_dict.get(segments[0])
child_module = new_module_dict[added_module_name]
if (parent_module and
getattr(parent_module, segments[1], None) is not child_module):
setattr(parent_module, segments[1], child_module)
SHARED_MODULE_PREFIXES = set([
'google',
'logging',
'sys',
'warnings',
're',
'sre_compile',
'sre_constants',
'sre_parse',
'email',
'wsgiref',
'MySQLdb',
'decimal',
])
NOT_SHARED_MODULE_PREFIXES = set([
'google.appengine.ext',
])
def ModuleNameHasPrefix(module_name, prefix_set):
"""Determines if a module's name belongs to a set of prefix strings.
Args:
module_name: String containing the fully qualified module name.
prefix_set: Iterable set of module name prefixes to check against.
Returns:
True if the module_name belongs to the prefix set or is a submodule of
any of the modules specified in the prefix_set. Otherwise False.
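Example (illustrative):
  ModuleNameHasPrefix('google.appengine.ext', set(['google']))  # True
  ModuleNameHasPrefix('googly', set(['google']))                # False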
"""
for prefix in prefix_set:
if prefix == module_name:
return True
if module_name.startswith(prefix + '.'):
return True
return False
def SetupSharedModules(module_dict):
"""Creates a module dictionary for the hardened part of the process.
Module dictionary will contain modules that should be shared between the
hardened and unhardened parts of the process.
Args:
module_dict: Module dictionary from which existing modules should be
pulled (usually sys.modules).
Returns:
A new module dictionary.
"""
output_dict = {}
for module_name, module in module_dict.iteritems():
if module is None:
continue
if IsEncodingsModule(module_name):
output_dict[module_name] = module
continue
shared_prefix = ModuleNameHasPrefix(module_name, SHARED_MODULE_PREFIXES)
banned_prefix = ModuleNameHasPrefix(module_name, NOT_SHARED_MODULE_PREFIXES)
if shared_prefix and not banned_prefix:
output_dict[module_name] = module
return output_dict
def ModuleHasValidMainFunction(module):
"""Determines if a module has a main function that takes no arguments.
This includes functions that have arguments with defaults that are all
assigned, thus requiring no additional arguments in order to be called.
Args:
module: A types.ModuleType instance.
Returns:
True if the module has a valid, reusable main function; False otherwise.
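Example of a main() that qualifies (an illustrative sketch):
  def main(argv=None):  # one argument, but it has a default value
    ...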
"""
if hasattr(module, 'main') and type(module.main) is types.FunctionType:
arg_names, var_args, var_kwargs, default_values = inspect.getargspec(
module.main)
if len(arg_names) == 0:
return True
if default_values is not None and len(arg_names) == len(default_values):
return True
return False
def CheckScriptExists(cgi_path, handler_path):
"""Check that the given handler_path is a file that exists on disk.
Args:
cgi_path: Absolute path to the CGI script file on disk.
handler_path: CGI path stored in the application configuration (as a path
like 'foo/bar/baz.py'). May contain $PYTHON_LIB references.
Raises:
CouldNotFindModuleError: if the given handler_path is a file and doesn't
have the expected extension.
"""
if handler_path.startswith(PYTHON_LIB_VAR + '/'):
return
if (not os.path.isdir(cgi_path) and
not os.path.isfile(cgi_path) and
os.path.isfile(cgi_path + '.py')):
raise CouldNotFindModuleError(
'Perhaps you meant to have the line "script: %s.py" in your app.yaml' %
handler_path)
def GetScriptModuleName(handler_path):
"""Determines the fully-qualified Python module name of a script on disk.
Args:
handler_path: CGI path stored in the application configuration (as a path
like 'foo/bar/baz.py'). May contain $PYTHON_LIB references.
Returns:
String containing the corresponding module name (e.g., 'foo.bar.baz').
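Example (illustrative, using POSIX-style path separators):
  GetScriptModuleName('foo/bar/baz.py')       # -> 'foo.bar.baz'
  GetScriptModuleName('foo/bar/__init__.py')  # -> 'foo.bar'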
"""
if handler_path.startswith(PYTHON_LIB_VAR + '/'):
handler_path = handler_path[len(PYTHON_LIB_VAR):]
handler_path = os.path.normpath(handler_path)
extension_index = handler_path.rfind('.py')
if extension_index != -1:
handler_path = handler_path[:extension_index]
module_fullname = handler_path.replace(os.sep, '.')
module_fullname = module_fullname.strip('.')
module_fullname = re.sub('\.+', '.', module_fullname)
if module_fullname.endswith('.__init__'):
module_fullname = module_fullname[:-len('.__init__')]
return module_fullname
def FindMissingInitFiles(cgi_path, module_fullname, isfile=os.path.isfile):
"""Determines which __init__.py files are missing from a module's parent
packages.
Args:
cgi_path: Absolute path of the CGI module file on disk.
module_fullname: Fully qualified Python module name used to import the
cgi_path module.
isfile: Used for testing.
Returns:
List containing the paths to the missing __init__.py files.
"""
missing_init_files = []
if cgi_path.endswith('.py'):
module_base = os.path.dirname(cgi_path)
else:
module_base = cgi_path
depth_count = module_fullname.count('.')
if cgi_path.endswith('__init__.py') or not cgi_path.endswith('.py'):
depth_count += 1
for index in xrange(depth_count):
current_init_file = os.path.abspath(
os.path.join(module_base, '__init__.py'))
if not isfile(current_init_file):
missing_init_files.append(current_init_file)
module_base = os.path.abspath(os.path.join(module_base, os.pardir))
return missing_init_files
def LoadTargetModule(handler_path,
cgi_path,
import_hook,
module_dict=sys.modules):
"""Loads a target CGI script by importing it as a Python module.
If the module for the target CGI script has already been loaded before,
the new module will be loaded in its place using the same module object,
possibly overwriting existing module attributes.
Args:
handler_path: CGI path stored in the application configuration (as a path
like 'foo/bar/baz.py'). Should not have $PYTHON_LIB references.
cgi_path: Absolute path to the CGI script file on disk.
import_hook: Instance of HardenedModulesHook to use for module loading.
module_dict: Used for dependency injection.
Returns:
Tuple (module_fullname, script_module, module_code) where:
module_fullname: Fully qualified module name used to import the script.
script_module: The ModuleType object corresponding to the module_fullname.
If the module has not already been loaded, this will be an empty
shell of a module.
module_code: Code object (returned by compile built-in) corresponding
to the cgi_path to run. If the script_module was previously loaded
and has a main() function that can be reused, this will be None.
Raises:
CouldNotFindModuleError if the given handler_path is a file and doesn't have
the expected extension.
"""
CheckScriptExists(cgi_path, handler_path)
module_fullname = GetScriptModuleName(handler_path)
script_module = module_dict.get(module_fullname)
module_code = None
if script_module is not None and ModuleHasValidMainFunction(script_module):
logging.debug('Reusing main() function of module "%s"', module_fullname)
else:
if script_module is None:
script_module = imp.new_module(module_fullname)
script_module.__loader__ = import_hook
try:
module_code = import_hook.get_code(module_fullname)
full_path, search_path, submodule = (
import_hook.GetModuleInfo(module_fullname))
script_module.__file__ = full_path
if search_path is not None:
script_module.__path__ = search_path
except UnicodeDecodeError, e:
error = ('%s please see http://www.python.org/peps'
'/pep-0263.html for details (%s)' % (e, handler_path))
raise SyntaxError(error)
except:
exc_type, exc_value, exc_tb = sys.exc_info()
import_error_message = str(exc_type)
if exc_value:
import_error_message += ': ' + str(exc_value)
logging.exception('Encountered error loading module "%s": %s',
module_fullname, import_error_message)
missing_inits = FindMissingInitFiles(cgi_path, module_fullname)
if missing_inits:
logging.warning('Missing package initialization files: %s',
', '.join(missing_inits))
else:
logging.error('Parent package initialization files are present, '
'but must be broken')
independent_load_successful = True
if not os.path.isfile(cgi_path):
independent_load_successful = False
else:
try:
source_file = open(cgi_path)
try:
module_code = compile(source_file.read(), cgi_path, 'exec')
script_module.__file__ = cgi_path
finally:
source_file.close()
except OSError:
independent_load_successful = False
if not independent_load_successful:
raise exc_type, exc_value, exc_tb
module_dict[module_fullname] = script_module
return module_fullname, script_module, module_code
def _WriteErrorToOutput(status, message, outfile):
"""Writes an error status response to the response outfile.
Args:
status: The status to return, e.g. '411 Length Required'.
message: A human-readable error message.
outfile: Response outfile.
"""
logging.error(message)
outfile.write('Status: %s\r\n\r\n%s' % (status, message))
def GetRequestSize(request, env_dict, outfile):
"""Gets the size (content length) of the given request.
On error, this method writes an error message to the response outfile and
returns None. Errors include the request missing a required header and the
request being too large.
Args:
request: AppServerRequest instance.
env_dict: Environment dictionary. May be None.
outfile: Response outfile.
Returns:
The calculated request size, or None on error.
"""
if 'content-length' in request.headers:
request_size = int(request.headers['content-length'])
elif env_dict and env_dict.get('REQUEST_METHOD', '') == 'POST':
_WriteErrorToOutput('%d Length required' % httplib.LENGTH_REQUIRED,
'POST requests require a Content-length header.',
outfile)
return None
else:
request_size = 0
if request_size <= MAX_REQUEST_SIZE:
return request_size
else:
msg = ('HTTP request was too large: %d. The limit is: %d.'
% (request_size, MAX_REQUEST_SIZE))
_WriteErrorToOutput(
'%d Request entity too large' % httplib.REQUEST_ENTITY_TOO_LARGE,
msg, outfile)
return None
def ExecuteOrImportScript(config, handler_path, cgi_path, import_hook):
"""Executes a CGI script by importing it as a new module.
This possibly reuses the module's main() function if it is defined and
takes no arguments.
Basic technique lifted from PEP 338 and Python 2.5's runpy module. See:
http://www.python.org/dev/peps/pep-0338/
See the section entitled "Import Statements and the Main Module" to understand
why a module named '__main__' cannot do relative imports. To get around this,
the requested module's path could be added to sys.path on each request.
Args:
config: AppInfoExternal instance representing the parsed app.yaml file.
handler_path: CGI path stored in the application configuration (as a path
like 'foo/bar/baz.py'). Should not have $PYTHON_LIB references.
cgi_path: Absolute path to the CGI script file on disk.
import_hook: Instance of HardenedModulesHook to use for module loading.
Returns:
True if the response code had an error status (e.g., 404), or False if it
did not.
Raises:
Any kind of exception that could have been raised when loading the target
module, running a target script, or executing the application code itself.
"""
module_fullname, script_module, module_code = LoadTargetModule(
handler_path, cgi_path, import_hook)
script_module.__name__ = '__main__'
sys.modules['__main__'] = script_module
try:
import pdb
MonkeyPatchPdb(pdb)
if module_code:
exec module_code in script_module.__dict__
else:
script_module.main()
sys.stdout.flush()
sys.stdout.seek(0)
try:
headers = mimetools.Message(sys.stdout)
finally:
sys.stdout.seek(0, 2)
status_header = headers.get('status')
error_response = False
if status_header:
try:
status_code = int(status_header.split(' ', 1)[0])
error_response = status_code >= 400
except ValueError:
error_response = True
if not error_response:
try:
parent_package = import_hook.GetParentPackage(module_fullname)
except Exception:
parent_package = None
if parent_package is not None:
submodule = GetSubmoduleName(module_fullname)
setattr(parent_package, submodule, script_module)
return error_response
finally:
script_module.__name__ = module_fullname
def ExecutePy27Handler(config, handler_path, cgi_path, import_hook):
"""Equivalent to ExecuteOrImportScript for Python 2.7 runtime.
This dispatches to google.appengine.runtime.runtime,
which in turn will dispatch to either the cgi or the wsgi module in
the same package, depending on the form of handler_path.
Args:
config: AppInfoExternal instance representing the parsed app.yaml file.
handler_path: handler ("script") from the application configuration;
either a script reference like foo/bar.py, or an object reference
like foo.bar.app.
cgi_path: Absolute path to the CGI script file on disk;
typically the app dir joined with handler_path.
import_hook: Instance of HardenedModulesHook to use for module loading.
Returns:
True if the response code had an error status (e.g., 404), or False if it
did not.
Raises:
Any kind of exception that could have been raised when loading the target
module, running a target script, or executing the application code itself.
"""
if request_environment is None or runtime is None:
raise RuntimeError('Python 2.5 is too old to emulate the Python 2.7 runtime.'
' Please use Python 2.6 or Python 2.7.')
import os
save_environ = os.environ
save_getenv = os.getenv
env = dict(save_environ)
if env.get('_AH_THREADSAFE'):
env['wsgi.multithread'] = True
url = 'http://%s%s' % (env.get('HTTP_HOST', 'localhost:8080'),
env.get('_AH_ENCODED_SCRIPT_NAME', '/'))
qs = env.get('QUERY_STRING')
if qs:
url += '?' + qs
post_data = sys.stdin.read()
if 'CONTENT_TYPE' in env:
if post_data:
env['HTTP_CONTENT_TYPE'] = env['CONTENT_TYPE']
del env['CONTENT_TYPE']
if 'CONTENT_LENGTH' in env:
if env['CONTENT_LENGTH']:
env['HTTP_CONTENT_LENGTH'] = env['CONTENT_LENGTH']
del env['CONTENT_LENGTH']
if cgi_path.endswith(handler_path):
application_root = cgi_path[:-len(handler_path)]
if application_root.endswith('/') and application_root != '/':
application_root = application_root[:-1]
else:
application_root = ''
try:
import pdb
MonkeyPatchPdb(pdb)
import _threading_local
MonkeyPatchThreadingLocal(_threading_local)
os.environ = request_environment.RequestLocalEnviron(
request_environment.current_request)
os.getenv = os.environ.get
response = runtime.HandleRequest(env, handler_path, url,
post_data, application_root, SDK_ROOT,
import_hook)
finally:
os.environ = save_environ
os.getenv = save_getenv
error = response.get('error')
if error:
status = 500
else:
status = 200
status = response.get('response_code', status)
sys.stdout.write('Status: %s\r\n' % status)
for key, value in response.get('headers', ()):
key = '-'.join(key.split())
value = value.replace('\r', ' ').replace('\n', ' ')
sys.stdout.write('%s: %s\r\n' % (key, value))
sys.stdout.write('\r\n')
body = response.get('body')
if body:
sys.stdout.write(body)
logs = response.get('logs')
if logs:
for timestamp_usec, severity, message in logs:
logging.log(severity*10 + 10, '@%s: %s',
time.ctime(timestamp_usec*1e-6), message)
return error
class LoggingStream(object):
"""A stream that writes logs at level error."""
def write(self, message):
logging.getLogger()._log(logging.ERROR, message, ())
def writelines(self, lines):
for line in lines:
logging.getLogger()._log(logging.ERROR, line, ())
def __getattr__(self, key):
return getattr(sys.__stderr__, key)
def ExecuteCGI(config,
root_path,
handler_path,
cgi_path,
env,
infile,
outfile,
module_dict,
exec_script=ExecuteOrImportScript,
exec_py27_handler=ExecutePy27Handler):
"""Executes Python file in this process as if it were a CGI.
Does not return an HTTP response line. CGIs should output headers followed by
the body content.
The modules in sys.modules should be the same before and after the CGI is
executed, with the specific exception of encodings-related modules, which
cannot be reloaded and thus must always stay in sys.modules.
Args:
config: AppInfoExternal instance representing the parsed app.yaml file.
root_path: Path to the root of the application.
handler_path: CGI path stored in the application configuration (as a path
like 'foo/bar/baz.py'). May contain $PYTHON_LIB references.
cgi_path: Absolute path to the CGI script file on disk.
env: Dictionary of environment variables to use for the execution.
infile: File-like object to read HTTP request input data from.
outfile: File-like object to write HTTP response data to.
module_dict: Dictionary in which application-loaded modules should be
preserved between requests. This removes the need to reload modules that
are reused between requests, significantly increasing load performance.
This dictionary must be separate from the sys.modules dictionary.
exec_script: Used for dependency injection.
exec_py27_handler: Used for dependency injection.
"""
old_module_dict = sys.modules.copy()
old_builtin = __builtin__.__dict__.copy()
old_argv = sys.argv
old_stdin = sys.stdin
old_stdout = sys.stdout
old_stderr = sys.stderr
old_env = os.environ.copy()
old_cwd = os.getcwd()
old_file_type = types.FileType
reset_modules = False
app_log_handler = None
try:
ConnectAndDisconnectChildModules(sys.modules, module_dict)
ClearAllButEncodingsModules(sys.modules)
sys.modules.update(module_dict)
sys.argv = [cgi_path]
sys.stdin = cStringIO.StringIO(infile.getvalue())
sys.stdout = outfile
sys.stderr = LoggingStream()
logservice._global_buffer = logservice.LogsBuffer()
app_log_handler = app_logging.AppLogsHandler()
logging.getLogger().addHandler(app_log_handler)
os.environ.clear()
os.environ.update(env)
cgi_dir = os.path.normpath(os.path.dirname(cgi_path))
root_path = os.path.normpath(os.path.abspath(root_path))
if (cgi_dir.startswith(root_path + os.sep) and
not (config and config.runtime == 'python27')):
os.chdir(cgi_dir)
else:
os.chdir(root_path)
dist.fix_paths(root_path, SDK_ROOT)
hook = HardenedModulesHook(config, sys.modules, root_path)
sys.meta_path = [finder for finder in sys.meta_path
if not isinstance(finder, HardenedModulesHook)]
sys.meta_path.insert(0, hook)
if hasattr(sys, 'path_importer_cache'):
sys.path_importer_cache.clear()
__builtin__.file = FakeFile
__builtin__.open = FakeFile
types.FileType = FakeFile
if not (config and config.runtime == 'python27'):
__builtin__.buffer = NotImplementedFakeClass
sys.modules['__builtin__'] = __builtin__
logging.debug('Executing CGI with env:\n%s', repr(env))
try:
if handler_path and config and config.runtime == 'python27':
reset_modules = exec_py27_handler(config, handler_path, cgi_path, hook)
else:
reset_modules = exec_script(config, handler_path, cgi_path, hook)
except SystemExit, e:
logging.debug('CGI exited with status: %s', e)
except:
reset_modules = True
raise
finally:
sys.path_importer_cache.clear()
_ClearTemplateCache(sys.modules)
module_dict.update(sys.modules)
ConnectAndDisconnectChildModules(sys.modules, old_module_dict)
ClearAllButEncodingsModules(sys.modules)
sys.modules.update(old_module_dict)
__builtin__.__dict__.update(old_builtin)
sys.argv = old_argv
sys.stdin = old_stdin
sys.stdout = old_stdout
sys.stderr = old_stderr
logging.getLogger().removeHandler(app_log_handler)
os.environ.clear()
os.environ.update(old_env)
os.chdir(old_cwd)
types.FileType = old_file_type
class CGIDispatcher(URLDispatcher):
"""Dispatcher that executes Python CGI scripts."""
def __init__(self,
config,
module_dict,
root_path,
path_adjuster,
setup_env=SetupEnvironment,
exec_cgi=ExecuteCGI):
"""Initializer.
Args:
config: AppInfoExternal instance representing the parsed app.yaml file.
module_dict: Dictionary in which application-loaded modules should be
preserved between requests. This dictionary must be separate from the
sys.modules dictionary.
path_adjuster: Instance of PathAdjuster to use for finding absolute
paths of CGI files on disk.
setup_env, exec_cgi: Used for dependency injection.
"""
self._config = config
self._module_dict = module_dict
self._root_path = root_path
self._path_adjuster = path_adjuster
self._setup_env = setup_env
self._exec_cgi = exec_cgi
def Dispatch(self,
request,
outfile,
base_env_dict=None):
"""Dispatches the Python CGI."""
request_size = GetRequestSize(request, base_env_dict, outfile)
if request_size is None:
return
memory_file = cStringIO.StringIO()
CopyStreamPart(request.infile, memory_file, request_size)
memory_file.seek(0)
before_level = logging.root.level
try:
env = {}
if self._config.env_variables:
env.update(self._config.env_variables)
if base_env_dict:
env.update(base_env_dict)
cgi_path = self._path_adjuster.AdjustPath(request.path)
env.update(self._setup_env(cgi_path,
request.relative_url,
request.headers,
memory_file))
self._exec_cgi(self._config,
self._root_path,
request.path,
cgi_path,
env,
memory_file,
outfile,
self._module_dict)
finally:
logging.root.level = before_level
def __str__(self):
"""Returns a string representation of this dispatcher."""
return 'CGI dispatcher'
class LocalCGIDispatcher(CGIDispatcher):
"""Dispatcher that executes local functions like they're CGIs.
The contents of sys.modules will be preserved for local CGIs running this
dispatcher, but module hardening will still occur for any new imports. Thus,
be sure that any local CGIs have loaded all of their dependent modules
_before_ they are executed.
"""
def __init__(self, config, module_dict, path_adjuster, cgi_func):
"""Initializer.
Args:
config: AppInfoExternal instance representing the parsed app.yaml file.
module_dict: Passed to CGIDispatcher.
path_adjuster: Passed to CGIDispatcher.
cgi_func: Callable function taking no parameters that should be
executed in a CGI environment in the current process.
"""
self._cgi_func = cgi_func
def curried_exec_script(*args, **kwargs):
cgi_func()
return False
def curried_exec_cgi(*args, **kwargs):
kwargs['exec_script'] = curried_exec_script
return ExecuteCGI(*args, **kwargs)
CGIDispatcher.__init__(self,
config,
module_dict,
'',
path_adjuster,
exec_cgi=curried_exec_cgi)
def Dispatch(self, *args, **kwargs):
"""Preserves sys.modules for CGIDispatcher.Dispatch."""
self._module_dict.update(sys.modules)
CGIDispatcher.Dispatch(self, *args, **kwargs)
def __str__(self):
"""Returns a string representation of this dispatcher."""
return 'Local CGI dispatcher for %s' % self._cgi_func
class PathAdjuster(object):
"""Adjusts application file paths to paths relative to the application or
external library directories."""
def __init__(self, root_path):
"""Initializer.
Args:
root_path: Path to the root of the application running on the server.
"""
self._root_path = os.path.abspath(root_path)
def AdjustPath(self, path):
"""Adjusts application file paths to relative to the application.
More precisely this method adjusts application file path to paths
relative to the application or external library directories.
Handler paths that start with $PYTHON_LIB will be converted to paths
relative to the google directory.
Args:
path: File path that should be adjusted.
Returns:
The adjusted path.
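Example (illustrative; SDK_ROOT and the application root depend on how
the server was started):
  AdjustPath('$PYTHON_LIB/google/appengine/api')  # joined under SDK_ROOT
  AdjustPath('static/style.css')                  # joined under the app root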
"""
if path.startswith(PYTHON_LIB_VAR):
path = os.path.join(SDK_ROOT, path[len(PYTHON_LIB_VAR) + 1:])
else:
path = os.path.join(self._root_path, path)
return path
class StaticFileConfigMatcher(object):
"""Keeps track of file/directory specific application configuration.
Specifically:
- Computes mime type based on URLMap and file extension.
- Decides on cache expiration time based on URLMap and default expiration.
- Decides what HTTP headers to add to responses.
To determine the mime type, we first see if there is any mime-type property
on each URLMap entry. If none is specified, we use the mimetypes module to
guess the mime type from the file path extension, and fall back to
application/octet-stream if we can't find the mime type.
"""
def __init__(self,
url_map_list,
default_expiration):
"""Initializer.
Args:
url_map_list: List of appinfo.URLMap objects.
If empty or None, then we always use the mime type chosen by the
mimetypes module.
default_expiration: String describing default expiration time for browser
based caching of static files. If set to None this disallows any
browser caching of static content.
"""
if default_expiration is not None:
self._default_expiration = appinfo.ParseExpiration(default_expiration)
else:
self._default_expiration = None
self._patterns = []
for url_map in url_map_list or []:
handler_type = url_map.GetHandlerType()
if handler_type not in (appinfo.STATIC_FILES, appinfo.STATIC_DIR):
continue
path_re = _StaticFilePathRe(url_map)
try:
self._patterns.append((re.compile(path_re), url_map))
except re.error, e:
raise InvalidAppConfigError('regex %s does not compile: %s' %
(path_re, e))
_DUMMY_URLMAP = appinfo.URLMap()
def _FirstMatch(self, path):
"""Returns the first appinfo.URLMap that matches path, or a dummy instance.
A dummy instance is returned when no appinfo.URLMap matches path (see the
URLMap.static_file_path_re property). When a dummy instance is returned, it
is always the same one. The dummy instance is constructed simply by doing
the following:
appinfo.URLMap()
Args:
path: A string containing the file's path relative to the app.
Returns:
The first appinfo.URLMap (in the list that was passed to the constructor)
that matches path. Matching depends on whether URLMap is a static_dir
handler or a static_files handler. In either case, matching is done
according to the URLMap.static_file_path_re property.
"""
for path_re, url_map in self._patterns:
if path_re.match(path):
return url_map
return StaticFileConfigMatcher._DUMMY_URLMAP
def IsStaticFile(self, path):
"""Tests if the given path points to a "static" file.
Args:
path: A string containing the file's path relative to the app.
Returns:
Boolean, True if the file was configured to be static.
"""
return self._FirstMatch(path) is not self._DUMMY_URLMAP
def GetMimeType(self, path):
"""Returns the mime type that we should use when serving the specified file.
Args:
path: A string containing the file's path relative to the app.
Returns:
String containing the mime type to use. Will be 'application/octet-stream'
if we have no idea what it should be.
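Example (illustrative):
  GetMimeType('images/logo.png')  # 'image/png' from the mimetypes module,
                                  # unless a matching handler sets mime_type.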
"""
url_map = self._FirstMatch(path)
if url_map.mime_type is not None:
return url_map.mime_type
unused_filename, extension = os.path.splitext(path)
return mimetypes.types_map.get(extension, 'application/octet-stream')
def GetExpiration(self, path):
"""Returns the cache expiration duration to be users for the given file.
Args:
path: A string containing the file's path relative to the app.
Returns:
Integer number of seconds to be used for browser cache expiration time.
"""
if self._default_expiration is None:
return 0
url_map = self._FirstMatch(path)
if url_map.expiration is None:
return self._default_expiration
return appinfo.ParseExpiration(url_map.expiration)
def GetHttpHeaders(self, path):
"""Returns http_headers of the matching appinfo.URLMap, or an empty one.
Args:
path: A string containing the file's path relative to the app.
Returns:
The user-specified HTTP headers to be used in the static content response.
These
headers are contained in an appinfo.HttpHeadersDict, which maps header
names to values (both strings).
"""
return self._FirstMatch(path).http_headers or appinfo.HttpHeadersDict()
def ReadDataFile(data_path, openfile=file):
"""Reads a file on disk, returning a corresponding HTTP status and data.
Args:
data_path: Path to the file on disk to read.
openfile: Used for dependency injection.
Returns:
Tuple (status, data) where status is an HTTP response code, and data is
the data read; will be an empty string if an error occurred or the
file was empty.
"""
status = httplib.INTERNAL_SERVER_ERROR
data = ""
try:
data_file = openfile(data_path, 'rb')
try:
data = data_file.read()
finally:
data_file.close()
status = httplib.OK
except (OSError, IOError), e:
logging.error('Error encountered reading file "%s":\n%s', data_path, e)
if e.errno in FILE_MISSING_EXCEPTIONS:
status = httplib.NOT_FOUND
else:
status = httplib.FORBIDDEN
return status, data
class FileDispatcher(URLDispatcher):
"""Dispatcher that reads data files from disk."""
def __init__(self,
config,
path_adjuster,
static_file_config_matcher,
read_data_file=ReadDataFile):
"""Initializer.
Args:
config: AppInfoExternal instance representing the parsed app.yaml file.
path_adjuster: Instance of PathAdjuster to use for finding absolute
paths of data files on disk.
static_file_config_matcher: StaticFileConfigMatcher object.
read_data_file: Used for dependency injection.
"""
self._config = config
self._path_adjuster = path_adjuster
self._static_file_config_matcher = static_file_config_matcher
self._read_data_file = read_data_file
def Dispatch(self, request, outfile, base_env_dict=None):
"""Reads the file and returns the response status and data."""
full_path = self._path_adjuster.AdjustPath(request.path)
status, data = self._read_data_file(full_path)
content_type = self._static_file_config_matcher.GetMimeType(request.path)
static_file = self._static_file_config_matcher.IsStaticFile(request.path)
expiration = self._static_file_config_matcher.GetExpiration(request.path)
current_etag = self.CreateEtag(data)
if_match_etag = request.headers.get('if-match', None)
if_none_match_etag = request.headers.get('if-none-match', '').split(',')
http_headers = self._static_file_config_matcher.GetHttpHeaders(request.path)
def WriteHeader(name, value):
if http_headers.Get(name) is None:
outfile.write('%s: %s\r\n' % (name, value))
if if_match_etag and not self._CheckETagMatches(if_match_etag.split(','),
current_etag,
False):
outfile.write('Status: %s\r\n' % httplib.PRECONDITION_FAILED)
WriteHeader('ETag', current_etag)
outfile.write('\r\n')
elif self._CheckETagMatches(if_none_match_etag, current_etag, True):
outfile.write('Status: %s\r\n' % httplib.NOT_MODIFIED)
WriteHeader('ETag', current_etag)
outfile.write('\r\n')
else:
outfile.write('Status: %d\r\n' % status)
WriteHeader('Content-Type', content_type)
if expiration:
fmt = email.Utils.formatdate
WriteHeader('Expires', fmt(time.time() + expiration, usegmt=True))
WriteHeader('Cache-Control', 'public, max-age=%i' % expiration)
if static_file:
WriteHeader('ETag', '"%s"' % current_etag)
for header in http_headers.iteritems():
outfile.write('%s: %s\r\n' % header)
outfile.write('\r\n')
outfile.write(data)
def __str__(self):
"""Returns a string representation of this dispatcher."""
return 'File dispatcher'
@staticmethod
def CreateEtag(data):
"""Returns string of hash of file content, unique per URL."""
data_crc = zlib.crc32(data)
return base64.b64encode(str(data_crc))
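# Illustrative value for CreateEtag above: zlib.crc32('hello') is
# 907060870, so the etag for that payload is base64.b64encode('907060870').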
@staticmethod
def _CheckETagMatches(supplied_etags, current_etag, allow_weak_match):
"""Checks if there is an entity tag match.
Args:
supplied_etags: list of input etags
current_etag: the calculated etag for the entity
allow_weak_match: Allow for weak tag comparison.
Returns:
True if there is a match, False otherwise.
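Example (illustrative):
  _CheckETagMatches(['"abc"'], 'abc', False)   # True (strong match)
  _CheckETagMatches(['W/"abc"'], 'abc', True)  # True (weak match allowed)
  _CheckETagMatches(['*'], 'anything', False)  # True (wildcard)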
"""
for tag in supplied_etags:
if allow_weak_match and tag.startswith('W/'):
tag = tag[2:]
tag_data = tag.strip('"')
if tag_data == '*' or tag_data == current_etag:
return True
return False
_IGNORE_RESPONSE_HEADERS = frozenset([
'connection',
'content-encoding',
'date',
'keep-alive',
'proxy-authenticate',
'server',
'trailer',
'transfer-encoding',
'upgrade',
blobstore.BLOB_KEY_HEADER
])
class AppServerResponse(object):
"""Development appserver response object.
Object used to hold the full appserver response. Used as a container
that is passed through the request rewrite chain and ultimately sent
to the web client.
Attributes:
status_code: Integer HTTP response status (e.g., 200, 302, 404, 500)
status_message: String containing an informational message about the
response code, possibly derived from the 'status' header, if supplied.
headers: mimetools.Message containing the HTTP headers of the response.
body: File-like object containing the body of the response.
large_response: Indicates that the response is permitted to be larger than
MAX_RUNTIME_RESPONSE_SIZE.
"""
__slots__ = ['status_code',
'status_message',
'headers',
'body',
'large_response']
def __init__(self, response_file=None, **kwds):
"""Initializer.
Args:
response_file: A file-like object that contains the full response
generated by the user application request handler. If present
the headers and body are set from this value, although the values
may be further overridden by the keyword parameters.
kwds: All keywords are mapped to attributes of AppServerResponse.
"""
self.status_code = 200
self.status_message = 'Good to go'
self.large_response = False
if response_file:
self.SetResponse(response_file)
else:
self.headers = mimetools.Message(cStringIO.StringIO())
self.body = None
for name, value in kwds.iteritems():
setattr(self, name, value)
def SetResponse(self, response_file):
"""Sets headers and body from the response file.
Args:
response_file: File like object to set body and headers from.
"""
self.headers = mimetools.Message(response_file)
self.body = response_file
@property
def header_data(self):
"""Get header data as a string.
Returns:
String representation of header with line breaks cleaned up.
"""
header_list = []
for header in self.headers.headers:
header = header.rstrip('\n\r')
header_list.append(header)
if not self.headers.getheader('Content-Type'):
header_list.append('Content-Type: text/html')
return '\r\n'.join(header_list) + '\r\n'
def IgnoreHeadersRewriter(response):
"""Ignore specific response headers.
Certain response headers cannot be modified by an Application. For a
complete list of these headers please see:
https://developers.google.com/appengine/docs/python/tools/webapp/responseclass#Disallowed_HTTP_Response_Headers
This rewriter simply removes those headers.
"""
for h in _IGNORE_RESPONSE_HEADERS:
if h in response.headers:
del response.headers[h]
def ValidHeadersRewriter(response):
"""Remove invalid response headers.
Response headers must be printable ASCII characters. This is enforced in
production by http_proto.cc IsValidHeader.
This rewriter removes headers that contain non-ASCII characters.
"""
for (key, value) in response.headers.items():
try:
key.decode('ascii')
value.decode('ascii')
except UnicodeDecodeError:
del response.headers[key]
def ParseStatusRewriter(response):
"""Parse status header, if it exists.
Handles the server-side 'status' header, which instructs the server to change
the HTTP response code accordingly. Handles the 'location' header, which
issues an HTTP 302 redirect to the client. Also corrects the 'content-length'
header to reflect actual content length in case extra information has been
appended to the response body.
If the 'status' header supplied by the client is invalid, this method will
set the response to a 500 with an error message as content.
"""
location_value = response.headers.getheader('location')
status_value = response.headers.getheader('status')
if status_value:
response_status = status_value
del response.headers['status']
elif location_value:
response_status = '%d Redirecting' % httplib.FOUND
else:
return response
status_parts = response_status.split(' ', 1)
response.status_code, response.status_message = (status_parts + [''])[:2]
try:
response.status_code = int(response.status_code)
except ValueError:
response.status_code = 500
response.body = cStringIO.StringIO(
'Error: Invalid "status" header value returned.')
def GetAllHeaders(message, name):
"""Get all headers of a given name in a message.
Args:
message: A mimetools.Message object.
name: The name of the header.
Yields:
A sequence of values of all headers with the given name.
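Example (illustrative): for a message carrying two Cache-Control headers,
list(GetAllHeaders(message, 'Cache-Control')) yields both values, e.g.
['no-cache', 'private'].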
"""
for header_line in message.getallmatchingheaders(name):
yield header_line.split(':', 1)[1].strip()
def CacheRewriter(response):
"""Update the cache header."""
if response.status_code == httplib.NOT_MODIFIED:
return
if not 'Cache-Control' in response.headers:
response.headers['Cache-Control'] = 'no-cache'
if not 'Expires' in response.headers:
response.headers['Expires'] = 'Fri, 01 Jan 1990 00:00:00 GMT'
if 'Set-Cookie' in response.headers:
current_date = time.time()
expires = response.headers.get('Expires')
reset_expires = True
if expires:
expires_time = email.Utils.parsedate(expires)
if expires_time:
reset_expires = calendar.timegm(expires_time) >= current_date
if reset_expires:
response.headers['Expires'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
time.gmtime(current_date))
cache_directives = []
for header in GetAllHeaders(response.headers, 'Cache-Control'):
cache_directives.extend(v.strip() for v in header.split(','))
cache_directives = [d for d in cache_directives if d != 'public']
if not NON_PUBLIC_CACHE_CONTROLS.intersection(cache_directives):
cache_directives.append('private')
response.headers['Cache-Control'] = ', '.join(cache_directives)
def _RemainingDataSize(input_buffer):
"""Computes how much data is remaining in the buffer.
It leaves the buffer in its initial state.
Args:
input_buffer: a file-like object with seek and tell methods.
Returns:
integer representing how much data is remaining in the buffer.
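Example (illustrative): for a cStringIO.StringIO('abcdef') buffer whose
read position is 2, this returns 4, and the position is restored to 2
afterwards.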
"""
current_position = input_buffer.tell()
input_buffer.seek(0, 2)
remaining_data_size = input_buffer.tell() - current_position
input_buffer.seek(current_position)
return remaining_data_size
def ContentLengthRewriter(response, request_headers, env_dict):
"""Rewrite the Content-Length header.
Even though Content-Length is not a user modifiable header, App Engine
sends a correct Content-Length to the user based on the actual response.
"""
if env_dict and env_dict.get('REQUEST_METHOD', '') == 'HEAD':
return
if response.status_code != httplib.NOT_MODIFIED:
response.headers['Content-Length'] = str(_RemainingDataSize(response.body))
elif 'Content-Length' in response.headers:
del response.headers['Content-Length']
def CreateResponseRewritersChain():
"""Create the default response rewriter chain.
A response rewriter is a function that gets a final chance to change part
of the dev_appserver's response. A rewriter is not like a dispatcher in
that it is called after every request has been handled by the dispatchers,
regardless of which dispatcher was used.
The order in which rewriters are registered will be the order in which they
are used to rewrite the response. Modifications from earlier rewriters
are used as input to later rewriters.
A response rewriter can rewrite the response in any way; the function can
return modified values or the original values it was passed.
A rewriter function has the following parameters and return values:
Args:
status_code: Status code of response from dev_appserver or previous
rewriter.
status_message: Text corresponding to status code.
headers: mimetools.Message instance with parsed headers. NOTE: These
headers can contain its own 'status' field, but the default
dev_appserver implementation will remove this. Future rewriters
should avoid re-introducing the status field and return new codes
instead.
body: File object containing the body of the response. The position of
this file may not be at the start of the file. Any content before the
file's position is considered not to be part of the final body.
Returns:
An AppServerResponse instance.
Returns:
List of response rewriters.
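Example of a one-argument rewriter (an illustrative sketch; it is not
part of the default chain and the header name is made up):
  def ExampleHeaderRewriter(response):
    response.headers['X-Example'] = 'rewritten'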
"""
rewriters = [ParseStatusRewriter,
dev_appserver_blobstore.DownloadRewriter,
IgnoreHeadersRewriter,
ValidHeadersRewriter,
CacheRewriter,
ContentLengthRewriter,
]
return rewriters
def RewriteResponse(response_file,
response_rewriters=None,
request_headers=None,
env_dict=None):
"""Allows final rewrite of dev_appserver response.
This function receives the unparsed HTTP response from the application
or internal handler, parses out the basic structure and feeds that structure
in to a chain of response rewriters.
It also makes sure the final HTTP headers are properly terminated.
For more about response rewriters, please see documentation for
CreateResponseRewritersChain.
Args:
response_file: File-like object containing the full HTTP response including
the response code, all headers, and the request body.
response_rewriters: A list of response rewriters. If none is provided it
will create a new chain using CreateResponseRewritersChain.
request_headers: Original request headers.
env_dict: Environment dictionary.
Returns:
An AppServerResponse instance configured with the rewritten response.
"""
if response_rewriters is None:
response_rewriters = CreateResponseRewritersChain()
response = AppServerResponse(response_file)
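# Rewriters may declare one, two, or three positional parameters; dispatch
# on each function's argcount so older single-argument rewriters keep
# working alongside rewriters that also inspect the request headers and
# environment.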
for response_rewriter in response_rewriters:
if response_rewriter.func_code.co_argcount == 1:
response_rewriter(response)
elif response_rewriter.func_code.co_argcount == 2:
response_rewriter(response, request_headers)
else:
response_rewriter(response, request_headers, env_dict)
return response
class ModuleManager(object):
"""Manages loaded modules in the runtime.
Responsible for monitoring and reporting about file modification times.
Modules can be loaded from source or precompiled byte-code files. When a
file has source code, the ModuleManager monitors the modification time of
the source file even if the module itself is loaded from byte-code.
"""
def __init__(self, modules):
"""Initializer.
Args:
modules: Dictionary containing monitored modules.
"""
self._modules = modules
self._default_modules = self._modules.copy()
self._save_path_hooks = sys.path_hooks[:]
self._modification_times = {}
self._dirty = True
@staticmethod
def GetModuleFile(module, is_file=os.path.isfile):
"""Helper method to try to determine modules source file.
Args:
module: Module object to get file for.
is_file: Function used to determine if a given path is a file.
Returns:
Path of the module's corresponding Python source file if it exists, or
just the module's compiled Python file. If the module has an invalid
__file__ attribute, None will be returned.
"""
module_file = getattr(module, '__file__', None)
if module_file is None:
return None
source_file = module_file[:module_file.rfind('py') + 2]
if is_file(source_file):
return source_file
return module.__file__
def AreModuleFilesModified(self):
"""Determines if any monitored files have been modified.
Returns:
True if one or more files have been modified, False otherwise.
"""
self._dirty = True
for name, (mtime, fname) in self._modification_times.iteritems():
if name not in self._modules:
continue
module = self._modules[name]
try:
if mtime != os.path.getmtime(fname):
self._dirty = True
return True
except OSError, e:
if e.errno in FILE_MISSING_EXCEPTIONS:
self._dirty = True
return True
raise e
return False
def UpdateModuleFileModificationTimes(self):
"""Records the current modification times of all monitored modules."""
if not self._dirty:
return
self._modification_times.clear()
for name, module in self._modules.items():
if not isinstance(module, types.ModuleType):
continue
module_file = self.GetModuleFile(module)
if not module_file:
continue
try:
self._modification_times[name] = (os.path.getmtime(module_file),
module_file)
except OSError, e:
if e.errno not in FILE_MISSING_EXCEPTIONS:
raise e
self._dirty = False
def ResetModules(self):
"""Clear modules so that when request is run they are reloaded."""
lib_config._default_registry.reset()
self._modules.clear()
self._modules.update(self._default_modules)
sys.path_hooks[:] = self._save_path_hooks
sys.meta_path = []
apiproxy_stub_map.apiproxy.GetPreCallHooks().Clear()
apiproxy_stub_map.apiproxy.GetPostCallHooks().Clear()
def GetVersionObject(isfile=os.path.isfile, open_fn=open):
"""Gets the version of the SDK by parsing the VERSION file.
Args:
isfile: used for testing.
open_fn: Used for testing.
Returns:
A Yaml object or None if the VERSION file does not exist.
"""
version_filename = os.path.join(os.path.dirname(google.appengine.__file__),
VERSION_FILE)
if not isfile(version_filename):
logging.error('Could not find version file at %s', version_filename)
return None
version_fh = open_fn(version_filename, 'r')
try:
version = yaml.safe_load(version_fh)
finally:
version_fh.close()
return version
def _ClearTemplateCache(module_dict=sys.modules):
"""Clear template cache in webapp.template module.
Attempts to load the template module. Ignores failure. If the module
loads, the template cache is cleared.
Args:
module_dict: Used for dependency injection.
"""
template_module = module_dict.get('google.appengine.ext.webapp.template')
if template_module is not None:
template_module.template_cache.clear()
def CreateRequestHandler(root_path,
login_url,
static_caching=True,
default_partition=None,
interactive_console=True):
"""Creates a new BaseHTTPRequestHandler sub-class.
This class will be used with the Python BaseHTTPServer module's HTTP server.
Python's built-in HTTP server does not support passing context information
along to instances of its request handlers. This function gets around that
by creating a sub-class of the handler in a closure that has access to
this context information.
Args:
root_path: Path to the root of the application running on the server.
login_url: Relative URL which should be used for handling user logins.
static_caching: True if browser caching of static files should be allowed.
default_partition: Default partition to use in the application id.
interactive_console: Whether to add the interactive console.
Returns:
Sub-class of BaseHTTPRequestHandler.
"""
application_module_dict = SetupSharedModules(sys.modules)
application_config_cache = AppConfigCache()
class DevAppServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Dispatches URLs using patterns from a URLMatcher.
The URLMatcher is created by loading an application's configuration file.
Executes CGI scripts in the local process so the scripts can use mock
versions of APIs.
HTTP requests that correctly specify a user info cookie
(dev_appserver_login.COOKIE_NAME) will have the 'USER_EMAIL' environment
variable set accordingly. If the user is also an admin, the
'USER_IS_ADMIN' variable will exist and be set to '1'. If the user is not
logged in, 'USER_EMAIL' will be set to the empty string.
On each request, raises an InvalidAppConfigError exception if the
application configuration file in the directory specified by the root_path
argument is invalid.
"""
server_version = 'Development/1.0'
module_dict = application_module_dict
module_manager = ModuleManager(application_module_dict)
config_cache = application_config_cache
rewriter_chain = CreateResponseRewritersChain()
channel_poll_path_re = re.compile(
dev_appserver_channel.CHANNEL_POLL_PATTERN)
def __init__(self, *args, **kwargs):
"""Initializer.
Args:
args: Positional arguments passed to the superclass constructor.
kwargs: Keyword arguments passed to the superclass constructor.
"""
self._log_record_writer = apiproxy_stub_map.apiproxy.GetStub('logservice')
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def version_string(self):
"""Returns server's version string used for Server HTTP header."""
return self.server_version
def do_GET(self):
"""Handle GET requests."""
if self._HasNoBody('GET'):
self._HandleRequest()
def do_POST(self):
"""Handles POST requests."""
self._HandleRequest()
def do_PUT(self):
"""Handle PUT requests."""
self._HandleRequest()
def do_HEAD(self):
"""Handle HEAD requests."""
if self._HasNoBody('HEAD'):
self._HandleRequest()
def do_OPTIONS(self):
"""Handles OPTIONS requests."""
self._HandleRequest()
def do_DELETE(self):
"""Handle DELETE requests."""
self._HandleRequest()
def do_TRACE(self):
"""Handles TRACE requests."""
if self._HasNoBody('TRACE'):
self._HandleRequest()
def _HasNoBody(self, method):
"""Check for request body in HTTP methods where no body is permitted.
If a request body is present a 400 (Invalid request) response is sent.
Args:
method: The request method.
Returns:
True if no request body is present, False otherwise.
"""
content_length = int(self.headers.get('content-length', 0))
if content_length:
body = self.rfile.read(content_length)
logging.warning('Request body in %s is not permitted: %s', method, body)
self.send_response(httplib.BAD_REQUEST)
return False
return True
def _Dispatch(self, dispatcher, socket_infile, outfile, env_dict):
"""Copy request data from socket and dispatch.
Args:
dispatcher: Dispatcher to handle request (MatcherDispatcher).
socket_infile: Original request file stream.
outfile: Output file to write response to.
env_dict: Environment dictionary.
"""
request_descriptor, request_file_name = tempfile.mkstemp('.tmp',
'request.')
try:
request_file = open(request_file_name, 'wb')
try:
CopyStreamPart(self.rfile,
request_file,
int(self.headers.get('content-length', 0)))
finally:
request_file.close()
request_file = open(request_file_name, 'rb')
try:
app_server_request = AppServerRequest(self.path,
None,
self.headers,
request_file)
dispatcher.Dispatch(app_server_request,
outfile,
base_env_dict=env_dict)
finally:
request_file.close()
finally:
try:
os.close(request_descriptor)
try:
os.remove(request_file_name)
except OSError, err:
if getattr(err, 'winerror', 0) == os_compat.ERROR_SHARING_VIOLATION:
logging.warning('Failed removing %s', request_file_name)
else:
raise
except OSError, err:
if err.errno != errno.ENOENT:
raise
def _HandleRequest(self):
"""Handles any type of request and prints exceptions if they occur."""
host_name = self.headers.get('host') or self.server.server_name
server_name = host_name.split(':', 1)[0]
env_dict = {
'REQUEST_METHOD': self.command,
'REMOTE_ADDR': self.client_address[0],
'SERVER_SOFTWARE': self.server_version,
'SERVER_NAME': server_name,
'SERVER_PROTOCOL': self.protocol_version,
'SERVER_PORT': str(self.server.server_port),
}
full_url = GetFullURL(server_name, self.server.server_port, self.path)
if len(full_url) > MAX_URL_LENGTH:
msg = 'Requested URI too long: %s' % full_url
logging.error(msg)
self.send_response(httplib.REQUEST_URI_TOO_LONG, msg)
return
tbhandler = cgitb.Hook(file=self.wfile).handle
try:
config, explicit_matcher, from_cache = LoadAppConfig(
root_path, self.module_dict, cache=self.config_cache,
static_caching=static_caching, default_partition=default_partition)
if not from_cache:
self.module_manager.ResetModules()
implicit_matcher = CreateImplicitMatcher(config,
self.module_dict,
root_path,
login_url)
if self.path.startswith('/_ah/admin'):
if any((handler.url == '/_ah/datastore_admin.*'
for handler in config.handlers)):
self.headers['X-AppEngine-Datastore-Admin-Enabled'] = 'True'
self.headers['X-AppEngine-Interactive-Console-Enabled'] = str(
interactive_console)
if config.api_version != API_VERSION:
logging.error(
"API versions cannot be switched dynamically: %r != %r",
config.api_version, API_VERSION)
sys.exit(1)
(exclude, service_match) = ReservedPathFilter(
config.inbound_services).ExcludePath(self.path)
if exclude:
logging.warning(
'Request to %s excluded because %s is not enabled '
'in inbound_services in app.yaml' % (self.path, service_match))
self.send_response(httplib.NOT_FOUND)
return
if config.runtime == 'go':
from google.appengine.ext import go
go.APP_CONFIG = config
version = GetVersionObject()
env_dict['SDK_VERSION'] = version['release']
env_dict['CURRENT_VERSION_ID'] = config.version + ".1"
env_dict['APPLICATION_ID'] = config.application
env_dict['DEFAULT_VERSION_HOSTNAME'] = self.server.frontend_hostport
env_dict['APPENGINE_RUNTIME'] = config.runtime
if config.runtime == 'python27' and config.threadsafe:
env_dict['_AH_THREADSAFE'] = '1'
global _request_time
global _request_id
_request_time = time.time()
_request_id += 1
request_id_hash = _generate_request_id_hash()
env_dict['REQUEST_ID_HASH'] = request_id_hash
os.environ['REQUEST_ID_HASH'] = request_id_hash
multiprocess.GlobalProcess().UpdateEnv(env_dict)
cookies = ', '.join(self.headers.getheaders('cookie'))
email_addr, admin, user_id = dev_appserver_login.GetUserInfo(cookies)
self._log_record_writer.start_request(
request_id=None,
user_request_id=_GenerateRequestLogId(),
ip=env_dict['REMOTE_ADDR'],
app_id=env_dict['APPLICATION_ID'],
version_id=env_dict['CURRENT_VERSION_ID'],
nickname=email_addr.split('@')[0],
user_agent=self.headers.get('user-agent', ''),
host=host_name,
method=self.command,
resource=self.path,
http_version=self.request_version)
dispatcher = MatcherDispatcher(config, login_url, self.module_manager,
[implicit_matcher, explicit_matcher])
if multiprocess.GlobalProcess().HandleRequest(self):
return
outfile = cStringIO.StringIO()
try:
self._Dispatch(dispatcher, self.rfile, outfile, env_dict)
finally:
self.module_manager.UpdateModuleFileModificationTimes()
outfile.flush()
outfile.seek(0)
response = RewriteResponse(outfile, self.rewriter_chain, self.headers,
env_dict)
runtime_response_size = _RemainingDataSize(response.body)
if self.command == 'HEAD' and runtime_response_size > 0:
logging.warning('Dropping unexpected body in response to HEAD '
'request')
response.body = cStringIO.StringIO('')
elif (not response.large_response and
runtime_response_size > MAX_RUNTIME_RESPONSE_SIZE):
logging.error('Response too large: %d, max is %d',
runtime_response_size, MAX_RUNTIME_RESPONSE_SIZE)
response.status_code = 500
response.status_message = 'Forbidden'
new_response = ('HTTP response was too large: %d. '
'The limit is: %d.'
% (runtime_response_size,
MAX_RUNTIME_RESPONSE_SIZE))
response.headers['Content-Length'] = str(len(new_response))
response.body = cStringIO.StringIO(new_response)
multiprocess.GlobalProcess().RequestComplete(self, response)
except yaml_errors.EventListenerError, e:
title = 'Fatal error when loading application configuration'
msg = '%s:\n%s' % (title, str(e))
logging.error(msg)
self.send_response(httplib.INTERNAL_SERVER_ERROR, title)
self.wfile.write('Content-Type: text/html\r\n\r\n')
self.wfile.write('<pre>%s</pre>' % cgi.escape(msg))
except KeyboardInterrupt, e:
logging.info('Server interrupted by user, terminating')
self.server.stop_serving_forever()
except CompileError, e:
msg = 'Compile error:\n' + e.text + '\n'
logging.error(msg)
self.send_response(httplib.INTERNAL_SERVER_ERROR, 'Compile error')
self.wfile.write('Content-Type: text/plain; charset=utf-8\r\n\r\n')
self.wfile.write(msg)
except ExecuteError, e:
logging.error(e.text)
self.send_response(httplib.INTERNAL_SERVER_ERROR, 'Execute error')
self.wfile.write('Content-Type: text/html; charset=utf-8\r\n\r\n')
self.wfile.write('<title>App failure</title>\n')
self.wfile.write(e.text + '\n<pre>\n')
for l in e.log:
self.wfile.write(cgi.escape(l))
self.wfile.write('</pre>\n')
except:
msg = 'Exception encountered handling request'
logging.exception(msg)
self.send_response(httplib.INTERNAL_SERVER_ERROR, msg)
tbhandler()
else:
try:
self.send_response(response.status_code, response.status_message)
self.wfile.write(response.header_data)
self.wfile.write('\r\n')
shutil.copyfileobj(response.body, self.wfile, COPY_BLOCK_SIZE)
except (IOError, OSError), e:
if e.errno not in [errno.EPIPE, os_compat.WSAECONNABORTED]:
raise e
except socket.error, e:
if len(e.args) >= 1 and e.args[0] != errno.EPIPE:
raise e
def log_error(self, format, *args):
"""Redirect error messages through the logging module."""
logging.error(format, *args)
def log_message(self, format, *args):
"""Redirect log messages through the logging module."""
if hasattr(self, 'path') and self.channel_poll_path_re.match(self.path):
logging.debug(format, *args)
else:
logging.info(format, *args)
def log_request(self, code='-', size='-'):
"""Indicate that this request has completed."""
BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
if code == '-':
code = 0
if size == '-':
size = 0
logservice.logs_buffer().flush()
self._log_record_writer.end_request(None, code, size)
return DevAppServerRequestHandler
def ReadAppConfig(appinfo_path, parse_app_config=appinfo_includes.Parse):
"""Reads app.yaml file and returns its app id and list of URLMap instances.
Args:
appinfo_path: String containing the path to the app.yaml file.
parse_app_config: Used for dependency injection.
Returns:
AppInfoExternal instance.
Raises:
    InvalidAppConfigError: if the config file could not be read or the
      config does not contain any URLMap instances.
"""
try:
appinfo_file = file(appinfo_path, 'r')
except IOError, unused_e:
raise InvalidAppConfigError(
'Application configuration could not be read from "%s"' % appinfo_path)
try:
return parse_app_config(appinfo_file)
finally:
appinfo_file.close()
def _StaticFilePathRe(url_map):
"""Returns a regular expression string that matches static file paths.
Args:
url_map: A fully initialized static_files or static_dir appinfo.URLMap
instance.
Returns:
The regular expression matches paths, relative to the application's root
directory, of files that this static handler serves. re.compile should
accept the returned string.
Raises:
AssertionError: The url_map argument was not an URLMap for a static handler.
"""
handler_type = url_map.GetHandlerType()
if handler_type == 'static_files':
return url_map.upload + '$'
elif handler_type == 'static_dir':
path = url_map.static_dir.rstrip(os.path.sep)
return path + re.escape(os.path.sep) + r'(.*)'
assert False, 'This property only applies to static handlers.'
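# Illustrative sketch (not part of the SDK): shows the shape of the patterns
# _StaticFilePathRe produces for each handler type. The _FakeURLMap stub is
# hypothetical and only mimics the appinfo.URLMap attributes this helper
# reads.
def _ExampleStaticFilePathRe():
  class _FakeURLMap(object):
    def __init__(self, handler_type, upload=None, static_dir=None):
      self._handler_type = handler_type
      self.upload = upload
      self.static_dir = static_dir
    def GetHandlerType(self):
      return self._handler_type
  files_map = _FakeURLMap('static_files', upload=r'static/.*\.css')
  dir_map = _FakeURLMap('static_dir', static_dir='static')
  return (re.compile(_StaticFilePathRe(files_map)),
          re.compile(_StaticFilePathRe(dir_map)))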
def CreateURLMatcherFromMaps(config,
root_path,
url_map_list,
module_dict,
default_expiration,
create_url_matcher=URLMatcher,
create_cgi_dispatcher=CGIDispatcher,
create_file_dispatcher=FileDispatcher,
create_path_adjuster=PathAdjuster,
normpath=os.path.normpath):
"""Creates a URLMatcher instance from URLMap.
Creates all of the correct URLDispatcher instances to handle the various
content types in the application configuration.
Args:
config: AppInfoExternal instance representing the parsed app.yaml file.
root_path: Path to the root of the application running on the server.
url_map_list: List of appinfo.URLMap objects to initialize this
matcher with. Can be an empty list if you would like to add patterns
manually or use config.handlers as a default.
module_dict: Dictionary in which application-loaded modules should be
preserved between requests. This dictionary must be separate from the
sys.modules dictionary.
default_expiration: String describing default expiration time for browser
based caching of static files. If set to None this disallows any
browser caching of static content.
create_url_matcher: Used for dependency injection.
create_cgi_dispatcher: Used for dependency injection.
create_file_dispatcher: Used for dependency injection.
create_path_adjuster: Used for dependency injection.
normpath: Used for dependency injection.
Returns:
Instance of URLMatcher with the supplied URLMap objects properly loaded.
Raises:
InvalidAppConfigError: if a handler is an unknown type.
"""
if config and config.handlers and not url_map_list:
url_map_list = config.handlers
url_matcher = create_url_matcher()
path_adjuster = create_path_adjuster(root_path)
cgi_dispatcher = create_cgi_dispatcher(config, module_dict,
root_path, path_adjuster)
static_file_config_matcher = StaticFileConfigMatcher(url_map_list,
default_expiration)
file_dispatcher = create_file_dispatcher(config, path_adjuster,
static_file_config_matcher)
FakeFile.SetStaticFileConfigMatcher(static_file_config_matcher)
for url_map in url_map_list:
admin_only = url_map.login == appinfo.LOGIN_ADMIN
requires_login = url_map.login == appinfo.LOGIN_REQUIRED or admin_only
auth_fail_action = url_map.auth_fail_action
handler_type = url_map.GetHandlerType()
if handler_type == appinfo.HANDLER_SCRIPT:
dispatcher = cgi_dispatcher
elif handler_type in (appinfo.STATIC_FILES, appinfo.STATIC_DIR):
dispatcher = file_dispatcher
else:
raise InvalidAppConfigError('Unknown handler type "%s"' % handler_type)
regex = url_map.url
path = url_map.GetHandler()
if handler_type == appinfo.STATIC_DIR:
if regex[-1] == r'/':
regex = regex[:-1]
if path[-1] == os.path.sep:
path = path[:-1]
regex = '/'.join((re.escape(regex), '(.*)'))
if os.path.sep == '\\':
backref = r'\\1'
else:
backref = r'\1'
path = (normpath(path).replace('\\', '\\\\') +
os.path.sep + backref)
url_matcher.AddURL(regex,
dispatcher,
path,
requires_login, admin_only, auth_fail_action)
return url_matcher
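# Hedged usage sketch: passing an empty url_map_list lets config.handlers
# supply the URL maps, per the docstring above; default_expiration='0'
# mirrors what LoadAppConfig passes when app.yaml sets no default.
def _ExampleCreateMatcher(config, root_path, module_dict):
  return CreateURLMatcherFromMaps(config, root_path, [], module_dict,
                                  default_expiration='0')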
class AppConfigCache(object):
"""Cache used by LoadAppConfig.
  If given to LoadAppConfig, instances of this class are used to cache the
  contents of the app config (app.yaml or app.yml) and the Matcher created
  from it.
  Code outside LoadAppConfig should treat instances of this class as opaque
  objects and not access their members.
"""
path = None
mtime = None
config = None
matcher = None
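# Minimal usage sketch: LoadAppConfig (below) compares the cache's path and
# mtime pair against app.yaml and returns from_cache=True when nothing has
# changed, so a second call with an unmodified config skips re-parsing.
def _ExampleAppConfigCache(root_path, module_dict):
  cache = AppConfigCache()
  LoadAppConfig(root_path, module_dict, cache=cache)
  return LoadAppConfig(root_path, module_dict, cache=cache)[2]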
def LoadAppConfig(root_path,
module_dict,
cache=None,
static_caching=True,
read_app_config=ReadAppConfig,
create_matcher=CreateURLMatcherFromMaps,
default_partition=None):
"""Creates a Matcher instance for an application configuration file.
Raises an InvalidAppConfigError exception if there is anything wrong with
the application configuration file.
Args:
root_path: Path to the root of the application to load.
module_dict: Dictionary in which application-loaded modules should be
preserved between requests. This dictionary must be separate from the
sys.modules dictionary.
cache: Instance of AppConfigCache or None.
static_caching: True if browser caching of static files should be allowed.
read_app_config: Used for dependency injection.
create_matcher: Used for dependency injection.
default_partition: Default partition to use for the appid.
Returns:
tuple: (AppInfoExternal, URLMatcher, from_cache)
Raises:
    AppConfigNotFoundError: if an app.yaml file cannot be found.
"""
for appinfo_path in [os.path.join(root_path, 'app.yaml'),
os.path.join(root_path, 'app.yml')]:
if os.path.isfile(appinfo_path):
if cache is not None:
mtime = os.path.getmtime(appinfo_path)
if cache.path == appinfo_path and cache.mtime == mtime:
return (cache.config, cache.matcher, True)
cache.config = cache.matcher = cache.path = None
cache.mtime = mtime
config = read_app_config(appinfo_path, appinfo_includes.Parse)
if config.application:
config.application = AppIdWithDefaultPartition(config.application,
default_partition)
multiprocess.GlobalProcess().NewAppInfo(config)
if static_caching:
if config.default_expiration:
default_expiration = config.default_expiration
else:
default_expiration = '0'
else:
default_expiration = None
matcher = create_matcher(config,
root_path,
config.handlers,
module_dict,
default_expiration)
FakeFile.SetSkippedFiles(config.skip_files)
if cache is not None:
cache.path = appinfo_path
cache.config = config
cache.matcher = matcher
return config, matcher, False
raise AppConfigNotFoundError(
'Could not find app.yaml in "%s".' % (root_path,))
class ReservedPathFilter():
"""Checks a path against a set of inbound_services."""
reserved_paths = {
'/_ah/channel/connect': 'channel_presence',
'/_ah/channel/disconnect': 'channel_presence'
}
def __init__(self, inbound_services):
self.inbound_services = inbound_services
def ExcludePath(self, path):
"""Check to see if this is a service url and matches inbound_services."""
skip = False
for reserved_path in self.reserved_paths.keys():
if path.startswith(reserved_path):
if (not self.inbound_services or
self.reserved_paths[reserved_path] not in self.inbound_services):
return (True, self.reserved_paths[reserved_path])
return (False, None)
def CreateInboundServiceFilter(inbound_services):
return ReservedPathFilter(inbound_services)
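# Hedged example: a request to a reserved channel-presence path is excluded
# unless the matching service name appears in inbound_services.
def _ExampleReservedPathFilter():
  excluded, service = ReservedPathFilter([]).ExcludePath(
      '/_ah/channel/connect')
  assert excluded and service == 'channel_presence'
  excluded, service = ReservedPathFilter(
      ['channel_presence']).ExcludePath('/_ah/channel/connect')
  assert not excluded and service is None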
def ReadCronConfig(croninfo_path, parse_cron_config=croninfo.LoadSingleCron):
"""Reads cron.yaml file and returns a list of CronEntry instances.
Args:
croninfo_path: String containing the path to the cron.yaml file.
parse_cron_config: Used for dependency injection.
Returns:
A CronInfoExternal object.
Raises:
    InvalidAppConfigError or MalformedCronConfiguration: if the config file
      is unreadable, empty or invalid.
"""
try:
croninfo_file = file(croninfo_path, 'r')
except IOError, e:
raise InvalidAppConfigError(
'Cron configuration could not be read from "%s": %s'
% (croninfo_path, e))
try:
return parse_cron_config(croninfo_file)
finally:
croninfo_file.close()
def _RemoveFile(file_path):
if file_path and os.path.lexists(file_path):
logging.info('Attempting to remove file at %s', file_path)
try:
os.remove(file_path)
except OSError, e:
logging.warning('Removing file failed: %s', e)
def SetupStubs(app_id, **config):
"""Sets up testing stubs of APIs.
Args:
app_id: Application ID being served.
config: keyword arguments.
Keywords:
root_path: Root path to the directory of the application which should
contain the app.yaml, index.yaml, and queue.yaml files.
login_url: Relative URL which should be used for handling user login/logout.
blobstore_path: Path to the directory to store Blobstore blobs in.
datastore_path: Path to the file to store Datastore file stub data in.
prospective_search_path: Path to the file to store Prospective Search stub
data in.
use_sqlite: Use the SQLite stub for the datastore.
auto_id_policy: How datastore stub assigns IDs, sequential or scattered.
high_replication: Use the high replication consistency model
history_path: DEPRECATED, No-op.
clear_datastore: If the datastore should be cleared on startup.
smtp_host: SMTP host used for sending test mail.
smtp_port: SMTP port.
smtp_user: SMTP user.
smtp_password: SMTP password.
mysql_host: MySQL host.
mysql_port: MySQL port.
mysql_user: MySQL user.
mysql_password: MySQL password.
mysql_socket: MySQL socket.
appidentity_email_address: Email address for service account substitute.
appidentity_private_key_path: Path to private key for service account sub.
enable_sendmail: Whether to use sendmail as an alternative to SMTP.
show_mail_body: Whether to log the body of emails.
remove: Used for dependency injection.
disable_task_running: True if tasks should not automatically run after
they are enqueued.
task_retry_seconds: How long to wait after an auto-running task before it
is tried again.
logs_path: Path to the file to store the logs data in.
trusted: True if this app can access data belonging to other apps. This
behavior is different from the real app server and should be left False
except for advanced uses of dev_appserver.
    port: The port that this dev_appserver is bound to. Defaults to 8080.
    address: The host that this dev_appserver is running on. Defaults to
      localhost.
search_index_path: Path to the file to store search indexes in.
clear_search_index: If the search indices should be cleared on startup.
"""
root_path = config.get('root_path', None)
login_url = config['login_url']
blobstore_path = config['blobstore_path']
datastore_path = config['datastore_path']
clear_datastore = config['clear_datastore']
prospective_search_path = config.get('prospective_search_path', '')
clear_prospective_search = config.get('clear_prospective_search', False)
use_sqlite = config.get('use_sqlite', False)
auto_id_policy = config.get('auto_id_policy', datastore_stub_util.SEQUENTIAL)
high_replication = config.get('high_replication', False)
require_indexes = config.get('require_indexes', False)
mysql_host = config.get('mysql_host', None)
mysql_port = config.get('mysql_port', 3306)
mysql_user = config.get('mysql_user', None)
mysql_password = config.get('mysql_password', None)
mysql_socket = config.get('mysql_socket', None)
smtp_host = config.get('smtp_host', None)
smtp_port = config.get('smtp_port', 25)
smtp_user = config.get('smtp_user', '')
smtp_password = config.get('smtp_password', '')
enable_sendmail = config.get('enable_sendmail', False)
show_mail_body = config.get('show_mail_body', False)
appidentity_email_address = config.get('appidentity_email_address', None)
appidentity_private_key_path = config.get('appidentity_private_key_path', None)
remove = config.get('remove', os.remove)
disable_task_running = config.get('disable_task_running', False)
task_retry_seconds = config.get('task_retry_seconds', 30)
logs_path = config.get('logs_path', ':memory:')
trusted = config.get('trusted', False)
serve_port = config.get('port', 8080)
serve_address = config.get('address', 'localhost')
clear_search_index = config.get('clear_search_indexes', False)
search_index_path = config.get('search_indexes_path', None)
_use_atexit_for_datastore_stub = config.get('_use_atexit_for_datastore_stub',
False)
port_sqlite_data = config.get('port_sqlite_data', False)
os.environ['APPLICATION_ID'] = app_id
os.environ['REQUEST_ID_HASH'] = ''
if clear_prospective_search and prospective_search_path:
_RemoveFile(prospective_search_path)
if clear_datastore:
_RemoveFile(datastore_path)
if clear_search_index:
_RemoveFile(search_index_path)
if multiprocess.GlobalProcess().MaybeConfigureRemoteDataApis():
apiproxy_stub_map.apiproxy.RegisterStub(
'logservice',
logservice_stub.LogServiceStub(logs_path=':memory:'))
else:
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
apiproxy_stub_map.apiproxy.RegisterStub(
'app_identity_service',
app_identity_stub.AppIdentityServiceStub.Create(
email_address=appidentity_email_address,
private_key_path=appidentity_private_key_path))
apiproxy_stub_map.apiproxy.RegisterStub(
'capability_service',
capability_stub.CapabilityServiceStub())
if use_sqlite:
if port_sqlite_data:
try:
PortAllEntities(datastore_path)
except Error:
logging.Error("Porting the data from the datastore file stub failed")
raise
datastore = datastore_sqlite_stub.DatastoreSqliteStub(
app_id, datastore_path, require_indexes=require_indexes,
trusted=trusted, root_path=root_path,
use_atexit=_use_atexit_for_datastore_stub,
auto_id_policy=auto_id_policy)
else:
logging.warning(FILE_STUB_DEPRECATION_MESSAGE)
datastore = datastore_file_stub.DatastoreFileStub(
app_id, datastore_path, require_indexes=require_indexes,
trusted=trusted, root_path=root_path,
use_atexit=_use_atexit_for_datastore_stub,
auto_id_policy=auto_id_policy)
if high_replication:
datastore.SetConsistencyPolicy(
datastore_stub_util.TimeBasedHRConsistencyPolicy())
apiproxy_stub_map.apiproxy.ReplaceStub(
'datastore_v3', datastore)
apiproxy_stub_map.apiproxy.RegisterStub(
'datastore_v4',
datastore_v4_stub.DatastoreV4Stub(app_id))
apiproxy_stub_map.apiproxy.RegisterStub(
'mail',
mail_stub.MailServiceStub(smtp_host,
smtp_port,
smtp_user,
smtp_password,
enable_sendmail=enable_sendmail,
show_mail_body=show_mail_body,
allow_tls=False))
apiproxy_stub_map.apiproxy.RegisterStub(
'memcache',
memcache_stub.MemcacheServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'taskqueue',
taskqueue_stub.TaskQueueServiceStub(
root_path=root_path,
auto_task_running=(not disable_task_running),
task_retry_seconds=task_retry_seconds,
default_http_server='%s:%s' % (serve_address, serve_port)))
apiproxy_stub_map.apiproxy.RegisterStub(
'urlfetch',
urlfetch_stub.URLFetchServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'xmpp',
xmpp_service_stub.XmppServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'logservice',
logservice_stub.LogServiceStub(logs_path=logs_path))
from google.appengine import api
sys.modules['google.appengine.api.rdbms'] = rdbms_mysqldb
api.rdbms = rdbms_mysqldb
rdbms_mysqldb.SetConnectKwargs(host=mysql_host, port=mysql_port,
user=mysql_user, passwd=mysql_password,
unix_socket=mysql_socket)
fixed_login_url = '%s?%s=%%s' % (login_url,
dev_appserver_login.CONTINUE_PARAM)
fixed_logout_url = '%s&%s' % (fixed_login_url,
dev_appserver_login.LOGOUT_PARAM)
apiproxy_stub_map.apiproxy.RegisterStub(
'user',
user_service_stub.UserServiceStub(login_url=fixed_login_url,
logout_url=fixed_logout_url))
apiproxy_stub_map.apiproxy.RegisterStub(
'channel',
channel_service_stub.ChannelServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'matcher',
prospective_search_stub.ProspectiveSearchStub(
prospective_search_path,
apiproxy_stub_map.apiproxy.GetStub('taskqueue')))
apiproxy_stub_map.apiproxy.RegisterStub(
'remote_socket',
_remote_socket_stub.RemoteSocketServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'search',
simple_search_stub.SearchServiceStub(index_file=search_index_path))
try:
from google.appengine.api.images import images_stub
host_prefix = 'http://%s:%d' % (serve_address, serve_port)
apiproxy_stub_map.apiproxy.RegisterStub(
'images',
images_stub.ImagesServiceStub(host_prefix=host_prefix))
except ImportError, e:
logging.warning('Could not initialize images API; you are likely missing '
'the Python "PIL" module. ImportError: %s', e)
from google.appengine.api.images import images_not_implemented_stub
apiproxy_stub_map.apiproxy.RegisterStub(
'images',
images_not_implemented_stub.ImagesNotImplementedServiceStub())
blob_storage = file_blob_storage.FileBlobStorage(blobstore_path, app_id)
apiproxy_stub_map.apiproxy.RegisterStub(
'blobstore',
blobstore_stub.BlobstoreServiceStub(blob_storage))
apiproxy_stub_map.apiproxy.RegisterStub(
'file',
file_service_stub.FileServiceStub(blob_storage))
system_service_stub = system_stub.SystemServiceStub()
multiprocess.GlobalProcess().UpdateSystemStub(system_service_stub)
apiproxy_stub_map.apiproxy.RegisterStub('system', system_service_stub)
def TearDownStubs():
"""Clean up any stubs that need cleanup."""
datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
if isinstance(datastore_stub, datastore_stub_util.BaseTransactionManager):
logging.info('Applying all pending transactions and saving the datastore')
datastore_stub.Write()
search_stub = apiproxy_stub_map.apiproxy.GetStub('search')
if isinstance(search_stub, simple_search_stub.SearchServiceStub):
logging.info('Saving search indexes')
search_stub.Write()
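# Minimal invocation sketch (hypothetical paths and app id): only the four
# keywords SetupStubs reads unconditionally are supplied here; every other
# keyword falls back to the default documented in its docstring.
def _ExampleSetupStubs():
  SetupStubs('dev~example-app',
             login_url='/_ah/login',
             blobstore_path='/tmp/blobs',
             datastore_path='/tmp/dev.datastore',
             clear_datastore=False)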
def CreateImplicitMatcher(
config,
module_dict,
root_path,
login_url,
create_path_adjuster=PathAdjuster,
create_local_dispatcher=LocalCGIDispatcher,
create_cgi_dispatcher=CGIDispatcher,
get_blob_storage=dev_appserver_blobstore.GetBlobStorage):
"""Creates a URLMatcher instance that handles internal URLs.
Used to facilitate handling user login/logout, debugging, info about the
currently running app, quitting the dev appserver, etc.
Args:
config: AppInfoExternal instance representing the parsed app.yaml file.
module_dict: Dictionary in the form used by sys.modules.
root_path: Path to the root of the application.
login_url: Relative URL which should be used for handling user login/logout.
    create_path_adjuster: Used for dependency injection.
    create_local_dispatcher: Used for dependency injection.
    create_cgi_dispatcher: Used for dependency injection.
get_blob_storage: Used for dependency injection.
Returns:
Instance of URLMatcher with appropriate dispatchers.
"""
url_matcher = URLMatcher()
path_adjuster = create_path_adjuster(root_path)
def _HandleQuit():
raise KeyboardInterrupt
quit_dispatcher = create_local_dispatcher(config, sys.modules, path_adjuster,
_HandleQuit)
url_matcher.AddURL('/_ah/quit?',
quit_dispatcher,
'',
False,
False,
appinfo.AUTH_FAIL_ACTION_REDIRECT)
login_dispatcher = create_local_dispatcher(config, sys.modules, path_adjuster,
dev_appserver_login.main)
url_matcher.AddURL(login_url,
login_dispatcher,
'',
False,
False,
appinfo.AUTH_FAIL_ACTION_REDIRECT)
admin_dispatcher = create_cgi_dispatcher(config, module_dict, root_path,
path_adjuster)
url_matcher.AddURL('/_ah/admin(?:/.*)?',
admin_dispatcher,
DEVEL_CONSOLE_PATH,
False,
False,
appinfo.AUTH_FAIL_ACTION_REDIRECT)
upload_dispatcher = dev_appserver_blobstore.CreateUploadDispatcher(
get_blob_storage)
url_matcher.AddURL(dev_appserver_blobstore.UPLOAD_URL_PATTERN,
upload_dispatcher,
'',
False,
False,
appinfo.AUTH_FAIL_ACTION_UNAUTHORIZED)
blobimage_dispatcher = dev_appserver_blobimage.CreateBlobImageDispatcher(
apiproxy_stub_map.apiproxy.GetStub('images'))
url_matcher.AddURL(dev_appserver_blobimage.BLOBIMAGE_URL_PATTERN,
blobimage_dispatcher,
'',
False,
False,
appinfo.AUTH_FAIL_ACTION_UNAUTHORIZED)
oauth_dispatcher = dev_appserver_oauth.CreateOAuthDispatcher()
url_matcher.AddURL(dev_appserver_oauth.OAUTH_URL_PATTERN,
oauth_dispatcher,
'',
False,
False,
appinfo.AUTH_FAIL_ACTION_UNAUTHORIZED)
channel_dispatcher = dev_appserver_channel.CreateChannelDispatcher(
apiproxy_stub_map.apiproxy.GetStub('channel'))
url_matcher.AddURL(dev_appserver_channel.CHANNEL_POLL_PATTERN,
channel_dispatcher,
'',
False,
False,
appinfo.AUTH_FAIL_ACTION_UNAUTHORIZED)
url_matcher.AddURL(dev_appserver_channel.CHANNEL_JSAPI_PATTERN,
channel_dispatcher,
'',
False,
False,
appinfo.AUTH_FAIL_ACTION_UNAUTHORIZED)
apiserver_dispatcher = dev_appserver_apiserver.CreateApiserverDispatcher()
url_matcher.AddURL(dev_appserver_apiserver.API_SERVING_PATTERN,
apiserver_dispatcher,
'',
False,
False,
appinfo.AUTH_FAIL_ACTION_UNAUTHORIZED)
return url_matcher
def FetchAllEntities():
"""Returns all datastore entities from all namespaces as a list."""
ns = list(datastore.Query('__namespace__').Run())
original_ns = namespace_manager.get_namespace()
entities_set = []
for namespace in ns:
namespace_manager.set_namespace(namespace.key().name())
kinds_list = list(datastore.Query('__kind__').Run())
for kind_entity in kinds_list:
ents = list(datastore.Query(kind_entity.key().name()).Run())
for ent in ents:
entities_set.append(ent)
namespace_manager.set_namespace(original_ns)
return entities_set
def PutAllEntities(entities):
"""Puts all entities to the current datastore."""
for entity in entities:
datastore.Put(entity)
def PortAllEntities(datastore_path):
"""Copies entities from a DatastoreFileStub to an SQLite stub.
Args:
    datastore_path: Path to the file to store Datastore file stub data in.
"""
previous_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
try:
app_id = os.environ['APPLICATION_ID']
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
datastore_stub = datastore_file_stub.DatastoreFileStub(
app_id, datastore_path, trusted=True)
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', datastore_stub)
    entities = FetchAllEntities()
sqlite_datastore_stub = datastore_sqlite_stub.DatastoreSqliteStub(app_id,
datastore_path + '.sqlite', trusted=True)
apiproxy_stub_map.apiproxy.ReplaceStub('datastore_v3',
sqlite_datastore_stub)
PutAllEntities(entities)
sqlite_datastore_stub.Close()
finally:
apiproxy_stub_map.apiproxy.ReplaceStub('datastore_v3', previous_stub)
shutil.copy(datastore_path, datastore_path + '.filestub')
_RemoveFile(datastore_path)
shutil.move(datastore_path + '.sqlite', datastore_path)
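# Sketch of how SetupStubs drives the port: when use_sqlite and
# port_sqlite_data are both set, it calls PortAllEntities before creating
# the SQLite stub. The app id below is hypothetical.
def _ExamplePortDatastore(datastore_path):
  os.environ['APPLICATION_ID'] = 'dev~example-app'
  PortAllEntities(datastore_path)
  # datastore_path now holds SQLite data; the original file stub data is
  # preserved alongside it at datastore_path + '.filestub'.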
def CreateServer(root_path,
login_url,
port,
template_dir=None,
serve_address='',
allow_skipped_files=False,
static_caching=True,
python_path_list=sys.path,
sdk_dir=SDK_ROOT,
default_partition=None,
frontend_port=None,
interactive_console=True):
"""Creates a new HTTPServer for an application.
The sdk_dir argument must be specified for the directory storing all code for
the SDK so as to allow for the sandboxing of module access to work for any
and all SDK code. While typically this is where the 'google' package lives,
it can be in another location because of API version support.
Args:
root_path: String containing the path to the root directory of the
application where the app.yaml file is.
login_url: Relative URL which should be used for handling user login/logout.
port: Port to start the application server on.
template_dir: Unused.
serve_address: Address on which the server should serve.
allow_skipped_files: True if skipped files should be accessible.
static_caching: True if browser caching of static files should be allowed.
python_path_list: Used for dependency injection.
sdk_dir: Directory where the SDK is stored.
default_partition: Default partition to use for the appid.
frontend_port: A frontend port (so backends can return an address for a
frontend). If None, port will be used.
interactive_console: Whether to add the interactive console.
Returns:
    Instance of BaseHTTPServer.HTTPServer that's ready to start accepting
    requests.
"""
absolute_root_path = os.path.realpath(root_path)
FakeFile.SetAllowedPaths(absolute_root_path,
[sdk_dir])
FakeFile.SetAllowSkippedFiles(allow_skipped_files)
handler_class = CreateRequestHandler(absolute_root_path,
login_url,
static_caching,
default_partition,
interactive_console)
if absolute_root_path not in python_path_list:
python_path_list.insert(0, absolute_root_path)
if multiprocess.Enabled():
server = HttpServerWithMultiProcess((serve_address, port), handler_class)
else:
server = HTTPServerWithScheduler((serve_address, port), handler_class)
queue_stub = apiproxy_stub_map.apiproxy.GetStub('taskqueue')
if queue_stub and hasattr(queue_stub, 'StartBackgroundExecution'):
queue_stub.StartBackgroundExecution()
request_info._local_dispatcher = DevAppserverDispatcher(server,
frontend_port or port)
server.frontend_hostport = '%s:%d' % (serve_address or 'localhost',
frontend_port or port)
return server
class HTTPServerWithScheduler(BaseHTTPServer.HTTPServer):
"""A BaseHTTPServer subclass that calls a method at a regular interval."""
def __init__(self, server_address, request_handler_class):
"""Constructor.
Args:
server_address: the bind address of the server.
request_handler_class: class used to handle requests.
"""
BaseHTTPServer.HTTPServer.__init__(self, server_address,
request_handler_class)
self._events = []
self._stopped = False
def handle_request(self):
"""Override the base handle_request call.
Python 2.6 changed the semantics of handle_request() with r61289.
This patches it back to the Python 2.5 version, which has
helpfully been renamed to _handle_request_noblock.
"""
if hasattr(self, "_handle_request_noblock"):
self._handle_request_noblock()
else:
BaseHTTPServer.HTTPServer.handle_request(self)
def get_request(self, time_func=time.time, select_func=select.select):
"""Overrides the base get_request call.
Args:
time_func: used for testing.
select_func: used for testing.
Returns:
a (socket_object, address info) tuple.
"""
while True:
if self._events:
current_time = time_func()
next_eta = self._events[0][0]
delay = next_eta - current_time
else:
delay = DEFAULT_SELECT_DELAY
readable, _, _ = select_func([self.socket], [], [], max(delay, 0))
if readable:
return self.socket.accept()
current_time = time_func()
if self._events and current_time >= self._events[0][0]:
runnable = heapq.heappop(self._events)[1]
request_tuple = runnable()
if request_tuple:
return request_tuple
def serve_forever(self):
"""Handle one request at a time until told to stop."""
while not self._stopped:
self.handle_request()
self.server_close()
def stop_serving_forever(self):
"""Stop the serve_forever() loop.
Stop happens on the next handle_request() loop; it will not stop
immediately. Since dev_appserver.py must run on py2.5 we can't
use newer features of SocketServer (e.g. shutdown(), added in py2.6).
"""
self._stopped = True
def AddEvent(self, eta, runnable, service=None, event_id=None):
"""Add a runnable event to be run at the specified time.
Args:
eta: when to run the event, in seconds since epoch.
runnable: a callable object.
service: the service that owns this event. Should be set if id is set.
event_id: optional id of the event. Used for UpdateEvent below.
"""
heapq.heappush(self._events, (eta, runnable, service, event_id))
def UpdateEvent(self, service, event_id, eta):
"""Update a runnable event in the heap with a new eta.
TODO: come up with something better than a linear scan to
update items. For the case this is used for now -- updating events to
"time out" channels -- this works fine because those events are always
soon (within seconds) and thus found quickly towards the front of the heap.
One could easily imagine a scenario where this is always called for events
that tend to be at the back of the heap, of course...
Args:
service: the service that owns this event.
event_id: the id of the event.
eta: the new eta of the event.
"""
    for index in xrange(len(self._events)):
      item = self._events[index]
      if item[2] == service and item[3] == event_id:
        item = (eta, item[1], item[2], item[3])
        del self._events[index]
        heapq.heappush(self._events, item)
        break
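# Illustrative sketch: scheduling a one-shot event on the server's event
# heap and then moving its eta forward. The service and event_id values are
# hypothetical.
def _ExampleScheduledEvent(server):
  def _Runnable():
    logging.info('timer fired')
    return None
  server.AddEvent(time.time() + 5.0, _Runnable,
                  service='example', event_id='tick-1')
  server.UpdateEvent('example', 'tick-1', time.time() + 1.0)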
class HttpServerWithMultiProcess(HTTPServerWithScheduler):
"""Class extending HTTPServerWithScheduler with multi-process handling."""
def __init__(self, server_address, request_handler_class):
"""Constructor.
Args:
server_address: the bind address of the server.
request_handler_class: class used to handle requests.
"""
HTTPServerWithScheduler.__init__(self, server_address,
request_handler_class)
multiprocess.GlobalProcess().SetHttpServer(self)
def process_request(self, request, client_address):
"""Overrides the SocketServer process_request call."""
multiprocess.GlobalProcess().ProcessRequest(request, client_address)
class FakeRequestSocket(object):
"""A socket object to fake an HTTP request."""
def __init__(self, method, relative_url, headers, body):
payload = cStringIO.StringIO()
payload.write('%s %s HTTP/1.1\r\n' % (method, relative_url))
payload.write('Content-Length: %d\r\n' % len(body))
for key, value in headers:
payload.write('%s: %s\r\n' % (key, value))
payload.write('\r\n')
payload.write(body)
self.rfile = cStringIO.StringIO(payload.getvalue())
self.wfile = StringIO.StringIO()
self.wfile_close = self.wfile.close
self.wfile.close = self.connection_done
def connection_done(self):
self.wfile_close()
def makefile(self, mode, buffsize):
if mode.startswith('w'):
return self.wfile
else:
return self.rfile
def close(self):
pass
def shutdown(self, how):
pass
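# Hedged usage sketch: the dispatcher below wraps internally generated
# requests in FakeRequestSocket so the server loop can treat them like
# connections read off a real socket. The URL and header are illustrative.
def _ExampleFakeRequestSocket():
  sock = FakeRequestSocket('POST', '/_ah/warmup',
                           [('X-AppEngine-Fake', '1')], 'payload=1')
  rfile = sock.makefile('r', -1)
  return rfile.readline()  # 'POST /_ah/warmup HTTP/1.1\r\n'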
class DevAppserverDispatcher(request_info._LocalFakeDispatcher):
"""A dev_appserver Dispatcher implementation."""
def __init__(self, server, port):
self._server = server
self._port = port
def add_event(self, runnable, eta, service=None, event_id=None):
"""Add a callable to be run at the specified time.
Args:
runnable: A callable object to call at the specified time.
eta: An int containing the time to run the event, in seconds since the
epoch.
service: A str containing the name of the service that owns this event.
This should be set if event_id is set.
event_id: A str containing the id of the event. If set, this can be passed
to update_event to change the time at which the event should run.
"""
self._server.AddEvent(eta, runnable, service, event_id)
def update_event(self, eta, service, event_id):
"""Update the eta of a scheduled event.
Args:
eta: An int containing the time to run the event, in seconds since the
epoch.
service: A str containing the name of the service that owns this event.
event_id: A str containing the id of the event to update.
"""
self._server.UpdateEvent(service, event_id, eta)
def add_async_request(self, method, relative_url, headers, body, source_ip,
server_name=None, version=None, instance_id=None):
"""Dispatch an HTTP request asynchronously.
Args:
method: A str containing the HTTP method of the request.
relative_url: A str containing path and query string of the request.
headers: A list of (key, value) tuples where key and value are both str.
body: A str containing the request body.
source_ip: The source ip address for the request.
server_name: An optional str containing the server name to service this
request. If unset, the request will be dispatched to the default
server.
version: An optional str containing the version to service this request.
If unset, the request will be dispatched to the default version.
instance_id: An optional str containing the instance_id of the instance to
        service this request. If unset, the request will be dispatched
        according to the load-balancing for the server and version.
"""
fake_socket = FakeRequestSocket(method, relative_url, headers, body)
self._server.AddEvent(0, lambda: (fake_socket, (source_ip, self._port)))
def add_request(self, method, relative_url, headers, body, source_ip,
server_name=None, version=None, instance_id=None):
"""Process an HTTP request.
Args:
method: A str containing the HTTP method of the request.
relative_url: A str containing path and query string of the request.
headers: A list of (key, value) tuples where key and value are both str.
body: A str containing the request body.
source_ip: The source ip address for the request.
server_name: An optional str containing the server name to service this
request. If unset, the request will be dispatched to the default
server.
version: An optional str containing the version to service this request.
If unset, the request will be dispatched to the default version.
instance_id: An optional str containing the instance_id of the instance to
        service this request. If unset, the request will be dispatched
        according to the load-balancing for the server and version.
Returns:
A request_info.ResponseTuple containing the response information for the
HTTP request.
"""
try:
header_dict = wsgiref.headers.Headers(headers)
connection_host = header_dict.get('host')
connection = httplib.HTTPConnection(connection_host)
connection.putrequest(
method, relative_url,
skip_host='host' in header_dict,
skip_accept_encoding='accept-encoding' in header_dict)
for header_key, header_value in headers:
connection.putheader(header_key, header_value)
connection.endheaders()
connection.send(body)
response = connection.getresponse()
response.read()
response.close()
return request_info.ResponseTuple(
'%d %s' % (response.status, response.reason), [], '')
except (httplib.HTTPException, socket.error):
logging.exception(
          'An error occurred while sending a %s request to "%s%s"',
method, connection_host, relative_url)
return request_info.ResponseTuple('0', [], '')
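# Illustrative sketch: add_async_request queues the request as a zero-delay
# event, so the server loop picks it up on its next pass; nothing is sent
# over the network until then. The URL and source ip are placeholders.
def _ExampleAsyncDispatch(dispatcher):
  dispatcher.add_async_request('GET', '/_ah/warmup', [], '', '127.0.0.1')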
|
levibostian/myBlanky
|
googleAppEngine/google/appengine/tools/old_dev_appserver.py
|
Python
|
mit
| 133,803 | 0.007593 |
PORT = {}
PORT['metaManager'] = 10087
CGI_SOCK = {}
CGI_SOCK['metaManager'] = '/tmp/metaManager_fcgi_sock'
SOCK = {}
SOCK['logqueue'] = '/tmp/logqueue_sock'
PIDPATH = {}
PIDPATH['metaManager'] = '/var/run/metaManager.server.pid'
PIDPATH['managerChewer'] = '/var/run/data.chewer.pid'
|
hackshel/metaCollecter
|
src/metaManager/modules/defines.py
|
Python
|
bsd-3-clause
| 312 | 0.032051 |
#!/usr/bin/env python3
"""
bootstrap.py will set up a virtualenv for you and update it as required.
Usage:
bootstrap.py # update virtualenv
bootstrap.py fake # just update the virtualenv timestamps
bootstrap.py clean # delete the virtualenv
bootstrap.py -h | --help # print this message and exit
Options for the plain command:
-f, --force # do the virtualenv update even if it is up to date
-r, --full-rebuild # delete the virtualenv before rebuilding
    -q, --quiet              # don't ask for user input
    -d, --dev                # set up the virtualenv for the 'dev' environment
"""
# a script to set up the virtualenv so we can use fabric and tasks
import sys
import getopt
import ve_mgr
def print_help_text():
print(__doc__)
def print_error_msg(error_msg):
print(error_msg)
print_help_text()
return 2
def main(argv):
# check python version is high enough
ve_mgr.check_python_version(2, 6, __file__)
force_update = False
full_rebuild = False
fake_update = False
clean_ve = False
devel = False
if argv:
try:
            opts, args = getopt.getopt(argv[1:], 'hfqrd',
['help', 'force', 'quiet', 'full-rebuild', 'dev'])
except getopt.error as msg:
return print_error_msg('Bad options: %s' % msg)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print_help_text()
return 0
if o in ("-f", "--force"):
force_update = True
if o in ("-r", "--full-rebuild"):
full_rebuild = True
if o in ("-d", "--dev"):
devel = True
if len(args) > 1:
return print_error_msg(
"Can only have one argument - you had %s" % (' '.join(args)))
if len(args) == 1:
if args[0] == 'fake':
fake_update = True
elif args[0] == 'clean':
clean_ve = True
# check for incompatible flags
if force_update and fake_update:
return print_error_msg("Cannot use --force with fake")
if full_rebuild and fake_update:
return print_error_msg("Cannot use --full-rebuild with fake")
if full_rebuild and clean_ve:
return print_error_msg("Cannot use --full-rebuild with clean")
environment = 'dev' if devel is True else None
updater = ve_mgr.UpdateVE(environment=environment)
if fake_update:
return updater.update_ve_timestamp()
elif clean_ve:
return updater.delete_virtualenv()
else:
updater.update_git_submodule()
return updater.update_ve(full_rebuild, force_update)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
oriel-hub/api
|
deploy/bootstrap.py
|
Python
|
gpl-2.0
| 2,753 | 0.00109 |
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from collections import defaultdict
import os
import re
import uuid
from oslo_log import log as logging
import sqlalchemy
from sqlalchemy import exc
from sqlalchemy import interfaces
from sqlalchemy.sql.expression import text
from trove.common import cfg
from trove.common import configurations
from trove.common import exception
from trove.common.exception import PollTimeOut
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common import utils as utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.common import sql_query
from trove.guestagent.datastore import service
from trove.guestagent.db import models
from trove.guestagent import pkg
ADMIN_USER_NAME = "os_admin"
LOG = logging.getLogger(__name__)
FLUSH = text(sql_query.FLUSH)
ENGINE = None
DATADIR = None
PREPARING = False
UUID = False
TMP_MYCNF = "/tmp/my.cnf.tmp"
CONF = cfg.CONF
MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'mysql'
INCLUDE_MARKER_OPERATORS = {
True: ">=",
False: ">"
}
OS_NAME = operating_system.get_os()
MYSQL_CONFIG = {operating_system.REDHAT: "/etc/my.cnf",
operating_system.DEBIAN: "/etc/mysql/my.cnf",
operating_system.SUSE: "/etc/my.cnf"}[OS_NAME]
MYSQL_SERVICE_CANDIDATES = ["mysql", "mysqld", "mysql-server"]
MYSQL_BIN_CANDIDATES = ["/usr/sbin/mysqld", "/usr/libexec/mysqld"]
MYCNF_OVERRIDES = "/etc/mysql/conf.d/overrides.cnf"
MYCNF_OVERRIDES_TMP = "/tmp/overrides.cnf.tmp"
MYCNF_REPLMASTER = "/etc/mysql/conf.d/0replmaster.cnf"
MYCNF_REPLSLAVE = "/etc/mysql/conf.d/1replslave.cnf"
MYCNF_REPLCONFIG_TMP = "/tmp/replication.cnf.tmp"
# Create a package impl
packager = pkg.Package()
def clear_expired_password():
"""
    Some mysql installations generate a random root password and save it
    in /root/.mysql_secret; this password is expired and should be changed
    by a client that supports expired passwords.
"""
LOG.debug("Removing expired password.")
secret_file = "/root/.mysql_secret"
try:
out, err = utils.execute("cat", secret_file,
run_as_root=True, root_helper="sudo")
except exception.ProcessExecutionError:
LOG.exception(_("/root/.mysql_secret does not exist."))
return
m = re.match('# The random password set for the root user at .*: (.*)',
out)
if m:
try:
out, err = utils.execute("mysqladmin", "-p%s" % m.group(1),
"password", "", run_as_root=True,
root_helper="sudo")
except exception.ProcessExecutionError:
LOG.exception(_("Cannot change mysql password."))
return
operating_system.remove(secret_file, force=True, as_root=True)
LOG.debug("Expired password removed.")
def get_auth_password():
pwd, err = utils.execute_with_timeout(
"sudo",
"awk",
"/password\\t=/{print $3; exit}",
MYSQL_CONFIG)
if err:
LOG.error(err)
raise RuntimeError("Problem reading my.cnf! : %s" % err)
return pwd.strip()
def get_engine():
"""Create the default engine with the updated admin user."""
# TODO(rnirmal):Based on permissions issues being resolved we may revert
# url = URL(drivername='mysql', host='localhost',
# query={'read_default_file': '/etc/mysql/my.cnf'})
global ENGINE
if ENGINE:
return ENGINE
pwd = get_auth_password()
ENGINE = sqlalchemy.create_engine("mysql://%s:%s@localhost:3306" %
(ADMIN_USER_NAME, pwd.strip()),
pool_recycle=7200,
echo=CONF.sql_query_logging,
listeners=[KeepAliveConnection()])
return ENGINE
def load_mysqld_options():
# find mysqld bin
for bin in MYSQL_BIN_CANDIDATES:
if os.path.isfile(bin):
mysqld_bin = bin
break
else:
return {}
try:
out, err = utils.execute(mysqld_bin, "--print-defaults",
run_as_root=True, root_helper="sudo")
arglist = re.split("\n", out)[1].split()
args = defaultdict(list)
for item in arglist:
if "=" in item:
key, value = item.split("=", 1)
args[key.lstrip("--")].append(value)
else:
args[item.lstrip("--")].append(None)
return args
except exception.ProcessExecutionError:
return {}
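# Hedged example: "--print-defaults" emits tokens like
# "--datadir=/var/lib/mysql"; load_mysqld_options folds them into a
# multimap, or returns {} when no mysqld binary is found. The fallback pid
# file below is the same default used elsewhere in this module.
def _example_mysqld_options():
    args = load_mysqld_options()
    return args.get('pid_file', ['/var/run/mysqld/mysqld.pid'])[0]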
def read_mycnf():
with open(MYSQL_CONFIG, 'r') as file:
config_contents = file.read()
return config_contents
def get_datadir(reset_cache=False):
"""Return the data directory currently used by Mysql."""
global DATADIR
if not reset_cache and DATADIR:
return DATADIR
mycnf_contents = read_mycnf()
# look for datadir parameter in my.cnf
mycnf = dict(configurations.MySQLConfParser(mycnf_contents).parse())
DATADIR = mycnf['datadir']
return DATADIR
class MySqlAppStatus(service.BaseDbStatus):
@classmethod
def get(cls):
if not cls._instance:
cls._instance = MySqlAppStatus()
return cls._instance
def _get_actual_db_status(self):
try:
out, err = utils.execute_with_timeout(
"/usr/bin/mysqladmin",
"ping", run_as_root=True, root_helper="sudo",
log_output_on_error=True)
LOG.info(_("MySQL Service Status is RUNNING."))
return rd_instance.ServiceStatuses.RUNNING
except exception.ProcessExecutionError:
LOG.exception(_("Failed to get database status."))
try:
out, err = utils.execute_with_timeout("/bin/ps", "-C",
"mysqld", "h")
pid = out.split()[0]
# TODO(rnirmal): Need to create new statuses for instances
# where the mysql service is up, but unresponsive
LOG.info(_('MySQL Service Status %(pid)s is BLOCKED.') %
{'pid': pid})
return rd_instance.ServiceStatuses.BLOCKED
except exception.ProcessExecutionError:
LOG.exception(_("Process execution failed."))
mysql_args = load_mysqld_options()
pid_file = mysql_args.get('pid_file',
['/var/run/mysqld/mysqld.pid'])[0]
if os.path.exists(pid_file):
LOG.info(_("MySQL Service Status is CRASHED."))
return rd_instance.ServiceStatuses.CRASHED
else:
LOG.info(_("MySQL Service Status is SHUTDOWN."))
return rd_instance.ServiceStatuses.SHUTDOWN
class LocalSqlClient(object):
"""A sqlalchemy wrapper to manage transactions."""
def __init__(self, engine, use_flush=True):
self.engine = engine
self.use_flush = use_flush
def __enter__(self):
self.conn = self.engine.connect()
self.trans = self.conn.begin()
return self.conn
def __exit__(self, type, value, traceback):
if self.trans:
if type is not None: # An error occurred
self.trans.rollback()
else:
if self.use_flush:
self.conn.execute(FLUSH)
self.trans.commit()
self.conn.close()
def execute(self, t, **kwargs):
try:
return self.conn.execute(t, kwargs)
except Exception:
self.trans.rollback()
self.trans = None
raise
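# Minimal usage sketch: LocalSqlClient commits (and issues the module-level
# FLUSH statement when use_flush is set) on clean exit, and rolls back if
# the block raises. The query is illustrative.
def _example_local_sql_client():
    with LocalSqlClient(get_engine()) as client:
        client.execute(text("SELECT 1"))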
class MySqlAdmin(object):
"""Handles administrative tasks on the MySQL database."""
def _associate_dbs(self, user):
"""Internal. Given a MySQLUser, populate its databases attribute."""
LOG.debug("Associating dbs to user %s at %s." %
(user.name, user.host))
with LocalSqlClient(get_engine()) as client:
q = sql_query.Query()
q.columns = ["grantee", "table_schema"]
q.tables = ["information_schema.SCHEMA_PRIVILEGES"]
q.group = ["grantee", "table_schema"]
q.where = ["privilege_type != 'USAGE'"]
t = text(str(q))
db_result = client.execute(t)
for db in db_result:
LOG.debug("\t db: %s." % db)
if db['grantee'] == "'%s'@'%s'" % (user.name, user.host):
mysql_db = models.MySQLDatabase()
mysql_db.name = db['table_schema']
user.databases.append(mysql_db.serialize())
def change_passwords(self, users):
"""Change the passwords of one or more existing users."""
LOG.debug("Changing the password of some users.")
with LocalSqlClient(get_engine()) as client:
for item in users:
LOG.debug("Changing password for user %s." % item)
user_dict = {'_name': item['name'],
'_host': item['host'],
'_password': item['password']}
user = models.MySQLUser()
user.deserialize(user_dict)
LOG.debug("\tDeserialized: %s." % user.__dict__)
uu = sql_query.UpdateUser(user.name, host=user.host,
clear=user.password)
t = text(str(uu))
client.execute(t)
def update_attributes(self, username, hostname, user_attrs):
"""Change the attributes of an existing user."""
LOG.debug("Changing user attributes for user %s." % username)
user = self._get_user(username, hostname)
db_access = set()
grantee = set()
with LocalSqlClient(get_engine()) as client:
q = sql_query.Query()
q.columns = ["grantee", "table_schema"]
q.tables = ["information_schema.SCHEMA_PRIVILEGES"]
q.group = ["grantee", "table_schema"]
q.where = ["privilege_type != 'USAGE'"]
t = text(str(q))
db_result = client.execute(t)
for db in db_result:
grantee.add(db['grantee'])
if db['grantee'] == "'%s'@'%s'" % (user.name, user.host):
db_name = db['table_schema']
db_access.add(db_name)
with LocalSqlClient(get_engine()) as client:
uu = sql_query.UpdateUser(user.name, host=user.host,
clear=user_attrs.get('password'),
new_user=user_attrs.get('name'),
new_host=user_attrs.get('host'))
t = text(str(uu))
client.execute(t)
uname = user_attrs.get('name') or username
host = user_attrs.get('host') or hostname
find_user = "'%s'@'%s'" % (uname, host)
if find_user not in grantee:
self.grant_access(uname, host, db_access)
def create_database(self, databases):
"""Create the list of specified databases."""
with LocalSqlClient(get_engine()) as client:
for item in databases:
mydb = models.ValidatedMySQLDatabase()
mydb.deserialize(item)
cd = sql_query.CreateDatabase(mydb.name,
mydb.character_set,
mydb.collate)
t = text(str(cd))
client.execute(t)
def create_user(self, users):
"""Create users and grant them privileges for the
specified databases.
"""
with LocalSqlClient(get_engine()) as client:
for item in users:
user = models.MySQLUser()
user.deserialize(item)
                # TODO(cp16net): Should users be allowed to create users
                # 'os_admin' or 'debian-sys-maint'?
g = sql_query.Grant(user=user.name, host=user.host,
clear=user.password)
t = text(str(g))
client.execute(t)
for database in user.databases:
mydb = models.ValidatedMySQLDatabase()
mydb.deserialize(database)
g = sql_query.Grant(permissions='ALL', database=mydb.name,
user=user.name, host=user.host,
clear=user.password)
t = text(str(g))
client.execute(t)
def delete_database(self, database):
"""Delete the specified database."""
with LocalSqlClient(get_engine()) as client:
mydb = models.ValidatedMySQLDatabase()
mydb.deserialize(database)
dd = sql_query.DropDatabase(mydb.name)
t = text(str(dd))
client.execute(t)
def delete_user(self, user):
"""Delete the specified user."""
mysql_user = models.MySQLUser()
mysql_user.deserialize(user)
self.delete_user_by_name(mysql_user.name, mysql_user.host)
def delete_user_by_name(self, name, host='%'):
with LocalSqlClient(get_engine()) as client:
du = sql_query.DropUser(name, host=host)
t = text(str(du))
LOG.debug("delete_user_by_name: %s", t)
client.execute(t)
def get_user(self, username, hostname):
user = self._get_user(username, hostname)
if not user:
return None
return user.serialize()
def _get_user(self, username, hostname):
"""Return a single user matching the criteria."""
user = models.MySQLUser()
try:
user.name = username # Could possibly throw a BadRequest here.
except ValueError as ve:
LOG.exception(_("Error Getting user information"))
raise exception.BadRequest(_("Username %(user)s is not valid"
": %(reason)s") %
{'user': username, 'reason': ve.message}
)
with LocalSqlClient(get_engine()) as client:
q = sql_query.Query()
q.columns = ['User', 'Host', 'Password']
q.tables = ['mysql.user']
q.where = ["Host != 'localhost'",
"User = '%s'" % username,
"Host = '%s'" % hostname]
q.order = ['User', 'Host']
t = text(str(q))
result = client.execute(t).fetchall()
LOG.debug("Getting user information %s." % result)
if len(result) != 1:
return None
found_user = result[0]
user.password = found_user['Password']
user.host = found_user['Host']
self._associate_dbs(user)
return user
def grant_access(self, username, hostname, databases):
"""Grant a user permission to use a given database."""
user = self._get_user(username, hostname)
mydb = models.ValidatedMySQLDatabase()
with LocalSqlClient(get_engine()) as client:
for database in databases:
try:
mydb.name = database
except ValueError:
LOG.exception(_("Error granting access"))
raise exception.BadRequest(_(
"Grant access to %s is not allowed") % database)
g = sql_query.Grant(permissions='ALL', database=mydb.name,
user=user.name, host=user.host,
hashed=user.password)
t = text(str(g))
client.execute(t)
def is_root_enabled(self):
"""Return True if root access is enabled; False otherwise."""
return MySqlRootAccess.is_root_enabled()
def enable_root(self, root_password=None):
"""Enable the root user global access and/or
reset the root password.
"""
return MySqlRootAccess.enable_root(root_password)
def list_databases(self, limit=None, marker=None, include_marker=False):
"""List databases the user created on this mysql instance."""
LOG.debug("---Listing Databases---")
ignored_database_names = "'%s'" % "', '".join(CONF.ignore_dbs)
LOG.debug("The following database names are on ignore list and will "
"be omitted from the listing: %s" % ignored_database_names)
databases = []
with LocalSqlClient(get_engine()) as client:
q = sql_query.Query()
q.columns = [
'schema_name as name',
'default_character_set_name as charset',
'default_collation_name as collation',
]
q.tables = ['information_schema.schemata']
q.where = ["schema_name NOT IN (" + ignored_database_names + ")"]
q.order = ['schema_name ASC']
if limit:
q.limit = limit + 1
if marker:
q.where.append("schema_name %s '%s'" %
(INCLUDE_MARKER_OPERATORS[include_marker],
marker))
t = text(str(q))
database_names = client.execute(t)
next_marker = None
LOG.debug("database_names = %r." % database_names)
for count, database in enumerate(database_names):
if count >= limit:
break
LOG.debug("database = %s." % str(database))
mysql_db = models.MySQLDatabase()
mysql_db.name = database[0]
next_marker = mysql_db.name
mysql_db.character_set = database[1]
mysql_db.collate = database[2]
databases.append(mysql_db.serialize())
LOG.debug("databases = " + str(databases))
if database_names.rowcount <= limit:
next_marker = None
return databases, next_marker
def list_users(self, limit=None, marker=None, include_marker=False):
"""List users that have access to the database."""
'''
SELECT
User,
Host,
Marker
FROM
(SELECT
User,
Host,
CONCAT(User, '@', Host) as Marker
FROM mysql.user
ORDER BY 1, 2) as innerquery
WHERE
Marker > :marker
ORDER BY
Marker
LIMIT :limit;
'''
LOG.debug("---Listing Users---")
users = []
with LocalSqlClient(get_engine()) as client:
mysql_user = models.MySQLUser()
iq = sql_query.Query() # Inner query.
iq.columns = ['User', 'Host', "CONCAT(User, '@', Host) as Marker"]
iq.tables = ['mysql.user']
iq.order = ['User', 'Host']
innerquery = str(iq).rstrip(';')
oq = sql_query.Query() # Outer query.
oq.columns = ['User', 'Host', 'Marker']
oq.tables = ['(%s) as innerquery' % innerquery]
oq.where = ["Host != 'localhost'"]
oq.order = ['Marker']
if marker:
oq.where.append("Marker %s '%s'" %
(INCLUDE_MARKER_OPERATORS[include_marker],
marker))
if limit:
oq.limit = limit + 1
t = text(str(oq))
result = client.execute(t)
next_marker = None
LOG.debug("result = " + str(result))
for count, row in enumerate(result):
if count >= limit:
break
LOG.debug("user = " + str(row))
mysql_user = models.MySQLUser()
mysql_user.name = row['User']
mysql_user.host = row['Host']
self._associate_dbs(mysql_user)
next_marker = row['Marker']
users.append(mysql_user.serialize())
if result.rowcount <= limit:
next_marker = None
LOG.debug("users = " + str(users))
return users, next_marker
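    # NOTE (comment added for clarity): the pagination marker for users is
    # the "User@Host" string produced by the inner query's CONCAT, matching
    # the raw SQL sketched in the string literal at the top of this method.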
def revoke_access(self, username, hostname, database):
"""Revoke a user's permission to use a given database."""
user = self._get_user(username, hostname)
with LocalSqlClient(get_engine()) as client:
r = sql_query.Revoke(database=database,
user=user.name,
host=user.host)
t = text(str(r))
client.execute(t)
def list_access(self, username, hostname):
"""Show all the databases to which the user has more than
USAGE granted.
"""
user = self._get_user(username, hostname)
return user.databases
class KeepAliveConnection(interfaces.PoolListener):
"""
A connection pool listener that ensures live connections are returned
from the connection pool at checkout. This alleviates the problem of
MySQL connections timing out.
"""
def checkout(self, dbapi_con, con_record, con_proxy):
"""Event triggered when a connection is checked out from the pool."""
try:
try:
dbapi_con.ping(False)
except TypeError:
dbapi_con.ping()
except dbapi_con.OperationalError as ex:
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
raise exc.DisconnectionError()
else:
raise
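# Illustrative wiring of KeepAliveConnection (a sketch; the connection URL
# is a placeholder, not taken from this module). With the legacy
# interfaces.PoolListener API used above, the listener is attached at
# engine creation time:
#
#     engine = sqlalchemy.create_engine(
#         "mysql://os_admin:password@localhost:3306",
#         listeners=[KeepAliveConnection()])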
class MySqlApp(object):
"""Prepares DBaaS on a Guest container."""
TIME_OUT = 1000
def __init__(self, status):
"""By default login with root no password for initial setup."""
self.state_change_wait_time = CONF.state_change_wait_time
self.status = status
def _create_admin_user(self, client, password):
"""
        Create an os_admin user with a random password
with all privileges similar to the root user.
"""
localhost = "localhost"
g = sql_query.Grant(permissions='ALL', user=ADMIN_USER_NAME,
host=localhost, grant_option=True, clear=password)
t = text(str(g))
client.execute(t)
@staticmethod
def _generate_root_password(client):
"""Generate and set a random root password and forget about it."""
localhost = "localhost"
uu = sql_query.UpdateUser("root", host=localhost,
clear=utils.generate_random_password())
t = text(str(uu))
client.execute(t)
def install_if_needed(self, packages):
"""Prepare the guest machine with a secure
mysql server installation.
"""
LOG.info(_("Preparing Guest as MySQL Server."))
if not packager.pkg_is_installed(packages):
LOG.debug("Installing MySQL server.")
self._clear_mysql_config()
# set blank password on pkg configuration stage
pkg_opts = {'root_password': '',
'root_password_again': ''}
packager.pkg_install(packages, pkg_opts, self.TIME_OUT)
self._create_mysql_confd_dir()
LOG.info(_("Finished installing MySQL server."))
self.start_mysql()
def complete_install_or_restart(self):
self.status.end_install_or_restart()
def secure(self, config_contents, overrides):
LOG.info(_("Generating admin password."))
admin_password = utils.generate_random_password()
clear_expired_password()
engine = sqlalchemy.create_engine("mysql://root:@localhost:3306",
echo=True)
with LocalSqlClient(engine) as client:
self._remove_anonymous_user(client)
self._create_admin_user(client, admin_password)
self.stop_db()
self._write_mycnf(admin_password, config_contents, overrides)
self.start_mysql()
LOG.debug("MySQL secure complete.")
def secure_root(self, secure_remote_root=True):
with LocalSqlClient(get_engine()) as client:
LOG.info(_("Preserving root access from restore."))
self._generate_root_password(client)
if secure_remote_root:
self._remove_remote_root_access(client)
def _clear_mysql_config(self):
"""Clear old configs, which can be incompatible with new version."""
LOG.debug("Clearing old MySQL config.")
random_uuid = str(uuid.uuid4())
configs = ["/etc/my.cnf", "/etc/mysql/conf.d", "/etc/mysql/my.cnf"]
for config in configs:
try:
old_conf_backup = "%s_%s" % (config, random_uuid)
operating_system.move(config, old_conf_backup, as_root=True)
LOG.debug("%s saved to %s_%s." %
(config, config, random_uuid))
except exception.ProcessExecutionError:
pass
def _create_mysql_confd_dir(self):
conf_dir = "/etc/mysql/conf.d"
LOG.debug("Creating %s." % conf_dir)
operating_system.create_directory(conf_dir, as_root=True)
def _enable_mysql_on_boot(self):
LOG.debug("Enabling MySQL on boot.")
try:
mysql_service = operating_system.service_discovery(
MYSQL_SERVICE_CANDIDATES)
utils.execute_with_timeout(mysql_service['cmd_enable'], shell=True)
except KeyError:
LOG.exception(_("Error enabling MySQL start on boot."))
raise RuntimeError("Service is not discovered.")
def _disable_mysql_on_boot(self):
try:
mysql_service = operating_system.service_discovery(
MYSQL_SERVICE_CANDIDATES)
utils.execute_with_timeout(mysql_service['cmd_disable'],
shell=True)
except KeyError:
LOG.exception(_("Error disabling MySQL start on boot."))
raise RuntimeError("Service is not discovered.")
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
LOG.info(_("Stopping MySQL."))
if do_not_start_on_reboot:
self._disable_mysql_on_boot()
try:
mysql_service = operating_system.service_discovery(
MYSQL_SERVICE_CANDIDATES)
utils.execute_with_timeout(mysql_service['cmd_stop'], shell=True)
except KeyError:
LOG.exception(_("Error stopping MySQL."))
raise RuntimeError("Service is not discovered.")
if not self.status.wait_for_real_status_to_change_to(
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db):
LOG.error(_("Could not stop MySQL."))
self.status.end_install_or_restart()
raise RuntimeError("Could not stop MySQL!")
def _remove_anonymous_user(self, client):
t = text(sql_query.REMOVE_ANON)
client.execute(t)
def _remove_remote_root_access(self, client):
t = text(sql_query.REMOVE_ROOT)
client.execute(t)
def restart(self):
try:
self.status.begin_restart()
self.stop_db()
self.start_mysql()
finally:
self.status.end_install_or_restart()
def update_overrides(self, override_values):
"""
This function will update the MySQL overrides.cnf file
if there is content to write.
:param override_values:
:return:
"""
if override_values:
LOG.debug("Writing new overrides.cnf config file.")
self._write_config_overrides(override_values)
def apply_overrides(self, overrides):
LOG.debug("Applying overrides to MySQL.")
with LocalSqlClient(get_engine()) as client:
LOG.debug("Updating override values in running MySQL.")
for k, v in overrides.iteritems():
q = sql_query.SetServerVariable(key=k, value=v)
t = text(str(q))
try:
client.execute(t)
except exc.OperationalError:
output = {'key': k, 'value': v}
LOG.exception(_("Unable to set %(key)s with value "
"%(value)s.") % output)
def make_read_only(self, read_only):
with LocalSqlClient(get_engine()) as client:
q = "set global read_only = %s" % read_only
client.execute(text(str(q)))
def _write_temp_mycnf_with_admin_account(self, original_file_path,
temp_file_path, password):
mycnf_file = open(original_file_path, 'r')
tmp_file = open(temp_file_path, 'w')
for line in mycnf_file:
tmp_file.write(line)
if "[client]" in line:
tmp_file.write("user\t\t= %s\n" % ADMIN_USER_NAME)
tmp_file.write("password\t= %s\n" % password)
mycnf_file.close()
tmp_file.close()
def wipe_ib_logfiles(self):
"""Destroys the iblogfiles.
If for some reason the selected log size in the conf changes from the
current size of the files MySQL will fail to start, so we delete the
files to be safe.
"""
LOG.info(_("Wiping ib_logfiles."))
for index in range(2):
try:
                # On restarts, sometimes these are wiped. So it can be a race
                # to have MySQL start up before it's restarted and these have
                # to be deleted. That's why it's ok if they aren't found and
                # that is why we use the "force" option to "remove".
operating_system.remove("%s/ib_logfile%d"
% (get_datadir(), index), force=True,
as_root=True)
except exception.ProcessExecutionError:
LOG.exception("Could not delete logfile.")
raise
def _write_mycnf(self, admin_password, config_contents, overrides=None):
"""
Install the set of mysql my.cnf templates.
        Write the os_admin user and password into the my.cnf
        file for direct login from localhost.
"""
LOG.info(_("Writing my.cnf templates."))
if admin_password is None:
admin_password = get_auth_password()
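        # (Comment added for clarity.) The template is first moved into
        # place, then re-read so the [client] section can be augmented with
        # the os_admin credentials, and moved into place a second time;
        # hence the two operating_system.move() calls below.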
try:
with open(TMP_MYCNF, 'w') as t:
t.write(config_contents)
operating_system.move(TMP_MYCNF, MYSQL_CONFIG, as_root=True)
self._write_temp_mycnf_with_admin_account(MYSQL_CONFIG,
TMP_MYCNF,
admin_password)
operating_system.move(TMP_MYCNF, MYSQL_CONFIG, as_root=True)
except Exception:
os.unlink(TMP_MYCNF)
raise
self.wipe_ib_logfiles()
# write configuration file overrides
if overrides:
self._write_config_overrides(overrides)
def _write_config_overrides(self, overrideValues):
LOG.info(_("Writing new temp overrides.cnf file."))
with open(MYCNF_OVERRIDES_TMP, 'w') as overrides:
overrides.write(overrideValues)
LOG.info(_("Moving overrides.cnf into correct location."))
operating_system.move(MYCNF_OVERRIDES_TMP, MYCNF_OVERRIDES,
as_root=True)
LOG.info(_("Setting permissions on overrides.cnf."))
operating_system.chmod(MYCNF_OVERRIDES, FileMode.SET_GRP_RW_OTH_R,
as_root=True)
def remove_overrides(self):
LOG.info(_("Removing overrides configuration file."))
if os.path.exists(MYCNF_OVERRIDES):
operating_system.remove(MYCNF_OVERRIDES, as_root=True)
def _write_replication_overrides(self, overrideValues, cnf_file):
LOG.info(_("Writing replication.cnf file."))
with open(MYCNF_REPLCONFIG_TMP, 'w') as overrides:
overrides.write(overrideValues)
LOG.debug("Moving temp replication.cnf into correct location.")
operating_system.move(MYCNF_REPLCONFIG_TMP, cnf_file, as_root=True)
LOG.debug("Setting permissions on replication.cnf.")
operating_system.chmod(cnf_file, FileMode.SET_GRP_RW_OTH_R,
as_root=True)
def _remove_replication_overrides(self, cnf_file):
LOG.info(_("Removing replication configuration file."))
if os.path.exists(cnf_file):
operating_system.remove(cnf_file, as_root=True)
def exists_replication_source_overrides(self):
return os.path.exists(MYCNF_REPLMASTER)
def write_replication_source_overrides(self, overrideValues):
self._write_replication_overrides(overrideValues, MYCNF_REPLMASTER)
def write_replication_replica_overrides(self, overrideValues):
self._write_replication_overrides(overrideValues, MYCNF_REPLSLAVE)
def remove_replication_source_overrides(self):
self._remove_replication_overrides(MYCNF_REPLMASTER)
def remove_replication_replica_overrides(self):
self._remove_replication_overrides(MYCNF_REPLSLAVE)
def grant_replication_privilege(self, replication_user):
LOG.info(_("Granting Replication Slave privilege."))
LOG.debug("grant_replication_privilege: %s" % replication_user)
with LocalSqlClient(get_engine()) as client:
g = sql_query.Grant(permissions=['REPLICATION SLAVE'],
user=replication_user['name'],
clear=replication_user['password'])
t = text(str(g))
client.execute(t)
def get_port(self):
with LocalSqlClient(get_engine()) as client:
result = client.execute('SELECT @@port').first()
return result[0]
def get_binlog_position(self):
with LocalSqlClient(get_engine()) as client:
result = client.execute('SHOW MASTER STATUS').first()
binlog_position = {
'log_file': result['File'],
'position': result['Position']
}
return binlog_position
def execute_on_client(self, sql_statement):
LOG.debug("Executing SQL: %s" % sql_statement)
with LocalSqlClient(get_engine()) as client:
return client.execute(sql_statement)
def start_slave(self):
LOG.info(_("Starting slave replication."))
with LocalSqlClient(get_engine()) as client:
client.execute('START SLAVE')
self._wait_for_slave_status("ON", client, 60)
def stop_slave(self, for_failover):
replication_user = None
LOG.info(_("Stopping slave replication."))
with LocalSqlClient(get_engine()) as client:
result = client.execute('SHOW SLAVE STATUS')
replication_user = result.first()['Master_User']
client.execute('STOP SLAVE')
client.execute('RESET SLAVE ALL')
self._wait_for_slave_status("OFF", client, 30)
if not for_failover:
client.execute('DROP USER ' + replication_user)
return {
'replication_user': replication_user
}
def stop_master(self):
LOG.info(_("Stopping replication master."))
with LocalSqlClient(get_engine()) as client:
client.execute('RESET MASTER')
def _wait_for_slave_status(self, status, client, max_time):
def verify_slave_status():
actual_status = client.execute(
"SHOW GLOBAL STATUS like 'slave_running'").first()[1]
return actual_status.upper() == status.upper()
LOG.debug("Waiting for SLAVE_RUNNING to change to %s.", status)
try:
utils.poll_until(verify_slave_status, sleep_time=3,
time_out=max_time)
LOG.info(_("Replication is now %s.") % status.lower())
except PollTimeOut:
raise RuntimeError(
_("Replication is not %(status)s after %(max)d seconds.") % {
'status': status.lower(), 'max': max_time})
def start_mysql(self, update_db=False):
LOG.info(_("Starting MySQL."))
        # This is the site of all the trouble in the restart tests.
        # Essentially what happens is that mysql start fails, but does not
        # die. It is then impossible to kill the original, so if the status
        # check below never sees RUNNING we kill the process by hand.
self._enable_mysql_on_boot()
try:
mysql_service = operating_system.service_discovery(
MYSQL_SERVICE_CANDIDATES)
utils.execute_with_timeout(mysql_service['cmd_start'], shell=True)
except KeyError:
raise RuntimeError("Service is not discovered.")
except exception.ProcessExecutionError:
            # It seems mysql (percona, at least) might come back with [Fail]
            # but actually come up ok. We're looking into the timing issue on
            # parallel, but for now, we'd like to give it one more chance to
            # come up. So regardless of the execute_with_timeout() response,
            # we'll assume mysql comes up and check its status for a while.
pass
if not self.status.wait_for_real_status_to_change_to(
rd_instance.ServiceStatuses.RUNNING,
self.state_change_wait_time, update_db):
LOG.error(_("Start up of MySQL failed."))
# If it won't start, but won't die either, kill it by hand so we
            # don't let a rogue process wander around.
try:
utils.execute_with_timeout("sudo", "pkill", "-9", "mysql")
except exception.ProcessExecutionError:
LOG.exception(_("Error killing stalled MySQL start command."))
# There's nothing more we can do...
self.status.end_install_or_restart()
raise RuntimeError("Could not start MySQL!")
def start_db_with_conf_changes(self, config_contents):
LOG.info(_("Starting MySQL with conf changes."))
LOG.debug("Inside the guest - Status is_running = (%s)."
% self.status.is_running)
if self.status.is_running:
LOG.error(_("Cannot execute start_db_with_conf_changes because "
"MySQL state == %s.") % self.status)
raise RuntimeError("MySQL not stopped.")
LOG.info(_("Resetting configuration."))
self._write_mycnf(None, config_contents)
self.start_mysql(True)
def reset_configuration(self, configuration):
config_contents = configuration['config_contents']
LOG.info(_("Resetting configuration."))
self._write_mycnf(None, config_contents)
    # DEPRECATED: Maintain for API compatibility.
def get_txn_count(self):
LOG.info(_("Retrieving latest txn id."))
txn_count = 0
with LocalSqlClient(get_engine()) as client:
result = client.execute('SELECT @@global.gtid_executed').first()
for uuid_set in result[0].split(','):
for interval in uuid_set.split(':')[1:]:
if '-' in interval:
iparts = interval.split('-')
txn_count += int(iparts[1]) - int(iparts[0])
else:
txn_count += 1
return txn_count
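    # Worked example (illustrative, made-up server UUIDs): a gtid_executed
    # value of "uuid1:1-5,uuid2:7" is split into two UUID sets; the "1-5"
    # interval adds 5 - 1 = 4 and the lone "7" adds 1, so txn_count == 5.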
def _get_slave_status(self):
with LocalSqlClient(get_engine()) as client:
return client.execute('SHOW SLAVE STATUS').first()
def _get_master_UUID(self):
slave_status = self._get_slave_status()
return slave_status and slave_status['Master_UUID'] or None
def _get_gtid_executed(self):
with LocalSqlClient(get_engine()) as client:
return client.execute('SELECT @@global.gtid_executed').first()[0]
def get_last_txn(self):
master_UUID = self._get_master_UUID()
last_txn_id = '0'
gtid_executed = self._get_gtid_executed()
for gtid_set in gtid_executed.split(','):
uuid_set = gtid_set.split(':')
if uuid_set[0] == master_UUID:
last_txn_id = uuid_set[-1].split('-')[-1]
break
return master_UUID, int(last_txn_id)
def get_latest_txn_id(self):
LOG.info(_("Retrieving latest txn id."))
return self._get_gtid_executed()
def wait_for_txn(self, txn):
LOG.info(_("Waiting on txn '%s'.") % txn)
with LocalSqlClient(get_engine()) as client:
client.execute("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')"
% txn)
class MySqlRootAccess(object):
@classmethod
def is_root_enabled(cls):
"""Return True if root access is enabled; False otherwise."""
with LocalSqlClient(get_engine()) as client:
t = text(sql_query.ROOT_ENABLED)
result = client.execute(t)
LOG.debug("Found %s with remote root access." % result.rowcount)
return result.rowcount != 0
@classmethod
def enable_root(cls, root_password=None):
"""Enable the root user global access and/or
reset the root password.
"""
user = models.RootUser()
user.name = "root"
user.host = "%"
user.password = root_password or utils.generate_random_password()
with LocalSqlClient(get_engine()) as client:
try:
cu = sql_query.CreateUser(user.name, host=user.host)
t = text(str(cu))
client.execute(t, **cu.keyArgs)
except exc.OperationalError as err:
# Ignore, user is already created, just reset the password
# TODO(rnirmal): More fine grained error checking later on
LOG.debug(err)
with LocalSqlClient(get_engine()) as client:
uu = sql_query.UpdateUser(user.name, host=user.host,
clear=user.password)
t = text(str(uu))
client.execute(t)
LOG.debug("CONF.root_grant: %s CONF.root_grant_option: %s." %
(CONF.root_grant, CONF.root_grant_option))
g = sql_query.Grant(permissions=CONF.root_grant,
user=user.name,
host=user.host,
grant_option=CONF.root_grant_option,
clear=user.password)
t = text(str(g))
client.execute(t)
return user.serialize()
|
cp16net/trove
|
trove/guestagent/datastore/mysql/service.py
|
Python
|
apache-2.0
| 43,814 | 0.000023 |
# The MIT License (MIT)
#
# Copyright (c) 2016 WUSTL ZPLAB
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors: Erik Hvatum <ice.rikh@gmail.com>
from PyQt5 import Qt
from ..shared_resources import UNIQUE_QGRAPHICSITEM_TYPE
class PointItem(Qt.QGraphicsRectItem):
    # Omitting .type() or failing to return a unique value causes PyQt to return a wrapper of the wrong type when retrieving an instance of this item as a base
# class pointer from C++. For example, if this item has a child and that child calls self.parentItem(), it would receive a Python object of type
# Qt.QGraphicsRectItem rather than PointItem unless PointItem has a correct .type() implementation.
QGRAPHICSITEM_TYPE = UNIQUE_QGRAPHICSITEM_TYPE()
def __init__(self, picker, x, y, w, h, parent_item):
super().__init__(x, y, w, h, parent_item)
self.picker = picker
flags = self.flags()
self.setFlags(
flags |
Qt.QGraphicsItem.ItemIsFocusable | # Necessary in order for item to receive keyboard events
Qt.QGraphicsItem.ItemIsSelectable |
Qt.QGraphicsItem.ItemIsMovable |
Qt.QGraphicsItem.ItemSendsGeometryChanges # Necessary in order for .itemChange to be called when item is moved
)
def itemChange(self, change, value):
if change == Qt.QGraphicsItem.ItemPositionHasChanged:
self.picker.point_item_position_has_changed.emit(self)
return super().itemChange(change, value)
def keyPressEvent(self, event):
if event.key() == Qt.Qt.Key_Delete and event.modifiers() == Qt.Qt.NoModifier:
self.picker.delete_selected()
def type(self):
return self.QGRAPHICSITEM_TYPE
# NB: deriving from Qt.QGraphicsObject is necessary in order to be a scene event filter target
class SimplePointPicker(Qt.QGraphicsObject):
"""ex:
from ris_widget.ris_widget import RisWidget
from ris_widget.examples.simple_point_picker import SimplePointPicker
rw = RisWidget()
simple_point_picker = SimplePointPicker(rw.main_view, rw.main_scene.layer_stack_item)"""
QGRAPHICSITEM_TYPE = UNIQUE_QGRAPHICSITEM_TYPE()
point_item_position_has_changed = Qt.pyqtSignal(PointItem)
point_item_list_content_reset = Qt.pyqtSignal()
def __init__(self, general_view, parent_item, points=None):
super().__init__(parent_item)
self.view = general_view
self.view.viewport_rect_item.size_changed.connect(self.on_viewport_size_changed)
self.point_items = []
self.pen = Qt.QPen(Qt.Qt.red)
self.pen.setWidth(2)
color = Qt.QColor(Qt.Qt.yellow)
color.setAlphaF(0.5)
self.brush = Qt.QBrush(color)
self.brush_selected = Qt.QBrush(Qt.QColor(255, 0, 255, 127))
parent_item.installSceneEventFilter(self)
if points:
for point in points:
self.make_and_store_point_item(Qt.QPointF(point[0], point[1]))
def boundingRect(self):
return Qt.QRectF()
def paint(self, QPainter, QStyleOptionGraphicsItem, QWidget_widget=None):
pass
def type(self):
return self.QGRAPHICSITEM_TYPE
def make_and_store_point_item(self, pos):
point_item = PointItem(self, -7, -7, 15, 15, self.parentItem())
point_item.setScale(1 / self.view.transform().m22())
point_item.setPen(self.pen)
point_item.setBrush(self.brush)
flags = point_item.flags()
point_item.setFlags(
flags |
Qt.QGraphicsItem.ItemIsFocusable | # Necessary in order for item to receive keyboard events
Qt.QGraphicsItem.ItemIsSelectable |
Qt.QGraphicsItem.ItemIsMovable |
Qt.QGraphicsItem.ItemSendsGeometryChanges
)
point_item.installSceneEventFilter(self)
self.point_items.append(point_item)
point_item.setPos(pos)
def delete_selected(self):
        for idx, item in reversed(list(enumerate(self.point_items))):
if item.isSelected():
self.scene().removeItem(item)
del self.point_items[idx]
self.point_item_list_content_reset.emit()
def sceneEventFilter(self, watched, event):
if watched is self.parentItem():
if event.type() == Qt.QEvent.GraphicsSceneMousePress and event.button() == Qt.Qt.RightButton:
self.make_and_store_point_item(event.pos())
return True
if event.type() == Qt.QEvent.KeyPress and event.key() == Qt.Qt.Key_Delete and event.modifiers() == Qt.Qt.NoModifier:
self.delete_selected()
return False
def on_viewport_size_changed(self):
scale = 1 / self.view.transform().m22()
for point_item in self.point_items:
point_item.setScale(scale)
def clear(self):
for point_item in self.point_items:
self.view.scene().removeItem(point_item)
self.point_items = []
self.point_item_list_content_reset.emit()
@property
def points(self):
return [(point_item.pos().x(), point_item.pos().y()) for point_item in self.point_items]
@points.setter
def points(self, points):
self.clear()
for point in points:
self.make_and_store_point_item(Qt.QPointF(point[0], point[1]))
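# Illustrative use of the points property (continuing the docstring example
# above; the coordinates are made up):
#     simple_point_picker.points = [(10.0, 20.0), (30.5, 4.25)]
#     print(simple_point_picker.points)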
|
erikhvatum/RisWidget
|
ris_widget/examples/simple_point_picker.py
|
Python
|
mit
| 6,323 | 0.003163 |
import abc
import re
from typing import Union as U, Iterator, Optional as O
from pymongo import ASCENDING, DESCENDING
from sqlparse import tokens, parse as sqlparse
from sqlparse.sql import Token, Identifier, Function, Comparison, Parenthesis, IdentifierList, Statement
from . import query as query_module
from ..exceptions import SQLDecodeError, NotSupportedError
all_token_types = U['SQLConstIdentifier',
'djongo.sql2mongo.functions.CountFunc',
'djongo.sql2mongo.functions.SimpleFunc',
'SQLIdentifier',
'SQLComparison',
'SQLPlaceholder']
class SQLToken:
@abc.abstractmethod
def __init__(self,
token: Token,
query: 'query_module.BaseQuery'):
self._token = token
self.query = query
def __repr__(self):
return f'{self._token}'
@staticmethod
def tokens2sql(token: Token,
query: 'query_module.BaseQuery'
) -> Iterator[all_token_types]:
from .functions import SQLFunc
if isinstance(token, Identifier):
# Bug fix for sql parse
if isinstance(token[0], Parenthesis):
try:
int(token[0][1].value)
except ValueError:
yield SQLIdentifier(token[0][1], query)
else:
yield SQLConstIdentifier(token, query)
elif isinstance(token[0], Function):
yield SQLFunc.token2sql(token, query)
else:
yield SQLIdentifier(token, query)
elif isinstance(token, Function):
yield SQLFunc.token2sql(token, query)
elif isinstance(token, Comparison):
yield SQLComparison(token, query)
elif isinstance(token, IdentifierList):
for tok in token.get_identifiers():
yield from SQLToken.tokens2sql(tok, query)
elif isinstance(token, Parenthesis):
yield SQLPlaceholder(token, query)
else:
raise SQLDecodeError(f'Unsupported: {token.value}')
@staticmethod
def token2sql(token: Token,
query: 'query_module.BaseQuery'
) -> all_token_types:
return next(SQLToken.tokens2sql(token, query))
@staticmethod
def placeholder_index(token) -> int:
return int(re.match(r'%\(([0-9]+)\)s', token.value, flags=re.IGNORECASE).group(1))
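    # Example (illustrative): placeholder_index on a token whose value is
    # "%(3)s" returns the integer 3.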
class AliasableToken(SQLToken):
@abc.abstractmethod
def __init__(self, *args):
super().__init__(*args)
self.token_alias: 'query_module.TokenAlias' = self.query.token_alias
if self.alias:
self.token_alias.alias2token[self.alias] = self
self.token_alias.token2alias[self] = self.alias
if self.is_explicit_alias():
self.token_alias.aliased_names.add(self.alias)
def __hash__(self):
if self.is_explicit_alias():
return hash(self._token[0].value)
return hash(self._token.value)
def __eq__(self, other):
return hash(self) == hash(other)
def is_explicit_alias(self):
return len(self._token.tokens) == 5 and self._token[2].match(tokens.Keyword, 'AS')
@property
def alias(self) -> str:
        # Bug fix for sqlparse
if not self._token.get_ordering():
return self._token.get_alias()
class SQLIdentifier(AliasableToken):
def __init__(self, *args):
super().__init__(*args)
self._ord = None
if self._token.get_ordering():
# Bug fix for sql parse
self._ord = self._token.get_ordering()
self._token = self._token[0]
@property
def order(self):
if self._ord is None:
raise SQLDecodeError
return ORDER_BY_MAP[self._ord]
@property
def field(self) -> str:
if self.given_table in self.query.token_alias.aliased_names:
return self.given_table
if self.table == self.query.left_table:
return self.column
else:
return f'{self.table}.{self.column}'
@property
def table(self) -> str:
name = self.given_table
alias2token = self.token_alias.alias2token
try:
return alias2token[name].table
except KeyError:
return name
@property
def given_table(self) -> str:
name = self._token.get_parent_name()
if name is None:
name = self._token.get_real_name()
if name is None:
raise SQLDecodeError
return name
@property
def column(self) -> str:
name = self._token.get_real_name()
if name is None:
raise SQLDecodeError
return name
class SQLConstIdentifier(AliasableToken):
def __init__(self, *args):
super().__init__(*args)
@property
def value(self) -> int:
return int(self._token[0][1].value)
def to_mongo(self) -> dict:
return {'$literal': self.value}
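    # Example (illustrative): for SQL such as SELECT (1) AS total, the
    # parenthesised constant maps to the aggregation expression
    # {'$literal': 1}.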
class SQLComparison(SQLToken):
@property
def left_table(self):
lhs = SQLIdentifier(self._token.left, self.query)
return lhs.table
@property
def left_column(self):
lhs = SQLIdentifier(self._token.left, self.query)
return lhs.column
@property
def right_table(self):
rhs = SQLIdentifier(self._token.right, self.query)
return rhs.table
@property
def right_column(self):
rhs = SQLIdentifier(self._token.right, self.query)
return rhs.column
@property
def rhs_indexes(self):
if not self._token.right.ttype == tokens.Name.Placeholder:
if self._token.right.match(tokens.Keyword, 'NULL'):
return None
raise SQLDecodeError
index = self.placeholder_index(self._token.right)
return index
class SQLPlaceholder(SQLToken):
def __iter__(self):
tok = self._token[1:-1][0]
if isinstance(tok, IdentifierList):
for aid in tok.get_identifiers():
yield self.get_value(aid)
else:
yield self.get_value(tok)
def __init__(self, token: Token, query: 'query_module.BaseQuery'):
super().__init__(token, query)
def get_value(self, tok: Token):
if tok.ttype == tokens.Name.Placeholder:
return self.placeholder_index(tok)
elif tok.match(tokens.Keyword, 'NULL'):
return None
elif tok.match(tokens.Keyword, 'DEFAULT'):
return 'DEFAULT'
else:
raise SQLDecodeError
class SQLStatement:
@property
def current_token(self) -> Token:
return self._statement[self._tok_id]
def __init__(self, statement: U[Statement, Token]):
self._statement = statement
self._tok_id = 0
self._gen_inst = self._generator()
def __getattr__(self, item):
return getattr(self._statement, item)
def __iter__(self) -> Token:
yield from self._gen_inst
def __repr__(self):
return str(self._statement)
def __getitem__(self, item: slice):
start = (item.start or 0) + self._tok_id
stop = item.stop and self._tok_id + item.stop
sql = ''.join(str(tok) for tok in self._statement[start:stop])
sql = sqlparse(sql)[0]
return SQLStatement(sql)
def next(self) -> O[Token]:
# self._tok_id, token = self._statement.token_next(self._tok_id)
try:
return next(self._gen_inst)
except StopIteration:
return None
def skip(self, num):
self._tok_id += num
@property
def prev_token(self) -> Token:
return self._statement.token_prev(self._tok_id)[1]
@property
def next_token(self) -> Token:
return self._statement.token_next(self._tok_id)[1]
def _generator(self):
token = self._statement[self._tok_id]
while self._tok_id is not None:
yield token
self._tok_id, token = self._statement.token_next(self._tok_id)
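# Illustrative iteration over an SQLStatement (a sketch; the SQL string is
# made up, and sqlparse here is the parse() alias imported at the top of
# this module):
#     stmt = SQLStatement(sqlparse('SELECT "a" FROM "t"')[0])
#     for tok in stmt:
#         print(repr(tok))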
class SQLColumnDef:
not_null = object()
unique = object()
autoincrement = object()
primarykey = object()
null = object()
_map = {
'UNIQUE': unique,
'AUTOINCREMENT': autoincrement,
'PRIMARY KEY': primarykey,
'NOT NULL': not_null,
'NULL': null
}
supported_data_types = None
def __init__(self,
name: str = None,
data_type: str = None,
col_constraints: set = None):
self.name = name
self.data_type = data_type
self.col_constraints = col_constraints
@staticmethod
def _get_constraints(others: str):
while others:
try:
name, others = others.split(' ', 1)
except ValueError:
name = others
others = None
try:
yield SQLColumnDef._map[name]
except KeyError:
if others:
try:
part2, others = others.split(' ', 1)
except ValueError:
part2 = others
others = None
name = f'{name} {part2}'
try:
yield SQLColumnDef._map[name]
except KeyError:
raise SQLDecodeError(f'Unknown column constraint: {name}')
else:
raise SQLDecodeError(f'Unknown column constraint: {name}')
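    # Worked example (illustrative): _get_constraints('NOT NULL UNIQUE')
    # fails to match 'NOT' alone, retries with 'NOT NULL' and yields
    # SQLColumnDef.not_null, then yields SQLColumnDef.unique.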
@staticmethod
def sql2col_defs(sql: str):
sql = sql[1:-1]
while sql:
if sql[0] == '"':
try:
def_str, sql = sql.split(',', 1)
except ValueError:
def_str = sql
sql = None
else:
sql = sql.strip()
yield SQLColumnDef.def_str2col_def(def_str)
elif sql.startswith('CONSTRAINT'):
# Temporary hack
indexes = [None]
i = 0
for i, c in enumerate(sql):
if c == '(':
if indexes[0] is None:
indexes.pop()
indexes.append(i)
elif c == ')':
indexes.pop()
if not indexes:
break
if len(sql[i:]) > 1:
sql = sql[i+3:]
else:
sql = None
yield SQLColumnConstraint()
else:
raise SQLDecodeError(f'Syntax Error: {sql}')
@classmethod
def def_str2col_def(cls, def_str: str):
if not cls.supported_data_types:
from djongo.base import DatabaseWrapper
cls.supported_data_types = set(DatabaseWrapper.data_types.values())
name, other = def_str[1:].split('"', 1)
other = other.strip()
data_type, constraint_sql = other.split(' ', 1)
if data_type not in cls.supported_data_types:
raise NotSupportedError(f'Data of type: {data_type}')
col_constraints = set(SQLColumnDef._get_constraints(constraint_sql))
return SQLColumnDef(name=name,
data_type=data_type,
col_constraints=col_constraints)
@classmethod
def statement2col_defs(cls, token: Token):
from djongo.base import DatabaseWrapper
supported_data_types = set(DatabaseWrapper.data_types.values())
defs = token.value.strip('()').split(',')
for col in defs:
col = col.strip()
name, other = col.split(' ', 1)
if name == 'CONSTRAINT':
yield SQLColumnConstraint()
else:
if col[0] != '"':
raise SQLDecodeError('Column identifier not quoted')
name, other = col[1:].split('"', 1)
other = other.strip()
data_type, constraint_sql = other.split(' ', 1)
if data_type not in supported_data_types:
raise NotSupportedError(f'Data of type: {data_type}')
col_constraints = set(SQLColumnDef._get_constraints(constraint_sql))
yield SQLColumnDef(name=name,
data_type=data_type,
col_constraints=col_constraints)
class SQLColumnConstraint(SQLColumnDef):
pass
ORDER_BY_MAP = {
'ASC': ASCENDING,
'DESC': DESCENDING
}
|
nesdis/djongo
|
djongo/sql2mongo/sql_tokens.py
|
Python
|
agpl-3.0
| 12,682 | 0.000552 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import argparse
import kudu
from kudu.client import Partitioning
# Parse arguments
parser = argparse.ArgumentParser(description='Basic Example for Kudu Python.')
parser.add_argument('--masters', '-m', nargs='+', default='localhost',
help='The master address(es) to connect to Kudu.')
parser.add_argument('--ports', '-p', nargs='+', default='7051',
help='The master server port(s) to connect to Kudu.')
args = parser.parse_args()
# Connect to Kudu master server(s).
client = kudu.connect(host=args.masters, port=args.ports)
# Define a schema for a new table.
builder = kudu.schema_builder()
builder.add_column('key').type(kudu.int64).nullable(False).primary_key()
builder.add_column('ts_val', type_=kudu.unixtime_micros, nullable=False, compression='lz4')
schema = builder.build()
# Define the partitioning schema.
partitioning = Partitioning().add_hash_partitions(column_names=['key'], num_buckets=3)
# Delete table if it already exists.
if client.table_exists('python-example'):
client.delete_table('python-example')
# Create a new table.
client.create_table('python-example', schema, partitioning)
# Open a table.
table = client.table('python-example')
# Create a new session so that we can apply write operations.
session = client.new_session()
# Insert a row.
op = table.new_insert({'key': 1, 'ts_val': datetime.utcnow()})
session.apply(op)
# Upsert a row.
op = table.new_upsert({'key': 2, 'ts_val': "2016-01-01T00:00:00.000000"})
session.apply(op)
# Update a row.
op = table.new_update({'key': 1, 'ts_val': ("2017-01-01", "%Y-%m-%d")})
session.apply(op)
# Delete a row.
op = table.new_delete({'key': 2})
session.apply(op)
# Flush write operations, if failures occur, print them.
try:
session.flush()
except kudu.KuduBadStatus:
print(session.get_pending_errors())
# Create a scanner and add a predicate.
scanner = table.scanner()
scanner.add_predicate(table['ts_val'] == datetime(2017, 1, 1))
# Open scanner and print all tuples.
# Note: This doesn't scale for large scans
# The expected output: [(1, datetime.datetime(2017, 1, 1, 0, 0, tzinfo=<UTC>))]
print(scanner.open().read_all_tuples())
|
helifu/kudu
|
examples/python/basic-python-example/basic_example.py
|
Python
|
apache-2.0
| 2,977 | 0.00168 |
import os
import json
from ss_rule_scheme import update_outbound_rules, init_inbound_rules, init_outbound_rules, msg_clear_all_outbound, ss_process_policy_change
base_path = os.path.abspath(os.path.join(os.path.realpath(__file__),
".."))
test_file = os.path.join(base_path, "blackholing_test.py")
with open(test_file, 'r') as f:
data = json.load(f)
inbound_policies = []
outbound_policies = []
for element in data['policy']:
if 'inbound' in element:
inbound_policies = element
if 'outbound' in element:
outbound_policies = element
#print inbound_policies
final_switch = "main-in"
rule_msgs = init_inbound_rules(1, inbound_policies,[],final_switch)
print "Rule Messages to be removed INBOUND:: "+str(rule_msgs)
#rule_msgs2 = init_outbound_rules(1, outbound_policies, [], final_switch)
#print ("Rule Messages OUTBOUND:: "+str(rule_msgs2))
#if 'changes' in rule_msgs2:
# if 'changes' not in rule_msgs:
# rule_msgs['changes'] = []
# rule_msgs['changes'] += rule_msgs2['changes']
#TODO: Initialize Outbound Policies from RIB
print ("Rule Messages:: "+str(rule_msgs))
for rule in rule_msgs['changes']:
rule['mod_type'] = "remove"
print ("XRS_Test: Rule Msgs: %s" % rule_msgs)
|
h2020-endeavour/iSDX
|
pctrl/policy_loader.py
|
Python
|
apache-2.0
| 1,264 | 0.015032 |
##############################################################################
#
# Copyright (C) 2018-2020 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from . import reports
|
CompassionCH/compassion-accounting
|
donation_report_compassion/__init__.py
|
Python
|
agpl-3.0
| 409 | 0 |
# -*- coding: utf-8 -*-
""" Validators for wx widgets.
Copyright (c) Karol Będkowski, 2006-2013
This file is part of wxGTD
This is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, version 2.
"""
__author__ = "Karol Będkowski"
__copyright__ = "Copyright (c) Karol Będkowski, 2006-2013"
__version__ = '2013-04-21'
__all__ = ['ValidatorDv', 'Validator', 'ValidatorDate', 'ValidatorTime',
'ValidatorColorStr']
from .validator import Validator, ValidatorDv, ValidatorDate, ValidatorTime, \
ValidatorColorStr
|
KarolBedkowski/wxgtd
|
wxgtd/wxtools/validators/__init__.py
|
Python
|
gpl-2.0
| 638 | 0.004724 |
#!/usr/bin/env python
import scipy.sparse as sps
from apgl.graph.GeneralVertexList import GeneralVertexList
from apgl.graph.SparseGraph import SparseGraph
numVertices = 10
vList = GeneralVertexList(numVertices)
Wght = sps.lil_matrix((numVertices, numVertices))
graph = SparseGraph(vList, W=Wght, undirected=False)
# Add some edges to the graph.
# Vertices are indexed starting from 0.
graph[0, 1] = 1
graph[0, 2] = 1
# Set labels on vertices 0 and 1.
graph.setVertex(0, "abc")
graph.setVertex(1, 123)
print(graph.inDegreeDistribution())
|
showa-yojyo/notebook
|
source/_sample/apgl/distribution.py
|
Python
|
mit
| 553 | 0 |
import numpy as np
from scipy.stats import skew, kurtosis, shapiro, pearsonr, ansari, mood, levene, fligner, bartlett, mannwhitneyu
from scipy.spatial.distance import braycurtis, canberra, chebyshev, cityblock, correlation, cosine, euclidean, hamming, jaccard, kulsinski, matching, russellrao, sqeuclidean
from sklearn.preprocessing import LabelBinarizer
from sklearn.linear_model import Ridge, LinearRegression, LogisticRegression
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score, accuracy_score, roc_auc_score, average_precision_score, f1_score, hinge_loss, matthews_corrcoef, precision_score, recall_score, zero_one_loss
from sklearn.metrics.cluster import adjusted_mutual_info_score, adjusted_rand_score, completeness_score, homogeneity_completeness_v_measure, homogeneity_score, mutual_info_score, normalized_mutual_info_score, v_measure_score
from boomlet.utils.aggregators import to_aggregator
from boomlet.metrics import max_error, error_variance, relative_error_variance, gini_loss, categorical_gini_loss
from boomlet.transform.type_conversion import Discretizer
from autocause.feature_functions import *
from autocause.converters import NUMERICAL_TO_NUMERICAL, NUMERICAL_TO_CATEGORICAL, BINARY_TO_NUMERICAL, BINARY_TO_CATEGORICAL, CATEGORICAL_TO_NUMERICAL, CATEGORICAL_TO_CATEGORICAL
"""
Functions used to combine a list of features into one coherent one.
Sample use:
1. to convert categorical to numerical, we perform a one hot encoding
2. treat each binary column as a separate numerical feature
3. compute numerical features as usual
4. use each of the following functions to create a new feature
(with the input as the nth feature for each of the columns)
WARNING: these will be used in various locations throughout the code base
and will result in feature size growing at faster than a linear rate
"""
AGGREGATORS = [
to_aggregator("max"),
to_aggregator("min"),
to_aggregator("median"),
to_aggregator("mode"),
to_aggregator("mean"),
to_aggregator("sum"),
]
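# Illustrative effect of the scheme described above (assuming boomlet's
# to_aggregator returns a callable that reduces a list of per-column
# values): a categorical column one-hot encoded into 3 binary columns
# yields 3 values per feature function, and each aggregator collapses
# those back into a single number (max, min, median, ...).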
"""
Boolean flags specifying whether or not to perform conversions
"""
CONVERT_TO_NUMERICAL = True
CONVERT_TO_CATEGORICAL = True
"""
Functions that compute a metric on a single 1-D array
"""
UNARY_NUMERICAL_FEATURES = [
normalized_entropy,
skew,
kurtosis,
np.std,
shapiro,
]
UNARY_CATEGORICAL_FEATURES = [
lambda x: len(set(x)), # number of unique
]
"""
Functions that compute a metric on two 1-D arrays
"""
BINARY_NN_FEATURES = [
independent_component,
chi_square,
pearsonr,
correlation_magnitude,
braycurtis,
canberra,
chebyshev,
cityblock,
correlation,
cosine,
euclidean,
hamming,
sqeuclidean,
ansari,
mood,
levene,
fligner,
bartlett,
mannwhitneyu,
]
BINARY_NC_FEATURES = [
]
BINARY_CN_FEATURES = [
categorical_numerical_homogeneity,
bucket_variance,
anova,
]
BINARY_CC_FEATURES = [
categorical_categorical_homogeneity,
anova,
dice_,
jaccard,
kulsinski,
matching,
rogerstanimoto_,
russellrao,
sokalmichener_,
sokalsneath_,
yule_,
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
v_measure_score,
]
"""
Dictionaries of input type (e.g. B corresponds to pairs where binary
data is the input) to pairs of converter functions and a boolean flag
of whether or not to aggregate over the output of the converter function
converter functions should have the type signature:
converter(X_raw, X_current_type, Y_raw, Y_type)
where X_raw is the data to convert
"""
NUMERICAL_CONVERTERS = dict(
N=NUMERICAL_TO_NUMERICAL["identity"],
B=BINARY_TO_NUMERICAL["identity"],
C=CATEGORICAL_TO_NUMERICAL["binarize"],
)
CATEGORICAL_CONVERTERS = dict(
N=NUMERICAL_TO_CATEGORICAL["discretizer10"],
B=BINARY_TO_CATEGORICAL["identity"],
C=CATEGORICAL_TO_CATEGORICAL["identity"],
)
"""
Whether or not the converters can result in a 2D output. This must be set to True
if any of the respective converts can return a 2D output.
"""
NUMERICAL_CAN_BE_2D = True
CATEGORICAL_CAN_BE_2D = False
"""
Estimators used to provide a fit for a variable
"""
REGRESSION_ESTIMATORS = [
Ridge(),
LinearRegression(),
DecisionTreeRegressor(random_state=0),
RandomForestRegressor(random_state=0),
GradientBoostingRegressor(subsample=0.5, n_estimators=10, random_state=0),
KNeighborsRegressor(),
]
CLASSIFICATION_ESTIMATORS = [
LogisticRegression(random_state=0),
DecisionTreeClassifier(random_state=0),
RandomForestClassifier(random_state=0),
GradientBoostingClassifier(subsample=0.5, n_estimators=10, random_state=0),
KNeighborsClassifier(),
GaussianNB(),
]
"""
Functions to provide a value of how good a fit on a variable is
"""
REGRESSION_METRICS = [
explained_variance_score,
mean_absolute_error,
mean_squared_error,
r2_score,
max_error,
error_variance,
relative_error_variance,
gini_loss,
] + BINARY_NN_FEATURES
REGRESSION_RESIDUAL_METRICS = [
] + UNARY_NUMERICAL_FEATURES
BINARY_PROBABILITY_CLASSIFICATION_METRICS = [
roc_auc_score,
hinge_loss,
] + REGRESSION_METRICS
RESIDUAL_PROBABILITY_CLASSIFICATION_METRICS = [
] + REGRESSION_RESIDUAL_METRICS
BINARY_CLASSIFICATION_METRICS = [
accuracy_score,
average_precision_score,
f1_score,
matthews_corrcoef,
precision_score,
recall_score,
zero_one_loss,
categorical_gini_loss,
]
ND_CLASSIFICATION_METRICS = [ # metrics for N-dimensional classification
] + BINARY_CC_FEATURES
"""
Functions to assess the model (e.g. complexity) of the fit on a numerical variable
of type signature:
metric(clf, X, y)
"""
REGRESSION_MODEL_METRICS = [
# TODO model complexity metrics
]
CLASSIFICATION_MODEL_METRICS = [
# TODO use regression model metrics on predict_proba
]
"""
The operations to perform on the A->B features and B->A features.
"""
RELATIVE_FEATURES = [
# Identity functions, comment out the next 2 lines for only relative features
lambda x, y: x,
lambda x, y: y,
lambda x, y: x - y,
]
"""
Whether or not to treat each observation (A,B) as two observations: (A,B) and (B,A)
If this is done and training labels are given, those labels will have to be
reflected as well. The reflection is performed through appending at the end.
(e.g. if we have N training examples, observation N+1 in the output will be
the first example reflected)
"""
REFLECT_DATA = False
"""
Whether or not metafeatures based on the types of A and B are generated.
e.g. 1/0 feature on whether or not A is Numerical, etc.
"""
ADD_METAFEATURES = True
"""
Whether or not to generate combination features between the computed
features and metafeatures.
e.g. for each feature and metafeature, generate a new feature which is the
product of the two
WARNING: will generate a LOT of features (approximately 21 times as many)
"""
COMPUTE_METAFEATURE_COMBINATIONS = False
|
diogo149/autocause
|
autocause/autocause_settings.py
|
Python
|
mit
| 7,414 | 0.001619 |
# Copyright (C) 2014-2015 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014-2015 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2015 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import transaction
from django.db import connection
@transaction.atomic
def bulk_update_userstory_custom_attribute_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update custom_attributes_userstorycustomattribute set "order" = $1
where custom_attributes_userstorycustomattribute.id = $2 and
custom_attributes_userstorycustomattribute.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
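# Illustrative call (arguments are placeholders): passing
# data=[(12, 1), (15, 2)] executes the prepared UPDATE once per
# (id, order) pair for the given project, all within one transaction.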
@transaction.atomic
def bulk_update_task_custom_attribute_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update custom_attributes_taskcustomattribute set "order" = $1
where custom_attributes_taskcustomattribute.id = $2 and
custom_attributes_taskcustomattribute.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_issue_custom_attribute_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update custom_attributes_issuecustomattribute set "order" = $1
where custom_attributes_issuecustomattribute.id = $2 and
custom_attributes_issuecustomattribute.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
|
bdang2012/taiga-back-casting
|
taiga/projects/custom_attributes/services.py
|
Python
|
agpl-3.0
| 2,749 | 0.001092 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import os
import logging
import tempfile
from flexget import plugin
from flexget.event import event
log = logging.getLogger('check_subtitles')
class MetainfoSubs(object):
"""
Set 'subtitles' field for entries, if they are local video files with subs.
The field is a list of language codes (3-letter ISO-639-3) for each subtitles
file found on disk and/or subs track found inside video (for MKVs).
Special "und" code is for unidentified language (i.e. files without language
code before extension).
"""
schema = {'type': 'boolean'}
def on_task_start(self, task, config):
try:
import subliminal
except ImportError as e:
log.debug('Error importing Subliminal: %s' % e)
raise plugin.DependencyError('subliminal', 'subliminal',
'Subliminal module required. ImportError: %s' % e)
from subliminal.cli import MutexLock
from dogpile.cache.exception import RegionAlreadyConfigured
try:
subliminal.region.configure('dogpile.cache.dbm',
arguments={'filename': os.path.join(tempfile.gettempdir(), 'cachefile.dbm'),
'lock_factory': MutexLock})
except RegionAlreadyConfigured:
pass
logging.getLogger("subliminal").setLevel(logging.CRITICAL)
logging.getLogger("enzyme").setLevel(logging.WARNING)
def on_task_metainfo(self, task, config):
# check if explicitly disabled (value set to false)
if config is False:
return
for entry in task.entries:
entry.register_lazy_func(self.get_subtitles, ['subtitles'])
def get_subtitles(self, entry):
if entry.get('subtitles', eval_lazy=False) or not ('location' in entry) or \
('$RECYCLE.BIN' in entry['location']) or not os.path.exists(entry['location']):
return
from subliminal.core import search_external_subtitles
try:
subtitles = list(search_external_subtitles(entry['location']).values())
if subtitles:
entry['subtitles'] = subtitles
log.debug('Found subtitles %s for %s', '/'.join(subtitles), entry['title'])
except Exception as e:
log.debug('Error checking local subtitles for %s: %s' % (entry['title'], e))
@event('plugin.register')
def register_plugin():
plugin.register(MetainfoSubs, 'check_subtitles', api_ver=2)
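# Illustrative task configuration enabling this plugin (YAML; the task name
# is made up):
#     tasks:
#       my-task:
#         check_subtitles: yes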
|
oxc/Flexget
|
flexget/plugins/metainfo/subtitles_check.py
|
Python
|
mit
| 2,678 | 0.003361 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
# This is a simple test variable for the interaction of gridcells and households.
from opus_core.variables.variable import Variable
from urbansim.functions import attribute_label
class hhnper2_nbnper2(Variable):
"""Test variable for the interaction of neighborhoods and households.
Computes household.poor * neighborhood.poor."""
def dependencies(self):
return [attribute_label("neighborhood", "nper2_m"),
attribute_label("household", "nper2")]
def compute(self, dataset_pool):
return self.get_dataset().multiply("nper2", "nper2_m")
#if __name__=='__main__':
#from opus_core.tests import opus_unittest
#from urbansim.variable_test_toolbox import VariableTestToolbox
#from numpy import array
#from numpy import ma
#class Tests(opus_unittest.OpusTestCase):
#variable_name = "urbansim.household_x_neighborhood.hhrich_nbpoor"
#def test_full_tree(self):
#dept = array([10, 20, 30])
#prev_dept = array([10, 4, 20, 30])
#values = VariableTestToolbox().compute_variable(self.variable_name,
#{"neighborhood":{
#"dept":dept},
#"household":{
#"prev_dept":prev_dept}},
#dataset = "household_x_neighborhood")
#should_be = array([[1, 0, 0],
#[0, 0, 0],
#[0, 1, 0],
#[0, 0, 1]])
#self.assertEqual(ma.allclose(values, should_be, rtol=1e-20),
#True, msg = "Error in " + self.variable_name)
#opus_unittest.main()
|
christianurich/VIBe2UrbanSim
|
3rdparty/opus/src/paris/household_x_neighborhood/hhnper2_nbnper2.py
|
Python
|
gpl-2.0
| 1,879 | 0.031932 |
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
@njit
def series_lt():
s1 = pd.Series([5, 4, 3, 2, 1])
s2 = pd.Series([0, 2, 3, 6, 8])
return s1.lt(s2) # Expect series of False, False, False, True, True
print(series_lt())
|
IntelLabs/hpat
|
examples/series/series_lt.py
|
Python
|
bsd-2-clause
| 1,748 | 0 |
#!/usr/bin/env python
'''
##BOILERPLATE_COPYRIGHT
##BOILERPLATE_COPYRIGHT_END
'''
import unittest, copy
from testRoot import RootClass
from noink.user_db import UserDB
from noink.entry_db import EntryDB
class AddEntry(RootClass):
def test_AddEntry(self):
userDB = UserDB()
entryDB = EntryDB()
u = userDB.add("jontest", "pass", "Jon Q. Testuser")
title = 'Little Buttercup'
entry = 'There once was a man from Nantucket,' + \
'who kept his wife in a Bucket.' + \
"Wait... how'd she fit in that bucket anyway?"
e = entryDB.add(copy.deepcopy(title), entry, u)
self.assertTrue(e.title == title)
if __name__ == '__main__':
unittest.main()
|
criswell/noink
|
src/tests/test_DelEntry.py
|
Python
|
agpl-3.0
| 740 | 0.005405 |
import io
import pytest
from contextlib import redirect_stdout
from mock import patch
from mythril.mythril import MythrilLevelDB, MythrilConfig
from mythril.exceptions import CriticalError
@patch("mythril.ethereum.interface.leveldb.client.EthLevelDB.search")
@patch("mythril.ethereum.interface.leveldb.client.ETH_DB", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBReader", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBWriter", return_value=None)
def test_leveldb_code_search(mock_leveldb, f1, f2, f3):
config = MythrilConfig()
config.set_api_leveldb("some path")
leveldb_search = MythrilLevelDB(leveldb=config.eth_db)
leveldb_search.search_db("code#PUSH#")
mock_leveldb.assert_called()
@patch("mythril.ethereum.interface.leveldb.client.ETH_DB", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBReader", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBWriter", return_value=None)
def test_leveldb_hash_search_incorrect_input(f1, f2, f3):
config = MythrilConfig()
config.set_api_leveldb("some path")
leveldb_search = MythrilLevelDB(leveldb=config.eth_db)
with pytest.raises(CriticalError):
leveldb_search.contract_hash_to_address("0x23")
@patch(
"mythril.ethereum.interface.leveldb.client.EthLevelDB.contract_hash_to_address",
return_value="0xddbb615cb2ffaff7233d8a6f3601621de94795e1",
)
@patch("mythril.ethereum.interface.leveldb.client.ETH_DB", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBReader", return_value=None)
@patch("mythril.ethereum.interface.leveldb.client.LevelDBWriter", return_value=None)
def test_leveldb_hash_search_correct_input(mock_hash_to_address, f1, f2, f3):
config = MythrilConfig()
config.set_api_leveldb("some path")
leveldb_search = MythrilLevelDB(leveldb=config.eth_db)
f = io.StringIO()
with redirect_stdout(f):
leveldb_search.contract_hash_to_address(
"0x0464e651bcc40de28fc7fcde269218d16850bac9689da5f4a6bd640fd3cdf6aa"
)
out = f.getvalue()
mock_hash_to_address.assert_called()
assert out == "0xddbb615cb2ffaff7233d8a6f3601621de94795e1\n"
|
b-mueller/mythril
|
tests/mythril/mythril_leveldb_test.py
|
Python
|
mit
| 2,235 | 0.003579 |
# -*- coding: utf-8 -*-
"""
Contains functions to plot the results of the dustydiffusion test.
@author: ibackus
"""
import matplotlib.pyplot as plt
import numpy as np
import pynbody
import diskpy
#sim, epsEstimator, ts, runpars = analyze.loadSim(simdir)
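# A minimal usage sketch (assumes an `analyze` module providing loadSim(), as
# hinted by the commented line above; 'path/to/simdir' is a placeholder path):
#
#     sim, epsEstimator, ts, runpars = analyze.loadSim('path/to/simdir')
#     crossSection(sim, ts, crossSectionTimes=[0, 1, 10])
#     dustFracProfile(sim, ts, epsEstimator)
#     plt.show()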
def crossSection(sim, ts, crossSectionTimes=[0, 1, 10]):
"""
Reproduces the cross-section plot of dust density of
Price & Laibe 2015, fig. 5
    Note: sim and ts can be loaded with analyze.loadSim(...)
Parameters
----------
sim : list
List of SimSnaps for the simulation
ts : array-like
Snapshot times
crossSectionTimes : array-like
(optional) Sim times to plot (approximate)
"""
# Select times to plot at
crossSectionTimes = np.asarray(crossSectionTimes)
crossSectionTimes = crossSectionTimes.reshape(crossSectionTimes.size)
if np.ndim(crossSectionTimes) == 0:
crossSectionTimes = crossSectionTimes[None]
nPlots = len(crossSectionTimes)
# Plot
axs = diskpy.plot.gridplot(1, nPlots, square=True)
fig = plt.gcf()
for iPlot in range(nPlots):
ax = axs[iPlot]
iTime = abs(ts - crossSectionTimes[iPlot]).argmin()
t = ts[iTime]
f = sim[iTime]
im=pynbody.plot.sph.image(f, 'dustFrac', width=1, log=False, vmin=0,
vmax = 0.11, cmap='cubehelix_r',
show_cbar=False, subplot=ax, ret_im=True)
ax.set_xlabel('t={:.2g}'.format(float(t)))
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.2, 0.05, 0.6])
fig.colorbar(im, cax=cbar_ax)
fig.set_size_inches(8.5, 3.4, forward=True)
plt.suptitle('Cross section of dust fraction in z=0 plane\n'\
'See Price & Laibe (2015)')
def dustFracProfile(sim, ts, epsEstimator,
epsPlotTimes=[0., 0.1, 0.3, 1, 3, 10], nr=200,
colorcode=True, legend=True, rasterized=True):
"""
    Note: sim, ts, and epsEstimator can be loaded with analyze.loadSim(...)
Parameters
----------
sim : list
List of SimSnaps for the simulation
ts : array-like
Snapshot times
epsEstimator : function
A function of (r, t) that returns the analytic estimate of the dust
fraction density profile of P&L15 dustydiffusion
epsPlotTimes : array-like
Approximate times to plot at
nr : int
Number of radial bins
colorcode : bool
Color-code the times
legend : bool
Display legend
rasterized : bool
Rasterize the dots. Useful for saving figures as vector graphics
"""
# Make plot times an array
epsPlotTimes = np.asarray(epsPlotTimes)
epsPlotTimes = epsPlotTimes.reshape(epsPlotTimes.size)
nt = len(epsPlotTimes)
actualPlotTimes = np.zeros(nt)
title = 'plot times: '
if colorcode:
markercolor = None
else:
markercolor = 'k'
for iPlot in range(nt):
iTime = abs(ts - epsPlotTimes[iPlot]).argmin()
# Calculate stuff
f = sim[iTime]
t = ts[[iTime]]
actualPlotTimes[iPlot] = t
        print(t)
r = np.linspace(0, f['r'].max(), nr)
epsAnalytic = epsEstimator(r, t)
# Plot
scatter=plt.plot(f['r'], f['dustFrac'], 'o', markersize=3,
markeredgecolor='none', label='t={:.2g}'.format(float(t)),
color=markercolor, rasterized=rasterized)
line=plt.plot(r, epsAnalytic, 'r')
if colorcode:
# Make lines and points the same color
line[0].set_color(scatter[0].get_color())
title += '{:.2g}, '.format(float(t))
# Set-up plot
plt.ylim(0, 0.11)
plt.xlim(0, 0.5)
plt.ylabel('Dust fraction')
plt.xlabel('r')
if legend:
plt.legend(loc='best', markerscale=2)
plt.title(title)
|
ibackus/testdust
|
testdust/diffusion/plot.py
|
Python
|
mit
| 4,000 | 0.009 |
import sys, unittest, struct, math, ctypes
from binascii import hexlify
from ctypes import *
def bin(s):
return hexlify(memoryview(s)).decode().upper()
# Each *simple* type that supports different byte orders has an
# __ctype_be__ attribute that specifies the same type in BIG ENDIAN
# byte order, and a __ctype_le__ attribute that is the same type in
# LITTLE ENDIAN byte order.
#
# For Structures and Unions, these types are created on demand.
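#
# A quick illustration (a sketch; it mirrors the assertions in
# test_endian_short below, using the bin() helper defined above):
#
#     >>> bin(c_short.__ctype_be__(0x1234))   # most significant byte first
#     '1234'
#     >>> bin(c_short.__ctype_le__(0x1234))   # least significant byte first
#     '3412'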
class Test(unittest.TestCase):
@unittest.skip('test disabled')
def test_X(self):
print(sys.byteorder, file=sys.stderr)
for i in range(32):
bits = BITS()
setattr(bits, "i%s" % i, 1)
dump(bits)
def test_slots(self):
class BigPoint(BigEndianStructure):
__slots__ = ()
_fields_ = [("x", c_int), ("y", c_int)]
class LowPoint(LittleEndianStructure):
__slots__ = ()
_fields_ = [("x", c_int), ("y", c_int)]
big = BigPoint()
little = LowPoint()
big.x = 4
big.y = 2
little.x = 2
little.y = 4
with self.assertRaises(AttributeError):
big.z = 42
with self.assertRaises(AttributeError):
little.z = 24
def test_endian_short(self):
if sys.byteorder == "little":
self.assertIs(c_short.__ctype_le__, c_short)
self.assertIs(c_short.__ctype_be__.__ctype_le__, c_short)
else:
self.assertIs(c_short.__ctype_be__, c_short)
self.assertIs(c_short.__ctype_le__.__ctype_be__, c_short)
s = c_short.__ctype_be__(0x1234)
self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.assertEqual(bin(s), "1234")
self.assertEqual(s.value, 0x1234)
s = c_short.__ctype_le__(0x1234)
self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
self.assertEqual(bin(s), "3412")
self.assertEqual(s.value, 0x1234)
s = c_ushort.__ctype_be__(0x1234)
self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.assertEqual(bin(s), "1234")
self.assertEqual(s.value, 0x1234)
s = c_ushort.__ctype_le__(0x1234)
self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
self.assertEqual(bin(s), "3412")
self.assertEqual(s.value, 0x1234)
def test_endian_int(self):
if sys.byteorder == "little":
self.assertIs(c_int.__ctype_le__, c_int)
self.assertIs(c_int.__ctype_be__.__ctype_le__, c_int)
else:
self.assertIs(c_int.__ctype_be__, c_int)
self.assertIs(c_int.__ctype_le__.__ctype_be__, c_int)
s = c_int.__ctype_be__(0x12345678)
self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678")
self.assertEqual(bin(s), "12345678")
self.assertEqual(s.value, 0x12345678)
s = c_int.__ctype_le__(0x12345678)
self.assertEqual(bin(struct.pack("<i", 0x12345678)), "78563412")
self.assertEqual(bin(s), "78563412")
self.assertEqual(s.value, 0x12345678)
s = c_uint.__ctype_be__(0x12345678)
self.assertEqual(bin(struct.pack(">I", 0x12345678)), "12345678")
self.assertEqual(bin(s), "12345678")
self.assertEqual(s.value, 0x12345678)
s = c_uint.__ctype_le__(0x12345678)
self.assertEqual(bin(struct.pack("<I", 0x12345678)), "78563412")
self.assertEqual(bin(s), "78563412")
self.assertEqual(s.value, 0x12345678)
def test_endian_longlong(self):
if sys.byteorder == "little":
self.assertIs(c_longlong.__ctype_le__, c_longlong)
self.assertIs(c_longlong.__ctype_be__.__ctype_le__, c_longlong)
else:
self.assertIs(c_longlong.__ctype_be__, c_longlong)
self.assertIs(c_longlong.__ctype_le__.__ctype_be__, c_longlong)
s = c_longlong.__ctype_be__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
self.assertEqual(bin(s), "1234567890ABCDEF")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_longlong.__ctype_le__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack("<q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
self.assertEqual(bin(s), "EFCDAB9078563412")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_ulonglong.__ctype_be__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack(">Q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
self.assertEqual(bin(s), "1234567890ABCDEF")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_ulonglong.__ctype_le__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack("<Q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
self.assertEqual(bin(s), "EFCDAB9078563412")
self.assertEqual(s.value, 0x1234567890ABCDEF)
def test_endian_float(self):
if sys.byteorder == "little":
self.assertIs(c_float.__ctype_le__, c_float)
self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float)
else:
self.assertIs(c_float.__ctype_be__, c_float)
self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float)
s = c_float(math.pi)
self.assertEqual(bin(struct.pack("f", math.pi)), bin(s))
# Hm, what's the precision of a float compared to a double?
self.assertAlmostEqual(s.value, math.pi, places=6)
s = c_float.__ctype_le__(math.pi)
self.assertAlmostEqual(s.value, math.pi, places=6)
self.assertEqual(bin(struct.pack("<f", math.pi)), bin(s))
s = c_float.__ctype_be__(math.pi)
self.assertAlmostEqual(s.value, math.pi, places=6)
self.assertEqual(bin(struct.pack(">f", math.pi)), bin(s))
def test_endian_double(self):
if sys.byteorder == "little":
self.assertIs(c_double.__ctype_le__, c_double)
self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double)
else:
self.assertIs(c_double.__ctype_be__, c_double)
self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double)
s = c_double(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("d", math.pi)), bin(s))
s = c_double.__ctype_le__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("<d", math.pi)), bin(s))
s = c_double.__ctype_be__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s))
def test_endian_other(self):
self.assertIs(c_byte.__ctype_le__, c_byte)
self.assertIs(c_byte.__ctype_be__, c_byte)
self.assertIs(c_ubyte.__ctype_le__, c_ubyte)
self.assertIs(c_ubyte.__ctype_be__, c_ubyte)
self.assertIs(c_char.__ctype_le__, c_char)
self.assertIs(c_char.__ctype_be__, c_char)
def test_struct_fields_1(self):
if sys.byteorder == "little":
base = BigEndianStructure
else:
base = LittleEndianStructure
class T(base):
pass
_fields_ = [("a", c_ubyte),
("b", c_byte),
("c", c_short),
("d", c_ushort),
("e", c_int),
("f", c_uint),
("g", c_long),
("h", c_ulong),
("i", c_longlong),
("k", c_ulonglong),
("l", c_float),
("m", c_double),
("n", c_char),
("b1", c_byte, 3),
("b2", c_byte, 3),
("b3", c_byte, 2),
("a", c_int * 3 * 3 * 3)]
T._fields_ = _fields_
# these fields do not support different byte order:
for typ in c_wchar, c_void_p, POINTER(c_int):
_fields_.append(("x", typ))
class T(base):
pass
self.assertRaises(TypeError, setattr, T, "_fields_", [("x", typ)])
def test_struct_struct(self):
# nested structures with different byteorders
# create nested structures with given byteorders and set memory to data
for nested, data in (
(BigEndianStructure, b'\0\0\0\1\0\0\0\2'),
(LittleEndianStructure, b'\1\0\0\0\2\0\0\0'),
):
for parent in (
BigEndianStructure,
LittleEndianStructure,
Structure,
):
class NestedStructure(nested):
_fields_ = [("x", c_uint32),
("y", c_uint32)]
class TestStructure(parent):
_fields_ = [("point", NestedStructure)]
self.assertEqual(len(data), sizeof(TestStructure))
ptr = POINTER(TestStructure)
s = cast(data, ptr)[0]
del ctypes._pointer_type_cache[TestStructure]
self.assertEqual(s.point.x, 1)
self.assertEqual(s.point.y, 2)
def test_struct_fields_2(self):
# standard packing in struct uses no alignment.
# So, we have to align using pad bytes.
#
# Unaligned accesses will crash Python (on those platforms that
# don't allow it, like sparc solaris).
if sys.byteorder == "little":
base = BigEndianStructure
fmt = ">bxhid"
else:
base = LittleEndianStructure
fmt = "<bxhid"
class S(base):
_fields_ = [("b", c_byte),
("h", c_short),
("i", c_int),
("d", c_double)]
s1 = S(0x12, 0x1234, 0x12345678, 3.14)
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
def test_unaligned_nonnative_struct_fields(self):
if sys.byteorder == "little":
base = BigEndianStructure
fmt = ">b h xi xd"
else:
base = LittleEndianStructure
fmt = "<b h xi xd"
class S(base):
_pack_ = 1
_fields_ = [("b", c_byte),
("h", c_short),
("_1", c_byte),
("i", c_int),
("_2", c_byte),
("d", c_double)]
s1 = S()
s1.b = 0x12
s1.h = 0x1234
s1.i = 0x12345678
s1.d = 3.14
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
def test_unaligned_native_struct_fields(self):
if sys.byteorder == "little":
fmt = "<b h xi xd"
else:
fmt = ">b h xi xd"
class S(Structure):
_pack_ = 1
_fields_ = [("b", c_byte),
("h", c_short),
("_1", c_byte),
("i", c_int),
("_2", c_byte),
("d", c_double)]
s1 = S()
s1.b = 0x12
s1.h = 0x1234
s1.i = 0x12345678
s1.d = 3.14
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
if __name__ == "__main__":
unittest.main()
|
Suwmlee/XX-Net
|
Python3/lib/ctypes/test/test_byteswap.py
|
Python
|
bsd-2-clause
| 11,726 | 0.000768 |
# coding: utf-8
"""
An API to insert and retrieve metadata on cloud artifacts.
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1alpha1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DiscoveryDiscoveredDetails(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'operation': 'GooglelongrunningOperation'
}
attribute_map = {
'operation': 'operation'
}
def __init__(self, operation=None): # noqa: E501
"""DiscoveryDiscoveredDetails - a model defined in Swagger""" # noqa: E501
self._operation = None
self.discriminator = None
if operation is not None:
self.operation = operation
@property
def operation(self):
"""Gets the operation of this DiscoveryDiscoveredDetails. # noqa: E501
Output only. An operation that indicates the status of the current scan. # noqa: E501
:return: The operation of this DiscoveryDiscoveredDetails. # noqa: E501
:rtype: GooglelongrunningOperation
"""
return self._operation
@operation.setter
def operation(self, operation):
"""Sets the operation of this DiscoveryDiscoveredDetails.
Output only. An operation that indicates the status of the current scan. # noqa: E501
:param operation: The operation of this DiscoveryDiscoveredDetails. # noqa: E501
:type: GooglelongrunningOperation
"""
self._operation = operation
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DiscoveryDiscoveredDetails, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DiscoveryDiscoveredDetails):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
grafeas/client-python
|
grafeas/models/discovery_discovered_details.py
|
Python
|
apache-2.0
| 3,542 | 0.000282 |
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import stat
import socket
from irma.common.base.exceptions import IrmaSFTPv2Error
from irma.common.ftp.ftp import FTPInterface
from ssh2.session import Session
from ssh2.sftp import LIBSSH2_FXF_CREAT, LIBSSH2_FXF_WRITE,\
LIBSSH2_SFTP_S_IRUSR, LIBSSH2_SFTP_S_IWUSR,\
LIBSSH2_SFTP_S_IRGRP, LIBSSH2_SFTP_S_IROTH,\
LIBSSH2_SFTP_S_IXUSR
class IrmaSFTPv2(FTPInterface):
"""Irma SFTPv2 handler
    This class handles the connection with an SFTP server and provides
    functions for interacting with it.
"""
_Exception = IrmaSFTPv2Error
# ==================================
# Constructor and Destructor stuff
# ==================================
def __init__(self, host, port, auth, key_path, user, passwd,
dst_user=None, upload_path='uploads', hash_check=False,
autoconnect=True):
self._sess = None
self._client = None
super().__init__(host, port, auth, key_path, user, passwd, dst_user,
upload_path, hash_check, autoconnect)
def connected(self):
return self._sess is not None
# ============================
# Overridden private methods
# ============================
def _connect(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self._host, self._port))
self._sess = Session()
self._sess.handshake(sock)
if self._auth == 'key':
# self._pubkey_path must be generated from private key
# s.userauth_publickey_fromfile(self._user, self._pubkey_path,
# self._key_path, '')
raise IrmaSFTPv2Error("Pub key authentication not implemented")
else:
self._sess.userauth_password(self._user, self._passwd)
self._client = self._sess.sftp_init()
def _disconnect(self, *, force=False):
self._client = None
if not force:
self._sess.disconnect()
self._sess = None
def _upload(self, remote, fobj):
mode = LIBSSH2_SFTP_S_IRUSR | LIBSSH2_SFTP_S_IWUSR | \
LIBSSH2_SFTP_S_IRGRP | LIBSSH2_SFTP_S_IROTH
opt = LIBSSH2_FXF_CREAT | LIBSSH2_FXF_WRITE
with self._client.open(remote, opt, mode) as rfh:
for chunk in iter(lambda: fobj.read(1024*1024), b""):
rfh.write(chunk)
def _download(self, remote, fobj):
with self._client.open(remote, 0, 0) as rfh:
for size, data in rfh:
fobj.write(data)
def _ls(self, remote):
with self._client.opendir(remote) as rfh:
paths = (p[1].decode('utf-8') for p in rfh.readdir())
return [p for p in paths if p not in ['.', '..']]
def _is_file(self, remote):
return not self._is_dir(remote)
def _is_dir(self, remote):
st = self._client.stat(remote)
return stat.S_ISDIR(st.st_mode)
def _rm(self, remote):
self._client.unlink(remote)
def _rmdir(self, remote):
self._client.rmdir(remote)
def _mkdir(self, remote):
mode = LIBSSH2_SFTP_S_IRUSR | \
LIBSSH2_SFTP_S_IWUSR | \
LIBSSH2_SFTP_S_IXUSR
self._client.mkdir(remote, mode)
def _mv(self, oldremote, newremote):
self._client.rename(oldremote, newremote)
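# A minimal construction sketch (the host and credentials below are
# placeholders, not real values; the public upload/download API is inherited
# from the FTPInterface base class):
#
#     sftp = IrmaSFTPv2("sftp.example.com", 22, "password", None,
#                       "probe", "secret", upload_path="uploads")
#     assert sftp.connected()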
|
quarkslab/irma
|
common/src/ftp/sftpv2.py
|
Python
|
apache-2.0
| 3,911 | 0 |
#!/usr/bin/env python
import QtTesting
import QtTestingImage
object1 = 'pqClientMainWindow/MainControlsToolbar/actionOpenData'
QtTesting.playCommand(object1, 'activate', '')
object2 = 'pqClientMainWindow/FileOpenDialog'
QtTesting.playCommand(object2, 'filesSelected', '$PARAVIEW_DATA_ROOT/SPCTH/Dave_Karelitz_Small/spcth_a')
object3 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/objectInspector/ScrollArea/qt_scrollarea_viewport/PanelArea/Editor/CellArrayStatus/1QHeaderView0'
QtTesting.playCommand(object3, 'mousePress', '1,1,0,0,0,0')
QtTesting.playCommand(object3, 'mouseRelease', '1,0,0,0,0,0')
object4 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/objectInspector/Accept'
QtTesting.playCommand(object4, 'activate', '')
object5 = 'pqClientMainWindow/representationToolbar/displayRepresentation/comboBox'
QtTesting.playCommand(object5, 'set_string', 'Surface')
object6 = 'pqClientMainWindow/variableToolbar/displayColor/Variables'
QtTesting.playCommand(object6, 'set_string', 'Pressure (dynes/cm^2^)')
object7 = 'pqClientMainWindow/cameraToolbar/actionPositiveX'
QtTesting.playCommand(object7, 'activate', '')
object8 = 'pqClientMainWindow/menubar/menuFilters/pqProxyGroupMenuManager0/Cut'
QtTesting.playCommand(object8, 'activate', '')
QtTesting.playCommand(object4, 'activate', '')
object9 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/objectInspector/ScrollArea/qt_scrollarea_viewport/PanelArea/Editor/CutFunction/pqImplicitPlaneWidget/show3DWidget'
QtTesting.playCommand(object9, 'set_boolean', 'false')
# DO_IMAGE_COMPARE
snapshotWidget = 'pqClientMainWindow/centralwidget/MultiViewWidget/CoreWidget/qt_tabwidget_stackedwidget/MultiViewWidget1/Frame.0/Viewport'
QtTestingImage.compareImage(snapshotWidget, 'CTHAMRClip.png', 300, 300)
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/Applications/ParaView/Testing/Python/CTHAMRClip.py
|
Python
|
gpl-3.0
| 1,836 | 0.003813 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class PluginrepoConfig(AppConfig):
name = 'pluginrepo'
|
weichen2046/IntellijPluginDevDemo
|
enterprise-repo/enterprepo/pluginrepo/apps.py
|
Python
|
apache-2.0
| 160 | 0 |
from ritoapi.endpoints.match_v3 import MatchV3
import threading
def _load_matches(match_v3, sample_region, sample_match_id, count):
for i in range(count):
data = match_v3.matches(sample_region, sample_match_id)
assert(data['gameId'] == sample_match_id)
def test_matches_stress(sample_api_key, sample_rate_limit, sample_region, sample_match_id):
match_v3 = MatchV3(sample_api_key, sample_rate_limit)
threads = []
for i in range(10):
t = threading.Thread(target=_load_matches, args=(match_v3, sample_region, sample_match_id, 20))
threads.append(t)
t.start()
for t in threads:
t.join()
|
FancyRice/RitoAPI
|
ritoapi/tests/test_stress.py
|
Python
|
mit
| 655 | 0.003053 |
from future import standard_library
standard_library.install_aliases()
from builtins import object
import threading
from time import time
import random
import queue
from ..common import log
class Scheduler(object):
"""
    A simple scheduler which schedules periodic and one-shot events
"""
import sortedcontainers as sc
max_delay_time = 60
def __init__(self):
self._jobs = Scheduler.sc.SortedSet()
self._wakeup_q = queue.Queue()
self._lock = threading.Lock()
self._thr = threading.Thread(target=self._do_jobs)
        self._thr.daemon = True
self._started = False
def start(self):
"""
        Start the scheduler, which will start the internal thread for
        scheduling jobs. Please call tear_down() when doing cleanup.
"""
if self._started:
log.logger.info("Scheduler already started.")
return
self._started = True
self._thr.start()
def tear_down(self):
"""
        Stop the scheduler, which will stop the internal thread for scheduling
jobs.
"""
if not self._started:
log.logger.info("Scheduler already tear down.")
return
self._wakeup_q.put(True)
def _do_jobs(self):
while 1:
(sleep_time, jobs) = self.get_ready_jobs()
self._do_execution(jobs)
try:
done = self._wakeup_q.get(timeout=sleep_time)
except queue.Empty:
pass
else:
if done:
break
self._started = False
log.logger.info("Scheduler exited.")
def get_ready_jobs(self):
"""
@return: a 2 element tuple. The first element is the next ready
duration. The second element is ready jobs list
"""
now = time()
ready_jobs = []
sleep_time = 1
with self._lock:
job_set = self._jobs
total_jobs = len(job_set)
for job in job_set:
if job.get_expiration() <= now:
ready_jobs.append(job)
if ready_jobs:
del job_set[:len(ready_jobs)]
for job in ready_jobs:
if job.get_interval() != 0 and not job.stopped():
# repeated job, calculate next due time and enqueue
job.update_expiration()
job_set.add(job)
if job_set:
sleep_time = job_set[0].get_expiration() - now
if sleep_time < 0:
                    log.logger.warn("Scheduler saturation, sleep_time=%s",
sleep_time)
sleep_time = 0.1
if ready_jobs:
log.logger.info("Get %d ready jobs, next duration is %f, "
"and there are %s jobs scheduling",
len(ready_jobs), sleep_time, total_jobs)
ready_jobs.sort(key=lambda job: job.get("priority", 0), reverse=True)
return (sleep_time, ready_jobs)
def add_jobs(self, jobs):
with self._lock:
now = time()
job_set = self._jobs
for job in jobs:
delay_time = random.randrange(0, self.max_delay_time)
job.set_initial_due_time(now + delay_time)
job_set.add(job)
self._wakeup()
def update_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
job_set.discard(njob)
job_set.add(njob)
self._wakeup()
def remove_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
njob.stop()
job_set.discard(njob)
self._wakeup()
def number_of_jobs(self):
with self._lock:
return len(self._jobs)
def disable_randomization(self):
self.max_delay_time = 1
def _wakeup(self):
self._wakeup_q.put(None)
def _do_execution(self, jobs):
for job in jobs:
job()
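# A minimal usage sketch (my_job is hypothetical; any job object must expose
# the interface used above: __call__, get_expiration(), get_interval(),
# update_expiration(), set_initial_due_time(), stop()/stopped(), and a
# dict-style get("priority", ...)):
#
#     scheduler = Scheduler()
#     scheduler.start()
#     scheduler.add_jobs([my_job])
#     ...
#     scheduler.tear_down()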
|
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks
|
Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/schedule/scheduler.py
|
Python
|
isc
| 4,153 | 0.001445 |
import unittest
import warnings
from collections import OrderedDict
import numpy as np
import numpy.testing as np_test
from pgmpy.extern.six.moves import range
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.factors.discrete import JointProbabilityDistribution as JPD
from pgmpy.factors import factor_divide
from pgmpy.factors import factor_product
from pgmpy.factors.discrete.CPD import TabularCPD
from pgmpy.independencies import Independencies
from pgmpy.models import BayesianModel
from pgmpy.models import MarkovModel
class TestFactorInit(unittest.TestCase):
def test_class_init(self):
phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8))
self.assertEqual(phi.variables, ['x1', 'x2', 'x3'])
np_test.assert_array_equal(phi.cardinality, np.array([2, 2, 2]))
np_test.assert_array_equal(phi.values, np.ones(8).reshape(2, 2, 2))
def test_class_init1(self):
phi = DiscreteFactor([1, 2, 3], [2, 3, 2], np.arange(12))
self.assertEqual(phi.variables, [1, 2, 3])
np_test.assert_array_equal(phi.cardinality, np.array([2, 3, 2]))
np_test.assert_array_equal(phi.values, np.arange(12).reshape(2, 3, 2))
def test_class_init_sizeerror(self):
self.assertRaises(ValueError, DiscreteFactor, ['x1', 'x2', 'x3'], [2, 2, 2], np.ones(9))
def test_class_init_typeerror(self):
self.assertRaises(TypeError, DiscreteFactor, 'x1', [3], [1, 2, 3])
self.assertRaises(ValueError, DiscreteFactor, ['x1', 'x1', 'x3'], [2, 3, 2], range(12))
def test_init_size_var_card_not_equal(self):
self.assertRaises(ValueError, DiscreteFactor, ['x1', 'x2'], [2], np.ones(2))
class TestFactorMethods(unittest.TestCase):
def setUp(self):
self.phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 2, 2], np.random.uniform(5, 10, size=8))
self.phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
self.phi2 = DiscreteFactor([('x1', 0), ('x2', 0), ('x3', 0)], [2, 3, 2], range(12))
# This larger factor (phi3) caused a bug in reduce
card3 = [3, 3, 3, 2, 2, 2, 2, 2, 2]
self.phi3 = DiscreteFactor(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'],
card3, np.arange(np.prod(card3), dtype=np.float))
self.tup1 = ('x1', 'x2')
self.tup2 = ('x2', 'x3')
self.tup3 = ('x3', (1, 'x4'))
self.phi4 = DiscreteFactor([self.tup1, self.tup2, self.tup3], [2, 3, 4], np.random.uniform(3, 10, size=24))
self.phi5 = DiscreteFactor([self.tup1, self.tup2, self.tup3], [2, 3, 4], range(24))
self.card6 = [4, 2, 1, 3, 5, 6]
self.phi6 = DiscreteFactor([self.tup1, self.tup2, self.tup3, self.tup1 + self.tup2,
self.tup2 + self.tup3, self.tup3 + self.tup1], self.card6,
np.arange(np.prod(self.card6), dtype=np.float))
self.var1 = 'x1'
self.var2 = ('x2', 1)
self.var3 = frozenset(['x1', 'x2'])
self.phi7 = DiscreteFactor([self.var1, self.var2], [3, 2], [3, 2, 4, 5, 9, 8])
self.phi8 = DiscreteFactor([self.var2, self.var3], [2, 2], [2, 1, 5, 6])
self.phi9 = DiscreteFactor([self.var1, self.var3], [3, 2], [3, 2, 4, 5, 9, 8])
self.phi10 = DiscreteFactor([self.var3], [2], [3, 6])
def test_scope(self):
self.assertListEqual(self.phi.scope(), ['x1', 'x2', 'x3'])
self.assertListEqual(self.phi1.scope(), ['x1', 'x2', 'x3'])
self.assertListEqual(self.phi4.scope(), [self.tup1, self.tup2, self.tup3])
def test_assignment(self):
self.assertListEqual(self.phi.assignment([0]), [[('x1', 0), ('x2', 0), ('x3', 0)]])
self.assertListEqual(self.phi.assignment([4, 5, 6]), [[('x1', 1), ('x2', 0), ('x3', 0)],
[('x1', 1), ('x2', 0), ('x3', 1)],
[('x1', 1), ('x2', 1), ('x3', 0)]])
self.assertListEqual(self.phi1.assignment(np.array([4, 5, 6])), [[('x1', 0), ('x2', 2), ('x3', 0)],
[('x1', 0), ('x2', 2), ('x3', 1)],
[('x1', 1), ('x2', 0), ('x3', 0)]])
self.assertListEqual(self.phi4.assignment(np.array([11, 12, 23])),
[[(self.tup1, 0), (self.tup2, 2), (self.tup3, 3)],
[(self.tup1, 1), (self.tup2, 0), (self.tup3, 0)],
[(self.tup1, 1), (self.tup2, 2), (self.tup3, 3)]])
def test_assignment_indexerror(self):
self.assertRaises(IndexError, self.phi.assignment, [10])
self.assertRaises(IndexError, self.phi.assignment, [1, 3, 10, 5])
self.assertRaises(IndexError, self.phi.assignment, np.array([1, 3, 10, 5]))
self.assertRaises(IndexError, self.phi4.assignment, [2, 24])
self.assertRaises(IndexError, self.phi4.assignment, np.array([24, 2, 4, 30]))
def test_get_cardinality(self):
self.assertEqual(self.phi.get_cardinality(['x1']), {'x1': 2})
self.assertEqual(self.phi.get_cardinality(['x2']), {'x2': 2})
self.assertEqual(self.phi.get_cardinality(['x3']), {'x3': 2})
self.assertEqual(self.phi.get_cardinality(['x1', 'x2']), {'x1': 2, 'x2': 2})
self.assertEqual(self.phi.get_cardinality(['x1', 'x3']), {'x1': 2, 'x3': 2})
self.assertEqual(self.phi.get_cardinality(['x1', 'x2', 'x3']), {'x1': 2, 'x2': 2, 'x3': 2})
self.assertEqual(self.phi4.get_cardinality([self.tup1, self.tup3]),
{self.tup1: 2, self.tup3: 4})
def test_get_cardinality_scopeerror(self):
self.assertRaises(ValueError, self.phi.get_cardinality, ['x4'])
self.assertRaises(ValueError, self.phi4.get_cardinality, [('x1', 'x4')])
self.assertRaises(ValueError, self.phi4.get_cardinality, [('x3', (2, 'x4'))])
def test_get_cardinality_typeerror(self):
self.assertRaises(TypeError, self.phi.get_cardinality, 'x1')
def test_marginalize(self):
self.phi1.marginalize(['x1'])
np_test.assert_array_equal(self.phi1.values, np.array([[6, 8],
[10, 12],
[14, 16]]))
self.phi1.marginalize(['x2'])
np_test.assert_array_equal(self.phi1.values, np.array([30, 36]))
self.phi1.marginalize(['x3'])
np_test.assert_array_equal(self.phi1.values, np.array(66))
self.phi5.marginalize([self.tup1])
np_test.assert_array_equal(self.phi5.values, np.array([[12, 14, 16, 18],
[20, 22, 24, 26],
[28, 30, 32, 34]]))
self.phi5.marginalize([self.tup2])
np_test.assert_array_equal(self.phi5.values, np.array([60, 66, 72, 78]))
self.phi5.marginalize([self.tup3])
np_test.assert_array_equal(self.phi5.values, np.array([276]))
def test_marginalize_scopeerror(self):
self.assertRaises(ValueError, self.phi.marginalize, ['x4'])
self.phi.marginalize(['x1'])
self.assertRaises(ValueError, self.phi.marginalize, ['x1'])
self.assertRaises(ValueError, self.phi4.marginalize, [('x1', 'x3')])
self.phi4.marginalize([self.tup2])
self.assertRaises(ValueError, self.phi4.marginalize, [self.tup2])
def test_marginalize_typeerror(self):
self.assertRaises(TypeError, self.phi.marginalize, 'x1')
def test_marginalize_shape(self):
values = ['A', 'D', 'F', 'H']
phi3_mar = self.phi3.marginalize(values, inplace=False)
# Previously a sorting error caused these to be different
np_test.assert_array_equal(phi3_mar.values.shape, phi3_mar.cardinality)
phi6_mar = self.phi6.marginalize([self.tup1, self.tup2], inplace=False)
np_test.assert_array_equal(phi6_mar.values.shape, phi6_mar.cardinality)
self.phi6.marginalize([self.tup1, self.tup3 + self.tup1], inplace=True)
np_test.assert_array_equal(self.phi6.values.shape, self.phi6.cardinality)
def test_normalize(self):
self.phi1.normalize()
np_test.assert_almost_equal(self.phi1.values,
np.array([[[0, 0.01515152],
[0.03030303, 0.04545455],
[0.06060606, 0.07575758]],
[[0.09090909, 0.10606061],
[0.12121212, 0.13636364],
[0.15151515, 0.16666667]]]))
self.phi5.normalize()
np_test.assert_almost_equal(self.phi5.values,
[[[0., 0.00362319, 0.00724638, 0.01086957],
[0.01449275, 0.01811594, 0.02173913, 0.02536232],
[0.02898551, 0.0326087, 0.03623188, 0.03985507]],
[[0.04347826, 0.04710145, 0.05072464, 0.05434783],
[0.05797101, 0.0615942, 0.06521739, 0.06884058],
[0.07246377, 0.07608696, 0.07971014, 0.08333333]]])
def test_reduce(self):
self.phi1.reduce([('x1', 0), ('x2', 0)])
np_test.assert_array_equal(self.phi1.values, np.array([0, 1]))
self.phi5.reduce([(self.tup1, 0), (self.tup3, 1)])
np_test.assert_array_equal(self.phi5.values, np.array([1, 5, 9]))
def test_reduce1(self):
self.phi1.reduce([('x2', 0), ('x1', 0)])
np_test.assert_array_equal(self.phi1.values, np.array([0, 1]))
self.phi5.reduce([(self.tup3, 1), (self.tup1, 0)])
np_test.assert_array_equal(self.phi5.values, np.array([1, 5, 9]))
def test_reduce_shape(self):
values = [('A', 0), ('D', 0), ('F', 0), ('H', 1)]
phi3_reduced = self.phi3.reduce(values, inplace=False)
# Previously a sorting error caused these to be different
np_test.assert_array_equal(phi3_reduced.values.shape, phi3_reduced.cardinality)
values = [(self.tup1, 2), (self.tup3, 0)]
phi6_reduced = self.phi6.reduce(values, inplace=False)
np_test.assert_array_equal(phi6_reduced.values.shape, phi6_reduced.cardinality)
self.phi6.reduce(values, inplace=True)
np_test.assert_array_equal(self.phi6.values.shape, self.phi6.cardinality)
def test_complete_reduce(self):
self.phi1.reduce([('x1', 0), ('x2', 0), ('x3', 1)])
np_test.assert_array_equal(self.phi1.values, np.array([1]))
np_test.assert_array_equal(self.phi1.cardinality, np.array([]))
np_test.assert_array_equal(self.phi1.variables, OrderedDict())
self.phi5.reduce([(('x1', 'x2'), 1), (('x2', 'x3'), 0), (('x3', (1, 'x4')), 3)])
np_test.assert_array_equal(self.phi5.values, np.array([15]))
np_test.assert_array_equal(self.phi5.cardinality, np.array([]))
np_test.assert_array_equal(self.phi5.variables, OrderedDict())
def test_reduce_typeerror(self):
self.assertRaises(TypeError, self.phi1.reduce, 'x10')
self.assertRaises(TypeError, self.phi1.reduce, ['x10'])
self.assertRaises(TypeError, self.phi1.reduce, [('x1', 'x2')])
self.assertRaises(TypeError, self.phi1.reduce, [(0, 'x1')])
self.assertRaises(TypeError, self.phi1.reduce, [(0.1, 'x1')])
self.assertRaises(TypeError, self.phi1.reduce, [(0.1, 0.1)])
self.assertRaises(TypeError, self.phi1.reduce, [('x1', 0.1)])
self.assertRaises(TypeError, self.phi5.reduce, [(('x1', 'x2'), 0), (('x2', 'x3'), 0.2)])
def test_reduce_scopeerror(self):
self.assertRaises(ValueError, self.phi1.reduce, [('x4', 1)])
self.assertRaises(ValueError, self.phi5.reduce, [((('x1', 0.1), 0))])
def test_reduce_sizeerror(self):
self.assertRaises(IndexError, self.phi1.reduce, [('x3', 5)])
self.assertRaises(IndexError, self.phi5.reduce, [(('x2', 'x3'), 3)])
def test_identity_factor(self):
identity_factor = self.phi.identity_factor()
self.assertEqual(list(identity_factor.variables), ['x1', 'x2', 'x3'])
np_test.assert_array_equal(identity_factor.cardinality, [2, 2, 2])
np_test.assert_array_equal(identity_factor.values, np.ones(8).reshape(2, 2, 2))
identity_factor1 = self.phi5.identity_factor()
self.assertEqual(list(identity_factor1.variables), [self.tup1, self.tup2, self.tup3])
np_test.assert_array_equal(identity_factor1.cardinality, [2, 3, 4])
np_test.assert_array_equal(identity_factor1.values, np.ones(24).reshape(2, 3, 4))
def test_factor_product(self):
phi = DiscreteFactor(['x1', 'x2'], [2, 2], range(4))
phi1 = DiscreteFactor(['x3', 'x4'], [2, 2], range(4))
prod = factor_product(phi, phi1)
expected_factor = DiscreteFactor(['x1', 'x2', 'x3', 'x4'], [2, 2, 2, 2],
[0, 0, 0, 0, 0, 1, 2, 3, 0, 2, 4, 6, 0, 3, 6, 9])
self.assertEqual(prod, expected_factor)
self.assertEqual(sorted(prod.variables), ['x1', 'x2', 'x3', 'x4'])
phi = DiscreteFactor(['x1', 'x2'], [3, 2], range(6))
phi1 = DiscreteFactor(['x2', 'x3'], [2, 2], range(4))
prod = factor_product(phi, phi1)
expected_factor = DiscreteFactor(['x1', 'x2', 'x3'], [3, 2, 2],
[0, 0, 2, 3, 0, 2, 6, 9, 0, 4, 10, 15])
self.assertEqual(prod, expected_factor)
self.assertEqual(prod.variables, expected_factor.variables)
prod = factor_product(self.phi7, self.phi8)
expected_factor = DiscreteFactor([self.var1, self.var2, self.var3], [3, 2, 2],
[6, 3, 10, 12, 8, 4, 25, 30, 18, 9, 40, 48])
self.assertEqual(prod, expected_factor)
self.assertEqual(prod.variables, expected_factor.variables)
def test_product(self):
phi = DiscreteFactor(['x1', 'x2'], [2, 2], range(4))
phi1 = DiscreteFactor(['x3', 'x4'], [2, 2], range(4))
prod = phi.product(phi1, inplace=False)
expected_factor = DiscreteFactor(['x1', 'x2', 'x3', 'x4'], [2, 2, 2, 2],
[0, 0, 0, 0, 0, 1, 2, 3, 0, 2, 4, 6, 0, 3, 6, 9])
self.assertEqual(prod, expected_factor)
self.assertEqual(sorted(prod.variables), ['x1', 'x2', 'x3', 'x4'])
phi = DiscreteFactor(['x1', 'x2'], [3, 2], range(6))
phi1 = DiscreteFactor(['x2', 'x3'], [2, 2], range(4))
prod = phi.product(phi1, inplace=False)
expected_factor = DiscreteFactor(['x1', 'x2', 'x3'], [3, 2, 2],
[0, 0, 2, 3, 0, 2, 6, 9, 0, 4, 10, 15])
self.assertEqual(prod, expected_factor)
self.assertEqual(sorted(prod.variables), ['x1', 'x2', 'x3'])
phi7_copy = self.phi7
phi7_copy.product(self.phi8, inplace=True)
expected_factor = DiscreteFactor([self.var1, self.var2, self.var3], [3, 2, 2],
[6, 3, 10, 12, 8, 4, 25, 30, 18, 9, 40, 48])
self.assertEqual(expected_factor, phi7_copy)
self.assertEqual(phi7_copy.variables, [self.var1, self.var2, self.var3])
def test_factor_product_non_factor_arg(self):
self.assertRaises(TypeError, factor_product, 1, 2)
def test_factor_mul(self):
phi = DiscreteFactor(['x1', 'x2'], [2, 2], range(4))
phi1 = DiscreteFactor(['x3', 'x4'], [2, 2], range(4))
prod = phi * phi1
sorted_vars = ['x1', 'x2', 'x3', 'x4']
for axis in range(prod.values.ndim):
exchange_index = prod.variables.index(sorted_vars[axis])
prod.variables[axis], prod.variables[exchange_index] = prod.variables[exchange_index], prod.variables[axis]
prod.values = prod.values.swapaxes(axis, exchange_index)
np_test.assert_almost_equal(prod.values.ravel(),
np.array([0, 0, 0, 0, 0, 1, 2, 3,
0, 2, 4, 6, 0, 3, 6, 9]))
self.assertEqual(prod.variables, ['x1', 'x2', 'x3', 'x4'])
def test_factor_divide(self):
phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 2, 4])
phi2 = DiscreteFactor(['x1'], [2], [1, 2])
expected_factor = phi1.divide(phi2, inplace=False)
phi3 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 1, 2])
self.assertEqual(phi3, expected_factor)
self.phi9.divide(self.phi10, inplace=True)
np_test.assert_array_almost_equal(self.phi9.values, np.array([1.000000, 0.333333, 1.333333,
0.833333, 3.000000, 1.333333]).reshape(3, 2))
self.assertEqual(self.phi9.variables, [self.var1, self.var3])
def test_factor_divide_truediv(self):
phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 2, 4])
phi2 = DiscreteFactor(['x1'], [2], [1, 2])
div = phi1 / phi2
phi3 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 1, 2])
self.assertEqual(phi3, div)
self.phi9 = self.phi9 / self.phi10
np_test.assert_array_almost_equal(self.phi9.values, np.array([1.000000, 0.333333, 1.333333,
0.833333, 3.000000, 1.333333]).reshape(3, 2))
self.assertEqual(self.phi9.variables, [self.var1, self.var3])
def test_factor_divide_invalid(self):
phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 3, 4])
phi2 = DiscreteFactor(['x1'], [2], [0, 2])
div = phi1.divide(phi2, inplace=False)
np_test.assert_array_equal(div.values.ravel(), np.array([np.inf, np.inf, 1.5, 2]))
def test_factor_divide_no_common_scope(self):
phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 3, 4])
phi2 = DiscreteFactor(['x3'], [2], [0, 2])
self.assertRaises(ValueError, factor_divide, phi1, phi2)
phi2 = DiscreteFactor([self.var3], [2], [2, 1])
self.assertRaises(ValueError, factor_divide, self.phi7, phi2)
def test_factor_divide_non_factor_arg(self):
self.assertRaises(TypeError, factor_divide, 1, 1)
def test_eq(self):
self.assertFalse(self.phi == self.phi1)
self.assertTrue(self.phi == self.phi)
self.assertTrue(self.phi1 == self.phi1)
self.assertTrue(self.phi5 == self.phi5)
self.assertFalse(self.phi5 == self.phi6)
self.assertTrue(self.phi6 == self.phi6)
def test_eq1(self):
phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 4, 3], range(24))
phi2 = DiscreteFactor(['x2', 'x1', 'x3'], [4, 2, 3],
[0, 1, 2, 12, 13, 14, 3, 4, 5, 15, 16, 17, 6, 7,
8, 18, 19, 20, 9, 10, 11, 21, 22, 23])
self.assertTrue(phi1 == phi2)
self.assertEqual(phi2.variables, ['x2', 'x1', 'x3'])
phi3 = DiscreteFactor([self.tup1, self.tup2, self.tup3], [2, 4, 3], range(24))
phi4 = DiscreteFactor([self.tup2, self.tup1, self.tup3], [4, 2, 3],
[0, 1, 2, 12, 13, 14, 3, 4, 5, 15, 16, 17,
6, 7, 8, 18, 19, 20, 9, 10, 11, 21, 22, 23])
self.assertTrue(phi3 == phi4)
def test_hash(self):
phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 3, 4])
phi2 = DiscreteFactor(['x2', 'x1'], [2, 2], [1, 3, 2, 4])
self.assertEqual(hash(phi1), hash(phi2))
phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 2, 2], range(8))
phi2 = DiscreteFactor(['x3', 'x1', 'x2'], [2, 2, 2], [0, 2, 4, 6, 1, 3, 5, 7])
self.assertEqual(hash(phi1), hash(phi2))
var1 = TestHash(1, 2)
phi3 = DiscreteFactor([var1, self.var2, self.var3], [2, 4, 3], range(24))
phi4 = DiscreteFactor([self.var2, var1, self.var3], [4, 2, 3],
[0, 1, 2, 12, 13, 14, 3, 4, 5, 15, 16, 17,
6, 7, 8, 18, 19, 20, 9, 10, 11, 21, 22, 23])
self.assertEqual(hash(phi3), hash(phi4))
var1 = TestHash(2, 3)
var2 = TestHash('x2', 1)
phi3 = DiscreteFactor([var1, var2, self.var3], [2, 2, 2], range(8))
phi4 = DiscreteFactor([self.var3, var1, var2], [2, 2, 2], [0, 2, 4, 6, 1, 3, 5, 7])
self.assertEqual(hash(phi3), hash(phi4))
def test_maximize_single(self):
self.phi1.maximize(['x1'])
self.assertEqual(self.phi1, DiscreteFactor(['x2', 'x3'], [3, 2], [6, 7, 8, 9, 10, 11]))
self.phi1.maximize(['x2'])
self.assertEqual(self.phi1, DiscreteFactor(['x3'], [2], [10, 11]))
self.phi2 = DiscreteFactor(['x1', 'x2', 'x3'], [3, 2, 2], [0.25, 0.35, 0.08, 0.16, 0.05, 0.07,
0.00, 0.00, 0.15, 0.21, 0.08, 0.18])
self.phi2.maximize(['x2'])
self.assertEqual(self.phi2, DiscreteFactor(['x1', 'x3'], [3, 2], [0.25, 0.35, 0.05,
0.07, 0.15, 0.21]))
self.phi5.maximize([('x1', 'x2')])
self.assertEqual(self.phi5, DiscreteFactor([('x2', 'x3'), ('x3', (1, 'x4'))], [3, 4],
[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]))
self.phi5.maximize([('x2', 'x3')])
self.assertEqual(self.phi5, DiscreteFactor([('x3', (1, 'x4'))], [4], [20, 21, 22, 23]))
def test_maximize_list(self):
self.phi1.maximize(['x1', 'x2'])
self.assertEqual(self.phi1, DiscreteFactor(['x3'], [2], [10, 11]))
self.phi5.maximize([('x1', 'x2'), ('x2', 'x3')])
self.assertEqual(self.phi5, DiscreteFactor([('x3', (1, 'x4'))], [4], [20, 21, 22, 23]))
def test_maximize_shape(self):
values = ['A', 'D', 'F', 'H']
phi3_max = self.phi3.maximize(values, inplace=False)
# Previously a sorting error caused these to be different
np_test.assert_array_equal(phi3_max.values.shape, phi3_max.cardinality)
phi = DiscreteFactor([self.var1, self.var2, self.var3], [3, 2, 2], [3, 2, 4, 5, 9, 8, 3, 2, 4, 5, 9, 8])
phi_max = phi.marginalize([self.var1, self.var2], inplace=False)
np_test.assert_array_equal(phi_max.values.shape, phi_max.cardinality)
def test_maximize_scopeerror(self):
self.assertRaises(ValueError, self.phi.maximize, ['x10'])
def test_maximize_typeerror(self):
self.assertRaises(TypeError, self.phi.maximize, 'x1')
def tearDown(self):
del self.phi
del self.phi1
del self.phi2
del self.phi3
del self.phi4
del self.phi5
del self.phi6
del self.phi7
del self.phi8
del self.phi9
del self.phi10
class TestHash:
# Used to check the hash function of DiscreteFactor class.
def __init__(self, x, y):
self.x = x
self.y = y
def __hash__(self):
return hash(str(self.x) + str(self.y))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.x == other.x and self.y == other.y
class TestTabularCPDInit(unittest.TestCase):
def test_cpd_init(self):
cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1]])
self.assertEqual(cpd.variable, 'grade')
self.assertEqual(cpd.variable_card, 3)
self.assertEqual(list(cpd.variables), ['grade'])
np_test.assert_array_equal(cpd.cardinality, np.array([3]))
np_test.assert_array_almost_equal(cpd.values, np.array([0.1, 0.1, 0.1]))
values = [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]
evidence = ['intel', 'diff']
evidence_card = [3, 2]
valid_value_inputs = [values, np.asarray(values)]
valid_evidence_inputs = [evidence, set(evidence), np.asarray(evidence)]
valid_evidence_card_inputs = [evidence_card, np.asarray(evidence_card)]
for value in valid_value_inputs:
for evidence in valid_evidence_inputs:
for evidence_card in valid_evidence_card_inputs:
cpd = TabularCPD('grade', 3, values, evidence=['intel', 'diff'], evidence_card=[3, 2])
self.assertEqual(cpd.variable, 'grade')
self.assertEqual(cpd.variable_card, 3)
np_test.assert_array_equal(cpd.cardinality, np.array([3, 3, 2]))
self.assertListEqual(list(cpd.variables), ['grade', 'intel', 'diff'])
np_test.assert_array_equal(cpd.values, np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.8, 0.8, 0.8, 0.8, 0.8, 0.8]).reshape(3, 3, 2))
cpd = TabularCPD('grade', 3, [[0.1, 0.1],
[0.1, 0.1],
[0.8, 0.8]],
evidence=['evi1'], evidence_card=[2.0])
self.assertEqual(cpd.variable, 'grade')
self.assertEqual(cpd.variable_card, 3)
np_test.assert_array_equal(cpd.cardinality, np.array([3, 2]))
self.assertListEqual(list(cpd.variables), ['grade', 'evi1'])
np_test.assert_array_equal(cpd.values, np.array([0.1, 0.1,
0.1, 0.1,
0.8, 0.8]).reshape(3, 2))
def test_cpd_init_event_card_not_int(self):
self.assertRaises(TypeError, TabularCPD, 'event', '2', [[0.1, 0.9]])
def test_cpd_init_cardinality_not_specified(self):
self.assertRaises(ValueError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
['evi1', 'evi2'], [5])
self.assertRaises(ValueError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
['evi1', 'evi2'], [5.0])
self.assertRaises(ValueError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
['evi1'], [5, 6])
self.assertRaises(TypeError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
'evi1', [5, 6])
def test_cpd_init_value_not_2d(self):
self.assertRaises(TypeError, TabularCPD, 'event', 3, [[[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]],
['evi1', 'evi2'], [5, 6])
class TestTabularCPDMethods(unittest.TestCase):
def setUp(self):
self.cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
evidence=['intel', 'diff'], evidence_card=[3, 2])
self.cpd2 = TabularCPD('J', 2, [[0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4],
[0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]],
evidence=['A', 'B', 'C'], evidence_card=[2, 2, 2])
def test_marginalize_1(self):
self.cpd.marginalize(['diff'])
self.assertEqual(self.cpd.variable, 'grade')
self.assertEqual(self.cpd.variable_card, 3)
self.assertListEqual(list(self.cpd.variables), ['grade', 'intel'])
np_test.assert_array_equal(self.cpd.cardinality, np.array([3, 3]))
np_test.assert_array_equal(self.cpd.values.ravel(), np.array([0.1, 0.1, 0.1,
0.1, 0.1, 0.1,
0.8, 0.8, 0.8]))
def test_marginalize_2(self):
self.assertRaises(ValueError, self.cpd.marginalize, ['grade'])
def test_marginalize_3(self):
copy_cpd = self.cpd.copy()
copy_cpd.marginalize(['intel', 'diff'])
self.cpd.marginalize(['intel'])
self.cpd.marginalize(['diff'])
np_test.assert_array_almost_equal(self.cpd.values, copy_cpd.values)
def test_normalize(self):
cpd_un_normalized = TabularCPD('grade', 2, [[0.7, 0.2, 0.6, 0.2], [0.4, 0.4, 0.4, 0.8]],
['intel', 'diff'], [2, 2])
cpd_un_normalized.normalize()
np_test.assert_array_almost_equal(cpd_un_normalized.values, np.array([[[0.63636364, 0.33333333],
[0.6, 0.2]],
[[0.36363636, 0.66666667],
[0.4, 0.8]]]))
def test_normalize_not_in_place(self):
cpd_un_normalized = TabularCPD('grade', 2, [[0.7, 0.2, 0.6, 0.2], [0.4, 0.4, 0.4, 0.8]],
['intel', 'diff'], [2, 2])
np_test.assert_array_almost_equal(cpd_un_normalized.normalize(inplace=False).values,
np.array([[[0.63636364, 0.33333333],
[0.6, 0.2]],
[[0.36363636, 0.66666667],
[0.4, 0.8]]]))
def test_normalize_original_safe(self):
cpd_un_normalized = TabularCPD('grade', 2, [[0.7, 0.2, 0.6, 0.2], [0.4, 0.4, 0.4, 0.8]],
['intel', 'diff'], [2, 2])
cpd_un_normalized.normalize(inplace=False)
np_test.assert_array_almost_equal(cpd_un_normalized.values, np.array([[[0.7, 0.2], [0.6, 0.2]],
[[0.4, 0.4], [0.4, 0.8]]]))
def test__repr__(self):
grade_cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
evidence=['intel', 'diff'], evidence_card=[3, 2])
intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
diff_cpd = TabularCPD('grade', 3, [[0.1, 0.1], [0.1, 0.1], [0.8, 0.8]], evidence=['diff'], evidence_card=[2])
self.assertEqual(repr(grade_cpd), '<TabularCPD representing P(grade:3 | intel:3, diff:2) at {address}>'
.format(address=hex(id(grade_cpd))))
self.assertEqual(repr(intel_cpd), '<TabularCPD representing P(intel:3) at {address}>'
.format(address=hex(id(intel_cpd))))
self.assertEqual(repr(diff_cpd), '<TabularCPD representing P(grade:3 | diff:2) at {address}>'
.format(address=hex(id(diff_cpd))))
def test_copy(self):
copy_cpd = self.cpd.copy()
np_test.assert_array_equal(self.cpd.get_values(), copy_cpd.get_values())
def test_copy_original_safe(self):
copy_cpd = self.cpd.copy()
copy_cpd.reorder_parents(['diff', 'intel'])
np_test.assert_array_equal(self.cpd.get_values(),
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]))
def test_reduce_1(self):
self.cpd.reduce([('diff', 0)])
np_test.assert_array_equal(self.cpd.get_values(), np.array([[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1],
[0.8, 0.8, 0.8]]))
def test_reduce_2(self):
self.cpd.reduce([('intel', 0)])
np_test.assert_array_equal(self.cpd.get_values(), np.array([[0.1, 0.1],
[0.1, 0.1],
[0.8, 0.8]]))
def test_reduce_3(self):
self.cpd.reduce([('intel', 0), ('diff', 0)])
np_test.assert_array_equal(self.cpd.get_values(), np.array([[0.1],
[0.1],
[0.8]]))
def test_reduce_4(self):
self.assertRaises(ValueError, self.cpd.reduce, [('grade', 0)])
def test_reduce_5(self):
copy_cpd = self.cpd.copy()
copy_cpd.reduce([('intel', 2), ('diff', 1)])
self.cpd.reduce([('intel', 2)])
self.cpd.reduce([('diff', 1)])
np_test.assert_array_almost_equal(self.cpd.values, copy_cpd.values)
def test_get_values(self):
np_test.assert_array_equal(self.cpd.get_values(),
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]))
def test_reorder_parents_inplace(self):
new_vals = self.cpd2.reorder_parents(['B', 'A', 'C'])
np_test.assert_array_equal(new_vals, np.array([[0.9, 0.3, 0.8, 0.8, 0.9, 0.3, 0.4, 0.4],
[0.1, 0.7, 0.2, 0.2, 0.1, 0.7, 0.6, 0.6]]))
np_test.assert_array_equal(self.cpd2.get_values(),
np.array([[0.9, 0.3, 0.8, 0.8, 0.9, 0.3, 0.4, 0.4],
[0.1, 0.7, 0.2, 0.2, 0.1, 0.7, 0.6, 0.6]]))
def test_reorder_parents(self):
new_vals = self.cpd2.reorder_parents(['B', 'A', 'C'])
np_test.assert_array_equal(new_vals, np.array([[0.9, 0.3, 0.8, 0.8, 0.9, 0.3, 0.4, 0.4],
[0.1, 0.7, 0.2, 0.2, 0.1, 0.7, 0.6, 0.6]]))
def test_reorder_parents_no_effect(self):
self.cpd2.reorder_parents(['C', 'A', 'B'], inplace=False)
np_test.assert_array_equal(self.cpd2.get_values(),
np.array([[0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4],
[0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]]))
def test_reorder_parents_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.cpd2.reorder_parents(['A', 'B', 'C'], inplace=False)
assert("Same ordering provided as current" in str(w[-1].message))
np_test.assert_array_equal(self.cpd2.get_values(),
np.array([[0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4],
[0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]]))
def tearDown(self):
del self.cpd
class TestJointProbabilityDistributionInit(unittest.TestCase):
def test_jpd_init(self):
jpd = JPD(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12) / 12)
np_test.assert_array_equal(jpd.cardinality, np.array([2, 3, 2]))
np_test.assert_array_equal(jpd.values, np.ones(12).reshape(2, 3, 2) / 12)
self.assertEqual(jpd.get_cardinality(['x1', 'x2', 'x3']), {'x1': 2, 'x2': 3, 'x3': 2})
def test_jpd_init_exception(self):
self.assertRaises(ValueError, JPD, ['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8))
class TestJointProbabilityDistributionMethods(unittest.TestCase):
def setUp(self):
self.jpd = JPD(['x1', 'x2', 'x3'], [2, 3, 2], values=np.ones(12) / 12)
self.jpd1 = JPD(['x1', 'x2', 'x3'], [2, 3, 2], values=np.ones(12) / 12)
self.jpd2 = JPD(['x1', 'x2', 'x3'], [2, 2, 3],
[0.126, 0.168, 0.126, 0.009, 0.045, 0.126, 0.252, 0.0224, 0.0056, 0.06, 0.036, 0.024])
self.jpd3 = JPD(['x1', 'x2', 'x3'], [2, 2, 2],
[5.0e-04, 5.225e-04, 0.00, 8.9775e-03, 9.9e-03, 5.39055e-02, 0.00, 9.261945e-01])
def test_jpd_marginal_distribution_list(self):
self.jpd.marginal_distribution(['x1', 'x2'])
np_test.assert_array_almost_equal(self.jpd.values,
np.array([[0.16666667, 0.16666667, 0.16666667],
[0.16666667, 0.16666667, 0.16666667]]))
np_test.assert_array_equal(self.jpd.cardinality, np.array([2, 3]))
dic = {'x1': 2, 'x2': 3}
self.assertEqual(self.jpd.get_cardinality(['x1', 'x2']), dic)
self.assertEqual(self.jpd.scope(), ['x1', 'x2'])
np_test.assert_almost_equal(np.sum(self.jpd.values), 1)
new_jpd = self.jpd1.marginal_distribution(['x1', 'x2'], inplace=False)
self.assertTrue(self.jpd1 != self.jpd)
self.assertTrue(new_jpd == self.jpd)
def test_marginal_distribution_str(self):
self.jpd.marginal_distribution('x1')
np_test.assert_array_almost_equal(self.jpd.values, np.array([0.5, 0.5]))
np_test.assert_array_equal(self.jpd.cardinality, np.array([2]))
self.assertEqual(self.jpd.scope(), ['x1'])
np_test.assert_almost_equal(np.sum(self.jpd.values), 1)
new_jpd = self.jpd1.marginal_distribution('x1', inplace=False)
self.assertTrue(self.jpd1 != self.jpd)
self.assertTrue(self.jpd == new_jpd)
def test_conditional_distribution_list(self):
self.jpd = self.jpd1.copy()
self.jpd.conditional_distribution([('x1', 1), ('x2', 0)])
np_test.assert_array_almost_equal(self.jpd.values, np.array([0.5, 0.5]))
np_test.assert_array_equal(self.jpd.cardinality, np.array([2]))
self.assertEqual(self.jpd.scope(), ['x3'])
np_test.assert_almost_equal(np.sum(self.jpd.values), 1)
new_jpd = self.jpd1.conditional_distribution([('x1', 1), ('x2', 0)], inplace=False)
self.assertTrue(self.jpd1 != self.jpd)
self.assertTrue(self.jpd == new_jpd)
def test_check_independence(self):
self.assertTrue(self.jpd2.check_independence(['x1'], ['x2']))
self.assertRaises(TypeError, self.jpd2.check_independence, 'x1', ['x2'])
self.assertRaises(TypeError, self.jpd2.check_independence, ['x1'], 'x2')
self.assertRaises(TypeError, self.jpd2.check_independence, ['x1'], ['x2'], 'x3')
self.assertFalse(self.jpd2.check_independence(['x1'], ['x2'], ('x3',), condition_random_variable=True))
self.assertFalse(self.jpd2.check_independence(['x1'], ['x2'], [('x3', 0)]))
self.assertTrue(self.jpd1.check_independence(['x1'], ['x2'], ('x3',), condition_random_variable=True))
self.assertTrue(self.jpd1.check_independence(['x1'], ['x2'], [('x3', 1)]))
self.assertTrue(self.jpd3.check_independence(['x1'], ['x2'], ('x3',), condition_random_variable=True))
def test_get_independencies(self):
independencies = Independencies(['x1', 'x2'], ['x2', 'x3'], ['x3', 'x1'])
independencies1 = Independencies(['x1', 'x2'])
self.assertEqual(self.jpd1.get_independencies(), independencies)
self.assertEqual(self.jpd2.get_independencies(), independencies1)
self.assertEqual(self.jpd1.get_independencies([('x3', 0)]), independencies1)
self.assertEqual(self.jpd2.get_independencies([('x3', 0)]), Independencies())
def test_minimal_imap(self):
bm = self.jpd1.minimal_imap(order=['x1', 'x2', 'x3'])
self.assertEqual(sorted(bm.edges()), sorted([('x1', 'x3'), ('x2', 'x3')]))
bm = self.jpd1.minimal_imap(order=['x2', 'x3', 'x1'])
self.assertEqual(sorted(bm.edges()), sorted([('x2', 'x1'), ('x3', 'x1')]))
bm = self.jpd2.minimal_imap(order=['x1', 'x2', 'x3'])
self.assertEqual(list(bm.edges()), [])
bm = self.jpd2.minimal_imap(order=['x1', 'x2'])
self.assertEqual(list(bm.edges()), [])
def test_repr(self):
self.assertEqual(repr(self.jpd1), '<Joint Distribution representing P(x1:2, x2:3, x3:2) at {address}>'.format(
address=hex(id(self.jpd1))))
def test_is_imap(self):
G1 = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
grade_cpd = TabularCPD('grade', 3,
[[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
evidence=['diff', 'intel'],
evidence_card=[2, 3])
G1.add_cpds(diff_cpd, intel_cpd, grade_cpd)
val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032,
0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128]
jpd = JPD(['diff', 'intel', 'grade'], [2, 3, 3], val)
self.assertTrue(jpd.is_imap(G1))
self.assertRaises(TypeError, jpd.is_imap, MarkovModel())
def tearDown(self):
del self.jpd
del self.jpd1
del self.jpd2
del self.jpd3
#
# class TestTreeCPDInit(unittest.TestCase):
# def test_init_single_variable_nodes(self):
# tree = TreeCPD([('B', DiscreteFactor(['A'], [2], [0.8, 0.2]), 0),
# ('B', 'C', 1),
# ('C', DiscreteFactor(['A'], [2], [0.1, 0.9]), 0),
# ('C', 'D', 1),
# ('D', DiscreteFactor(['A'], [2], [0.9, 0.1]), 0),
# ('D', DiscreteFactor(['A'], [2], [0.4, 0.6]), 1)])
#
# self.assertTrue('B' in tree.nodes())
# self.assertTrue('C' in tree.nodes())
# self.assertTrue('D' in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.8, 0.2]) in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.1, 0.9]) in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.9, 0.1]) in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.4, 0.6]) in tree.nodes())
#
#         self.assertTrue(('B', DiscreteFactor(['A'], [2], [0.8, 0.2])) in tree.edges())
#         self.assertTrue(('C', DiscreteFactor(['A'], [2], [0.1, 0.9])) in tree.edges())
#         self.assertTrue(('D', DiscreteFactor(['A'], [2], [0.9, 0.1])) in tree.edges())
#         self.assertTrue(('D', DiscreteFactor(['A'], [2], [0.4, 0.6])) in tree.edges())
# self.assertTrue(('C', 'D') in tree.edges())
# self.assertTrue(('B', 'C') in tree.edges())
#
# self.assertEqual(tree['B'][DiscreteFactor(['A'], [2], [0.8, 0.2])]['label'], 0)
# self.assertEqual(tree['B']['C']['label'], 1)
# self.assertEqual(tree['C'][DiscreteFactor(['A'], [2], [0.1, 0.9])]['label'], 0)
# self.assertEqual(tree['C']['D']['label'], 1)
# self.assertEqual(tree['D'][DiscreteFactor(['A'], [2], [0.9, 0.1])]['label'], 0)
# self.assertEqual(tree['D'][DiscreteFactor(['A'], [2], [0.4, 0.6])]['label'], 1)
#
# self.assertRaises(ValueError, tree.add_edges_from, [('F', 'G')])
#
# def test_init_self_loop(self):
# self.assertRaises(ValueError, TreeCPD, [('B', 'B', 0)])
#
# def test_init_cycle(self):
# self.assertRaises(ValueError, TreeCPD, [('A', 'B', 0), ('B', 'C', 1), ('C', 'A', 0)])
#
# def test_init_multi_variable_nodes(self):
# tree = TreeCPD([(('B', 'C'), DiscreteFactor(['A'], [2], [0.8, 0.2]), (0, 0)),
# (('B', 'C'), 'D', (0, 1)),
# (('B', 'C'), DiscreteFactor(['A'], [2], [0.1, 0.9]), (1, 0)),
# (('B', 'C'), 'E', (1, 1)),
# ('D', DiscreteFactor(['A'], [2], [0.9, 0.1]), 0),
# ('D', DiscreteFactor(['A'], [2], [0.4, 0.6]), 1),
# ('E', DiscreteFactor(['A'], [2], [0.3, 0.7]), 0),
# ('E', DiscreteFactor(['A'], [2], [0.8, 0.2]), 1)
# ])
#
# self.assertTrue(('B', 'C') in tree.nodes())
# self.assertTrue('D' in tree.nodes())
# self.assertTrue('E' in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.8, 0.2]) in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.9, 0.1]) in tree.nodes())
#
#         self.assertTrue((('B', 'C'), DiscreteFactor(['A'], [2], [0.8, 0.2])) in tree.edges())
# self.assertTrue((('B', 'C'), 'E') in tree.edges())
# self.assertTrue(('D', DiscreteFactor(['A'], [2], [0.4, 0.6])) in tree.edges())
# self.assertTrue(('E', DiscreteFactor(['A'], [2], [0.8, 0.2])) in tree.edges())
#
# self.assertEqual(tree[('B', 'C')][DiscreteFactor(['A'], [2], [0.8, 0.2])]['label'], (0, 0))
# self.assertEqual(tree[('B', 'C')]['D']['label'], (0, 1))
# self.assertEqual(tree['D'][DiscreteFactor(['A'], [2], [0.9, 0.1])]['label'], 0)
# self.assertEqual(tree['E'][DiscreteFactor(['A'], [2], [0.3, 0.7])]['label'], 0)
#
#
# class TestTreeCPD(unittest.TestCase):
# def setUp(self):
# self.tree1 = TreeCPD([('B', DiscreteFactor(['A'], [2], [0.8, 0.2]), '0'),
# ('B', 'C', '1'),
# ('C', DiscreteFactor(['A'], [2], [0.1, 0.9]), '0'),
# ('C', 'D', '1'),
# ('D', DiscreteFactor(['A'], [2], [0.9, 0.1]), '0'),
# ('D', DiscreteFactor(['A'], [2], [0.4, 0.6]), '1')])
#
# self.tree2 = TreeCPD([('C','A','0'),('C','B','1'),
# ('A', DiscreteFactor(['J'], [2], [0.9, 0.1]), '0'),
# ('A', DiscreteFactor(['J'], [2], [0.3, 0.7]), '1'),
# ('B', DiscreteFactor(['J'], [2], [0.8, 0.2]), '0'),
# ('B', DiscreteFactor(['J'], [2], [0.4, 0.6]), '1')])
#
# def test_add_edge(self):
# self.tree1.add_edge('yolo', 'yo', 0)
# self.assertTrue('yolo' in self.tree1.nodes() and 'yo' in self.tree1.nodes())
# self.assertTrue(('yolo', 'yo') in self.tree1.edges())
# self.assertEqual(self.tree1['yolo']['yo']['label'], 0)
#
# def test_add_edges_from(self):
# self.tree1.add_edges_from([('yolo', 'yo', 0), ('hello', 'world', 1)])
# self.assertTrue('yolo' in self.tree1.nodes() and 'yo' in self.tree1.nodes() and
# 'hello' in self.tree1.nodes() and 'world' in self.tree1.nodes())
# self.assertTrue(('yolo', 'yo') in self.tree1.edges())
# self.assertTrue(('hello', 'world') in self.tree1.edges())
# self.assertEqual(self.tree1['yolo']['yo']['label'], 0)
# self.assertEqual(self.tree1['hello']['world']['label'], 1)
#
# def test_to_tabular_cpd(self):
# tabular_cpd = self.tree1.to_tabular_cpd()
# self.assertEqual(tabular_cpd.evidence, ['D', 'C', 'B'])
# self.assertEqual(tabular_cpd.evidence_card, [2, 2, 2])
# self.assertEqual(list(tabular_cpd.variables), ['A', 'B', 'C', 'D'])
# np_test.assert_array_equal(tabular_cpd.values,
# np.array([0.8, 0.8, 0.8, 0.8, 0.1, 0.1, 0.9, 0.4,
# 0.2, 0.2, 0.2, 0.2, 0.9, 0.9, 0.1, 0.6]))
#
# tabular_cpd = self.tree2.to_tabular_cpd()
# self.assertEqual(tabular_cpd.evidence, ['A', 'B', 'C'])
# self.assertEqual(tabular_cpd.evidence_card, [2, 2, 2])
# self.assertEqual(list(tabular_cpd.variables), ['J', 'C', 'B', 'A'])
# np_test.assert_array_equal(tabular_cpd.values,
# np.array([ 0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4,
# 0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]))
#
# @unittest.skip('Not implemented yet')
# def test_to_tabular_cpd_parent_order(self):
# tabular_cpd = self.tree1.to_tabular_cpd('A', parents_order=['D', 'C', 'B'])
# self.assertEqual(tabular_cpd.evidence, ['D', 'C', 'B'])
# self.assertEqual(tabular_cpd.evidence_card, [2, 2, 2])
# self.assertEqual(list(tabular_cpd.variables), ['A', 'D', 'C', 'B'])
# np_test.assert_array_equal(tabular_cpd.values,
# np.array([0.8, 0.1, 0.8, 0.9, 0.8, 0.1, 0.8, 0.4,
# 0.2, 0.9, 0.2, 0.1, 0.2, 0.9, 0.2, 0.6]))
#
# tabular_cpd = self.tree2.to_tabular_cpd('A', parents_order=['E', 'D', 'C', 'B'])
#
# @unittest.skip('Not implemented yet')
# def test_to_rule_cpd(self):
# rule_cpd = self.tree1.to_rule_cpd()
# self.assertEqual(rule_cpd.cardinality(), {'A': 2, 'B': 2, 'C': 2, 'D': 2})
# self.assertEqual(rule_cpd.scope(), {'A', 'B', 'C', 'D'})
# self.assertEqual(rule_cpd.variable, 'A')
# self.assertEqual(rule_cpd.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.1,
# ('A_0', 'B_1', 'C_1', 'D_0'): 0.9,
# ('A_1', 'B_1', 'C_1', 'D_0'): 0.1,
# ('A_0', 'B_1', 'C_1', 'D_1'): 0.4,
#                                           ('A_1', 'B_1', 'C_1', 'D_1'): 0.6})
#
# rule_cpd = self.tree2.to_rule_cpd()
# self.assertEqual(rule_cpd.cardinality(), {'A': 2, 'B': 2, 'C': 2, 'D': 2, 'E': 2})
# self.assertEqual(rule_cpd.scope(), {'A', 'B', 'C', 'D', 'E'})
# self.assertEqual(rule_cpd.variable, 'A')
# self.assertEqual(rule_cpd.rules, {('A_0', 'B_0', 'C_0'): 0.8,
# ('A_1', 'B_0', 'C_0'): 0.2,
# ('A_0', 'B_0', 'C_1', 'D_0'): 0.9,
# ('A_1', 'B_0', 'C_1', 'D_0'): 0.1,
# ('A_0', 'B_0', 'C_1', 'D_1'): 0.4,
# ('A_1', 'B_0', 'C_1', 'D_1'): 0.6,
# ('A_0', 'B_1', 'C_0'): 0.1,
# ('A_1', 'B_1', 'C_0'): 0.9,
# ('A_0', 'B_1', 'C_1', 'E_0'): 0.3,
# ('A_1', 'B_1', 'C_1', 'E_0'): 0.7,
# ('A_0', 'B_1', 'C_1', 'E_1'): 0.8,
# ('A_1', 'B_1', 'C_1', 'E_1'): 0.2})
#
#
# class TestRuleCPDInit(unittest.TestCase):
# def test_init_without_errors_rules_none(self):
# rule_cpd = RuleCPD('A')
# self.assertEqual(rule_cpd.variable, 'A')
#
# def test_init_without_errors_rules_not_none(self):
# rule_cpd = RuleCPD('A', {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(rule_cpd.variable, 'A')
# self.assertEqual(rule_cpd.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
#
# def test_init_with_errors(self):
# self.assertRaises(ValueError, RuleCPD, 'A', {('A_0',): 0.5,
# ('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
#
#
# class TestRuleCPDMethods(unittest.TestCase):
# def setUp(self):
# self.rule_cpd_with_rules = RuleCPD('A', {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6})
# self.rule_cpd_without_rules = RuleCPD('A')
#
# def test_add_rules_single(self):
# self.rule_cpd_with_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_with_rules.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_with_rules.variable, 'A')
# self.rule_cpd_without_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_without_rules.rules, {('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_without_rules.variable, 'A')
#
# def test_add_rules_multiple(self):
# self.rule_cpd_with_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_with_rules.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_with_rules.variable, 'A')
# self.rule_cpd_without_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_without_rules.rules, {('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_without_rules.variable, 'A')
#
# def test_add_rules_error(self):
# self.assertRaises(ValueError, self.rule_cpd_with_rules.add_rules, {('A_0',): 0.8})
#
# def test_scope(self):
# self.assertEqual(self.rule_cpd_with_rules.scope(), {'A', 'B', 'C'})
# self.assertEqual(self.rule_cpd_without_rules.scope(), set())
#
# def test_cardinality(self):
# self.assertEqual(self.rule_cpd_with_rules.cardinality(), {'A': 2, 'B': 2, 'C': 1})
# self.assertEqual(self.rule_cpd_without_rules.cardinality(), {})
#
# def tearDown(self):
# del self.rule_cpd_without_rules
#
|
khalibartan/pgmpy
|
pgmpy/tests/test_factors/test_discrete/test_Factor.py
|
Python
|
mit
| 56,244 | 0.004054 |
from collections import deque
from threading import RLock, Condition, currentThread
import sys
import time
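# OnRequestQueue implements a simple fan-out queue: every reader registers
# its own QueueEnd, put() broadcasts an item to all registered ends, and
# read() blocks on a Condition until items arrive or the end is cancelled.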
class OnRequestQueue:
ListUsedModFunctions = ("append", "popleft")
class QueueEnd:
def __init__(self, queueList=None):
if queueList is not None:
self.q = queueList
else:
self.q = deque()
self.cond = Condition()
self.cancel = False
def __repr__(self):
with self.cond:
return "<QueueEnd %r>" % self.q
def put(self, item):
with self.cond:
if self.cancel: return False
self.q.append(item)
self.cond.notifyAll()
def setCancel(self):
with self.cond:
self.cancel = True
self.cond.notifyAll()
def __init__(self):
self.queues = set()
def put(self, item):
for q in list(self.queues):
q.put(item)
def cancelAll(self):
for q in list(self.queues):
q.setCancel()
def read(self, *otherQueues, **kwargs):
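        # Create a private QueueEnd, register it with this queue (and any
        # extra OnRequestQueues), then yield items as they arrive until
        # cancelled.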
q = self.QueueEnd(**kwargs)
thread = currentThread()
thread.waitQueue = q
if thread.cancel:
            # This avoids a small race condition: if the thread that wants
            # to join+cancel us was faster, it never saw our waitQueue and
            # will already have set cancel to True.
return
        for reqqu in otherQueues: assert isinstance(reqqu, OnRequestQueue)
reqQueues = (self,) + otherQueues
for reqqu in reqQueues: reqqu.queues.add(q)
while True:
with q.cond:
# Note on cancel-behavior:
# Earlier, we always still yielded all left items in the queue
# before breaking out here. This behavior doesn't fit if you
# want to cancel as fast as possible and when you have a
# persistent queue anyway - while you might hang at some entry.
if q.cancel: break
l = list(q.q)
if not l:
q.cond.wait()
for item in l:
if q.cancel: break
yield item
with q.cond:
popitem = q.q.popleft()
assert popitem is item
for reqqu in reqQueues: reqqu.queues.remove(q)
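# EventCallback wraps a target queue as a callable: each call is timestamped
# and enqueued as a (callback, args, kwargs) tuple for later dispatch.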
class EventCallback:
def __init__(self, targetQueue, name=None, reprname=None, extraCall=None):
self.targetQueue = targetQueue
self.name = name
self.reprname = reprname
self.extraCall = extraCall
def __call__(self, *args, **kwargs):
        if "timestamp" not in kwargs:
kwargs["timestamp"] = time.time()
if self.extraCall:
self.extraCall(*args, **kwargs)
self.targetQueue.put((self, args, kwargs))
def __repr__(self):
if self.reprname:
return self.reprname
else:
return "<EventCallback %s>" % self.name
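# Event is a minimal observer list: register() stores weak references to
# callables, and push() invokes the live ones, pruning dead references.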
class Event:
def __init__(self):
self.lock = RLock()
self.targets = []
def push(self, *args):
with self.lock:
targets = self.targets
for weakt in targets:
t = weakt() # resolve weakref
if t: t(*args)
else: self.targets.remove(weakt)
def register(self, target):
assert sys.getrefcount(target) > 1, "target will be weakrefed, thus we need more references to it"
import weakref
with self.lock:
self.targets.append(weakref.ref(target))
|
albertz/music-player
|
src/Events.py
|
Python
|
bsd-2-clause
| 2,934 | 0.039877 |
import random
from cloudbot.util import http, formatting
def api_get(kind, query):
"""Use the RESTful Google Search API"""
url = 'http://ajax.googleapis.com/ajax/services/search/%s?' \
'v=1.0&safe=moderate'
return http.get_json(url % kind, q=query)
# @hook.command("googleimage", "gis", "image")
def googleimage(text):
"""<query> - returns the first google image result for <query>"""
parsed = api_get('images', text)
if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error searching for images: {}'.format(parsed['responseStatus']))
if not parsed['responseData']['results']:
return 'no images found'
return random.choice(parsed['responseData']['results'][:10])['unescapedUrl']
# @hook.command("google", "g", "search")
def google(text):
"""<query> - returns the first google search result for <query>"""
parsed = api_get('web', text)
if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error searching for pages: {}'.format(parsed['responseStatus']))
if not parsed['responseData']['results']:
return 'No results found.'
result = parsed['responseData']['results'][0]
title = http.unescape(result['titleNoFormatting'])
title = formatting.truncate_str(title, 60)
content = http.unescape(result['content'])
if not content:
content = "No description available."
else:
content = http.html.fromstring(content).text_content()
content = formatting.truncate_str(content, 150).replace('\n', '')
return '{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title, content)
|
weylin/CloudBot
|
plugins/google.py
|
Python
|
gpl-3.0
| 1,653 | 0.00242 |
#!/usr/bin/env python2
import os
import sys
import json
import yaml
def main():
with open(sys.argv[1], 'rb') as f:
known_issues = yaml.safe_load(f.read())
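    # known_issues maps test262 test names to annotations ('diagnosed' /
    # 'knownissue'); stdin carries the raw runner log re-categorised below.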
skipstrings = [
'passed in strict mode',
'passed in non-strict mode',
'failed in strict mode as expected',
'failed in non-strict mode as expected'
]
in_failed_tests = False
tofix_count = 0 # count of bugs that will be fixed (no uncertainty about proper behavior etc)
known_errors = []
diagnosed_errors = []
unknown_errors = []
other_errors = []
for line in sys.stdin:
if len(line) > 1 and line[-1] == '\n':
line = line[:-1]
# Skip success cases
skip = False
for sk in skipstrings:
if sk in line:
skip = True
if skip:
continue
# Augment error list with "known bugs"
print(line) # print error list as is, then refined version later
if 'failed tests' in line.lower():
in_failed_tests = True
continue
if in_failed_tests and line.strip() == '':
in_failed_tests = False
continue
if in_failed_tests:
# " intl402/ch12/12.2/12.2.3_c in non-strict mode"
tmp = line.strip().split(' ')
test = tmp[0]
matched = False
for kn in known_issues:
if kn.get('test', None) != test:
continue
if kn.has_key('diagnosed'):
tofix_count += 1
diagnosed_errors.append(line + ' // diagnosed: ' + kn['diagnosed'])
elif kn.has_key('knownissue'):
# Don't bump tofix_count, as testcase expected result is not certain
known_errors.append(line + ' // KNOWN: ' + kn['knownissue'])
else:
tofix_count += 1
unknown_errors.append(line + ' // ??? (rule matches)')
kn['used'] = True # mark rule used
matched = True
break
if matched:
continue
# no match, to fix
other_errors.append(line)
tofix_count += 1
print('')
print('=== CATEGORISED ERRORS ===')
print('')
# With ES2015+ semantic changes to ES5 there are too many known
# issues to print by default.
#for i in known_errors:
# print(i)
for i in diagnosed_errors:
print(i)
for i in unknown_errors:
print(i)
for i in other_errors:
print(i)
# Check for unused rules (e.g. bugs fixed)
print('')
for kn in known_issues:
if not kn.has_key('used'):
print('WARNING: unused rule: ' + json.dumps(kn))
# Used by testclient
if len(unknown_errors) > 0 or len(other_errors) > 0:
print('TEST262 FAILED')
elif len(known_errors) > 0 or len(diagnosed_errors) > 0:
# Known and diagnosed errors don't indicate test failure
# as far as GitHub status is concerned.
print('TEST262 SUCCESS')
else:
print('TEST262 SUCCESS')
# To fix count
print('')
print('KNOWN ISSUE COUNT: ' + str(len(known_errors)))
print('TO-FIX COUNT: ' + str(tofix_count))
print(' = test case failures which need fixing (Duktape bugs, uninvestigated)')
if __name__ == '__main__':
main()
|
svaarala/duktape
|
util/filter_test262_log.py
|
Python
|
mit
| 3,455 | 0.002894 |
import unittest
from katas.kyu_6.longest_2_character_substring import substring
class SubstringTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(substring(''), '')
def test_equals_2(self):
self.assertEqual(substring('a'), 'a')
def test_equals_3(self):
self.assertEqual(substring('aa'), 'aa')
def test_equals_4(self):
self.assertEqual(substring('aaa'), 'aaa')
def test_equals_5(self):
self.assertEqual(substring('ab'), 'ab')
def test_equals_6(self):
self.assertEqual(substring('aba'), 'aba')
def test_equals_7(self):
self.assertEqual(substring('abc'), 'ab')
def test_equals_8(self):
self.assertEqual(substring('abacd'), 'aba')
def test_equals_9(self):
self.assertEqual(substring('abcba'), 'bcb')
def test_equals_10(self):
self.assertEqual(substring('bbacc'), 'bba')
def test_equals_11(self):
self.assertEqual(substring('ccddeeff'), 'ccdd')
def test_equals_12(self):
self.assertEqual(substring('abacddcd'), 'cddcd')
def test_equals_13(self):
self.assertEqual(substring('cefageaacceaccacca'), 'accacca')
|
the-zebulan/CodeWars
|
tests/kyu_6_tests/test_longest_2_character_substring.py
|
Python
|
mit
| 1,191 | 0 |
import sip
sip.setdestroyonexit(0)
import os, shutil
import numpy as np
from sparkle.QtWrapper import QtGui
tempfolder = os.path.join(os.path.abspath(os.path.dirname(__file__)), u"tmp")
app = None
# executes once before all tests
def setup():
if not os.path.exists(tempfolder):
os.mkdir(tempfolder)
np.warnings.filterwarnings('ignore', "All-NaN axis encountered", RuntimeWarning)
global app
app = QtGui.QApplication([])
def teardown():
shutil.rmtree(tempfolder, ignore_errors=True)
np.warnings.resetwarnings()
global app
app.exit(0)
app = None
|
portfors-lab/sparkle
|
test/tests/gui/__init__.py
|
Python
|
gpl-3.0
| 599 | 0.013356 |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.7.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x05\x96\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x00\xe0\x77\x3d\xf8\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\x18\x74\x45\
\x58\x74\x54\x69\x74\x6c\x65\x00\x47\x49\x53\x20\x69\x63\x6f\x6e\
\x20\x74\x68\x65\x6d\x65\x20\x30\x2e\x32\xee\x53\xa0\xa0\x00\x00\
\x00\x18\x74\x45\x58\x74\x41\x75\x74\x68\x6f\x72\x00\x52\x6f\x62\
\x65\x72\x74\x20\x53\x7a\x63\x7a\x65\x70\x61\x6e\x65\x6b\x5f\x56\
\xb1\x08\x00\x00\x00\x27\x74\x45\x58\x74\x44\x65\x73\x63\x72\x69\
\x70\x74\x69\x6f\x6e\x00\x68\x74\x74\x70\x3a\x2f\x2f\x72\x6f\x62\
\x65\x72\x74\x2e\x73\x7a\x63\x7a\x65\x70\x61\x6e\x65\x6b\x2e\x70\
\x6c\x90\x59\x48\x60\x00\x00\x00\x18\x74\x45\x58\x74\x43\x72\x65\
\x61\x74\x69\x6f\x6e\x20\x54\x69\x6d\x65\x00\x32\x30\x30\x38\x2d\
\x31\x32\x2d\x31\x32\x58\x2e\x3b\xbf\x00\x00\x00\x52\x74\x45\x58\
\x74\x43\x6f\x70\x79\x72\x69\x67\x68\x74\x00\x43\x43\x20\x41\x74\
\x74\x72\x69\x62\x75\x74\x69\x6f\x6e\x2d\x53\x68\x61\x72\x65\x41\
\x6c\x69\x6b\x65\x20\x68\x74\x74\x70\x3a\x2f\x2f\x63\x72\x65\x61\
\x74\x69\x76\x65\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x6f\x72\x67\x2f\
\x6c\x69\x63\x65\x6e\x73\x65\x73\x2f\x62\x79\x2d\x73\x61\x2f\x33\
\x2e\x30\x2f\x5e\x83\x5a\xbc\x00\x00\x04\x16\x49\x44\x41\x54\x48\
\x89\x95\x93\x6d\x6c\x53\x55\x18\xc7\x7f\xf7\xde\xde\x7b\xdb\xb5\
\xb7\x4c\xde\xc6\x14\x37\x32\x12\x08\x0b\x64\xf8\x01\xb2\xb8\xad\
\xc9\x3e\x30\x12\x83\xd9\x10\x05\xb7\x25\x0c\x21\x42\x0c\x0c\x48\
\xcc\x28\x16\x0d\x01\x92\x45\x8a\x1f\xe4\x25\x82\xa0\x4b\xc0\x28\
\xcc\x28\x26\xc2\x17\x08\xfb\x00\x63\xc9\xfa\xc1\xc0\x4c\xd4\xac\
\xdb\x9c\x61\x71\x05\x9c\xb3\x8c\x5b\xa0\x5d\x6f\x8f\x1f\x48\x47\
\x5b\xee\xdc\xfc\x27\x27\xf7\xe5\xf9\x9f\xf3\x3b\xcf\xf3\x9c\x23\
\xbd\xb5\x61\xdd\x25\x24\x0a\x99\x4a\x82\xc8\xb7\x17\xbe\x7b\x7d\
\x4a\x5f\x8e\x1c\x48\x14\xbe\xdd\xb0\x01\xc3\x6d\x00\xf0\x30\xf6\
\x90\x0b\xdf\xb4\x63\xf3\x6f\xea\x4d\xd8\x02\x60\x62\xa1\x47\x4f\
\xc6\x67\x27\x92\xca\x3c\x21\x39\xee\x1a\x6e\x63\x24\x6d\x4a\xc7\
\x27\xd3\x9a\x1d\x07\x36\x63\x59\x01\x14\xa5\xf5\xf2\x89\xfd\x6d\
\x99\x31\x39\xf3\xe3\x71\x7c\x7c\xde\xec\x39\x73\x74\x21\x29\xff\
\x6f\xb7\x96\xb5\xbf\x65\x4f\xcb\x42\x2c\x2b\x90\x1b\x92\xe1\x69\
\x09\x00\x5c\xba\x7a\xf7\xf7\xc1\x41\x00\x69\xcc\x1c\x93\xd2\xa6\
\x74\xdc\x4e\xd5\xd5\x07\x1c\xe3\x56\xd2\x71\xf8\xe3\xc3\xa0\x28\
\xad\xb9\x71\x07\x82\x48\x46\x7d\x47\xc6\x51\x8b\x9d\x4e\x5d\x39\
\x7f\xfe\xfb\x17\x65\xac\x3f\x27\x9c\x82\x88\x1d\x40\x29\x36\x0f\
\xce\x9f\xbf\x60\x46\xb8\x37\x4c\xe7\xd7\x47\xdb\x9e\x33\x08\x21\
\xb2\x46\x65\xc3\x4e\x71\x2d\x3c\x2a\x56\x6d\xf1\xc7\x2a\x1a\x9b\
\xcb\x73\xe3\x99\xa3\xa2\xb1\xb9\x7c\xd5\x16\x7f\xec\x5a\x78\x54\
\x54\x36\xec\x14\x76\x9e\xac\x1e\xac\xd9\x71\x60\xb3\xe1\x31\xe8\
\x1f\x18\xa0\xbe\xbe\x3e\xcf\xa9\xea\x17\xab\xd7\x6f\xf7\xd8\x96\
\x66\xfd\x76\x8f\x53\xd5\x2f\xd6\xd7\xd7\xe7\xf5\x0f\x0c\x60\x78\
\x8c\xa7\xcd\xce\x51\x16\x00\xcb\x0a\xf8\xf7\xfa\xe9\xbc\x7e\x83\
\xd2\xd2\x52\xca\xca\x96\xe7\x2b\x86\xfb\x94\x6d\x69\x0c\xf7\xa9\
\xb2\xb2\xe5\xf9\xa5\xa5\xa5\x74\x5e\xbf\x81\x7f\xaf\x9f\x49\x9b\
\xfc\x6c\x96\xd2\x1a\x0c\x06\x1f\x18\x5e\x4f\x12\xa0\x6e\x6d\x9d\
\xcb\xa9\xeb\x75\xbe\xc6\x5d\xb5\x99\x36\x5f\xe3\xae\x5a\xa7\xae\
\xd7\xd5\xad\xad\x73\x01\x18\x5e\x4f\x32\x18\x0c\x3e\xb0\x6b\x72\
\x16\xe0\xf2\x89\xfd\x6d\xd1\xd8\xc8\xa2\x70\xb8\x2f\x61\x9a\x26\
\x9a\xa6\xd1\xd4\xb4\xc9\xad\x6a\xda\xd9\xf2\x86\xdd\x05\x00\xe5\
\x0d\xbb\x0b\x54\x4d\x3b\xdb\xd4\xb4\xc9\xad\x69\x1a\xa6\x69\x12\
\x0e\xf7\x25\xa2\xb1\x91\x45\xb9\x77\xe0\xf9\x0c\x80\xae\x73\x27\
\xef\xcb\x92\xdc\xd6\xd1\xd1\x11\x07\x28\x2a\x2a\xc2\xe7\xab\xca\
\x33\x9c\x7a\xbb\x24\x49\x92\xe1\xd4\xdb\x7d\xbe\xaa\xbc\xa2\xa2\
\x22\x00\x3a\x3a\x3a\xe2\xb2\x24\xb7\x75\x9d\x3b\x79\xdf\xae\x94\
\xcf\x01\x00\x1e\x25\xc7\x0e\x85\x42\x21\xcb\x34\x4d\x00\x6a\x6a\
\x56\xab\x86\xd7\x5b\xfe\xda\xb6\x7d\x31\xc3\xeb\x2d\xaf\xa9\x59\
\xad\x02\x98\xa6\x49\x28\x14\xb2\x1e\x25\xc7\x0e\xd9\xad\x03\x20\
\x09\x21\x6c\x03\xab\x36\xfb\x8f\xaf\x58\xb9\xe2\xdd\xda\xda\x5a\
\x1d\xe0\xd8\xd1\x63\x6c\xdd\xb6\x95\xd3\x9f\x9f\xa6\xf9\x9d\x4a\
\x52\x7d\x1f\x91\xf8\xbb\x0b\x91\x4a\x24\x34\x39\xf9\xd8\x66\x89\
\x90\x80\xc0\xa4\x80\x8a\x8d\xef\xcd\x75\x2a\x9e\xc1\x40\x20\x90\
\xe7\xf1\x78\xb8\xdd\xd3\xc3\xcd\xce\x4e\x2a\xab\xaa\x58\x16\xdf\
\x8d\x14\xfb\x19\x97\x51\x82\x2c\x3b\x91\x15\x27\x92\xa2\x21\xac\
\x04\x42\x58\xcc\x5a\xd8\xc8\x9d\x9f\x3e\xc0\x4a\x44\x6f\x4e\x0a\
\xb0\xcb\x22\xad\xe4\x55\x0d\x63\x6e\x05\x0e\xed\x85\x2c\xbf\xaa\
\xcf\xa6\x70\xe9\xfb\xb8\xf2\x97\x32\x32\xf0\x15\xfd\x37\x37\xda\
\xf7\x20\xad\xdc\x5e\x64\x4a\x76\x64\xdf\x3f\xe7\x8c\xc5\xcc\x7f\
\xe5\x20\xae\xfc\xa5\xc4\xcd\x41\x46\x87\x2e\x3d\xf5\xfd\x17\x20\
\xf7\x44\x65\x01\x64\x75\xe2\xdd\x53\xe0\xe3\xa5\x65\x01\x34\xf7\
\xcb\x24\xe3\xa3\xdc\xfd\xf5\x18\xc2\x7a\x3c\x35\xc0\x2e\x8b\xdc\
\x6c\xbc\xf3\xaa\x29\x5c\xd2\x8c\x43\x9f\x49\xca\x8a\xf3\x57\xdf\
\x97\x3c\x79\xd8\xff\x6c\x23\x53\x01\x72\xb3\x48\x3f\xa3\xc3\x57\
\x70\xcf\x59\x49\x74\xf8\x2a\xff\x0c\xfd\x08\x08\x22\xbf\x7c\xc2\
\x9d\x5b\xfb\x88\x0e\x5f\x21\x3a\x7c\x65\x7a\x80\xcc\x2c\x22\x91\
\x08\xa1\x50\xc8\x02\x40\xa4\x98\x55\xfc\x26\x25\xaf\x9e\xe6\x5e\
\xef\x29\x06\xbb\x77\x30\x74\xeb\x43\x44\x6a\x7c\x62\x4c\x1b\xd0\
\x75\xee\xe4\x7d\x4d\xd5\xbb\xbf\x38\x73\x06\x4d\xd5\xbb\x01\x66\
\x2d\x58\x8f\x6b\xc6\x12\x24\x49\x61\x66\xf1\x1b\xdc\xeb\xfd\xcc\
\x76\xee\xb4\x00\x00\x8a\x22\x97\xb4\xec\xd9\x83\xa2\xc8\x25\x48\
\xf4\xa8\xae\x02\x06\xbb\xb7\x73\xfb\x87\x45\xfc\x11\x6a\xb6\x9f\
\x24\xd1\xe3\x98\x2e\x00\x45\x39\x74\x24\x78\x24\x80\xa2\xb4\x92\
\x62\x28\xf2\xdb\xa7\xc7\x11\x2c\x9e\xd4\x2f\xd1\x4b\x8a\x96\x7f\
\x01\xb3\x71\xdb\xcb\x12\x7d\x31\x70\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x08\
\x09\xcb\x6f\x53\
\x00\x44\
\x00\x73\x00\x67\x00\x54\x00\x6f\x00\x6f\x00\x6c\x00\x73\
\x00\x0a\
\x0b\x6f\x47\xe0\
\x00\x44\
\x00\x73\x00\x67\x00\x54\x00\x6f\x00\x6f\x00\x6c\x00\x73\x00\x4f\x00\x70\
\x00\x0d\
\x01\xed\x72\x73\
\x00\x4d\
\x00\x69\x00\x6c\x00\x69\x00\x74\x00\x61\x00\x72\x00\x79\x00\x54\x00\x6f\x00\x6f\x00\x6c\x00\x73\
\x00\x13\
\x0c\xc0\x02\x64\
\x00\x6e\
\x00\x75\x00\x6d\x00\x65\x00\x72\x00\x69\x00\x63\x00\x61\x00\x6c\x00\x56\x00\x65\x00\x72\x00\x74\x00\x65\x00\x78\x00\x45\x00\x64\
\x00\x69\x00\x74\
\x00\x18\
\x0a\x0d\x3f\x47\
\x00\x76\
\x00\x65\x00\x63\x00\x74\x00\x6f\x00\x72\x00\x2d\x00\x65\x00\x64\x00\x69\x00\x74\x00\x2d\x00\x6b\x00\x65\x00\x79\x00\x62\x00\x6f\
\x00\x61\x00\x72\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x2a\x00\x02\x00\x00\x00\x01\x00\x00\x00\x04\
\x00\x00\x00\x44\x00\x02\x00\x00\x00\x01\x00\x00\x00\x05\
\x00\x00\x00\x64\x00\x02\x00\x00\x00\x01\x00\x00\x00\x06\
\x00\x00\x00\x90\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
phborba/dsgtoolsop
|
numericalVertexEdit/resources.py
|
Python
|
gpl-2.0
| 7,700 | 0.000649 |
#!/usr/bin/env python
from efl import evas
import unittest
class TestBoxBasics(unittest.TestCase):
def setUp(self):
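        # The "buffer" engine renders into memory, so these tests do not
        # need a display server.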
self.canvas = evas.Canvas(method="buffer",
size=(400, 500),
viewport=(0, 0, 400, 500))
self.canvas.engine_info_set(self.canvas.engine_info_get())
def tearDown(self):
self.canvas.delete()
def testConstructor(self):
box = evas.Box(self.canvas)
self.assertEqual(type(box), evas.Box)
box.delete()
def testConstructorBaseParameters(self):
size = (20, 30)
pos = (40, 50)
geometry = (60, 70, 80, 90)
color = (110, 120, 130, 140)
# create box using size/pos
box1 = evas.Box(self.canvas, name="box1", color=color, size=size, pos=pos)
self.assertEqual(box1.name, "box1")
self.assertEqual(box1.color, color)
self.assertEqual(box1.size, size)
self.assertEqual(box1.pos, pos)
box1.delete()
        # create box2 using geometry
box2 = evas.Box(self.canvas, name="box2", color=color, geometry=geometry)
self.assertEqual(box2.name, "box2")
self.assertEqual(box2.color, color)
self.assertEqual(box2.geometry, geometry)
box2.delete()
def testRemoveAll(self):
box = evas.Box(self.canvas)
r1 = evas.Rectangle(self.canvas)
r2 = evas.Rectangle(self.canvas)
box.append(r1)
box.append(r2)
box.remove_all(True)
self.assertEqual(r1.is_deleted(), True)
self.assertEqual(r2.is_deleted(), True)
box.delete()
if __name__ == '__main__':
unittest.main(verbosity=2)
evas.shutdown()
|
maikodaraine/EnlightenmentUbuntu
|
bindings/python/python-efl/tests/evas/test_04_object_box.py
|
Python
|
unlicense
| 1,720 | 0.001163 |
# Copyright (C) 2012 Alex Nitz, Josh Willis, Andrew Miller, Tito Dal Canton
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This modules provides a device independent Array class based on PyCUDA and Numpy.
"""
BACKEND_PREFIX="pycbc.types.array_"
import h5py
import os as _os
from decorator import decorator
import lal as _lal
import numpy as _numpy
from numpy import float32, float64, complex64, complex128, ones
from numpy.linalg import norm
import pycbc.scheme as _scheme
from pycbc.scheme import schemed, cpuonly
from pycbc.types.aligned import ArrayWithAligned
#! FIXME: the uint32 datatype has not been fully tested,
# we should restrict any functions that do not allow an
# array of uint32 integers
_ALLOWED_DTYPES = [_numpy.float32, _numpy.float64, _numpy.complex64,
_numpy.complex128, _numpy.uint32, _numpy.int32, _numpy.int]
try:
_ALLOWED_SCALARS = [int, long, float, complex] + _ALLOWED_DTYPES
except NameError:
_ALLOWED_SCALARS = [int, float, complex] + _ALLOWED_DTYPES
def _convert_to_scheme(ary):
if not isinstance(ary._scheme, _scheme.mgr.state.__class__):
converted_array = Array(ary, dtype=ary._data.dtype)
ary._data = converted_array._data
ary._scheme = _scheme.mgr.state
@decorator
def _convert(fn, self, *args):
# Convert this array to the current processing scheme
_convert_to_scheme(self)
return fn(self, *args)
@decorator
def _nocomplex(fn, self, *args):
if self.kind == 'real':
return fn(self, *args)
else:
raise TypeError( fn.__name__ + " does not support complex types")
@decorator
def _noreal(fn, self, *args):
if self.kind == 'complex':
return fn(self, *args)
else:
raise TypeError( fn.__name__ + " does not support real types")
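# Cast a Python scalar to the numpy type matching the requested precision
# ('single' or 'double'), preserving whether the value is real or complex.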
def force_precision_to_match(scalar, precision):
    if _numpy.iscomplex(scalar):
        if precision == 'single':
            return _numpy.complex64(scalar)
        else:
            return _numpy.complex128(scalar)
    else:
        if precision == 'single':
            return _numpy.float32(scalar)
        else:
            return _numpy.float64(scalar)
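# Pick a result dtype for mixed operands: prefer a complex dtype if any
# input is complex, otherwise fall back to the first dtype given.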
def common_kind(*dtypes):
    for dtype in dtypes:
        if dtype.kind == 'c':
            return dtype
    return dtypes[0]
@schemed(BACKEND_PREFIX)
def _to_device(array):
""" Move input to device """
@schemed(BACKEND_PREFIX)
def _copy_base_array(array):
""" Copy a backend array"""
@schemed(BACKEND_PREFIX)
def _scheme_matches_base_array(array):
""" Check that input matches array type for scheme """
class Array(object):
    """Array used to do numeric calculations on various compute
    devices. It is a convenience wrapper around numpy and
    pycuda.
"""
__array_priority__ = 1000
def __init__(self, initial_array, dtype=None, copy=True):
""" initial_array: An array-like object as specified by NumPy, this
also includes instances of an underlying data type as described in
section 3 or an instance of the PYCBC Array class itself. This
object is used to populate the data of the array.
        dtype: A NumPy style dtype that describes the type of the
        encapsulated data (float32, complex64, etc.)
copy: This defines whether the initial_array is copied to instantiate
the array or is simply referenced. If copy is false, new data is not
created, and so all arguments that would force a copy are ignored.
The default is to copy the given object.
"""
self._scheme=_scheme.mgr.state
self._saved = {}
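        # _saved caches slice results (see _memoize_single below) and is
        # reset whenever the underlying buffer is replaced, e.g. in resize().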
#Unwrap initial_array
if isinstance(initial_array, Array):
initial_array = initial_array._data
if not copy:
if not _scheme_matches_base_array(initial_array):
raise TypeError("Cannot avoid a copy of this array")
elif issubclass(type(self._scheme), _scheme.CPUScheme):
# ArrayWithAligned does not copy its memory; all
# the following does is add the 'isaligned' flag
# in case initial_array was a true numpy array
self._data = ArrayWithAligned(initial_array)
else:
self._data = initial_array
# Check that the dtype is supported.
if self._data.dtype not in _ALLOWED_DTYPES:
raise TypeError(str(self._data.dtype) + ' is not supported')
if dtype and dtype != self._data.dtype:
raise TypeError("Can only set dtype when allowed to copy data")
if copy:
# First we will check the dtype that we are given
if not hasattr(initial_array, 'dtype'):
initial_array = _numpy.array(initial_array)
# Determine the dtype to use
if dtype is not None:
dtype = _numpy.dtype(dtype)
if dtype not in _ALLOWED_DTYPES:
raise TypeError(str(dtype) + ' is not supported')
if dtype.kind != 'c' and initial_array.dtype.kind == 'c':
raise TypeError(str(initial_array.dtype) + ' cannot be cast as ' + str(dtype))
elif initial_array.dtype in _ALLOWED_DTYPES:
dtype = initial_array.dtype
else:
if initial_array.dtype.kind == 'c':
dtype = complex128
else:
dtype = float64
# Cast to the final dtype if needed
if initial_array.dtype != dtype:
initial_array = initial_array.astype(dtype)
#Create new instance with initial_array as initialization.
if issubclass(type(self._scheme), _scheme.CPUScheme):
if hasattr(initial_array, 'get'):
self._data = ArrayWithAligned(_numpy.array(initial_array.get()))
else:
self._data = ArrayWithAligned(_numpy.array(initial_array,
dtype=dtype, ndmin=1))
elif _scheme_matches_base_array(initial_array):
self._data = _copy_base_array(initial_array) # pylint:disable=assignment-from-no-return
else:
initial_array = _numpy.array(initial_array, dtype=dtype, ndmin=1)
self._data = _to_device(initial_array) # pylint:disable=assignment-from-no-return
@decorator
def _memoize_single(fn, self, arg):
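        # Memoize the wrapped method per str(arg); _getslice uses this so
        # repeated identical slices return the cached Array.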
badh = str(arg)
if badh in self._saved:
return self._saved[badh]
res = fn(self, arg) # pylint:disable=not-callable
self._saved[badh] = res
return res
@decorator
def _returnarray(fn, self, *args):
return Array(fn(self, *args), copy=False) # pylint:disable=not-callable
@decorator
def _returntype(fn, self, *args):
ary = fn(self,*args) # pylint:disable=not-callable
if ary is NotImplemented:
return NotImplemented
return self._return(ary)
def _return(self, ary):
"""Wrap the ary to return an Array type """
if isinstance(ary, Array):
return ary
return Array(ary, copy=False)
@decorator
def _checkother(fn, self, *args):
nargs = ()
for other in args:
self._typecheck(other)
if type(other) in _ALLOWED_SCALARS:
other = force_precision_to_match(other, self.precision)
nargs +=(other,)
elif isinstance(other, type(self)) or type(other) is Array:
if len(other) != len(self):
raise ValueError('lengths do not match')
if other.precision == self.precision:
_convert_to_scheme(other)
nargs += (other._data,)
else:
raise TypeError('precisions do not match')
else:
return NotImplemented
return fn(self, *nargs) # pylint:disable=not-callable
@decorator
def _vcheckother(fn, self, *args):
nargs = ()
for other in args:
self._typecheck(other)
if isinstance(other, type(self)) or type(other) is Array:
if len(other) != len(self):
raise ValueError('lengths do not match')
if other.precision == self.precision:
_convert_to_scheme(other)
nargs += (other._data,)
else:
raise TypeError('precisions do not match')
else:
raise TypeError('array argument required')
return fn(self,*nargs) # pylint:disable=not-callable
@decorator
def _vrcheckother(fn, self,*args):
nargs = ()
for other in args:
if isinstance(other, type(self)) or type(other) is Array:
if len(other) != len(self):
raise ValueError('lengths do not match')
if other.precision == self.precision:
_convert_to_scheme(other)
nargs += (other._data,)
else:
raise TypeError('precisions do not match')
else:
raise TypeError('array argument required')
return fn(self, *nargs) # pylint:disable=not-callable
@decorator
def _icheckother(fn, self, other):
""" Checks the input to in-place operations """
self._typecheck(other)
if type(other) in _ALLOWED_SCALARS:
if self.kind == 'real' and type(other) == complex:
raise TypeError('dtypes are incompatible')
other = force_precision_to_match(other, self.precision)
elif isinstance(other, type(self)) or type(other) is Array:
if len(other) != len(self):
raise ValueError('lengths do not match')
if self.kind == 'real' and other.kind == 'complex':
raise TypeError('dtypes are incompatible')
if other.precision == self.precision:
_convert_to_scheme(other)
other = other._data
else:
raise TypeError('precisions do not match')
else:
return NotImplemented
return fn(self, other) # pylint:disable=not-callable
def _typecheck(self, other):
""" Additional typechecking for other. Placeholder for use by derived
types.
"""
pass
@_returntype
@_convert
@_checkother
def __mul__(self,other):
""" Multiply by an Array or a scalar and return an Array. """
return self._data * other
__rmul__ = __mul__
@_convert
@_icheckother
def __imul__(self,other):
""" Multiply by an Array or a scalar and return an Array. """
self._data *= other
return self
@_returntype
@_convert
@_checkother
def __add__(self,other):
""" Add Array to Array or scalar and return an Array. """
return self._data + other
__radd__ = __add__
def fill(self, value):
self._data.fill(value)
@_convert
@_icheckother
def __iadd__(self,other):
""" Add Array to Array or scalar and return an Array. """
self._data += other
return self
@_convert
@_checkother
@_returntype
def __div__(self,other):
""" Divide Array by Array or scalar and return an Array. """
return self._data / other
@_returntype
@_convert
@_checkother
def __rdiv__(self,other):
""" Divide Array by Array or scalar and return an Array. """
return self._data.__rdiv__(other)
@_convert
@_icheckother
def __idiv__(self,other):
""" Divide Array by Array or scalar and return an Array. """
self._data /= other
return self
@_returntype
@_convert
@_checkother
def __sub__(self,other):
""" Subtract Array or scalar from Array and return an Array. """
return self._data - other
@_returntype
@_convert
@_checkother
def __rsub__(self,other):
""" Subtract Array or scalar from Array and return an Array. """
return self._data.__rsub__(other)
@_convert
@_icheckother
def __isub__(self,other):
""" Subtract Array or scalar from Array and return an Array. """
self._data -= other
return self
@_returntype
@_convert
@_checkother
def __pow__(self,other):
""" Exponentiate Array by scalar """
return self._data ** other
@_returntype
@_convert
def __abs__(self):
""" Return absolute value of Array """
return abs(self._data)
def __len__(self):
""" Return length of Array """
return len(self._data)
def __str__(self):
return str(self._data)
def __eq__(self,other):
"""
This is the Python special method invoked whenever the '=='
comparison is used. It will return true if the data of two
PyCBC arrays are identical, and all of the numeric meta-data
are identical, irrespective of whether or not the two
instances live in the same memory (for that comparison, the
Python statement 'a is b' should be used instead).
Thus, this method returns 'True' if the types of both 'self'
and 'other' are identical, as well as their lengths, dtypes
and the data in the arrays, element by element. It will always
do the comparison on the CPU, but will *not* move either object
to the CPU if it is not already there, nor change the scheme of
either object. It is possible to compare a CPU object to a GPU
object, and the comparison should be true if the data and
meta-data of the two objects are the same.
Note in particular that this function returns a single boolean,
and not an array of booleans as Numpy does. If the numpy
behavior is instead desired it can be obtained using the numpy()
method of the PyCBC type to get a numpy instance from each
object, and invoking '==' on those two instances.
Parameters
----------
other: another Python object, that should be tested for equality
with 'self'.
Returns
-------
boolean: 'True' if the types, dtypes, lengths, and data of the
two objects are each identical.
"""
# Writing the first test as below allows this method to be safely
# called from subclasses.
if type(self) != type(other):
return False
if self.dtype != other.dtype:
return False
if len(self) != len(other):
return False
# Now we've checked meta-data, so look at the actual data itself:
# The numpy() method call will put a copy of GPU data onto a CPU
# array, and could therefore be slow. As noted in the help for
# this function we don't worry about that.
sary = self.numpy()
oary = other.numpy()
# Now we know that both sary and oary are numpy arrays. The
# '==' statement returns an array of booleans, and the all()
# method of that array returns 'True' only if every element
# of that array of booleans is True.
return (sary == oary).all()
def almost_equal_elem(self,other,tol,relative=True):
"""
Compare whether two array types are almost equal, element
by element.
If the 'relative' parameter is 'True' (the default) then the
'tol' parameter (which must be positive) is interpreted as a
relative tolerance, and the comparison returns 'True' only if
abs(self[i]-other[i]) <= tol*abs(self[i])
for all elements of the array.
If 'relative' is 'False', then 'tol' is an absolute tolerance,
and the comparison is true only if
abs(self[i]-other[i]) <= tol
for all elements of the array.
Other meta-data (type, dtype, and length) must be exactly equal.
If either object's memory lives on the GPU it will be copied to
the CPU for the comparison, which may be slow. But the original
object itself will not have its memory relocated nor scheme
changed.
Parameters
----------
other
Another Python object, that should be tested for
almost-equality with 'self', element-by-element.
tol
A non-negative number, the tolerance, which is interpreted
as either a relative tolerance (the default) or an absolute
tolerance.
relative
A boolean, indicating whether 'tol' should be interpreted
as a relative tolerance (if True, the default if this argument
is omitted) or as an absolute tolerance (if tol is False).
Returns
-------
boolean
'True' if the data agree within the tolerance, as
interpreted by the 'relative' keyword, and if the types,
lengths, and dtypes are exactly the same.
"""
# Check that the tolerance is non-negative and raise an
# exception otherwise.
if (tol<0):
raise ValueError("Tolerance cannot be negative")
# Check that the meta-data agree; the type check is written in
# this way so that this method may be safely called from
# subclasses as well.
if type(other) != type(self):
return False
if self.dtype != other.dtype:
return False
if len(self) != len(other):
return False
# The numpy() method will move any GPU memory onto the CPU.
# Slow, but the user was warned.
diff = abs(self.numpy()-other.numpy())
if relative:
cmpary = tol*abs(self.numpy())
else:
cmpary = tol*ones(len(self),dtype=self.dtype)
return (diff<=cmpary).all()
def almost_equal_norm(self,other,tol,relative=True):
"""
Compare whether two array types are almost equal, normwise.
If the 'relative' parameter is 'True' (the default) then the
'tol' parameter (which must be positive) is interpreted as a
relative tolerance, and the comparison returns 'True' only if
abs(norm(self-other)) <= tol*abs(norm(self)).
If 'relative' is 'False', then 'tol' is an absolute tolerance,
and the comparison is true only if
abs(norm(self-other)) <= tol
Other meta-data (type, dtype, and length) must be exactly equal.
If either object's memory lives on the GPU it will be copied to
the CPU for the comparison, which may be slow. But the original
object itself will not have its memory relocated nor scheme
changed.
Parameters
----------
other
another Python object, that should be tested for
almost-equality with 'self', based on their norms.
tol
a non-negative number, the tolerance, which is interpreted
as either a relative tolerance (the default) or an absolute
tolerance.
relative
A boolean, indicating whether 'tol' should be interpreted
as a relative tolerance (if True, the default if this argument
is omitted) or as an absolute tolerance (if tol is False).
Returns
-------
boolean
'True' if the data agree within the tolerance, as
interpreted by the 'relative' keyword, and if the types,
lengths, and dtypes are exactly the same.
"""
# Check that the tolerance is non-negative and raise an
# exception otherwise.
if (tol<0):
raise ValueError("Tolerance cannot be negative")
# Check that the meta-data agree; the type check is written in
# this way so that this method may be safely called from
# subclasses as well.
if type(other) != type(self):
return False
if self.dtype != other.dtype:
return False
if len(self) != len(other):
return False
# The numpy() method will move any GPU memory onto the CPU.
# Slow, but the user was warned.
diff = self.numpy()-other.numpy()
dnorm = norm(diff)
if relative:
return (dnorm <= tol*norm(self))
else:
return (dnorm <= tol)
@_returntype
@_convert
def real(self):
""" Return real part of Array """
return Array(self._data.real, copy=True)
@_returntype
@_convert
def imag(self):
""" Return imaginary part of Array """
return Array(self._data.imag, copy=True)
@_returntype
@_convert
def conj(self):
""" Return complex conjugate of Array. """
return self._data.conj()
@_returntype
@_convert
@schemed(BACKEND_PREFIX)
def squared_norm(self):
""" Return the elementwise squared norm of the array """
@_returntype
@_checkother
@_convert
@schemed(BACKEND_PREFIX)
def multiply_and_add(self, other, mult_fac):
""" Return other multiplied by mult_fac and with self added.
Self is modified in place and returned as output.
Precisions of inputs must match.
"""
@_vrcheckother
@_convert
@schemed(BACKEND_PREFIX)
def inner(self, other):
""" Return the inner product of the array with complex conjugation.
"""
@_vrcheckother
@_convert
@schemed(BACKEND_PREFIX)
def vdot(self, other):
""" Return the inner product of the array with complex conjugation.
"""
@_convert
@schemed(BACKEND_PREFIX)
def clear(self):
""" Clear out the values of the array. """
@_vrcheckother
@_convert
@schemed(BACKEND_PREFIX)
    def weighted_inner(self, other, weight):
        """ Return the weighted inner product of the array with complex conjugation.
"""
@_convert
@schemed(BACKEND_PREFIX)
    def sum(self):
        """ Return the sum of the array. """
@_returntype
@_convert
@schemed(BACKEND_PREFIX)
    def cumsum(self):
        """ Return the cumulative sum of the array. """
@_convert
@_nocomplex
@schemed(BACKEND_PREFIX)
def max(self):
""" Return the maximum value in the array. """
@_convert
@_nocomplex
@schemed(BACKEND_PREFIX)
def max_loc(self):
"""Return the maximum value in the array along with the index location """
@_convert
@schemed(BACKEND_PREFIX)
    def abs_arg_max(self):
        """ Return the index of the element with the largest absolute value """
@_convert
@schemed(BACKEND_PREFIX)
def abs_max_loc(self):
"""Return the maximum elementwise norm in the array along with the index location"""
@_convert
@_nocomplex
@schemed(BACKEND_PREFIX)
    def min(self):
        """ Return the minimum value in the array. """
@_returnarray
@_convert
@schemed(BACKEND_PREFIX)
def take(self, indices):
""" Return the values at the given indices. """
@_convert
@_vcheckother
@schemed(BACKEND_PREFIX)
    def dot(self, other):
        """ Return the dot product. """
@schemed(BACKEND_PREFIX)
def _getvalue(self, index):
"""Helper function to return a single value from an array. May be very
slow if the memory is on a gpu.
"""
@_memoize_single
@_returntype
def _getslice(self, index):
return self._return(self._data[index])
@_convert
def __getitem__(self, index):
""" Return items from the Array. This not guaranteed to be fast for
returning single values.
"""
if isinstance(index, slice):
return self._getslice(index)
else:
return self._getvalue(index)
@_convert
    def resize(self, new_size):
        """Resize self to new_size, zero-padding if the array grows and
        truncating if it shrinks.
        """
if new_size == len(self):
return
else:
self._saved = {}
new_arr = zeros(new_size, dtype=self.dtype)
if len(self) <= new_size:
new_arr[0:len(self)] = self
else:
new_arr[:] = self[0:new_size]
self._data = new_arr._data
@_convert
    def roll(self, shift):
        """Cyclically shift the vector elements by `shift` positions,
        as numpy.roll does.
        """
self._saved = {}
new_arr = zeros(len(self), dtype=self.dtype)
if shift == 0:
return
if shift < 0:
shift=len(self) + shift
new_arr[0:shift] = self[len(self)-shift: len(self)]
new_arr[shift:len(self)] = self[0:len(self)-shift]
self._data = new_arr._data
@_returntype
@_convert
def astype(self, dtype):
if _numpy.dtype(self.dtype) == _numpy.dtype(dtype):
return self
else:
return self._data.astype(dtype)
@schemed(BACKEND_PREFIX)
def _copy(self, self_ref, other_ref):
"""Helper function to copy between two arrays. The arrays references
should be bare array types and not `Array` class instances.
"""
@_convert
def __setitem__(self, index, other):
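        # Accept either another Array (copied element-wise via the scheme's
        # _copy helper) or an allowed scalar (broadcast with fill).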
if isinstance(other,Array):
_convert_to_scheme(other)
            if self.kind == 'real' and other.kind == 'complex':
raise ValueError('Cannot set real value with complex')
if isinstance(index,slice):
self_ref = self._data[index]
other_ref = other._data
else:
self_ref = self._data[index:index+1]
other_ref = other._data
self._copy(self_ref, other_ref)
elif type(other) in _ALLOWED_SCALARS:
if isinstance(index, slice):
self[index].fill(other)
else:
self[index:index+1].fill(other)
else:
            raise TypeError('Can only set data from another Array or an allowed scalar')
@property
def precision(self):
if self.dtype == float32 or self.dtype == complex64:
return 'single'
else:
return 'double'
@property
def kind(self):
if self.dtype == float32 or self.dtype == float64:
return 'real'
elif self.dtype == complex64 or self.dtype == complex128:
return 'complex'
else:
return 'unknown'
@property
@_convert
def data(self):
"""Returns the internal python array """
return self._data
@data.setter
def data(self,other):
dtype = None
if hasattr(other,'dtype'):
dtype = other.dtype
temp = Array(other, dtype=dtype)
self._data = temp._data
@property
@_convert
@schemed(BACKEND_PREFIX)
def ptr(self):
""" Returns a pointer to the memory of this array """
@property
def itemsize(self):
return self.dtype.itemsize
@property
def nbytes(self):
return len(self.data) * self.itemsize
@property
@cpuonly
@_convert
def _swighelper(self):
""" Used internally by SWIG typemaps to ensure @_convert
is called and scheme is correct
"""
        return self
@_convert
@schemed(BACKEND_PREFIX)
def numpy(self):
""" Returns a Numpy Array that contains this data """
@_convert
def lal(self):
""" Returns a LAL Object that contains this data """
lal_data = None
if self._data.dtype == float32:
lal_data = _lal.CreateREAL4Vector(len(self))
elif self._data.dtype == float64:
lal_data = _lal.CreateREAL8Vector(len(self))
elif self._data.dtype == complex64:
lal_data = _lal.CreateCOMPLEX8Vector(len(self))
elif self._data.dtype == complex128:
lal_data = _lal.CreateCOMPLEX16Vector(len(self))
lal_data.data[:] = self.numpy()
return lal_data
@property
def dtype(self):
return self._data.dtype
def save(self, path, group=None):
"""
Save array to a Numpy .npy, hdf, or text file. When saving a complex array as
text, the real and imaginary parts are saved as the first and second
column respectively. When using hdf format, the data is stored
as a single vector, along with relevant attributes.
Parameters
----------
path: string
Destination file path. Must end with either .hdf, .npy or .txt.
group: string
Additional name for internal storage use. Ex. hdf storage uses
this as the key value.
Raises
------
ValueError
            If path does not end in .hdf, .npy, or .txt.
"""
ext = _os.path.splitext(path)[1]
if ext == '.npy':
_numpy.save(path, self.numpy())
elif ext == '.txt':
if self.kind == 'real':
_numpy.savetxt(path, self.numpy())
elif self.kind == 'complex':
output = _numpy.vstack((self.numpy().real,
self.numpy().imag)).T
_numpy.savetxt(path, output)
elif ext == '.hdf':
key = 'data' if group is None else group
            with h5py.File(path, 'a') as f:
                f.create_dataset(key, data=self.numpy(), compression='gzip',
                                 compression_opts=9, shuffle=True)
else:
raise ValueError('Path must end with .npy, .txt, or .hdf')
@_convert
def trim_zeros(self):
"""Remove the leading and trailing zeros.
"""
tmp = self.numpy()
f = len(self)-len(_numpy.trim_zeros(tmp, trim='f'))
b = len(self)-len(_numpy.trim_zeros(tmp, trim='b'))
return self[f:len(self)-b]
@_returntype
@_convert
def view(self, dtype):
"""
Return a 'view' of the array with its bytes now interpreted according
to 'dtype'. The location in memory is unchanged and changing elements
in a view of an array will also change the original array.
Parameters
----------
dtype : numpy dtype (one of float32, float64, complex64 or complex128)
The new dtype that should be used to interpret the bytes of self
"""
return self._data.view(dtype)
def copy(self):
""" Return copy of this array """
return self._return(self.data.copy())
# Convenience functions for determining dtypes
def real_same_precision_as(data):
    if data.precision == 'single':
        return float32
    elif data.precision == 'double':
        return float64
def complex_same_precision_as(data):
    if data.precision == 'single':
        return complex64
    elif data.precision == 'double':
        return complex128
@decorator
def _return_array(fn, *args, **kwds):
return Array(fn(*args, **kwds), copy=False)
@_return_array
@schemed(BACKEND_PREFIX)
def zeros(length, dtype=float64):
""" Return an Array filled with zeros.
"""
return
def load_array(path, group=None):
"""
Load an Array from a .hdf, .txt or .npy file. The
default data types will be double precision floating point.
Parameters
----------
path : string
        source file path. Must end with .hdf, .npy, or .txt.
group: string
Additional name for internal storage use. Ex. hdf storage uses
this as the key value.
Raises
------
ValueError
        If path does not end in .hdf, .npy, or .txt.
"""
ext = _os.path.splitext(path)[1]
if ext == '.npy':
data = _numpy.load(path)
elif ext == '.txt':
data = _numpy.loadtxt(path)
elif ext == '.hdf':
key = 'data' if group is None else group
        with h5py.File(path, 'r') as f:
            return Array(f[key][:])
else:
raise ValueError('Path must end with .npy, .hdf, or .txt')
if data.ndim == 1:
return Array(data)
elif data.ndim == 2:
return Array(data[:,0] + 1j*data[:,1])
else:
raise ValueError('File has %s dimensions, cannot convert to Array, \
must be 1 (real) or 2 (complex)' % data.ndim)
|
hagabbar/pycbc_copy
|
pycbc/types/array.py
|
Python
|
gpl-3.0
| 33,597 | 0.004643 |
#!/usr/bin/env python
import socket, ssl
# This is a copy of _RESTRICTED_SERVER_CIPHERS from the current tip of ssl.py
# <https://hg.python.org/cpython/file/af793c7580f1/Lib/ssl.py#l174> except that
# RC4 has been added back in, since it was removed in Python 2.7.10,
# but SSLStreamConnection only supports RC4 ciphers.
CIPHERS = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
'!eNULL:!MD5:!DSS:RC4'
)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('localhost', 54443))
s.listen(5)
while True:
newsocket, fromaddr = s.accept()
try:
connstream = ssl.wrap_socket(newsocket,
server_side=True,
certfile="cert.pem",
keyfile="cert.pem",
ciphers=CIPHERS)
except ssl.SSLError as e:
# Catch occurrences of:
# ssl.SSLEOFError: EOF occurred in violation of protocol (_ssl.c:581)
#
# In theory, setting ssl_version to ssl.PROTOCOL_TLSv1 will resolve
# the problem, but it didn't do so for me, and it caused the error:
# ssl.SSLError: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:581)
#
# Whereas the SSLEOFError doesn't prevent the server from working
# (it seems to happen only when the server is first started, and it
# stops happening if we simply ignore it and try again a few times)
# so we leave ssl_version at ssl.PROTOCOL_SSLv3 and ignore that error.
#
# If we catch SSLEOFError specifically, then Travis fails with:
# AttributeError: 'module' object has no attribute 'SSLEOFError'
# So we catch the more general exception SSLError.
continue
try:
data = connstream.read()
while data:
connstream.write(data)
data = connstream.read()
finally:
try:
connstream.shutdown(socket.SHUT_RDWR)
except socket.error as e:
# On Mac, if the other side has already closed the connection,
# then socket.shutdown will fail, but we can ignore this failure.
pass
connstream.close()
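# A minimal client sketch to exercise this echo server (assumes the server is
# already running locally; the snippet is illustrative, not part of the test):
#   import socket, ssl
#   conn = ssl.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
#   conn.connect(('localhost', 54443))
#   conn.write(b'hello')
#   print(conn.read())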
|
marco-c/pluotsorbet
|
tests/sslEchoServer.py
|
Python
|
gpl-2.0
| 2,382 | 0.00084 |
from pystorm.bolt import BasicBolt
class SentenceSplitterBolt(BasicBolt):
def process(self, tup):
sentence = tup.values[0]
for word in sentence.split(' '):
            self.emit([word])
if __name__ == '__main__':
SentenceSplitterBolt().run()
|
thedrow/streamparse
|
examples/wordcount/multilang/resources/sentence_splitter.py
|
Python
|
apache-2.0
| 274 | 0 |
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles request to add a Campaign to a client account."""
__author__ = 'Mark Saniscalchi'
import os
from handlers.api_handler import APIHandler
from handlers.ndb_handler import InitUser
import webapp2
from google.appengine.api import users
from google.appengine.ext.webapp import template
class AddCampaign(webapp2.RequestHandler):
"""View that either adds a Campaign or displays an error message."""
def post(self):
"""Handle post request."""
client_customer_id = self.request.get('clientCustomerId')
campaign_name = self.request.get('campaignName')
ad_channel_type = self.request.get('adChannelType')
budget = self.request.get('budget')
template_values = {
'back_url': '/showCampaigns?clientCustomerId=%s' % client_customer_id,
'back_msg': 'View Campaigns',
'logout_url': users.create_logout_url('/'),
'user_nickname': users.get_current_user().nickname()
}
try:
app_user = InitUser()
# Load Client instance.
handler = APIHandler(app_user.client_id,
app_user.client_secret,
app_user.refresh_token,
app_user.mcc_cid,
app_user.developer_token)
# Create new campaign.
handler.AddCampaign(client_customer_id, campaign_name,
ad_channel_type, budget)
self.redirect('/showCampaigns?clientCustomerId=%s' % client_customer_id)
except Exception, e:
template_values['error'] = str(e)
# Use template to write output to the page.
path = os.path.join(os.path.dirname(__file__),
'../templates/base_template.html')
self.response.out.write(template.render(path, template_values))
|
coxmediagroup/googleads-python-lib
|
examples/adwords/adwords_appengine_demo/views/add_campaign_view.py
|
Python
|
apache-2.0
| 2,387 | 0.005027 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add ti job_id index
Revision ID: 947454bf1dff
Revises: bdaa763e6c56
Create Date: 2017-08-15 15:12:13.845074
"""
# revision identifiers, used by Alembic.
revision = '947454bf1dff'
down_revision = 'bdaa763e6c56'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_index('ti_job_id', 'task_instance', ['job_id'], unique=False)
def downgrade():
op.drop_index('ti_job_id', table_name='task_instance')
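# To apply or revert this revision with the standard Alembic CLI (assuming a
# configured alembic.ini pointing at the metadata database):
#   alembic upgrade 947454bf1dff
#   alembic downgrade bdaa763e6c56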
|
gilt/incubator-airflow
|
airflow/migrations/versions/947454bf1dff_add_ti_job_id_index.py
|
Python
|
apache-2.0
| 1,041 | 0.001921 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-12-08 19:59
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Driver',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('is_online', models.BooleanField(default=False)),
('last_seen', models.DateTimeField(blank=True, null=True)),
('auto_back', models.CharField(blank=True, max_length=50, null=True)),
('auto_model', models.CharField(blank=True, max_length=50, null=True)),
('auto_manufacturer', models.CharField(blank=True, max_length=50, null=True)),
],
),
migrations.CreateModel(
name='DriverStats',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('driver', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='logistics.Driver')),
],
),
migrations.CreateModel(
name='Fleet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField(blank=True, null=True)),
('creation_date', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Owner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('is_confirmed', models.BooleanField(default=False)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Trip',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField(blank=True, null=True)),
('passenger_phone', models.CharField(blank=True, max_length=50, null=True)),
('passenger_name', models.CharField(blank=True, max_length=50, null=True)),
('start_position', models.CharField(blank=True, max_length=50, null=True)),
('end_position', models.CharField(blank=True, max_length=50, null=True)),
('start_date', models.DateTimeField()),
('end_date', models.DateTimeField(blank=True, null=True)),
('is_finished', models.BooleanField(default=False)),
('problem', models.IntegerField(choices=[(1, 'none'), (2, 'crash'), (3, 'jam'), (4, 'other')], default=1)),
('problem_description', models.CharField(blank=True, max_length=50, null=True)),
('driver', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='logistics.Driver')),
('fleet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='logistics.Fleet')),
],
),
migrations.CreateModel(
name='TripStats',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('trip', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='logistics.Trip')),
],
),
migrations.AddField(
model_name='fleet',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='logistics.Owner'),
),
migrations.AddField(
model_name='driver',
name='fleets',
field=models.ManyToManyField(blank=True, related_name='fleets', to='logistics.Fleet'),
),
migrations.AddField(
model_name='driver',
name='pending_fleets',
field=models.ManyToManyField(blank=True, related_name='pending_fleets', to='logistics.Fleet'),
),
migrations.AddField(
model_name='driver',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
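# To create the tables described above with the standard Django workflow
# (assuming the 'logistics' app is listed in INSTALLED_APPS):
#   python manage.py migrate logistics 0001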
|
geo2tag-logistics/Geo2Logistics
|
logistics/migrations/0001_initial.py
|
Python
|
apache-2.0
| 5,033 | 0.004769 |
import sys
def find_motif_locations(dna, motif):
motif_locations = []
if len(dna) < len(motif):
        raise ValueError('Motif can\'t be longer than sequence')
if len(motif) == len(dna) and motif != dna:
return motif_locations
for _ in range(len(dna) - len(motif) + 1):
if dna[_:_ + len(motif)] == motif:
motif_locations.append(_ + 1)
return motif_locations
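# Worked example (the Rosalind SUBS sample dataset; positions are 1-based):
# find_motif_locations('GATATATGCATATACTT', 'ATAT') -> [2, 4, 10]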
if __name__ == '__main__':
sequences = open(sys.argv[1]).read().split()
print(
' '.join(
str(_)
for _ in
find_motif_locations(
sequences[0],
sequences[1]
)
)
)
|
afreeorange/rosalind
|
SUBS/motif.py
|
Python
|
mit
| 684 | 0.001462 |
"""
==============
Blob Detection
==============
Blobs are bright on dark or dark on bright regions in an image. In
this example, blobs are detected using 3 algorithms. The image used
in this case is the Hubble eXtreme Deep Field. Each bright dot in the
image is a star or a galaxy.
Laplacian of Gaussian (LoG)
-----------------------------
This is the most accurate and slowest approach. It computes the Laplacian
of Gaussian images with successively increasing standard deviation and
stacks them up in a cube. Blobs are local maxima in this cube. Detecting
larger blobs is especially slower because of larger kernel sizes during
convolution. Only bright blobs on dark backgrounds are detected. See
:py:meth:`skimage.feature.blob_log` for usage.
Difference of Gaussian (DoG)
----------------------------
This is a faster approximation of LoG approach. In this case the image is
blurred with increasing standard deviations and the differences between
successively blurred images are stacked up in a cube. This method
suffers from the same disadvantage as LoG approach for detecting larger
blobs. Blobs are again assumed to be bright on dark. See
:py:meth:`skimage.feature.blob_dog` for usage.
Determinant of Hessian (DoH)
----------------------------
This is the fastest approach. It detects blobs by finding maxima in the
matrix of the Determinant of Hessian of the image. The detection speed is
independent of the size of blobs as internally the implementation uses
box filters instead of convolutions. Bright on dark as well as dark on
bright blobs are detected. The downside is that small blobs (<3px) are not
detected accurately. See :py:meth:`skimage.feature.blob_doh` for usage.
"""
from math import sqrt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
image = data.hubble_deep_field()[0:500, 0:500]
image_gray = rgb2gray(image)
blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)
# Compute radii in the 3rd column.
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)
blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)
blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)
blobs_list = [blobs_log, blobs_dog, blobs_doh]
colors = ['yellow', 'lime', 'red']
titles = ['Laplacian of Gaussian', 'Difference of Gaussian',
'Determinant of Hessian']
sequence = zip(blobs_list, colors, titles)
fig, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax = axes.ravel()
for idx, (blobs, color, title) in enumerate(sequence):
ax[idx].set_title(title)
ax[idx].imshow(image, interpolation='nearest')
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False)
ax[idx].add_patch(c)
ax[idx].set_axis_off()
plt.tight_layout()
plt.show()
|
paalge/scikit-image
|
doc/examples/features_detection/plot_blob.py
|
Python
|
bsd-3-clause
| 2,997 | 0 |
__author__ = 'yusaira-khan'
import unittest
import un_iife_ize.un_iife_ize as un_iife_ize
class CheckVar(unittest.TestCase):
def test_simple(self):
statement = [('var hello,world=5;', 0)]
exp = [('hello=undefined,world=5;', 0)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
self.assertEqual(ret, exp)
def test_multiple(self):
statement = [('var hello,world=5;\nvar bye,nope;', 0)]
exp = [('hello=undefined,world=5;', 0), ('bye=undefined,nope=undefined;', 19)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
self.assertEqual(ret, exp)
def test_sections(self):
statement = [('var hello,world=5;\nvar bye,nope;', 0),
('var hello,world=5;\nvar bye,nope;', 30)]
exp = [('hello=undefined,world=5;', 0),
('bye=undefined,nope=undefined;', 19),
('hello=undefined,world=5;', 30),
('bye=undefined,nope=undefined;', 49)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
self.assertEqual(ret, exp)
def test_deliberate_iife(self):
statement = [('var hello=function(){;}', 0)]
exp = [('hello=function(){;}', 0)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
print(ret)
self.assertEqual(ret, exp)
def test_deliberate_iife_barc(self):
statement = [('var hello = (function(){;}())', 0)]
exp = [(' hello = (function(){;}())', 0)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
print(ret, len(exp[0][0]), len(ret[0][0]))
self.assertEqual(ret, exp)
def test_double_assignment(self):
statement = [('var hello=wow=;', 0)]
exp = [('hello=wow=', 0)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.all
print(ret)
self.assertEqual(ret, exp)
def test_inside_function(self):
statement = [('function(a){var hello=5;}', 30)]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.unmodified
print("woadh", ret, v.unmodified)
self.assertEqual(ret, statement)
def test_sections_unmodified(self):
statement = [('var hello,world=5;\nfunction(){}\nvar bye,nope;', 0),
('var hello,world=5;\nvar bye,nope;', 30)]
exp = [('\nfunction(){}\n', 18), ('', len(statement[0][0]) + statement[0][1]),
('\n', 48), ('', len(statement[1][0]) + statement[1][1])]
v = un_iife_ize.Var(statement)
v.extract_all()
ret = v.unmodified
print("ret", ret)
print("expt", exp)
self.assertEqual(ret, exp)
if __name__ == '__main__':
unittest.main()
|
yusaira-khan/un-iife-ize
|
tests/vars.py
|
Python
|
mit
| 2,836 | 0.000705 |
#! /usr/bin/python2.4
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""jdisasm.py: a Java .class file disassembler
by pts@fazekas.hu at Sun Apr 26 20:36:26 CEST 2009
jdisasm can display a Java .class file in a human readable form, showing the
class name, the field names and types, the method names, types and codes
(including instruction memonics). For each item shown, the file offset is
prepended. (Neither javap or jad can display the file offset.)
jdisasm is based on the documentation
http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html
"""
__author__ = 'pts@fazekas.hu'
import struct
import sys
ACCH = {
'PUBLIC': 0x1,
'PRIVATE': 0x2,
'PROTECTED': 0x4,
'STATIC': 0x8,
'FINAL': 0x10,
'SYNCHRONIZED': 0x20, # same as ACC_SUPER
'VOLATILE': 0x40,
'TRANSIENT': 0x80,
'NATIVE': 0x100,
'ABSTRACT': 0x400,
'STRICT': 0x800,
}
TAG_TO_CONSTANT_TYPE = {
7: 'Class_info',
9: 'Fieldref',
10: 'Methodref',
11: 'InterfaceMethodref',
8: 'String',
3: 'Integer',
4: 'Float',
5: 'Long',
6: 'Double',
12: 'NameAndType',
1: 'Utf8',
}
INSTRUCTIONS = {
50: ('aaload', 1),
83: ('aastore', 1),
1: ('aconst_null', 1),
25: ('aload', 2),
42: ('aload_0', 1),
43: ('aload_1', 1),
44: ('aload_2', 1),
45: ('aload_3', 1),
189: ('anewarray', 3),
    176: ('areturn', 1),
    190: ('arraylength', 1),
58: ('astore', 2),
75: ('astore_0', 1),
76: ('astore_1', 1),
77: ('astore_2', 1),
78: ('astore_3', 1),
191: ('athrow', 1),
51: ('baload', 1),
84: ('bastore', 1),
16: ('bipush', 2),
52: ('caload', 1),
85: ('castore', 1),
192: ('checkcast', 3),
144: ('d2f', 1),
142: ('d2i', 1),
143: ('d2l', 1),
99: ('dadd', 1),
49: ('daload', 1),
82: ('dastore', 1),
152: ('dcmpg', 1),
151: ('dcmpl', 1),
    14: ('dconst_0', 1),
    15: ('dconst_1', 1),
111: ('ddiv', 1),
    24: ('dload', 2),
38: ('dload_0', 1),
39: ('dload_1', 1),
40: ('dload_2', 1),
41: ('dload_3', 1),
107: ('dmul', 1),
119: ('dneg', 1),
115: ('drem', 1),
175: ('dreturn', 1),
57: ('dstore', 2),
71: ('dstore_0', 1),
72: ('dstore_1', 1),
73: ('dstore_2', 1),
74: ('dstore_3', 1),
103: ('dsub', 1),
89: ('dup', 1),
90: ('dup_x1', 1),
91: ('dup_x2', 1),
92: ('dup2', 1),
93: ('dup2_x1', 1),
141: ('f2d', 1),
139: ('f2i', 1),
140: ('f2l', 1),
98: ('fadd', 1),
48: ('faload', 1),
81: ('fastore', 1),
150: ('fcmpg', 1),
149: ('fcmpl', 1),
11: ('fconst_0', 1),
12: ('fconst_1', 1),
13: ('fconst_2', 1),
110: ('fdiv', 1),
23: ('fload', 2),
34: ('fload_0', 1),
35: ('fload_1', 1),
36: ('fload_2', 1),
37: ('fload_3', 1),
106: ('fmul', 1),
118: ('fneg', 1),
114: ('frem', 1),
174: ('freturn', 1),
56: ('fstore', 2),
67: ('fstore_0', 1),
68: ('fstore_1', 1),
69: ('fstore_2', 1),
70: ('fstore_3', 1),
102: ('fsub', 1),
180: ('getfield', 3),
178: ('getstatic', 3),
167: ('goto', 3),
200: ('goto_w', 5),
145: ('i2b', 1),
146: ('i2c', 1),
135: ('i2d', 1),
134: ('i2f', 1),
133: ('i2l', 1),
147: ('i2s', 1),
96: ('iadd', 1),
46: ('iaload', 1),
126: ('iand', 1),
79: ('iastore', 1),
2: ('iconst_m1', 1),
3: ('iconst_0', 1),
4: ('iconst_1', 1),
5: ('iconst_2', 1),
6: ('iconst_3', 1),
7: ('iconst_4', 1),
8: ('iconst_5', 1),
108: ('idiv', 1),
165: ('if_acmpeq', 3),
166: ('if_acmpne', 3),
159: ('if_icmpeq', 3),
160: ('if_icmpne', 3),
161: ('if_icmplt', 3),
162: ('if_icmpge', 3),
163: ('if_icmpgt', 3),
164: ('if_icmple', 3),
153: ('ifeq', 3),
154: ('ifne', 3),
155: ('iflt', 3),
    156: ('ifge', 3),
157: ('ifgt', 3),
158: ('ifle', 3),
199: ('ifnonnull', 3),
198: ('ifnull', 3),
132: ('iinc', 3),
21: ('iload', 2),
26: ('iload_0', 1),
27: ('iload_1', 1),
28: ('iload_2', 1),
29: ('iload_3', 1),
104: ('imul', 1),
116: ('ineg', 1),
193: ('instanceof', 3),
185: ('invokeinterface', 5),
183: ('invokespecial', 3),
184: ('invokestatic', 3),
182: ('invokevirtual', 3),
128: ('ior', 1),
112: ('irem', 1),
172: ('ireturn', 1),
120: ('ishl', 1),
122: ('ishr', 1),
54: ('istore', 2),
59: ('istore_0', 1),
60: ('istore_1', 1),
61: ('istore_2', 1),
62: ('istore_3', 1),
100: ('isub', 1),
124: ('iushr', 1),
130: ('ixor', 1),
168: ('jsr', 3),
201: ('jsr_w', 5),
138: ('l2d', 1),
137: ('l2f', 1),
136: ('l2i', 1),
97: ('ladd', 1),
47: ('laload', 1),
127: ('land', 1),
80: ('lastore', 1),
148: ('lcmp', 1),
9: ('lconst_0', 1),
10: ('lconst_1', 1),
18: ('ldc', 2),
19: ('ldc_w', 3),
20: ('ldc2_w', 3),
109: ('ldiv', 1),
22: ('lload', 2),
30: ('lload_0', 1),
31: ('lload_1', 1),
32: ('lload_2', 1),
33: ('lload_3', 1),
105: ('lmul', 1),
117: ('lneg', 1),
171: ('lookupswitch', None), # variable length
129: ('lor', 1),
113: ('lrem', 1),
173: ('lreturn', 1),
121: ('lshl', 1),
123: ('lshr', 1),
55: ('lstore', 2),
63: ('lstore_0', 1),
64: ('lstore_1', 1),
65: ('lstore_2', 1),
66: ('lstore_3', 1),
101: ('lsub', 1),
125: ('lushr', 1),
131: ('lxor', 1),
194: ('monitorenter', 1),
195: ('monitorexit', 1),
197: ('multianewarray', 4),
187: ('new', 3),
188: ('newarray', 2),
0: ('nop', 1),
87: ('pop', 1),
88: ('pop2', 1),
181: ('putfield', 3),
179: ('putstatic', 3),
169: ('ret', 2),
177: ('return', 1),
53: ('saload', 1),
86: ('sastore', 1),
17: ('sipush', 3),
95: ('swap', 1),
170: ('tableswitch', None), # variable length
196: ('wide', None), # variable length, 6 for iinc=132, 4 otherwise
254: ('impdep1', 1),
255: ('impdep2', 1),
202: ('breakpoint', 1),
}
"""Maps an opcode to a (mnemonic, ilength) list.
ilength is the instruction size in bytes, including the opcode.
"""
def FormatAccessFlags(acc, is_class=False):
if not isinstance(acc, int):
raise TypeError
items = []
for name in sorted(ACCH):
if acc & ACCH[name]:
if is_class and name == 'SYNCHRONIZED':
items.append('ACC_SUPER')
else:
items.append('ACC_' + name)
acc &= ~ACCH[name]
if acc:
items.append('0x%x' % acc)
if not items:
    items.append('0')
return '|'.join(items)
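# Example derived from the flag table above, e.g. for a public class compiled
# with ACC_SUPER set:
# assert FormatAccessFlags(0x21, is_class=True) == 'ACC_PUBLIC|ACC_SUPER'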
def DumpCode(s, i, iend, constant_class, constant_utf8,
constant_name_and_type, constant_method_ref,
constant_interface_method_ref):
max_stack, = struct.unpack('>H', s[i : i + 2])
print '0x%08x max_stack=%d' % (i, max_stack)
i += 2
max_locals, = struct.unpack('>H', s[i : i + 2])
print '0x%08x max_locals=%d' % (i, max_locals)
i += 2
code_length, = struct.unpack('>L', s[i : i + 4])
i += 4
code_ofs = i
print '0x%08x code:' % i
j = i
i += code_length
while j < i:
opcode = ord(s[j])
mnemonic, ilength = INSTRUCTIONS[opcode]
if opcode == 185: # invokeinterface
j0 = j
j += 1
interface_method_ref_index, count = struct.unpack('>HB', s[j : j + 3])
j += 4
class_index, name_and_type_index = constant_interface_method_ref[
interface_method_ref_index]
name_index, descriptor_index = constant_name_and_type[
name_and_type_index]
print '0x%08x %s %r %r %r %d' % (
j0, mnemonic, constant_utf8[constant_class[class_index]],
constant_utf8[name_index], constant_utf8[descriptor_index],
count)
elif opcode in (184, 183, 182):
# invokestatic, invokespecial, invokevirtual
j0 = j
j += 1
method_ref_index, = struct.unpack('>H', s[j : j + 2])
j += 2
class_index, name_and_type_index = constant_method_ref[method_ref_index]
name_index, descriptor_index = constant_name_and_type[
name_and_type_index]
print '0x%08x %s %r %r %r' % (
j0, mnemonic, constant_utf8[constant_class[class_index]],
constant_utf8[name_index], constant_utf8[descriptor_index])
elif ilength:
if ilength > 1:
# TODO(pts): Print the arguments propely, using the constant_pool.
print '0x%08x %s %r' % (j, mnemonic, s[j + 1 : j + ilength])
else:
print '0x%08x %s' % (j, mnemonic)
j += ilength
elif opcode == 171: # lookupswitch
# TODO(pts): test this
j0 = j
j += 1
while (j - code_ofs) & 3:
j += 1
default, = struct.unpack('>L', s[j : j + 4])
j += 4
npairs, = struct.unpack('>L', s[j : j + 4])
j += 4
print '0x%08x lookupswitch default=%d pairs=%r' % (
j0, default,
struct.unpack('>%dl' % (npairs << 1), s[j : j + (npairs << 3)]))
j += npairs << 3
elif opcode == 170: # tableswitch
# TODO(pts): test this
j0 = j
j += 1
while (j - code_ofs) & 3:
j += 1
low, = struct.unpack('>L', s[j : j + 4])
j += 4
high, = struct.unpack('>L', s[j : j + 4])
j += 4
noffsets = high - low + 1
print '0x%08x tableswitch low=%d high=%d offsets=%r' % (
j0, low, high,
struct.unpack('>%dl' % noffsets, s[j : j + (noffsets << 2)]))
j += noffsets << 2
elif opcode == 196: # wide
# TODO(pts): test this
subopcode = ord(s[j + 1])
if subopcode == 132: # iinc
ilength = 6
else:
ilength = 4
      submnemonic = INSTRUCTIONS[subopcode][0]
print '0x%08x wide %s %r' % (j, submnemonic, s[j + 2 : j + ilength])
j += ilength
else:
assert 0, 'unknown length for opcode %d' % opcode
assert i == j, 'code parse error got=%d expected=%d' % (j, i)
print '0x%08x end-of-code' % i
exception_table_length, = struct.unpack('>H', s[i : i + 2])
print '0x%08x exception_table_length=%d' % (i, exception_table_length)
i += 2
for ei in xrange(exception_table_length):
ei_start_pc, = struct.unpack('>H', s[i : i + 2])
print '0x%08x exception_table[%d].start_pc=%d' % (
i, ei, ei_start_pc)
i += 2
ei_end_pc, = struct.unpack('>H', s[i : i + 2])
print '0x%08x exception_table[%d].end_pc=%d' % (
i, ei, ei_end_pc)
i += 2
ei_handler_pc, = struct.unpack('>H', s[i : i + 2])
print '0x%08x exception_table[%d].handler_pc=%d' % (
        i, ei, ei_handler_pc)
i += 2
ei_catch_type, = struct.unpack('>H', s[i : i + 2])
print '0x%08x exception_table[%d].catch_type=%d' % (
i, ei, ei_catch_type)
i += 2
attributes_count, = struct.unpack('>H', s[i : i + 2])
print '0x%08x attributes_count=%d' % (i, attributes_count)
i += 2
for ai in xrange(attributes_count):
ai_name_index, = struct.unpack('>H', s[i : i + 2])
print '0x%08x attribute[%d].name=%r' % (
i, ai, constant_utf8[ai_name_index])
i += 2
assert constant_utf8[ai_name_index] != 'Code'
ai_attribute_length, = struct.unpack('>L', s[i : i + 4])
i += 4
ai_info = s[i : i + ai_attribute_length]
print '0x%08x attribute[%d].info=%r' % (
i, ai, ai_info)
i += ai_attribute_length
# TODO(pts): Parse the attribute.
assert i == iend, 'end-of-code-attr expected at %d, len=%d' % (i, iend)
print '0x%08x end-of-code-attr' % i
def ParseClass(file_name, max_lines=0, offset=0):
try:
f = open(file_name)
s = f.read()
finally:
f.close()
i = 0
magic, = struct.unpack('>L', s[i : i + 4])
print '0x%08x magic=0x%08x' % (i, magic)
assert magic == 0xcafebabe
i += 4
major_version, = struct.unpack('>H', s[i : i + 2])
print '0x%08x major_version=%d' % (i, major_version)
i += 2
minor_version, = struct.unpack('>H', s[i : i + 2])
print '0x%08x minor_version=%d' % (i, minor_version)
i += 2
constant_pool_count, = struct.unpack('>H', s[i : i + 2])
print '0x%08x constant_pool_count=%d' % (i, constant_pool_count)
i += 2
# Maps a CONSTANT_Class_info index to a name_index
constant_class = {}
# Maps a CONSTANT_Utf8 index to a string
constant_utf8 = {}
# Maps a CONSTANT_NameAndType index to (name_index, descriptor_index)
constant_name_and_type = {}
# Maps a CONSTANT_Methodref index to (class_index, name_and_type_index)
constant_method_ref = {}
# Maps a CONSTANT_InterfaceMethodref index to
# (class_index, name_and_type_index)
constant_interface_method_ref = {}
# Maps a name to its CONSTANT_utf8 index
name_to_constant_idx = {}
ci = 1
while ci < constant_pool_count:
tag = ord(s[i])
print '0x%08x constant %d tag=%s' % (
i, ci, TAG_TO_CONSTANT_TYPE.get(tag, tag)),
assert tag in TAG_TO_CONSTANT_TYPE
if tag == 7: #CONSTANT_Class_info
i += 1
j, = struct.unpack('>H', s[i : i + 2])
constant_class[ci] = j
print j,
i += 2
elif tag == 9: #CONSTANT_Fieldref
i += 5
elif tag == 10: #CONSTANT_Methodref
i += 1
constant_method_ref[ci] = struct.unpack('>HH', s[i : i + 4])
print constant_method_ref[ci][0], constant_method_ref[ci][1],
i += 4
elif tag == 11: #CONSTANT_InterfaceMethodref
i += 1
constant_interface_method_ref[ci] = struct.unpack('>HH', s[i : i + 4])
print constant_interface_method_ref[ci][0],
print constant_interface_method_ref[ci][1],
i += 4
elif tag == 8: #CONSTANT_String
i += 3
elif tag == 3: #CONSTANT_Integer
i += 5
elif tag == 4: #CONSTANT_Float
i += 5
elif tag == 5: #CONSTANT_Long
i += 9
ci += 1
elif tag == 6: #CONSTANT_Double
i += 9
ci += 1
elif tag == 12: #CONSTANT_NameAndType
i += 1
constant_name_and_type[ci] = struct.unpack('>HH', s[i : i + 4])
print constant_name_and_type[ci][0], constant_name_and_type[ci][1],
i += 4
elif tag == 1: #CONSTANT_Utf8
blen = struct.unpack('>H', s[i + 1 : i + 3])[0]
name = s[i + 3 : i + 3 + blen]
name_to_constant_idx[name] = ci
constant_utf8[ci] = name
print repr(name),
i += 3 + blen
else:
assert 0
print
ci += 1
access_flags, = struct.unpack('>H', s[i : i + 2])
print '0x%08x access_flags=%s' % (
i, FormatAccessFlags(access_flags, is_class=True))
i += 2
this_class, = struct.unpack('>H', s[i : i + 2])
print '0x%08x this_class=%r' % (
i, constant_utf8[constant_class[this_class]])
i += 2
super_class, = struct.unpack('>H', s[i : i + 2])
print '0x%08x super_class=%r' % (
i, constant_utf8[constant_class[super_class]])
i += 2
interfaces_count, = struct.unpack('>H', s[i : i + 2])
print '0x%08x interfaces_count=%d' % (i, interfaces_count)
i += 2
for ii in xrange(interfaces_count):
interface, = struct.unpack('>H', s[i : i + 2])
print '0x%08x interface[%d]=%r' % (
i, ii, constant_utf8[constant_class[interface]])
i += 2
fields_count, = struct.unpack('>H', s[i : i + 2])
print '0x%08x fields_count=%d' % (i, fields_count)
i += 2
for fi in xrange(fields_count):
fi_access_flags, = struct.unpack('>H', s[i : i + 2])
print '0x%08x field[%d].access_flags=%s' % (
i, fi, FormatAccessFlags(fi_access_flags))
i += 2
fi_name_index, = struct.unpack('>H', s[i : i + 2])
print '0x%08x field[%d].name=%r' % (
i, fi, constant_utf8[fi_name_index])
i += 2
fi_descriptor_index, = struct.unpack('>H', s[i : i + 2])
print '0x%08x field[%d].descriptor=%r' % (
i, fi, constant_utf8[fi_descriptor_index])
i += 2
fi_attributes_count, = struct.unpack('>H', s[i : i + 2])
print '0x%08x field[%d].attributes_count=%d' % (i, fi, fi_attributes_count)
i += 2
for ai in xrange(fi_attributes_count):
ai_name_index, = struct.unpack('>H', s[i : i + 2])
print '0x%08x field[%d].attribute[%d].name=%r' % (
i, fi, ai, constant_utf8[ai_name_index])
i += 2
assert constant_utf8[ai_name_index] != 'Code'
ai_attribute_length, = struct.unpack('>L', s[i : i + 4])
i += 4
ai_info = s[i : i + ai_attribute_length]
print '0x%08x field[%d].attribute[%d].info=%r' % (
i, fi, ai, ai_info)
i += ai_attribute_length
# TODO(pts): Parse the attribute.
methods_count, = struct.unpack('>H', s[i : i + 2])
print '0x%08x methods_count=%d' % (i, methods_count)
i += 2
for fi in xrange(methods_count):
fi_access_flags, = struct.unpack('>H', s[i : i + 2])
print '0x%08x method[%d].access_flags=%s' % (
i, fi, FormatAccessFlags(fi_access_flags))
i += 2
fi_name_index, = struct.unpack('>H', s[i : i + 2])
print '0x%08x method[%d].name=%r' % (
i, fi, constant_utf8[fi_name_index])
i += 2
fi_descriptor_index, = struct.unpack('>H', s[i : i + 2])
print '0x%08x method[%d].descriptor=%r' % (
i, fi, constant_utf8[fi_descriptor_index])
i += 2
fi_attributes_count, = struct.unpack('>H', s[i : i + 2])
print '0x%08x method[%d].attributes_count=%d' % (i, fi, fi_attributes_count)
i += 2
for ai in xrange(fi_attributes_count):
ai_name_index, = struct.unpack('>H', s[i : i + 2])
print '0x%08x method[%d].attribute[%d].name=%r' % (
i, fi, ai, constant_utf8[ai_name_index])
i += 2
ai_attribute_length, = struct.unpack('>L', s[i : i + 4])
i += 4
if constant_utf8[ai_name_index] == 'Code':
print '0x%08x method[%d].attribute[%d].code:' % (i, fi, ai)
# TODO(pts): limit s[:ai_info...]
DumpCode(
s, i, i + ai_attribute_length,
constant_class=constant_class, constant_utf8=constant_utf8,
constant_name_and_type=constant_name_and_type,
constant_method_ref=constant_method_ref,
constant_interface_method_ref=constant_interface_method_ref)
else:
ai_info = s[i : i + ai_attribute_length]
print '0x%08x method[%d].attribute[%d].info=%r' % (
i, fi, ai, ai_info)
i += ai_attribute_length
# TODO(pts): Parse the attribute.
attributes_count, = struct.unpack('>H', s[i : i + 2])
print '0x%08x attributes_count=%d' % (i, attributes_count)
i += 2
for ai in xrange(attributes_count):
ai_name_index, = struct.unpack('>H', s[i : i + 2])
print '0x%08x attribute[%d].name=%r' % (
i, ai, constant_utf8[ai_name_index])
i += 2
assert constant_utf8[ai_name_index] != 'Code'
ai_attribute_length, = struct.unpack('>L', s[i : i + 4])
i += 4
ai_info = s[i : i + ai_attribute_length]
print '0x%08x attribute[%d].info=%r' % (
i, ai, ai_info)
i += ai_attribute_length
# TODO(pts): Parse the attribute.
assert i == len(s), 'class EOF expected at %d, len=%d' % (i, len(s))
print '0x%08x EOF' % i
return name_to_constant_idx
if __name__ == '__main__':
if len(sys.argv) != 2:
print >>sys.stderr, 'Usage: %s <file.class>' % sys.argv[0]
sys.exit(1)
ParseClass(file_name=sys.argv[1])
|
JacobJacob/pyew
|
jdisasm.py
|
Python
|
gpl-2.0
| 19,835 | 0.013511 |
import socket
from selectors import DefaultSelector, EVENT_WRITE, EVENT_READ
sock = socket.socket()
sock.setblocking(False)
selector = DefaultSelector()
urls_todo = set(['/'])
seen_urls = set(['/'])
stopped = False
class Fetcher:
def __init__(self, url):
self.response = b''
self.url = url
self.sock = None
def fetch(self):
# This method fetches the url
self.sock = socket.socket()
self.sock.setblocking(False)
try:
            self.sock.connect(('xkcd.com', 80))
except BlockingIOError:
pass
selector.register(self.sock.fileno(),
EVENT_WRITE,
self.connected)
def connected(self, key, mask):
print('connected!')
selector.unregister(key.fd)
request = 'GET {} HTTP/1.0\r\nHost: xkcd.com\r\n\r\n'.format(self.url)
self.sock.send(request.encode('ascii'))
# Register the next callback
selector.register(key.fd,
EVENT_READ,
self.read_response)
def read_response(self, key, mask):
global stopped
chunk = self.sock.recv(4096) # 4K chunks of data
if chunk:
self.response += chunk
else:
selector.unregister(key.fd) # Done reading
links = self.parse_links()
# Set logic
for link in links.difference(seen_urls):
urls_todo.add(link)
Fetcher(link).fetch()
seen_urls.update(links)
urls_todo.remove(self.url)
            if not urls_todo:
stopped = True
def parse_links(self):
pass
if __name__ == '__main__':
fetcher = Fetcher('/353')
fetcher.fetch()
while not stopped:
events = selector.select()
for event_key, event_mask in events:
callback = event_key.data
callback(event_key, event_mask)
|
GreenJoey/My-Simple-Programs
|
python/500L Async Scraper/simple-url-getter.py
|
Python
|
gpl-2.0
| 1,959 | 0.001021 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains code for loading and preprocessing the MNIST data."""
import os
import tensorflow as tf
slim = tf.contrib.slim
dataset_data_provider = slim.dataset_data_provider
dataset = slim.dataset
queues = slim.queues
gfile = tf.gfile
import demosaic_utils
def make_demosaic(image, height, width, BURST_LENGTH, to_shift, upscale, jitter):
y = tf.random_uniform([1], jitter * upscale, tf.shape(image)[0]-height - jitter*upscale, tf.int32)
x = tf.random_uniform([1], jitter * upscale, tf.shape(image)[1]-width - jitter*upscale, tf.int32)
y, x = y[0], x[0]
demosaic = tf.reshape(image[y:y+height, x:x+width, :], (1, height, width, 1, 3))
delta = tf.random_uniform([BURST_LENGTH-1,2], -jitter*upscale, jitter*upscale+1, tf.int32)
# delta_big = tf.random_uniform([BURST_LENGTH-1,2], -20, 20, tf.int32)
shift_mask = tf.random_uniform([BURST_LENGTH-1, 1], 0., BURST_LENGTH-1., tf.float32) * to_shift
shift_mask = tf.where(shift_mask > BURST_LENGTH-2., tf.ones([BURST_LENGTH-1, 1]), tf.zeros([BURST_LENGTH-1, 1]))
delta = delta # + tf.cast(tf.tile(shift_mask, [1, 2]), tf.int32) * delta_big
shift_mask = tf.reshape(shift_mask, [1, BURST_LENGTH-1])
for d in range(BURST_LENGTH-1):
y_ = (y + delta[d,0]) # % (tf.shape(image)[0]-height)
x_ = (x + delta[d,1]) # % (tf.shape(image)[1]-width)
demosaic2 = tf.reshape(image[y_:y_+height, x_:x_+width, :], (1, height, width, 1, 3))
demosaic = tf.concat([demosaic, demosaic2], axis=3)
return demosaic, shift_mask
def make_stack_demosaic(image, height, width, depth, BURST_LENGTH, to_shift, upscale, jitter):
height = height * upscale
width = width * upscale
v_error = tf.maximum(height + 2 * jitter * upscale - tf.shape(image)[0] + 1, 0)
h_error = tf.maximum(width + 2 * jitter * upscale - tf.shape(image)[1] + 1, 0)
image = tf.pad(image, [[0,v_error],[0,h_error],[0,0]])
demosaic_stack, shift_stack = make_demosaic(image, height, width, BURST_LENGTH, to_shift, upscale, jitter)
for i in range(depth-1):
demosaic, shift_mask = make_demosaic(image, height, width, BURST_LENGTH, to_shift, upscale, jitter)
demosaic_stack = tf.concat((demosaic_stack, demosaic), axis=0)
shift_stack = tf.concat((shift_stack, shift_mask) , axis=0)
dt = tf.reshape(tf.transpose(demosaic_stack, [0, 3, 1, 2, 4]), [-1, height, width, 3])
height = height // upscale
width = width // upscale
dt = tf.image.resize_images(dt, [height, width], method=tf.image.ResizeMethod.AREA)
demosaic_stack = tf.transpose(tf.reshape(dt, [depth, BURST_LENGTH, height, width, 3]), [0, 2, 3, 1, 4])
mosaic = tf.stack((demosaic_stack[:,::2,::2,:,0],demosaic_stack[:,::2,1::2,:,1],demosaic_stack[:,1::2,::2,:,1],demosaic_stack[:,1::2,1::2,:,2]), axis=-1)
mosaic = demosaic_utils.tf22reshape2(mosaic, BURST_LENGTH)
mosaic = tf.reshape(mosaic, (depth, height, width, BURST_LENGTH))
return mosaic, demosaic_stack, shift_stack
def load_batch_demosaic(BURST_LENGTH, dataset_dir, batch_size=32, height=64, width=64, degamma=1., to_shift=1., upscale=1, jitter=1):
filenames = [os.path.join(dataset_dir, f) for f in gfile.ListDirectory(dataset_dir)]
filename_queue = tf.train.string_input_producer(filenames)
mosaic = None
  while mosaic is None:
_, image_file = tf.WholeFileReader().read(filename_queue)
image = tf.image.decode_image(image_file)
mosaic, demosaic, shift = make_stack_demosaic((tf.cast(image[0], tf.float32) / 255.)**degamma,
height, width, 128, BURST_LENGTH, to_shift, upscale, jitter)
# Batch it up.
mosaic, demosaic, shift = tf.train.shuffle_batch(
[mosaic, demosaic, shift],
batch_size=batch_size,
num_threads=2,
capacity=500 + 3 * batch_size,
enqueue_many=True,
min_after_dequeue=100)
return mosaic, demosaic, shift
def make_batch_hqjitter(patches, BURST_LENGTH, batch_size, repeats, height, width,
to_shift, upscale, jitter, smalljitter):
# patches is [BURST_LENGTH, h_up, w_up, 3]
j_up = jitter * upscale
h_up = height * upscale # + 2 * j_up
w_up = width * upscale # + 2 * j_up
bigj_patches = patches
delta_up = (jitter - smalljitter) * upscale
smallj_patches = patches[:, delta_up:-delta_up, delta_up:-delta_up, ...]
unique = batch_size//repeats
batch = []
for i in range(unique):
for j in range(repeats):
curr = [patches[i, j_up:-j_up, j_up:-j_up, :]]
prob = tf.minimum(tf.cast(tf.random_poisson(1.5, []), tf.float32)/BURST_LENGTH, 1.)
for k in range(BURST_LENGTH - 1):
flip = tf.random_uniform([])
p2use = tf.cond(flip < prob, lambda : bigj_patches, lambda : smallj_patches)
curr.append(tf.random_crop(p2use[i, ...], [h_up, w_up, 3]))
curr = tf.stack(curr, axis=0)
curr = tf.image.resize_images(curr, [height, width], method=tf.image.ResizeMethod.AREA)
curr = tf.transpose(curr, [1,2,3,0])
batch.append(curr)
batch = tf.stack(batch, axis=0)
return batch
def make_stack_hqjitter(image, height, width, depth, BURST_LENGTH, to_shift, upscale, jitter):
j_up = jitter * upscale
h_up = height * upscale + 2 * j_up
w_up = width * upscale + 2 * j_up
v_error = tf.maximum((h_up - tf.shape(image)[0] + 1) // 2, 0)
h_error = tf.maximum((w_up - tf.shape(image)[1] + 1) // 2, 0)
image = tf.pad(image, [[v_error, v_error],[h_error,h_error],[0,0]])
stack = []
for i in range(depth):
stack.append(tf.random_crop(image, [h_up, w_up, 3]))
stack = tf.stack(stack, axis=0)
return stack
def load_batch_hqjitter(dataset_dir, patches_per_img=32, min_queue=8, BURST_LENGTH=1, batch_size=32,
repeats=1, height=64, width=64, degamma=1.,
to_shift=1., upscale=1, jitter=1, smalljitter=1):
filenames = [os.path.join(dataset_dir, f) for f in gfile.ListDirectory(dataset_dir)]
filename_queue = tf.train.string_input_producer(filenames)
_, image_file = tf.WholeFileReader().read(filename_queue)
image = tf.image.decode_image(image_file)
patches = make_stack_hqjitter((tf.cast(image[0], tf.float32) / 255.)**degamma,
height, width, patches_per_img, BURST_LENGTH, to_shift, upscale, jitter)
unique = batch_size//repeats
# Batch it up.
patches = tf.train.shuffle_batch(
[patches],
batch_size=unique,
num_threads=2,
capacity=min_queue + 3 * batch_size,
enqueue_many=True,
min_after_dequeue=min_queue)
print('PATCHES =================',patches.get_shape().as_list())
patches = make_batch_hqjitter(patches, BURST_LENGTH, batch_size, repeats, height, width, to_shift, upscale, jitter, smalljitter)
return patches
def make_noised(image, height, width, sig_range):
y = tf.random_uniform([1], 0, tf.shape(image)[0]-height, tf.int32)
x = tf.random_uniform([1], 0, tf.shape(image)[1]-width, tf.int32)
y, x = y[0], x[0]
noised = tf.reshape(image[y:y+height, x:x+width, :], (1, height, width, 1, 3))
denoised = noised
sig = tf.random_uniform([1], 0, sig_range, tf.float32)
noised = tf.clip_by_value(noised + tf.random_normal(tf.shape(noised),mean=0.,stddev=sig[0]),0.,1.)
return noised, denoised, tf.reshape(sig, [1,1])
def make_stack_noised(image, height, width, depth, sig_range):
v_error = tf.maximum(height - tf.shape(image)[0] + 1, 0)
h_error = tf.maximum(width - tf.shape(image)[1] + 1, 0)
image = tf.pad(image, [[0,v_error],[0,h_error],[0,0]])
noised_stack, denoised_stack, sig_stack = make_noised(image, height, width, sig_range)
for i in range(depth-1):
noised, denoised, sig = make_noised(image, height, width, sig_range)
noised_stack = tf.concat((noised_stack, noised), axis=0)
denoised_stack = tf.concat((denoised_stack, denoised), axis=0)
sig_stack = tf.concat((sig_stack, sig), axis=0)
return noised_stack, denoised_stack, sig_stack
def load_batch_noised(depth, dataset_dir, batch_size=32, height=64, width=64, degamma=1., sig_range=20.):
filenames = [os.path.join(dataset_dir, f) for f in gfile.ListDirectory(dataset_dir)]
filename_queue = tf.train.string_input_producer(filenames)
noised_stack = None
  while noised_stack is None:
_, image_file = tf.WholeFileReader().read(filename_queue)
image = tf.image.decode_image(image_file)
noised_stack, denoised_stack, sig_stack = make_stack_noised((tf.cast(image[0], tf.float32) / 255.)**degamma, height, width, depth, sig_range)
# Batch it up.
noised, denoised, sig = tf.train.shuffle_batch(
[noised_stack, denoised_stack, sig_stack],
batch_size=batch_size,
num_threads=2,
capacity=1024 + 3 * batch_size,
enqueue_many=True,
min_after_dequeue=500)
return noised, denoised, sig
def decode(tfr_features):
burst = tf.decode_raw(tfr_features['burst_raw'], tf.float32)
merged = tf.decode_raw(tfr_features['merge_raw'], tf.float32)
readvar = tf.decode_raw(tfr_features['readvar'], tf.float32)
shotfactor = tf.decode_raw(tfr_features['shotfactor'], tf.float32)
channelgain = tf.decode_raw(tfr_features['channelgain'], tf.float32)
blacklevels = tf.decode_raw(tfr_features['blacklevels'], tf.float32)
depth = tf.cast(tfr_features['depth'], tf.int32) # 0
height = tf.cast(tfr_features['height'], tf.int32) # 1
width = tf.cast(tfr_features['width'], tf.int32) # 2
# depth = width_
# height = depth_
# width = height_
# WIDTH=4032
# HEIGHT=3024
# payload_raw_c = (payload_raw-bl/16) * ch
burst = tf.reshape(burst, (height,width,depth))
sh = tf.shape(burst)
ch = tf.tile(tf.reshape(channelgain, (2,2,1)), (sh[0]/2, sh[1]/2, sh[2]))
bl = tf.tile(tf.reshape(blacklevels, (2,2,1)), (sh[0]/2, sh[1]/2, sh[2]))
burst = (burst - bl/16.) * ch
merged = tf.reshape(merged, (height,width,3)) / 16.
scale = tf.reduce_max(merged)
burst = tf.clip_by_value(burst, 0., scale)
scale = 1024.
burst = burst / scale
merged = merged / scale
readvar = tf.reshape(readvar * channelgain * channelgain, [4]) / scale / scale
shotfactor = tf.reshape(shotfactor * channelgain, [4]) / scale
return burst, merged, readvar, shotfactor
def decode_patches(tfr_features):
burst = tf.decode_raw(tfr_features['burst_raw'], tf.float32)
merged = tf.decode_raw(tfr_features['merge_raw'], tf.float32)
demosaic = tf.decode_raw(tfr_features['demosaic_raw'], tf.float32)
readvar = tf.decode_raw(tfr_features['readvar'], tf.float32)
shotfactor = tf.decode_raw(tfr_features['shotfactor'], tf.float32)
channelgain = tf.decode_raw(tfr_features['channelgain'], tf.float32)
blacklevels = tf.decode_raw(tfr_features['blacklevels'], tf.float32)
depth = tf.cast(tfr_features['depth'], tf.int32) # 0
height = tf.cast(tfr_features['height'], tf.int32) # 1
width = tf.cast(tfr_features['width'], tf.int32) # 2
patches = tf.cast(tfr_features['patches'], tf.int32)
burst = tf.reshape(burst, (patches, height,width,depth))
sh = tf.shape(burst)
ch = tf.tile(tf.reshape(channelgain, (2,2,1)), (sh[1]/2, sh[2]/2, sh[3]))
bl = tf.tile(tf.reshape(blacklevels, (2,2,1)), (sh[1]/2, sh[2]/2, sh[3]))
burst = (burst - bl/16./2**10) * ch
merged = tf.reshape(merged, (patches,height,width))
demosaic = tf.reshape(demosaic, (patches,height,width,3))
demosaic = demosaic
burst = tf.clip_by_value(burst, -10, 1.)
merged = tf.clip_by_value(merged, -10, 1.)
scale = 2.**10
readvar = tf.reshape(readvar, [4]) / scale / scale
shotfactor = tf.reshape(shotfactor, [4]) / scale
return burst, merged, demosaic, readvar, shotfactor
def read_and_decode_single(filename):
e = tf.python_io.tf_record_iterator(filename).next()
features = tf.parse_single_example(e, features={
'readvar': tf.FixedLenFeature([], tf.string),
'shotfactor': tf.FixedLenFeature([], tf.string),
'blacklevels': tf.FixedLenFeature([], tf.string),
'channelgain': tf.FixedLenFeature([], tf.string),
'burst_raw': tf.FixedLenFeature([], tf.string),
'merge_raw': tf.FixedLenFeature([], tf.string),
'depth': tf.FixedLenFeature([], tf.int64),
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
})
return decode(features)
def read_and_decode(filename_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features={
'readvar': tf.FixedLenFeature([], tf.string),
'shotfactor': tf.FixedLenFeature([], tf.string),
'blacklevels': tf.FixedLenFeature([], tf.string),
'channelgain': tf.FixedLenFeature([], tf.string),
'burst_raw': tf.FixedLenFeature([], tf.string),
'merge_raw': tf.FixedLenFeature([], tf.string),
'depth': tf.FixedLenFeature([], tf.int64),
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
})
return decode(features)
def read_and_decode_patches(filename_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features={
'readvar': tf.FixedLenFeature([], tf.string),
'shotfactor': tf.FixedLenFeature([], tf.string),
'blacklevels': tf.FixedLenFeature([], tf.string),
'channelgain': tf.FixedLenFeature([], tf.string),
'burst_raw': tf.FixedLenFeature([], tf.string),
'merge_raw': tf.FixedLenFeature([], tf.string),
'demosaic_raw': tf.FixedLenFeature([], tf.string),
'depth': tf.FixedLenFeature([], tf.int64),
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
'patches': tf.FixedLenFeature([], tf.int64),
})
return decode_patches(features)
def read_and_decode_str(filename_queue):
reader = tf.TFRecordReader()
s, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features={
'readvar': tf.FixedLenFeature([], tf.string),
'shotfactor': tf.FixedLenFeature([], tf.string),
'blacklevels': tf.FixedLenFeature([], tf.string),
'channelgain': tf.FixedLenFeature([], tf.string),
'burst_raw': tf.FixedLenFeature([], tf.string),
'merge_raw': tf.FixedLenFeature([], tf.string),
'depth': tf.FixedLenFeature([], tf.int64),
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
})
return s, decode(features)
def load_tfrecord(filename):
g = tf.Graph()
with g.as_default():
tf.logging.set_verbosity(tf.logging.INFO)
mosaic, demosaic_truth, readvar, shotfactor = read_and_decode_single(filename)
init_op = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())
with tf.Session() as sess:
sess.run(init_op)
mosaic, demosaic_truth, readvar, shotfactor = \
sess.run([mosaic, demosaic_truth, readvar, shotfactor])
return mosaic, demosaic_truth, readvar, shotfactor
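# Hypothetical usage sketch ('example.tfrecords' is a placeholder path):
# mosaic, truth, readvar, shotfactor = load_tfrecord('example.tfrecords')
# print mosaic.shape, truth.shape  # numpy arrays, ready for inspection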
def sample_patch(burst, merged, height, width, burst_length):
y = tf.random_uniform([1], 0, tf.shape(burst)[0]-height, tf.int32)
x = tf.random_uniform([1], 0, tf.shape(burst)[1]-width, tf.int32)
y, x = (y[0]//2)*2, (x[0]//2)*2
mosaic = burst[y:y+height, x:x+width,:burst_length]
demosaic = merged[y:y+height, x:x+width,:]
return mosaic, demosaic
def stackRGB(burst):
burst = tf.stack((burst[:,::2,::2],(burst[:,1::2,::2]+burst[:,::2,1::2])/2,burst[:,1::2,1::2]), axis=-1)
return burst
def burst2patches(burst, merged, height, width, depth, burst_length):
mosaic, demosaic = sample_patch(burst, merged, height, width, burst_length)
mosaic = tf.expand_dims(mosaic, axis=0)
demosaic = tf.expand_dims(demosaic, axis=0)
for i in range(depth-1):
m, d = sample_patch(burst, merged, height, width, burst_length)
m = tf.expand_dims(m, axis=0)
d = tf.expand_dims(d, axis=0)
mosaic = tf.concat((mosaic, m), axis=0)
demosaic = tf.concat((demosaic, d), axis=0)
return mosaic, demosaic
def inputs(filenames, batch_size, height, width, depth, burst_length):
with tf.name_scope('input'):
filename_queue = tf.train.string_input_producer(filenames)
# Even when reading in multiple threads, share the filename
# queue.
burst, merged, readvar, shotfactor = read_and_decode(filename_queue)
d = tf.shape(burst)[-1]
burst = tf.cond(d > burst_length, lambda: burst[...,:burst_length], lambda: burst)
burst = tf.cond(d < burst_length, lambda: tf.pad(burst, [[0,0],[0,0],[0,burst_length-d]]), lambda: burst)
mosaic, demosaic = burst2patches(burst, merged, height, width, depth, burst_length)
mosaic = tf.reshape(mosaic, [depth, height, width, burst_length])
demosaic = tf.reshape(demosaic, [depth, height, width, 3])
readvar = tf.tile(tf.reshape(readvar, [1, 4]), [depth, 1])
shotfactor = tf.tile(tf.reshape(shotfactor, [1, 4]), [depth, 1])
valid_mask = tf.ones([1,tf.minimum(burst_length,d)])
valid_mask = tf.cond(burst_length > d, lambda : tf.concat([valid_mask,tf.zeros([1,burst_length-d])], axis=-1), lambda : valid_mask)
valid_mask = tf.tile(valid_mask, [depth, 1])
valid_mask = tf.reshape(valid_mask, [depth, burst_length])
mosaic, demosaic, readvar, shotfactor, valid_mask = tf.train.shuffle_batch(
[mosaic, demosaic, readvar, shotfactor, valid_mask],
batch_size=batch_size,
num_threads=2,
capacity=1024 + 3 * batch_size,
enqueue_many=True,
min_after_dequeue=128)
return mosaic, demosaic, readvar, shotfactor, valid_mask
def inputs_patches(filenames, batch_size, burst_length):
with tf.name_scope('input'):
filename_queue = tf.train.string_input_producer(filenames)
# Even when reading in multiple threads, share the filename
# queue.
burst, merged, demosaic, readvar, shotfactor = read_and_decode_patches(filename_queue)
# d = tf.shape(burst)[-1]
# burst = tf.cond(d > burst_length, lambda: burst[...,:burst_length], lambda: burst)
# burst = tf.cond(d < burst_length, lambda: tf.pad(burst, [[0,0],[0,0],[0,0],[0,burst_length-d]]), lambda: burst)
burst = burst[...,:burst_length]
depth = 16 # tf.shape(burst)[0]
readvar = tf.tile(tf.reshape(readvar, [1, 4]), [depth, 1])
shotfactor = tf.tile(tf.reshape(shotfactor, [1, 4]), [depth, 1])
burst = tf.reshape(burst, [depth, 256, 256, burst_length])
merged = tf.reshape(merged, [depth, 256, 256])
demosaic = tf.reshape(demosaic, [depth, 256, 256, 3])
burst, merged, demosaic, readvar, shotfactor = tf.train.shuffle_batch(
[burst, merged, demosaic, readvar, shotfactor],
batch_size=batch_size,
num_threads=2,
capacity=1000 + 3 * batch_size,
enqueue_many=True,
min_after_dequeue=128)
return burst, merged, demosaic, readvar, shotfactor
def load_test_patches(filenames, burst_length):
with tf.Graph().as_default():
with tf.name_scope('input'):
filename_queue = tf.train.string_input_producer(filenames, num_epochs=1, shuffle=False)
# Even when reading in multiple threads, share the filename
# queue.
burst, merged, demosaic, readvar, shotfactor = read_and_decode_patches(filename_queue)
burst = burst[...,:burst_length]
depth = 16 # tf.shape(burst)[0]
readvar = tf.tile(tf.reshape(readvar, [1, 4]), [depth, 1])
shotfactor = tf.tile(tf.reshape(shotfactor, [1, 4]), [depth, 1])
burst = tf.reshape(burst, [depth, 256, 256, burst_length])
merged = tf.reshape(merged, [depth, 256, 256])
demosaic = tf.reshape(demosaic, [depth, 256, 256, 3])
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
with tf.Session() as sess:
sess.run(init_op)
with queues.QueueRunners(sess):
patches = {}
for i,f in enumerate(filenames):
print 'loading',f,'its', i,'of',len(filenames)
burst_np, merged_np, demosaic_np, readvar_np, shotfactor_np = sess.run([burst, merged, demosaic, readvar, shotfactor])
patches[f] = [burst_np, merged_np, demosaic_np, readvar_np, shotfactor_np]
return patches
|
google/burst-denoising
|
kpn_data_provider.py
|
Python
|
apache-2.0
| 21,051 | 0.018241 |
#!/usr/bin/python
__version__ = '0.0.1'
import pysimplesoap.client
import pysimplesoap.simplexml
from zimbrasoap.soap import soap,admin,mail
|
Secretions/zmdomainexport
|
zimbrasoap/__init__.py
|
Python
|
gpl-2.0
| 144 | 0.013889 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('lumos', '0009_proglang_slug'),
]
operations = [
migrations.AddField(
model_name='softskills',
name='slug',
field=models.SlugField(default=''),
preserve_default=False,
),
]
|
darth-dodo/project_lumos
|
lumos/migrations/0010_softskills_slug.py
|
Python
|
mit
| 428 | 0 |
from __future__ import absolute_import
import momoko
from tornado import gen
from psycopg2.extras import RealDictConnection
def initialize_database():
db = momoko.Pool(
dsn='''dbname=nightson user=vswamy password=vswamy host=localhost port=5432''',
size=5,
connection_factory=RealDictConnection,
)
db.connect()
return db
class BaseEntityManager(object):
db = initialize_database()
    def __init__(self, request=None):
        self.request = request
@gen.coroutine
def execute_sql(self, sql):
''' Executes an sql statement and returns the value '''
cursor = yield BaseEntityManager.db.execute(sql)
raise gen.Return(cursor)
def get_value(self, key):
''' Gets a value given dictionary like arguments'''
params = {}
if(self.request.method == 'GET'):
params = self.request.query_arguments
elif(self.request.method == 'POST'):
params = self.request.body_arguments
elif(self.request.method == 'PUT'):
params = self.request.arguments
elif(self.request.method == 'DELETE'):
params = self.request.body_arguments
if(key not in params):
return None
''' Params will always be of the form key:[values] '''
return params.get(key)[0]
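# Hypothetical usage from inside a Tornado request handler (the handler itself
# is an assumption, not part of this module):
# manager = BaseEntityManager(self.request)
# name = manager.get_value('name')  # first value of ?name=... or a form field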
|
vswamy/nightson
|
nightson/managers/base_entity_manager.py
|
Python
|
apache-2.0
| 1,373 | 0.002185 |
from enum import Enum
class EnumContainerStatus(Enum):
running = "running"
halted = "halted"
networkKilled = "networkKilled"
|
zero-os/0-orchestrator
|
pyclient/zeroos/orchestrator/client/EnumContainerStatus.py
|
Python
|
apache-2.0
| 139 | 0 |
#
# This file is part of pysmi software.
#
# Copyright (c) 2015-2016, Ilya Etingof <ilya@glas.net>
# License: http://pysmi.sf.net/license.html
#
import os
import sys
import time
from pysmi.reader.base import AbstractReader
from pysmi.mibinfo import MibInfo
from pysmi.compat import decode
from pysmi import debug
from pysmi import error
class FileReader(AbstractReader):
"""Fetch ASN.1 MIB text by name from local file.
*FileReader* class instance tries to locate ASN.1 MIB files
by name, fetch and return their contents to caller.
"""
useIndexFile = True # optional .index file mapping MIB to file name
indexFile = '.index'
def __init__(self, path, recursive=True, ignoreErrors=True):
"""Create an instance of *FileReader* serving a directory.
Args:
path (str): directory to search MIB files
Keyword Args:
recursive (bool): whether to include subdirectories
ignoreErrors (bool): ignore filesystem access errors
"""
self._path = os.path.normpath(path)
self._recursive = recursive
self._ignoreErrors = ignoreErrors
self._indexLoaded = False
def __str__(self): return '%s{"%s"}' % (self.__class__.__name__, self._path)
def getSubdirs(self, path, recursive=True, ignoreErrors=True):
if not recursive:
return [path]
dirs = [path]
try:
subdirs = os.listdir(path)
except OSError:
if ignoreErrors:
return dirs
else:
raise error.PySmiError('directory %s access error: %s' % (path, sys.exc_info()[1]))
for d in subdirs:
d = os.path.join(decode(path), decode(d))
if os.path.isdir(d):
dirs.extend(self.getSubdirs(d, recursive))
return dirs
def loadIndex(self, indexFile):
mibIndex = {}
if os.path.exists(indexFile):
try:
mibIndex = dict(
[x.split()[:2] for x in open(indexFile).readlines()]
)
debug.logger & debug.flagReader and debug.logger('loaded MIB index map from %s file, %s entries' % (indexFile, len(mibIndex)))
except IOError:
pass
return mibIndex
def getMibVariants(self, mibname):
if self.useIndexFile:
if not self._indexLoaded:
self._mibIndex = self.loadIndex(
os.path.join(self._path, self.indexFile)
)
self._indexLoaded = True
if mibname in self._mibIndex:
debug.logger & debug.flagReader and debug.logger('found %s in MIB index: %s' % (mibname, self._mibIndex[mibname]))
return [(mibname, self._mibIndex[mibname])]
return super(FileReader, self).getMibVariants(mibname)
def getData(self, mibname):
debug.logger & debug.flagReader and debug.logger('%slooking for MIB %s' % (self._recursive and 'recursively ' or '', mibname))
for path in self.getSubdirs(self._path, self._recursive,
self._ignoreErrors):
for mibalias, mibfile in self.getMibVariants(mibname):
f = os.path.join(decode(path), decode(mibfile))
debug.logger & debug.flagReader and debug.logger('trying MIB %s' % f)
if os.path.exists(f) and os.path.isfile(f):
try:
mtime = os.stat(f)[8]
debug.logger & debug.flagReader and debug.logger('source MIB %s mtime is %s, fetching data...' % (f, time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(mtime))))
return MibInfo(path='file://%s' % f, file=mibfile, name=mibalias, mtime=mtime), decode(open(f, mode='rb').read(self.maxMibSize))
except (OSError, IOError):
debug.logger & debug.flagReader and debug.logger('source file %s open failure: %s' % (f, sys.exc_info()[1]))
if not self._ignoreErrors:
raise error.PySmiError('file %s access error: %s' % (f, sys.exc_info()[1]))
raise error.PySmiReaderFileNotModifiedError('source MIB %s is older than needed' % f, reader=self)
raise error.PySmiReaderFileNotFoundError('source MIB %s not found' % mibname, reader=self)
|
spirrello/spirrello-pynet-work
|
applied_python/lib/python2.7/site-packages/pysmi/reader/localfile.py
|
Python
|
gpl-3.0
| 4,425 | 0.003164 |
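A hedged sketch of driving FileReader directly; the MIB directory and MIB name below are assumptions, not values from the source.

# Hypothetical direct use; path and MIB name are made up.
from pysmi.reader.localfile import FileReader
from pysmi import error

reader = FileReader('/usr/share/snmp/mibs', recursive=True)
try:
    mibinfo, text = reader.getData('SNMPv2-MIB')
    print(mibinfo.path, len(text), 'characters')
except error.PySmiError as err:   # reader errors derive from PySmiError
    print('lookup failed:', err)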
# -*- coding: utf-8 -*-
# This file is part of emesene.
#
# emesene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# emesene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with emesene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import xml.parsers.expat
import e3
import logging
class RichWidget(object):
'''a base widget that allows to add formatted text based on a
xhtml subset'''
def put_text(self, text, fg_color=None, bg_color=None, font=None, size=None,
bold=False, italic=False, underline=False, strike=False):
'''insert text at the current position with the style defined by the
optional parameters'''
raise NotImplementedError('Not implemented')
def put_formatted(self, text, fg_color=None, bg_color=None, font=None, size=None,
bold=False, italic=False, underline=False, strike=False):
'''insert text at the current position with the style defined inside
text'''
try:
result = e3.common.XmlParser.XmlParser(
#'<span>' + text.replace('\n', '') + '</span>').result
'<span>' + text + '</span>').result
except xml.parsers.expat.ExpatError:
logging.getLogger("gtkui.RichWidget").debug("cant parse '%s'" % \
(text, ))
return
dct = e3.common.XmlParser.DictObj(result)
self._put_formatted(dct, fg_color, bg_color, font, size,
bold, italic, underline, strike)
def _put_formatted(self, dct, fg_color=None, bg_color=None, font=None, size=None,
bold=False, italic=False, underline=False, strike=False):
'''insert text at the current position with the style defined inside
text, using the parsed structure stored on dct'''
# override the values if defined, keep the old ones if no new defined
bold = dct.tag == 'b' or dct.tag == 'strong' or bold
italic = dct.tag == 'i' or dct.tag == 'em' or italic
underline = dct.tag == 'u' or underline
strike = dct.tag == 's' or strike
if dct.tag == 'span' and dct.style:
style = e3.common.XmlParser.parse_css(dct.style)
font = style.font_family or font
try:
# TODO: handle different units?
size = int(style.font_size) or size
except ValueError:
pass
except TypeError:
pass
fg_color = style.color or fg_color
bg_color = style.background_color or bg_color
if dct.childs is None:
return
for child in dct.childs:
if isinstance(child, basestring):
self.put_text(child, fg_color, bg_color, font, size,
bold, italic, underline, strike)
elif child.tag == 'img':
self.put_image(child.src, child.alt)
elif child.tag == 'br':
self.new_line()
elif child.tag == 'a':
self.put_link(child.href)
else:
self._put_formatted(child, fg_color, bg_color, font, size,
bold, italic, underline, strike)
def put_image(self, path, tip=None):
'''insert an image at the current position
tip it's the alt text on mouse over'''
raise NotImplementedError('Not implemented')
def new_line(self):
'''put a new line on the text'''
raise NotImplementedError('Not implemented')
def put_link(self, link):
'''insert a link at the current position'''
raise NotImplementedError('Not implemented')
|
tiancj/emesene
|
emesene/gui/gtkui/RichWidget.py
|
Python
|
gpl-3.0
| 4,161 | 0.004086 |
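RichWidget is abstract: a concrete subclass must supply put_text, put_image, new_line and put_link. A minimal console-only sketch (not part of emesene) under that assumption:

# Minimal console renderer; styling arguments are accepted but ignored.
import sys

class ConsoleRichWidget(RichWidget):
    def put_text(self, text, fg_color=None, bg_color=None, font=None,
                 size=None, bold=False, italic=False, underline=False,
                 strike=False):
        sys.stdout.write(text)

    def put_image(self, path, tip=None):
        sys.stdout.write('[img: %s]' % (tip or path))

    def new_line(self):
        sys.stdout.write('\n')

    def put_link(self, link):
        sys.stdout.write('<%s>' % link)

# ConsoleRichWidget().put_formatted('<b>hello</b><br/>world')
# (put_formatted also needs the e3 XML parser imported above)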
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('My Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
import tempfile, os
from django import contrib
tempdata = tempfile.mkdtemp()
approot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
adminroot = os.path.join(contrib.__path__[0], 'admin')
DATABASES = {
'default': {
'NAME': os.path.join(tempdata, 'signalqueue-test.db'),
'TEST_NAME': os.path.join(tempdata, 'signalqueue-test.db'),
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
}
}
TIME_ZONE = 'America/New_York'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
MEDIA_ROOT = os.path.join(approot, 'static')
MEDIA_URL = '/face/'
STATIC_ROOT = os.path.join(adminroot, 'static', 'admin')
STATIC_URL = '/staticfiles/'
ADMIN_MEDIA_PREFIX = '/admin-media/'
ROOT_URLCONF = 'signalqueue.settings.urlconf'
TEMPLATE_DIRS = (
os.path.join(approot, 'templates'),
os.path.join(adminroot, 'templates'),
os.path.join(adminroot, 'templates', 'admin'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
#"django.core.context_processors.i18n", this is AMERICA
"django.core.context_processors.media",
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django_nose',
'djcelery',
'delegate',
'signalqueue',
)
LOGGING = dict(
version=1,
disable_existing_loggers=False,
formatters={ 'standard': { 'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s' }, },
handlers={
'default': { 'level':'DEBUG', 'class':'logging.StreamHandler', 'formatter':'standard', },
'nil': { 'level':'DEBUG', 'class':'django.utils.log.NullHandler', },
},
loggers={
'signalqueue': { 'handlers': ['default'], 'level': 'INFO', 'propagate': False },
},
root={ 'handlers': ['default'], 'level': 'INFO', 'propagate': False },
)
SQ_QUEUES = {
'default': { # you need at least one dict named 'default' in SQ_QUEUES
'ENGINE': 'signalqueue.worker.backends.RedisSetQueue', # required - full path to a QueueBase subclass
'INTERVAL': 30, # 1/3 sec
'OPTIONS': dict(port=8356),
},
'listqueue': {
'ENGINE': 'signalqueue.worker.backends.RedisQueue',
'INTERVAL': 30, # 1/3 sec
'OPTIONS': dict(port=8356),
},
'db': {
'ENGINE': 'signalqueue.worker.backends.DatabaseQueueProxy',
'INTERVAL': 30, # 1/3 sec
'OPTIONS': dict(app_label='signalqueue',
modl_name='EnqueuedSignal'),
},
'celery': {
'ENGINE': 'signalqueue.worker.celeryqueue.CeleryQueue',
'INTERVAL': 30, # 1/3 sec
'OPTIONS': dict(celery_queue_name='inactive',
transport='redis', port=8356),
},
}
SQ_ADDITIONAL_SIGNALS=['signalqueue.tests']
SQ_WORKER_PORT = 11201
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
try:
from kombu import Queue
except ImportError:
pass
else:
CELERY_DEFAULT_QUEUE = 'default'
CELERY_DEFAULT_ROUTING_KEY = 'default'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_QUEUES = (
Queue('default', routing_key='default.#'),
Queue('yodogg', routing_key='yodogg.#'),
)
CELERY_ALWAYS_EAGER = True
BROKER_URL = 'redis://localhost:8356/0'
BROKER_HOST = "localhost"
BROKER_BACKEND = "redis"
REDIS_PORT = 8356
REDIS_HOST = "localhost"
BROKER_USER = ""
BROKER_PASSWORD = ""
BROKER_VHOST = "0"
REDIS_DB = 0
REDIS_CONNECT_RETRY = True
CELERY_SEND_EVENTS = True
CELERY_RESULT_BACKEND = "redis://localhost:8356/0"
CELERY_TASK_RESULT_EXPIRES = 10
CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
try:
import djcelery
except ImportError:
pass
else:
djcelery.setup_loader()
# package path-extension snippet.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
|
fish2000/django-signalqueue
|
signalqueue/settings/__init__.py
|
Python
|
bsd-3-clause
| 4,884 | 0.006347 |
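The SQ_QUEUES entries above follow Django's usual convention of a dotted 'ENGINE' path plus an OPTIONS dict. A generic sketch of resolving such a path; this mirrors the common pattern, not necessarily signalqueue's actual loader:

# Generic dotted-path resolver for the ENGINE convention; illustrative only.
from importlib import import_module

def load_engine(queue_conf):
    module_path, cls_name = queue_conf['ENGINE'].rsplit('.', 1)
    cls = getattr(import_module(module_path), cls_name)
    return cls, queue_conf.get('OPTIONS', {})

# cls, opts = load_engine(SQ_QUEUES['default'])
# queue = cls(**opts)   # constructor signature assumed; check signalqueue itself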
# -*- coding:utf-8 -*-
'''
Execution script for the fio benchmarking tool
'''
import os,shutil,re,time,sys,copy
from test import BaseTest
from lpt.lib.error import *
from lpt.lib import lptxml
from lpt.lib import lptlog
from lpt.lib.share import utils
from lpt.lib import lptreport
class TestControl(BaseTest):
'''
    Inherits attributes and methods from BaseTest
'''
def __init__(self, jobs_xml, job_node, tool, tarball='fio-2.1.10.tar.bz2'):
super(TestControl, self).__init__(jobs_xml, job_node, tool, tarball)
def setup(self):
        '''Compile the source code and set up the program
'''
if not self.check_bin(self.processBin):
self.tar_src_dir = self.extract_bar()
os.chdir(self.tar_src_dir)
self.compile(configure_status=True, make_status=True)
os.chdir(self.lpt_root)
def run(self):
tool_node = self.check_tool_result_node()
self.config_file = os.path.join(self.tar_src_dir, self.get_config_value(tool_node, "config_file", "./fio-mixed.job", valueType=str))
lptlog.info("使用配置文件: %s" % self.config_file)
self.result_tmp_file = os.path.join(self.tmp_dir, "fio_output")
self.filesize = self.get_config_value(tool_node, "filesize", "100M", valueType=str)
lptlog.info("测试读写文件大小: %s" % self.filesize)
f = open(self.config_file,'r')
lines = f.read()
f.close()
f = open(self.config_file,'w')
lines = re.sub('size=(\d+)M', 'size=%s'%self.filesize, lines)
f.write(lines)
f.close()
self.mainParameters["parameters"] = "./fio --output %s %s"%(self.result_tmp_file, self.config_file)
lptlog.info("----------开始测试")
os.chdir(self.tar_src_dir)
utils.system("./fio --output %s %s"%(self.result_tmp_file, self.config_file))
def create_result(self):
lptlog.info("----------创建结果")
self.result_list = self.__match_index(self.result_tmp_file)
def __match_index(self, file):
if not os.path.isfile(file):
return []
lptlog.debug("在%s中搜索测试指标" % file)
results_lines = utils.read_all_lines(file)
labels = ('io', 'aggrb', 'minb', 'maxb', 'mint','maxt')
parallel_template = {'parallels': '1,2,3,4', 'parallel': '1', 'iter': '1', 'times': '2'}
result_list = []
count = 0
for line in results_lines:
if 'READ:' in line:
tmp_list = []
parallel_dict = copy.deepcopy(parallel_template)
parallel_dict['parallel'] = str(count / 2 + 1)
parallel_dict['iter'] = 'READ'
tmp_list.append(parallel_dict)
tmp_list.append(self.dict_generator(labels,line))
result_list.append(tmp_list)
count = count + 1
elif 'WRITE:' in line:
tmp_list = []
parallel_dict = copy.deepcopy(parallel_template)
parallel_dict['parallel'] = str(count / 2 + 1)
parallel_dict['iter'] = 'WRITE'
tmp_list.append(parallel_dict)
tmp_list.append(self.dict_generator(labels,line))
result_list.append(tmp_list)
count = count + 1
if count in [2,4,6,8]:
tmp_list = []
dict2 = result_list[-1][1]
dict1 = result_list[-2][1]
parallel_dict = copy.deepcopy(parallel_template)
parallel_dict['parallel'] = str(count / 2)
parallel_dict['iter'] = 'Average'
tmp_list.append(parallel_dict)
tmp_list.append(self.dict_average(dict1, dict2))
result_list.append(tmp_list)
return result_list
def dict_generator(self, labels, line):
result_dict = {}
line = line.replace(',','')
line = line.split()
for l,v in zip(labels, (line[1].split('=')[1][:-2], line[2].split('=')[1][:-4], line[3].split('=')[1][:-4], line[4].split('=')[1][:-4], line[5].split('=')[1][:-4], line[6].split('=')[1][:-4])):
result_dict[l] = "%s" % v
return result_dict
def dict_average(self, dict1, dict2):
result_dict = {}
        for k in dict1:
            # weighted average of the two runs (0.33 / 0.67)
            result_dict[k] = str(float(dict1[k]) * 0.33 + float(dict2[k]) * 0.67)
return result_dict
|
ShaolongHu/lpts
|
tests/fio.py
|
Python
|
gpl-2.0
| 4,566 | 0.007627 |
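The result parsing above keys off fio's classic READ:/WRITE: summary lines. A standalone demonstration of the same slicing on a made-up but format-typical line:

# Standalone demo of the summary-line slicing used by dict_generator.
sample = ('   READ: io=102400KB, aggrb=34133KB/s, minb=34133KB/s, '
          'maxb=34133KB/s, mint=3001msec, maxt=3001msec')
labels = ('io', 'aggrb', 'minb', 'maxb', 'mint', 'maxt')
parts = sample.replace(',', '').split()
values = (parts[1].split('=')[1][:-2],    # strip trailing "KB"
          parts[2].split('=')[1][:-4],    # strip trailing "KB/s"
          parts[3].split('=')[1][:-4],
          parts[4].split('=')[1][:-4],
          parts[5].split('=')[1][:-4],    # strip trailing "msec"
          parts[6].split('=')[1][:-4])
print(dict(zip(labels, values)))
# -> {'io': '102400', 'aggrb': '34133', ..., 'maxt': '3001'}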
from __future__ import division
import sys
if sys.version_info.major > 2:
from importlib import reload
from PySide import QtGui, QtCore
import traceback
import numpy as np
import FreeCAD as App
import FreeCADGui as Gui
from ._tools import BaseTool, input_field, text_field
from ._glider import draw_glider, draw_lines
from .pivy_primitives_new import vector3D
from .pivy_primitives_new import InteractionSeparator, Object3D, Arrow
from .pivy_primitives_new import Line as _Line
from .pivy_primitives_new import Marker as _Marker
from .pivy_primitives_new import coin
from openglider.glider.parametric.lines import UpperNode2D, LowerNode2D, \
BatchNode2D, Line2D, LineSet2D
from openglider.lines.line_types import LineType
import numpy as np
def refresh():
pass
class Line(_Line):
def set_disabled(self):
super(Line, self).set_disabled()
points = np.array(self.points)
points.T[2] = -1
self.points = points
def set_enabled(self):
super(Line, self).set_enabled()
points = np.array(self.points)
points.T[2] = 0
self.points = points
class Marker(_Marker):
def set_disabled(self):
super(Marker, self).set_disabled()
points = np.array(self.points)
points.T[2] = -1
self.points = points
def set_enabled(self):
super(Marker, self).set_enabled()
points = np.array(self.points)
points.T[2] = 0
self.points = points
class LineContainer(InteractionSeparator):
def Select(self, obj, multi=False):
if not multi:
for o in self.selected_objects:
o.unselect()
self.selected_objects = []
if obj:
if obj in self.selected_objects:
self.selected_objects.remove(obj)
elif obj.enabled:
self.selected_objects.append(obj)
self.ColorSelected()
self.selection_changed()
def select_all_cb(self, event_callback):
event = event_callback.getEvent()
if (event.getKey() == ord('a')):
if event.getState() == event.DOWN:
if self.selected_objects:
for o in self.selected_objects:
o.unselect()
self.selected_objects = []
else:
for obj in self.objects:
if obj.dynamic and obj.enabled:
self.selected_objects.append(obj)
self.ColorSelected()
self.selection_changed()
# All line info goes into the tool.
# The lineset is reloaded in full once the tool's work is done;
# if an error occurs, nothing happens.
# 1: create markers from existing lineset
# 2: create lines from existing lineset
# 3: eventhandler for adding and connecting lines
class LineTool(BaseTool):
widget_name = 'Line Tool'
def __init__(self, obj):
super(LineTool, self).__init__(obj)
# get the parametric shape
_shape = self.parametric_glider.shape.get_half_shape()
self.ribs = _shape.ribs
self.front = _shape.front
self.back = _shape.back
self.xpos = self.parametric_glider.shape.rib_x_values
self.disabled_color = (0.5, 0.5, 0.5)
# setup the GUI
self.setup_widget()
self.setup_pivy()
def setup_pivy(self):
# pivy helper line
self.helper_line = coin.SoSeparator()
self.temp_point = coin.SoSeparator()
# pivy lines, points, shape
self.shape = LineContainer()
self.shape.selection_changed = self.selection_changed
self.shape.setName('shape')
self.shape.register(self.view)
self.task_separator += [self.shape, self.helper_line]
self.task_separator += [self.temp_point]
self.draw_shape()
self.update_layer_selection()
self.update_helper_line()
self.setup_cb()
def setup_widget(self):
# qt helper line
self.Qhl_pos = QtGui.QDoubleSpinBox()
# qt element widget
self.tool_widget = QtGui.QStackedWidget()
self.tool_layout = QtGui.QFormLayout(self.tool_widget)
# qt layer widget
self.layer_widget = QtGui.QWidget()
self.layer_layout = QtGui.QFormLayout(self.layer_widget)
self.layer_selection = LayerComboBox(self.layer_widget)
self.layer_combobox = LayerComboBox(self.layer_widget)
self.layer_color_button = QtGui.QPushButton('select color')
self.layer_color_dialog = QtGui.QColorDialog()
self.tool_widget.setWindowTitle('object properties')
self.layer_widget.setWindowTitle('layers')
self.form.append(self.layer_widget)
self.form.append(self.tool_widget)
# temp_wid = QtGui.QWidget()
# temp_lay = QtGui.QHBoxLayout(temp_wid)
# self.layout.setWidget(1, input_field, temp_wid)
self.none_widget = QtGui.QWidget()
self.line_widget = QtGui.QWidget()
self.lw_att_wid = QtGui.QWidget()
self.up_att_wid = QtGui.QWidget()
self.Qline_list = QtGui.QListWidget()
for _type in LineType.types.values():
self.Qline_list.addItem(QLineType_item(_type))
self.Qline_list.sortItems()
self.up_att_lay = QtGui.QFormLayout(self.up_att_wid)
self.lw_att_lay = QtGui.QFormLayout(self.lw_att_wid)
self.line_layout = QtGui.QFormLayout(self.line_widget)
self.none_layout = QtGui.QFormLayout(self.none_widget)
self.target_length = QtGui.QDoubleSpinBox()
self.target_length.setDecimals(5)
self.line_layout.setWidget(
0, text_field, QtGui.QLabel('target length: '))
self.line_layout.setWidget(0, input_field, self.target_length)
self.line_layout.setWidget(1, text_field, QtGui.QLabel('line type: '))
self.line_layout.setWidget(1, input_field, self.Qline_list)
self.target_length.valueChanged.connect(self.update_target_length)
self.Qline_list.currentItemChanged.connect(self.update_line_type)
self.QLineName = QtGui.QLineEdit()
self.line_layout.setWidget(2, text_field, QtGui.QLabel('name'))
self.line_layout.setWidget(2, input_field, self.QLineName)
self.QLineName.textChanged.connect(self.line_name_changed)
self.attach_x_val = QtGui.QDoubleSpinBox()
self.attach_y_val = QtGui.QDoubleSpinBox()
self.attach_z_val = QtGui.QDoubleSpinBox()
for spinbox in [
self.attach_x_val, self.attach_y_val, self.attach_z_val]:
spinbox.setMaximum(10.)
spinbox.setMinimum(-10.)
spinbox.valueChanged.connect(self.update_lw_att_pos)
self.lw_att_lay.addWidget(self.attach_x_val)
self.lw_att_lay.addWidget(self.attach_y_val)
self.lw_att_lay.addWidget(self.attach_z_val)
self.up_att_force = QtGui.QDoubleSpinBox()
self.up_att_force.setSingleStep(0.1)
self.up_att_lay.setWidget(0, text_field, QtGui.QLabel('force'))
self.up_att_lay.setWidget(0, input_field, self.up_att_force)
self.up_att_force.valueChanged.connect(self.update_up_att_force)
self.up_att_rib = QtGui.QSpinBox()
self.up_att_rib.setMinimum(0)
self.up_att_rib.setMaximum(self.parametric_glider.shape.half_rib_num - 1)
self.up_att_lay.setWidget(1, text_field, QtGui.QLabel('rib nr'))
self.up_att_lay.setWidget(1, input_field, self.up_att_rib)
self.up_att_rib.valueChanged.connect(self.update_up_att_rib)
self.up_att_pos = QtGui.QDoubleSpinBox()
self.up_att_pos.setMinimum(0)
self.up_att_pos.setMaximum(1)
self.up_att_pos.setSingleStep(0.01)
self.up_att_lay.setWidget(2, text_field, QtGui.QLabel('position'))
self.up_att_lay.setWidget(2, input_field, self.up_att_pos)
self.up_att_pos.valueChanged.connect(self.update_up_att_pos)
self.tool_widget.addWidget(self.none_widget)
self.tool_widget.addWidget(self.line_widget)
self.tool_widget.addWidget(self.lw_att_wid)
self.tool_widget.addWidget(self.up_att_wid)
self.tool_widget.setCurrentWidget(self.none_widget)
button = QtGui.QPushButton('Help')
self.layout.setWidget(0, input_field, button)
button.clicked.connect(self.show_help)
self.Qhl_pos.setValue(50)
self.Qhl_pos.setRange(0, 100)
self.Qhl_pos.setSingleStep(1)
self.Qhl_pos.connect(
self.Qhl_pos,
QtCore.SIGNAL('valueChanged(double)'),
self.update_helper_line)
self.layout.setWidget(1, text_field, QtGui.QLabel('helper_line_pos'))
self.layout.setWidget(1, input_field, self.Qhl_pos)
# layers:
add_button = QtGui.QPushButton('add layer')
del_button = QtGui.QPushButton('delete layer')
self.layer_layout.setWidget(
0, text_field, QtGui.QLabel('work on layer'))
self.layer_layout.setWidget(0, input_field, self.layer_combobox)
self.layer_layout.setWidget(1, text_field, add_button)
self.layer_layout.setWidget(1, input_field, del_button)
self.layer_layout.setWidget(2, text_field, QtGui.QLabel('setLayer'))
self.layer_layout.setWidget(2, input_field, self.layer_selection)
self.layer_layout.setWidget(3, text_field, QtGui.QLabel('select color of disabled lines'))
self.layer_layout.setWidget(3, input_field, self.layer_color_button)
# dialogs
self.add_layer_dialog = QtGui.QInputDialog()
add_button.clicked.connect(self.add_new_layer)
del_button.clicked.connect(self.delete_layer)
self.layer_combobox.currentIndexChanged.connect(self.show_layer)
self.layer_selection.activated.connect(self.set_layer_by_current)
self.layer_selection.setEnabled(False)
self.layer_color_button.clicked.connect(self.layer_color_dialog.open)
self.layer_color_dialog.accepted.connect(self.color_changed)
def color_changed(self):
color = self.layer_color_dialog.currentColor().getRgbF()[:-1]
for obj in self.shape.objects:
obj.disabled_col = color
if not obj.enabled:
obj.set_disabled()
def line_name_changed(self, name):
self.shape.selected_objects[0].name = name
def add_new_layer(self):
self.add_layer_dialog.exec_()
text = self.add_layer_dialog.textValue()
self.layer_combobox.addItem(text)
index = self.layer_combobox.findText(text)
self.layer_combobox.setCurrentIndex(index)
self.set_layer(text=text)
self.show_layer()
self.update_layer_selection()
self.layer_combobox.model().sort(0)
self.layer_selection.model().sort(0)
def delete_layer(self):
current_layer = self.layer_combobox.currentText()
self.set_layer(text='', objects=self.shape.objects,
from_layer=current_layer)
self.layer_combobox.removeItem(self.layer_combobox.currentIndex())
self.update_layer_selection()
self.show_layer()
self.update_layer_selection()
def update_layer_selection(self):
self.layer_selection.getAllItems(self.layer_combobox)
self.selection_changed()
def set_layer_by_current(self):
self.set_layer()
self.show_layer()
def set_layer(self, text=None, objects=None, from_layer=None):
text = text or self.layer_selection.currentText()
objects = objects or self.shape.selected_objects
for obj in objects:
if hasattr(obj, 'layer'):
if from_layer is None or from_layer == obj.layer:
obj.layer = text
def show_layer(self):
self.shape.deselect_all()
for obj in self.shape.objects:
if hasattr(obj, 'layer'):
if self.layer_combobox.currentText() == '':
if not obj.enabled:
obj.set_enabled()
elif obj.layer != self.layer_combobox.currentText():
if obj.enabled:
obj.set_disabled()
else:
if not obj.enabled:
obj.set_enabled()
def show_help(self):
        App.Console.PrintMessage('Use these commands to control the line input\n')
        App.Console.PrintMessage('g...grab element and move it\n')
        App.Console.PrintMessage('l...create line from 2 points\n')
        App.Console.PrintMessage('i...add a new point\n')
        App.Console.PrintMessage('x...delete a point or a line\n')
        App.Console.PrintMessage('ctrl + p...attachment point\n')
        App.Console.PrintMessage('ctrl...multiselection\n')
def setup_cb(self):
self.point_preview_cb = self.view.addEventCallbackPivy(
coin.SoLocation2Event.getClassTypeId(), self.point_preview)
self.line_cb = self.view.addEventCallbackPivy(
coin.SoKeyboardEvent.getClassTypeId(), self.add_line)
self.node_cb = self.view.addEventCallbackPivy(
coin.SoKeyboardEvent.getClassTypeId(), self.add_node)
def remove_all_callbacks(self):
if self.point_preview_cb:
self.view.removeEventCallbackPivy(coin.SoLocation2Event.getClassTypeId(), self.point_preview_cb)
if self.line_cb:
self.view.removeEventCallbackPivy(coin.SoKeyboardEvent.getClassTypeId(), self.line_cb)
if self.node_cb:
self.view.removeEventCallbackPivy(coin.SoKeyboardEvent.getClassTypeId(), self.node_cb)
def update_helper_line(self, pos=50):
self.helper_line.removeAllChildren()
l = Line(vector3D(self.help_line(pos / 100)), dynamic=False)
l.set_color('red')
self.helper_line += [l]
def help_line(self, pos=0.5):
return [fr + pos * (ba - fr) for fr, ba in np.array(self.ribs)]
def point_preview(self, event_callback, force=False):
event = event_callback.getEvent()
self.temp_point.removeAllChildren()
if type(event) == coin.SoLocation2Event or force:
self.upper_preview_node = None
pos = event.getPosition()
check_points = [i.tolist() for i in self.help_line(self.Qhl_pos.value() / 100)]
for i, point in enumerate(check_points):
s = self.view.getPointOnScreen(point[0], point[1], 0.)
if (abs(s[0] - pos[0]) ** 2 + abs(s[1] - pos[1]) ** 2) < (15 ** 2) and point[0] >= 0:
self.upper_preview_node = (point, i)
m = Marker(vector3D([point]), dynamic=False)
m.set_color('blue')
self.temp_point += [m]
break
def add_line(self, event_callback):
event = event_callback.getEvent()
if (event.getKey() == ord('l') and
event.getState() == 1):
objs = self.shape.selected_objects
if len(objs) == 2:
if (isinstance(objs[0], NodeMarker) and
isinstance(objs[1], NodeMarker)):
line = ConnectionLine(objs[0], objs[1])
line.layer = self.layer_combobox.currentText()
self.shape += [line]
elif len(objs) == 1:
if (isinstance(objs[0], NodeMarker)):
marker2 = self.node_cb(event_callback, force=True)
if marker2:
line = ConnectionLine(objs[0], marker2)
self.shape += [line]
self.shape.Select(marker2)
self.shape.selection_changed()
line.layer = self.layer_combobox.currentText()
def add_node(self, event_callback, force=False):
event = event_callback.getEvent()
if ((event.getKey() == ord('i') or force) and
(event.getState() == 1)):
objs = self.shape.selected_objects
if len(objs) == 1 and (isinstance(objs[0], Lower_Att_Marker)):
node = objs[0].node
point = Lower_Att_Marker(node, self.parametric_glider)
point.layer = self.layer_combobox.currentText()
self.shape += [point]
self.shape.Select(point)
self.shape.grab_cb(event_callback, force=True)
elif self.upper_preview_node:
self.add_attachment_point(self.upper_preview_node[0])
else:
pos = event.getPosition()
pos_3D = list(self.view.getPoint(*pos))
pos_3D[-1] = 0.
if event.wasCtrlDown():
node = LowerNode2D(pos_3D[:-1], [0, 0, 0])
point = Lower_Att_Marker(node, self.parametric_glider)
point.layer = self.layer_combobox.currentText()
else:
node = BatchNode2D(pos_3D[:-1])
point = NodeMarker(node, self.parametric_glider)
point.layer = self.layer_combobox.currentText()
self.shape += [point]
return point
def copy_node(self, event_callback, force=False):
event = event_callback.getEvent()
if ((event.getKey() == ord('c')) and
(event.getState() == 1)):
# get selection
objs = self.shape.selected_objects
if len(objs) == 1 and (isinstance(objs[0], Upper_Att_Marker)):
node = objs[0].node
ap = Upper_Att_Marker(node, self.parametric_glider)
ap.layer = self.layer_combobox.currentText()
self.shape += [ap]
def add_attachment_point(self, pos):
x, y = pos
rib_nr = self.xpos.index(x)
pos = float(self.Qhl_pos.value())
node = UpperNode2D(rib_nr, pos / 100)
node_pos = node.get_2D(self.parametric_glider.shape)
ap = Upper_Att_Marker(node, self.parametric_glider)
ap.layer = self.layer_combobox.currentText()
self.shape += [ap]
def selection_changed(self):
        # Depending on which widget is currently selected,
        # show the matching property widget.
        # If several elements or none are selected, show nothing.
def show_line_widget(objs):
for obj in objs:
if not (isinstance(obj, ConnectionLine)):
return False
return True
def has_uppermost_line(objs):
for obj in objs:
if obj.is_uppermost_line():
return True
return False
def show_upper_att_widget(objs):
for obj in objs:
if not isinstance(obj, Upper_Att_Marker):
return False
return True
def show_lower_att_widget(objs):
for obj in objs:
if not isinstance(obj, Lower_Att_Marker):
return False
return True
selected_objs = self.shape.selected_objects
if selected_objs:
self.layer_selection.setEnabled(True)
self.target_length.setEnabled(True)
self.layer_selection.setItemByText(selected_objs[0].layer)
# self.layer_combobox.blockSignals(True)
# self.layer_combobox.setItemByText(selected_objs[0].layer)
# self.layer_combobox.blockSignals(False)
if show_line_widget(selected_objs):
self.tool_widget.setCurrentWidget(self.line_widget)
if has_uppermost_line(selected_objs):
self.target_length.setEnabled(False)
else:
self.target_length.setValue(selected_objs[0].target_length)
line_type_item = self.Qline_list.findItems(
selected_objs[0].line_type, QtCore.Qt.MatchExactly)[0]
self.Qline_list.setCurrentItem(line_type_item)
if len(selected_objs) != 1:
self.QLineName.setDisabled(True)
else:
self.QLineName.blockSignals(True)
self.QLineName.setText(selected_objs[0].name)
self.QLineName.blockSignals(False)
self.QLineName.setEnabled(True)
elif show_lower_att_widget(selected_objs):
self.tool_widget.setCurrentWidget(self.lw_att_wid)
x, y, z = selected_objs[0].pos_3D
self.attach_x_val.setValue(x)
self.attach_y_val.setValue(y)
self.attach_z_val.setValue(z)
elif show_upper_att_widget(selected_objs):
self.tool_widget.setCurrentWidget(self.up_att_wid)
self.up_att_force.setValue(selected_objs[0].force)
rib_nr = set([i.rib_nr for i in selected_objs])
if len(rib_nr) > 1:
self.up_att_rib.setDisabled(True)
else:
self.up_att_rib.setValue(list(rib_nr)[0])
self.up_att_rib.setEnabled(True)
pos = selected_objs[0].rib_pos
self.up_att_pos.setValue(pos)
self.up_att_pos.setEnabled(True)
else:
self.tool_widget.setCurrentWidget(self.none_widget)
else:
self.tool_widget.setCurrentWidget(self.none_widget)
self.layer_selection.setEnabled(False)
def update_target_length(self, *args):
l = float(self.target_length.value())
for obj in self.shape.selected_objects:
obj.target_length = l
def update_line_type(self, *args):
for obj in self.shape.selected_objects:
obj.line_type = self.Qline_list.currentItem().line_type.name
def update_lw_att_pos(self, *args):
x = self.attach_x_val.value()
y = self.attach_y_val.value()
z = self.attach_z_val.value()
for obj in self.shape.selected_objects:
obj.pos_3D = [x, y, z]
def update_up_att_force(self, *args):
for obj in self.shape.selected_objects:
obj.force = self.up_att_force.value()
def update_up_att_rib(self, *args):
for obj in self.shape.selected_objects:
obj.rib_nr = self.up_att_rib.value()
def update_up_att_pos(self, *args):
# print('update pos')
for obj in self.shape.selected_objects:
obj.rib_pos = self.up_att_pos.value()
def draw_shape(self):
self.shape.removeAllChildren()
self.shape += [Line(vector3D(self.front)), Line(vector3D(self.back))]
self.shape += list(map(Line, vector3D(self.ribs)))
shape = self.parametric_glider.shape
# make own seperator for shape
nodes = {}
for node in self.parametric_glider.lineset.nodes:
if isinstance(node, UpperNode2D):
# coord = self.parametric_glider.shape_point(node.rib_no, node.position/100)
pos = node.get_2D(self.parametric_glider.shape)
obj = Upper_Att_Marker(node, self.parametric_glider)
obj.force = node.force
self.shape += [obj]
elif isinstance(node, BatchNode2D):
obj = NodeMarker(node, self.parametric_glider)
self.shape += [obj]
elif isinstance(node, LowerNode2D):
obj = Lower_Att_Marker(node, self.parametric_glider)
obj.pos_3D = node.pos_3D
obj._node = node
self.shape += [obj]
nodes[node] = obj
self.layer_combobox.addItem(node.layer)
for line in self.parametric_glider.lineset.lines:
m1 = nodes[line.lower_node]
m2 = nodes[line.upper_node]
obj = ConnectionLine(m1, m2)
obj.line_type = line.line_type.name
obj.target_length = line.target_length
obj.name = line.name
obj.layer = line.layer
self.shape += [obj]
self.layer_combobox.addItem(line.layer)
self.layer_combobox.model().sort(0)
self.layer_selection.model().sort(0)
self.show_layer()
def accept(self):
        '''The 2d glider will receive the 2d information.
        The attachment points are already stored;
        the other points are stored in the batch_points list.
'''
lines = []
for obj in self.shape.objects:
if isinstance(obj, ConnectionLine):
l = Line2D(obj.marker1.node, obj.marker2.node)
if not obj.is_uppermost_line():
l.target_length = obj.target_length
l.line_type = LineType.types[obj.line_type]
l.layer = obj.layer
l.name = obj.name
lines.append(l)
if isinstance(l.upper_node, UpperNode2D):
l.upper_node.name = obj.name
lineset = self.parametric_glider.lineset
try:
new_lines = LineSet2D(lines)
self.parametric_glider.lineset = new_lines
self.parametric_glider.get_glider_3d(self.obj.Proxy.getGliderInstance())
except Exception as e:
App.Console.PrintError(traceback.format_exc())
self.parametric_glider.lineset = lineset
self.parametric_glider.get_glider_3d(self.obj.Proxy.getGliderInstance())
return
self.shape.unregister()
self.remove_all_callbacks()
super(LineTool, self).accept()
self.update_view_glider()
def reject(self):
self.shape.unregister()
self.remove_all_callbacks()
super(LineTool, self).reject()
class NodeMarker(Marker):
std_col = 'black'
ovr_col = 'red'
sel_col = 'yellow'
def __init__(self, node, par_glider, pos=None):
if pos is None:
pos = node.get_2D(par_glider.shape)
pos = vector3D(pos)
super(NodeMarker, self).__init__([pos], dynamic=True)
self._node = node
self.par_glider = par_glider
@property
def node(self):
self._node.pos_2D = list(self.pos)[:-1]
return self._node
@property
def pos(self):
return list(self.points[0])
@pos.setter
def pos(self, pos):
self.points = [pos]
@property
def layer(self):
return self._node.layer
@layer.setter
def layer(self, layer):
self._node.layer = layer
class Upper_Att_Marker(NodeMarker):
std_col = 'blue'
def __init__(self, node, par_glider):
super(Upper_Att_Marker, self).__init__(node, par_glider)
@property
def force(self):
return self._node.force
@force.setter
def force(self, value):
self._node.force = value
@property
def rib_nr(self):
return self._node.cell_no
@rib_nr.setter
def rib_nr(self, nr):
self._node.cell_no = nr
self.pos = vector3D(self._node.get_2D(self.par_glider.shape))
for foo in self.on_drag:
foo()
@property
def rib_pos(self):
return self._node.rib_pos
@rib_pos.setter
def rib_pos(self, pos):
self._node.rib_pos = pos
# print('update pos')
self.pos = vector3D(self._node.get_2D(self.par_glider.shape))
for foo in self.on_drag:
foo()
def drag(self, *arg):
pass
class Lower_Att_Marker(NodeMarker):
std_col = 'green'
def __init__(self, node, par_glider):
pos = node.pos_2D
super(Lower_Att_Marker, self).__init__(node, par_glider)
@property
def pos_3D(self):
return self._node.pos_3D
@pos_3D.setter
def pos_3D(self, value):
self._node.pos_3D = value
@property
def pos(self):
return list(self.points[0])
@pos.setter
def pos(self, pos):
self.points = [pos]
class ConnectionLine(Line):
def __init__(self, marker1, marker2):
super(ConnectionLine, self).__init__([marker1.pos, marker2.pos], dynamic=True)
self.marker1 = marker1
self.marker2 = marker2
self.marker1.on_drag.append(self.update_Line)
self.marker2.on_drag.append(self.update_Line)
self.drawstyle.lineWidth = 1.
self.target_length = 1.
self.line_type = 'default'
self.layer = ''
self.name = 'line_name'
def is_uppermost_line(self):
return (isinstance(self.marker1, Upper_Att_Marker) or
isinstance(self.marker2, Upper_Att_Marker))
def update_Line(self):
self.points = [self.marker1.pos, self.marker2.pos]
# def drag(self, mouse_coords, fact=1.):
# self.marker1.drag(mouse_coords, fact)
# self.marker2.drag(mouse_coords, fact)
@property
def drag_objects(self):
return [self.marker1, self.marker2]
@property
def points(self):
return self.data.point.getValues()
@points.setter
def points(self, points):
p = [[pi[0], pi[1], pi[2] -0.001] for pi in points]
self.data.point.setValue(0, 0, 0)
self.data.point.setValues(0, len(p), p)
def check_dependency(self):
if self.marker1._delete or self.marker2._delete:
self.delete()
class QLineType_item(QtGui.QListWidgetItem):
def __init__(self, line_type):
self.line_type = line_type
super(QLineType_item, self).__init__()
self.setText(self.line_type.name)
class LayerComboBox(QtGui.QComboBox):
def __init__(self, parent=None):
super(LayerComboBox, self).__init__(parent)
self.setInsertPolicy(QtGui.QComboBox.InsertAlphabetically)
self.addItem('')
def addItem(self, text):
if self.findText(text) == -1:
super(LayerComboBox, self).addItem(text)
def removeItem(self, index):
super(LayerComboBox, self).removeItem(index)
if self.count() == 0:
self.addItem('')
def removeAll(self):
while self.currentIndex() != -1:
super(LayerComboBox, self).removeItem(self.currentIndex())
def getAllItems(self, other):
self.removeAll()
for i in range(other.count()):
self.addItem(other.itemText(i))
def currentText(self):
return self.itemText(self.currentIndex())
def setItemByText(self, text):
item = self.findText(text)
if item != -1:
self.setCurrentIndex(item)
class LineSelectionSeperator(InteractionSeparator):
def register(self, view, observer_tool):
self.view = view
self.observer_tool = observer_tool
self.mouse_over = self.view.addEventCallbackPivy(
coin.SoLocation2Event.getClassTypeId(), self.mouse_over_cb)
self.select = self.view.addEventCallbackPivy(
coin.SoMouseButtonEvent.getClassTypeId(), self.select_cb)
self.select_all = self.view.addEventCallbackPivy(
coin.SoKeyboardEvent.getClassTypeId(), self.select_all_cb)
def unregister(self):
self.view.removeEventCallbackPivy(
coin.SoLocation2Event.getClassTypeId(), self.mouse_over)
self.view.removeEventCallbackPivy(
coin.SoMouseButtonEvent.getClassTypeId(), self.select)
self.view.removeEventCallbackPivy(
coin.SoKeyboardEvent.getClassTypeId(), self.select_all)
def selection_changed(self):
self.observer_tool.selection_changed(*self.get_data())
def get_data(self):
line_force = np.zeros(3)
line_length = np.zeros(3)
node_force = np.zeros(3)
for obj in self.selected_objects:
if isinstance(obj, GliderLine):
line_force += obj.line.force * obj.line.diff_vector
line_length[0] += obj.line.length_no_sag
try:
line_length[1] += obj.line.length_with_sag
line_length[2] += obj.line.get_stretched_length()
except ValueError:
pass
return line_force, line_length
class GliderLine(Line):
def __init__(self, line):
        points = line.get_line_points(2)
super(Line, self).__init__(points, dynamic=True)
self.line = line
class LineObserveTool(BaseTool):
widget_name = 'line observe tool'
turn = False
def __init__(self, obj):
super(LineObserveTool, self).__init__(obj)
self.g3d = self.obj.Proxy.getGliderInstance()
self.setup_qt()
self.g3d.lineset.recalc(False)
self.draw_glider()
def setup_qt(self):
self.force = QtGui.QLabel("x: {:5.1f} N\n"\
"y: {:5.1f} N\n"\
"z: {:5.1f} N".format(0, 0, 0))
self.length = QtGui.QLabel("length without sag: {:5.3f} m\n"\
"length with sag: {:5.3f} m\n"\
"stretched lengths: {:5.3f} m".format(0, 0, 0))
self.force_factor = QtGui.QSlider(QtCore.Qt.Orientation.Horizontal)
self.force_factor.setTickInterval(100)
self.force_factor.setMinimum(1)
self.force_factor.setValue(10)
self.layout.setWidget(0, text_field, QtGui.QLabel("force"))
self.layout.setWidget(0, input_field, self.force)
self.layout.setWidget(1, text_field, QtGui.QLabel("length"))
self.layout.setWidget(1, input_field, self.length)
self.layout.setWidget(2, text_field, QtGui.QLabel("force-factor"))
self.layout.setWidget(2, input_field, self.force_factor)
self.recalc_button = QtGui.QPushButton("recompute")
self.sag_check = QtGui.QCheckBox("sag")
self.sag_check.setTristate(False)
self.sag_check.setCheckState(QtCore.Qt.CheckState(False))
self.layout.setWidget(3, input_field, self.recalc_button)
self.layout.setWidget(3, text_field, self.sag_check)
self.force_factor.sliderReleased.connect(self.draw_residual_forces)
self.recalc_button.clicked.connect(self.recompute_lines)
def recompute_lines(self):
calculate_sag = bool(self.sag_check.checkState())
self.g3d.lineset.recalc(calculate_sag=calculate_sag)
self.line_sep.unregister()
self.task_separator.removeAllChildren()
self.draw_glider()
def draw_glider(self):
_rot = coin.SbRotation()
_rot.setValue(coin.SbVec3f(0, 1, 0), coin.SbVec3f(1, 0, 0))
rot = coin.SoRotation()
rot.rotation.setValue(_rot)
self.task_separator += rot
draw_glider(self.g3d, self.task_separator, profile_num=50, hull=None, ribs=True, fill_ribs=False)
# self.g3d.lineset.recalc(calculate_sag=True)
self.line_sep = LineSelectionSeperator()
self.arrows = coin.SoSeparator()
self.task_separator += self.line_sep, self.arrows
for line in self.g3d.lineset.lines:
self.line_sep += GliderLine(line)
self.line_sep.register(self.view, self)
self.draw_residual_forces()
def draw_residual_forces(self, factor=None):
self.arrows.removeAllChildren()
factor = (factor or self.force_factor.value()) * 1e-3
for node in self.g3d.lineset.nodes:
if True: #node.type == 1:
point = node.vec
force = self.g3d.lineset.get_residual_force(node) * factor
force *= 1 - 2 * (node.type == 1)
if np.linalg.norm(force) > 1e-4:
arrow = Arrow([point, point - force], arrow_size=0.05 * np.linalg.norm(force))
arrow.set_color("red")
self.arrows += arrow
def selection_changed(self, force, length):
self.force.setText("x: {:5.1f} N\n"\
"y: {:5.1f} N\n"\
"z: {:5.1f} N".format(*force))
self.length.setText("length without sag: {:5.3f} m\n"\
"length with sag: {:5.3f} m\n"\
"stretched lengths {:5.3f} m".format(*length))
def accept(self):
self.line_sep.unregister()
super(LineObserveTool, self).accept()
def reject(self):
self.line_sep.unregister()
super(LineObserveTool, self).reject()
|
booya-at/freecad_glider
|
freecad/freecad_glider/tools/line_tool.py
|
Python
|
lgpl-2.1
| 36,337 | 0.001431 |
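help_line above linearly interpolates between each rib's front and back point. A tiny numeric illustration with invented rib coordinates:

# pos in [0, 1] blends each (front, back) pair: front + pos * (back - front).
import numpy as np

ribs = np.array([[[0.0, 0.0], [0.0, -1.0]],
                 [[1.0, 0.1], [1.0, -0.9]]])   # invented coordinates
pos = 0.5
print([fr + pos * (ba - fr) for fr, ba in ribs])
# -> [array([ 0. , -0.5]), array([ 1. , -0.4])]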
from proboscis import test
@test(groups=['benchmark.discovery'])
class BenchmarkDiscoveryTests(object):
def __init__(self):
pass
|
jlongever/RackHD
|
test/benchmark/api_v2_0/discovery_tests.py
|
Python
|
apache-2.0
| 142 | 0.007042 |
'''
Created on May 26, 2012
@author: Charlie
'''
class MyPackageMod01(object):
def __init__(self):
pass
|
donlee888/JsObjects
|
Python/ComplexPaths02/src/main/mypackage/MyPackageMod01.py
|
Python
|
mit
| 122 | 0.02459 |
"""Controllers for the mozzarella application."""
|
ColoradoSchoolOfMines/acm-website
|
mozzarella/controllers/__init__.py
|
Python
|
gpl-3.0
| 50 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-01-12 17:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('themes', '0002_auto_20170110_1809'),
]
operations = [
migrations.AlterField(
model_name='themes',
name='css_style',
field=models.CharField(choices=[('green', 'Green'), ('red', 'Red'), ('black', 'Black')], default='green', max_length=50, verbose_name='Css Style'),
),
]
|
amadeusproject/amadeuslms
|
themes/migrations/0003_auto_20170112_1408.py
|
Python
|
gpl-2.0
| 560 | 0.001786 |
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from ..forms import AnonymousUserShippingForm, ShippingAddressesForm
from ...userprofile.forms import get_address_form
from ...userprofile.models import Address
from ...teamstore.utils import get_team
def anonymous_user_shipping_address_view(request, checkout):
team = get_team(request.session['team'])
if team.group_shipping:
address_form, preview = get_address_form(
request.POST or None, country_code=request.country.code,
autocomplete_type='shipping',
initial={'country': request.country.code},
instance=team.shipping_address)
else:
address_form, preview = get_address_form(
request.POST or None, country_code=request.country.code,
autocomplete_type='shipping',
initial={'country': request.country.code},
instance=checkout.shipping_address)
user_form = AnonymousUserShippingForm(
not preview and request.POST or None, initial={'email': checkout.email}
if not preview else request.POST.dict())
if team.group_shipping and user_form.is_valid():
checkout.shipping_address = team.shipping_address
checkout.email = user_form.cleaned_data['email']
return redirect('checkout:shipping-method')
elif all([user_form.is_valid(), address_form.is_valid()]):
checkout.shipping_address = address_form.instance
checkout.email = user_form.cleaned_data['email']
return redirect('checkout:shipping-method')
return TemplateResponse(
request, 'checkout/shipping_address.html', context={
'address_form': address_form, 'user_form': user_form,
'group_shipping': team.group_shipping, 'checkout': checkout})
def user_shipping_address_view(request, checkout):
data = request.POST or None
additional_addresses = request.user.addresses.all()
checkout.email = request.user.email
shipping_address = checkout.shipping_address
if shipping_address is not None and shipping_address.id:
address_form, preview = get_address_form(
data, country_code=request.country.code,
initial={'country': request.country})
addresses_form = ShippingAddressesForm(
data, additional_addresses=additional_addresses,
initial={'address': shipping_address.id})
elif shipping_address:
address_form, preview = get_address_form(
data, country_code=shipping_address.country.code,
instance=shipping_address)
addresses_form = ShippingAddressesForm(
data, additional_addresses=additional_addresses)
else:
address_form, preview = get_address_form(
data, initial={'country': request.country},
country_code=request.country.code)
addresses_form = ShippingAddressesForm(
data, additional_addresses=additional_addresses)
if addresses_form.is_valid() and not preview:
if addresses_form.cleaned_data['address'] != ShippingAddressesForm.NEW_ADDRESS:
address_id = addresses_form.cleaned_data['address']
checkout.shipping_address = Address.objects.get(id=address_id)
return redirect('checkout:shipping-method')
elif address_form.is_valid():
checkout.shipping_address = address_form.instance
return redirect('checkout:shipping-method')
return TemplateResponse(
request, 'checkout/shipping_address.html', context={
'address_form': address_form, 'user_form': addresses_form,
'checkout': checkout, 'additional_addresses': additional_addresses})
|
jonathanmeier5/teamstore
|
saleor/checkout/views/shipping.py
|
Python
|
bsd-3-clause
| 3,737 | 0.00107 |
"""Integration with the Rachio Iro sprinkler system controller."""
from abc import abstractmethod
from contextlib import suppress
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ENTITY_ID, ATTR_ID
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import as_timestamp, now, parse_datetime, utc_from_timestamp
from .const import (
CONF_MANUAL_RUN_MINS,
DEFAULT_MANUAL_RUN_MINS,
DOMAIN as DOMAIN_RACHIO,
KEY_CUSTOM_CROP,
KEY_CUSTOM_SHADE,
KEY_CUSTOM_SLOPE,
KEY_DEVICE_ID,
KEY_DURATION,
KEY_ENABLED,
KEY_ID,
KEY_IMAGE_URL,
KEY_NAME,
KEY_ON,
KEY_RAIN_DELAY,
KEY_RAIN_DELAY_END,
KEY_SCHEDULE_ID,
KEY_SUBTYPE,
KEY_SUMMARY,
KEY_TYPE,
KEY_ZONE_ID,
KEY_ZONE_NUMBER,
SCHEDULE_TYPE_FIXED,
SCHEDULE_TYPE_FLEX,
SERVICE_SET_ZONE_MOISTURE,
SERVICE_START_MULTIPLE_ZONES,
SIGNAL_RACHIO_CONTROLLER_UPDATE,
SIGNAL_RACHIO_RAIN_DELAY_UPDATE,
SIGNAL_RACHIO_SCHEDULE_UPDATE,
SIGNAL_RACHIO_ZONE_UPDATE,
SLOPE_FLAT,
SLOPE_MODERATE,
SLOPE_SLIGHT,
SLOPE_STEEP,
)
from .entity import RachioDevice
from .webhooks import (
SUBTYPE_RAIN_DELAY_OFF,
SUBTYPE_RAIN_DELAY_ON,
SUBTYPE_SCHEDULE_COMPLETED,
SUBTYPE_SCHEDULE_STARTED,
SUBTYPE_SCHEDULE_STOPPED,
SUBTYPE_SLEEP_MODE_OFF,
SUBTYPE_SLEEP_MODE_ON,
SUBTYPE_ZONE_COMPLETED,
SUBTYPE_ZONE_PAUSED,
SUBTYPE_ZONE_STARTED,
SUBTYPE_ZONE_STOPPED,
)
_LOGGER = logging.getLogger(__name__)
ATTR_DURATION = "duration"
ATTR_PERCENT = "percent"
ATTR_SCHEDULE_SUMMARY = "Summary"
ATTR_SCHEDULE_ENABLED = "Enabled"
ATTR_SCHEDULE_DURATION = "Duration"
ATTR_SCHEDULE_TYPE = "Type"
ATTR_SORT_ORDER = "sortOrder"
ATTR_ZONE_NUMBER = "Zone number"
ATTR_ZONE_SHADE = "Shade"
ATTR_ZONE_SLOPE = "Slope"
ATTR_ZONE_SUMMARY = "Summary"
ATTR_ZONE_TYPE = "Type"
START_MULTIPLE_ZONES_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_DURATION): cv.ensure_list_csv,
}
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Rachio switches."""
zone_entities = []
has_flex_sched = False
entities = await hass.async_add_executor_job(_create_entities, hass, config_entry)
for entity in entities:
if isinstance(entity, RachioZone):
zone_entities.append(entity)
if isinstance(entity, RachioSchedule) and entity.type == SCHEDULE_TYPE_FLEX:
has_flex_sched = True
async_add_entities(entities)
_LOGGER.info("%d Rachio switch(es) added", len(entities))
def start_multiple(service: ServiceCall) -> None:
"""Service to start multiple zones in sequence."""
zones_list = []
person = hass.data[DOMAIN_RACHIO][config_entry.entry_id]
entity_id = service.data[ATTR_ENTITY_ID]
duration = iter(service.data[ATTR_DURATION])
default_time = service.data[ATTR_DURATION][0]
entity_to_zone_id = {
entity.entity_id: entity.zone_id for entity in zone_entities
}
for (count, data) in enumerate(entity_id):
if data in entity_to_zone_id:
# Time can be passed as a list per zone,
# or one time for all zones
time = int(next(duration, default_time)) * 60
zones_list.append(
{
ATTR_ID: entity_to_zone_id.get(data),
ATTR_DURATION: time,
ATTR_SORT_ORDER: count,
}
)
if len(zones_list) != 0:
person.start_multiple_zones(zones_list)
_LOGGER.debug("Starting zone(s) %s", entity_id)
else:
raise HomeAssistantError("No matching zones found in given entity_ids")
hass.services.async_register(
DOMAIN_RACHIO,
SERVICE_START_MULTIPLE_ZONES,
start_multiple,
schema=START_MULTIPLE_ZONES_SCHEMA,
)
if has_flex_sched:
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SET_ZONE_MOISTURE,
{vol.Required(ATTR_PERCENT): cv.positive_int},
"set_moisture_percent",
)
def _create_entities(hass, config_entry):
entities = []
person = hass.data[DOMAIN_RACHIO][config_entry.entry_id]
# Fetch the schedule once at startup
# in order to avoid every zone doing it
for controller in person.controllers:
entities.append(RachioStandbySwitch(controller))
entities.append(RachioRainDelay(controller))
zones = controller.list_zones()
schedules = controller.list_schedules()
flex_schedules = controller.list_flex_schedules()
current_schedule = controller.current_schedule
for zone in zones:
entities.append(RachioZone(person, controller, zone, current_schedule))
for sched in schedules + flex_schedules:
entities.append(RachioSchedule(person, controller, sched, current_schedule))
_LOGGER.debug("Added %s", entities)
return entities
class RachioSwitch(RachioDevice, SwitchEntity):
"""Represent a Rachio state that can be toggled."""
def __init__(self, controller):
"""Initialize a new Rachio switch."""
super().__init__(controller)
self._state = None
@property
def name(self) -> str:
"""Get a name for this switch."""
return f"Switch on {self._controller.name}"
@property
def is_on(self) -> bool:
"""Return whether the switch is currently on."""
return self._state
@callback
def _async_handle_any_update(self, *args, **kwargs) -> None:
"""Determine whether an update event applies to this device."""
if args[0][KEY_DEVICE_ID] != self._controller.controller_id:
# For another device
return
# For this device
self._async_handle_update(args, kwargs)
@abstractmethod
def _async_handle_update(self, *args, **kwargs) -> None:
"""Handle incoming webhook data."""
class RachioStandbySwitch(RachioSwitch):
"""Representation of a standby status/button."""
@property
def name(self) -> str:
"""Return the name of the standby switch."""
return f"{self._controller.name} in standby mode"
@property
def unique_id(self) -> str:
"""Return a unique id by combining controller id and purpose."""
return f"{self._controller.controller_id}-standby"
@property
def icon(self) -> str:
"""Return an icon for the standby switch."""
return "mdi:power"
@callback
def _async_handle_update(self, *args, **kwargs) -> None:
"""Update the state using webhook data."""
if args[0][0][KEY_SUBTYPE] == SUBTYPE_SLEEP_MODE_ON:
self._state = True
elif args[0][0][KEY_SUBTYPE] == SUBTYPE_SLEEP_MODE_OFF:
self._state = False
self.async_write_ha_state()
def turn_on(self, **kwargs) -> None:
"""Put the controller in standby mode."""
self._controller.rachio.device.turn_off(self._controller.controller_id)
def turn_off(self, **kwargs) -> None:
"""Resume controller functionality."""
self._controller.rachio.device.turn_on(self._controller.controller_id)
async def async_added_to_hass(self):
"""Subscribe to updates."""
if KEY_ON in self._controller.init_data:
self._state = not self._controller.init_data[KEY_ON]
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_RACHIO_CONTROLLER_UPDATE,
self._async_handle_any_update,
)
)
class RachioRainDelay(RachioSwitch):
"""Representation of a rain delay status/switch."""
def __init__(self, controller):
"""Set up a Rachio rain delay switch."""
self._cancel_update = None
super().__init__(controller)
@property
def name(self) -> str:
"""Return the name of the switch."""
return f"{self._controller.name} rain delay"
@property
def unique_id(self) -> str:
"""Return a unique id by combining controller id and purpose."""
return f"{self._controller.controller_id}-delay"
@property
def icon(self) -> str:
"""Return an icon for rain delay."""
return "mdi:camera-timer"
@callback
def _async_handle_update(self, *args, **kwargs) -> None:
"""Update the state using webhook data."""
if self._cancel_update:
self._cancel_update()
self._cancel_update = None
if args[0][0][KEY_SUBTYPE] == SUBTYPE_RAIN_DELAY_ON:
endtime = parse_datetime(args[0][0][KEY_RAIN_DELAY_END])
_LOGGER.debug("Rain delay expires at %s", endtime)
self._state = True
assert endtime is not None
self._cancel_update = async_track_point_in_utc_time(
self.hass, self._delay_expiration, endtime
)
elif args[0][0][KEY_SUBTYPE] == SUBTYPE_RAIN_DELAY_OFF:
self._state = False
self.async_write_ha_state()
@callback
def _delay_expiration(self, *args) -> None:
"""Trigger when a rain delay expires."""
self._state = False
self._cancel_update = None
self.async_write_ha_state()
def turn_on(self, **kwargs) -> None:
"""Activate a 24 hour rain delay on the controller."""
self._controller.rachio.device.rain_delay(self._controller.controller_id, 86400)
_LOGGER.debug("Starting rain delay for 24 hours")
def turn_off(self, **kwargs) -> None:
"""Resume controller functionality."""
self._controller.rachio.device.rain_delay(self._controller.controller_id, 0)
_LOGGER.debug("Canceling rain delay")
async def async_added_to_hass(self):
"""Subscribe to updates."""
if KEY_RAIN_DELAY in self._controller.init_data:
self._state = self._controller.init_data[
KEY_RAIN_DELAY
] / 1000 > as_timestamp(now())
# If the controller was in a rain delay state during a reboot, this re-sets the timer
if self._state is True:
delay_end = utc_from_timestamp(
self._controller.init_data[KEY_RAIN_DELAY] / 1000
)
_LOGGER.debug("Re-setting rain delay timer for %s", delay_end)
self._cancel_update = async_track_point_in_utc_time(
self.hass, self._delay_expiration, delay_end
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_RACHIO_RAIN_DELAY_UPDATE,
self._async_handle_any_update,
)
)
class RachioZone(RachioSwitch):
"""Representation of one zone of sprinklers connected to the Rachio Iro."""
def __init__(self, person, controller, data, current_schedule):
"""Initialize a new Rachio Zone."""
self.id = data[KEY_ID]
self._zone_name = data[KEY_NAME]
self._zone_number = data[KEY_ZONE_NUMBER]
self._zone_enabled = data[KEY_ENABLED]
self._entity_picture = data.get(KEY_IMAGE_URL)
self._person = person
self._shade_type = data.get(KEY_CUSTOM_SHADE, {}).get(KEY_NAME)
self._zone_type = data.get(KEY_CUSTOM_CROP, {}).get(KEY_NAME)
self._slope_type = data.get(KEY_CUSTOM_SLOPE, {}).get(KEY_NAME)
self._summary = ""
self._current_schedule = current_schedule
super().__init__(controller)
def __str__(self):
"""Display the zone as a string."""
return f'Rachio Zone "{self.name}" on {str(self._controller)}'
@property
def zone_id(self) -> str:
"""How the Rachio API refers to the zone."""
return self.id
@property
def name(self) -> str:
"""Return the friendly name of the zone."""
return self._zone_name
@property
def unique_id(self) -> str:
"""Return a unique id by combining controller id and zone number."""
return f"{self._controller.controller_id}-zone-{self.zone_id}"
@property
def icon(self) -> str:
"""Return the icon to display."""
return "mdi:water"
@property
def zone_is_enabled(self) -> bool:
"""Return whether the zone is allowed to run."""
return self._zone_enabled
@property
def entity_picture(self):
"""Return the entity picture to use in the frontend, if any."""
return self._entity_picture
@property
def extra_state_attributes(self) -> dict:
"""Return the optional state attributes."""
props = {ATTR_ZONE_NUMBER: self._zone_number, ATTR_ZONE_SUMMARY: self._summary}
if self._shade_type:
props[ATTR_ZONE_SHADE] = self._shade_type
if self._zone_type:
props[ATTR_ZONE_TYPE] = self._zone_type
if self._slope_type:
if self._slope_type == SLOPE_FLAT:
props[ATTR_ZONE_SLOPE] = "Flat"
elif self._slope_type == SLOPE_SLIGHT:
props[ATTR_ZONE_SLOPE] = "Slight"
elif self._slope_type == SLOPE_MODERATE:
props[ATTR_ZONE_SLOPE] = "Moderate"
elif self._slope_type == SLOPE_STEEP:
props[ATTR_ZONE_SLOPE] = "Steep"
return props
def turn_on(self, **kwargs) -> None:
"""Start watering this zone."""
# Stop other zones first
self.turn_off()
# Start this zone
manual_run_time = timedelta(
minutes=self._person.config_entry.options.get(
CONF_MANUAL_RUN_MINS, DEFAULT_MANUAL_RUN_MINS
)
)
# The API limit is 3 hours, and requires an int be passed
self._controller.rachio.zone.start(self.zone_id, manual_run_time.seconds)
_LOGGER.debug(
"Watering %s on %s for %s",
self.name,
self._controller.name,
str(manual_run_time),
)
def turn_off(self, **kwargs) -> None:
"""Stop watering all zones."""
self._controller.stop_watering()
def set_moisture_percent(self, percent) -> None:
"""Set the zone moisture percent."""
_LOGGER.debug("Setting %s moisture to %s percent", self._zone_name, percent)
self._controller.rachio.zone.set_moisture_percent(self.id, percent / 100)
@callback
def _async_handle_update(self, *args, **kwargs) -> None:
"""Handle incoming webhook zone data."""
if args[0][KEY_ZONE_ID] != self.zone_id:
return
self._summary = args[0][KEY_SUMMARY]
if args[0][KEY_SUBTYPE] == SUBTYPE_ZONE_STARTED:
self._state = True
elif args[0][KEY_SUBTYPE] in [
SUBTYPE_ZONE_STOPPED,
SUBTYPE_ZONE_COMPLETED,
SUBTYPE_ZONE_PAUSED,
]:
self._state = False
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Subscribe to updates."""
self._state = self.zone_id == self._current_schedule.get(KEY_ZONE_ID)
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_RACHIO_ZONE_UPDATE, self._async_handle_update
)
)
class RachioSchedule(RachioSwitch):
"""Representation of one fixed schedule on the Rachio Iro."""
def __init__(self, person, controller, data, current_schedule):
"""Initialize a new Rachio Schedule."""
self._schedule_id = data[KEY_ID]
self._schedule_name = data[KEY_NAME]
self._duration = data[KEY_DURATION]
self._schedule_enabled = data[KEY_ENABLED]
self._summary = data[KEY_SUMMARY]
self.type = data.get(KEY_TYPE, SCHEDULE_TYPE_FIXED)
self._current_schedule = current_schedule
super().__init__(controller)
@property
def name(self) -> str:
"""Return the friendly name of the schedule."""
return f"{self._schedule_name} Schedule"
@property
def unique_id(self) -> str:
"""Return a unique id by combining controller id and schedule."""
return f"{self._controller.controller_id}-schedule-{self._schedule_id}"
@property
def icon(self) -> str:
"""Return the icon to display."""
return "mdi:water" if self.schedule_is_enabled else "mdi:water-off"
@property
def extra_state_attributes(self) -> dict:
"""Return the optional state attributes."""
return {
ATTR_SCHEDULE_SUMMARY: self._summary,
ATTR_SCHEDULE_ENABLED: self.schedule_is_enabled,
ATTR_SCHEDULE_DURATION: f"{round(self._duration / 60)} minutes",
ATTR_SCHEDULE_TYPE: self.type,
}
@property
def schedule_is_enabled(self) -> bool:
"""Return whether the schedule is allowed to run."""
return self._schedule_enabled
def turn_on(self, **kwargs) -> None:
"""Start this schedule."""
self._controller.rachio.schedulerule.start(self._schedule_id)
_LOGGER.debug(
"Schedule %s started on %s",
self.name,
self._controller.name,
)
def turn_off(self, **kwargs) -> None:
"""Stop watering all zones."""
self._controller.stop_watering()
@callback
def _async_handle_update(self, *args, **kwargs) -> None:
"""Handle incoming webhook schedule data."""
# Schedule ID not passed when running individual zones, so we catch that error
with suppress(KeyError):
if args[0][KEY_SCHEDULE_ID] == self._schedule_id:
if args[0][KEY_SUBTYPE] in [SUBTYPE_SCHEDULE_STARTED]:
self._state = True
elif args[0][KEY_SUBTYPE] in [
SUBTYPE_SCHEDULE_STOPPED,
SUBTYPE_SCHEDULE_COMPLETED,
]:
self._state = False
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Subscribe to updates."""
self._state = self._schedule_id == self._current_schedule.get(KEY_SCHEDULE_ID)
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_RACHIO_SCHEDULE_UPDATE, self._async_handle_update
)
)
|
rohitranjan1991/home-assistant
|
homeassistant/components/rachio/switch.py
|
Python
|
mit
| 19,041 | 0.00084 |
#!/usr/bin/python
import time
import RPi.GPIO as GPIO
# remember to change the GPIO values below to match your sensors
# GPIO output = the pin that's connected to "Trig" on the sensor
# GPIO input = the pin that's connected to "Echo" on the sensor
def reading(sensor):
# Disable any warning message such as GPIO pins in use
GPIO.setwarnings(False)
# use the values of the GPIO pins, and not the actual pin number
# so if you connect to GPIO 25 which is on pin number 22, the
# reference in this code is 25, which is the number of the GPIO
# port and not the number of the physical pin
GPIO.setmode(GPIO.BCM)
if sensor == 0:
# point the software to the GPIO pins the sensor is using
# change these values to the pins you are using
# GPIO output = the pin that's connected to "Trig" on the sensor
# GPIO input = the pin that's connected to "Echo" on the sensor
GPIO.setup(22,GPIO.OUT)
GPIO.setup(27,GPIO.IN)
GPIO.output(22, GPIO.LOW)
# found that the sensor can crash if there isn't a delay here
# no idea why. If you have odd crashing issues, increase delay
time.sleep(0.3)
        # sensor manual says a pulse length of 10us will trigger the
        # sensor to transmit 8 cycles of ultrasonic burst at 40kHz and
        # wait for the reflected ultrasonic burst to be received
        # to get a pulse length of 10us we need to start the pulse, then
        # wait for 10 microseconds, then stop the pulse. This will
        # result in the pulse length being 10us.
# start the pulse on the GPIO pin
# change this value to the pin you are using
# GPIO output = the pin that's connected to "Trig" on the sensor
GPIO.output(22, True)
        # wait 10 microseconds (this is 0.00001 seconds) so the pulse
        # length is 10us as the sensor expects
time.sleep(0.00001)
# stop the pulse after the time above has passed
# change this value to the pin you are using
# GPIO output = the pin that's connected to "Trig" on the sensor
GPIO.output(22, False)
# listen to the input pin. 0 means nothing is happening. Once a
# signal is received the value will be 1 so the while loop
# stops and has the last recorded time the signal was 0
# change this value to the pin you are using
# GPIO input = the pin that's connected to "Echo" on the sensor
while GPIO.input(27) == 0:
signaloff = time.time()
# listen to the input pin. Once a signal is received, record the
# time the signal came through
# change this value to the pin you are using
# GPIO input = the pin that's connected to "Echo" on the sensor
while GPIO.input(27) == 1:
signalon = time.time()
# work out the difference in the two recorded times above to
# calculate the distance of an object in front of the sensor
timepassed = signalon - signaloff
        # we now have our distance but it's not in a useful unit of
        # measurement, so we convert it into centimetres: sound travels at
        # roughly 34300 cm/s and the pulse covers the distance twice
        # (out and back), so distance = time * 34300 / 2, i.e. ~time * 17000
        distance = timepassed * 17000
        # we're no longer using the GPIO, so tell software we're done
        GPIO.cleanup()
        # return the distance of an object in front of the sensor in cm
        return distance
else:
        print "Incorrect usonic() function variable."
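# A minimal usage sketch (added illustration, not part of the original
# module): poll sensor 0 once per second and print the distance. Assumes
# the wiring described above (Trig on GPIO 22, Echo on GPIO 27).
if __name__ == '__main__':
    while True:
        print "distance: %.1f cm" % reading(0)
        time.sleep(1)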
|
LinuCC/sturo
|
src/lib/usonic.py
|
Python
|
gpl-2.0
| 3,134 | 0.021378 |
import configparser
CONFIG_PATH = 'accounting.conf'
class MyConfigParser():
def __init__(self, config_path=CONFIG_PATH):
self.config = configparser.ConfigParser(allow_no_value=True)
self.config.read(config_path)
def config_section_map(self, section):
""" returns all configuration options in 'section' in a dict with
key: config_option and value: the read value in the file"""
dict1 = {}
options = self.config.options(section)
for option in options:
            try:
                dict1[option] = self.config.get(section, option)
                if dict1[option] == -1:
                    print("skip: %s" % option)
            except configparser.Error:
                dict1[option] = None
return dict1
# getint(section, option)
# getboolean(section, option)
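# A minimal usage sketch (added illustration; the '[database]' section name
# is an assumption about accounting.conf, not something the original defines):
#
#     parser = MyConfigParser()
#     for option, value in parser.config_section_map('database').items():
#         print('%s = %s' % (option, value))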
|
Stiliyan92/accounting-system
|
common/config_parser.py
|
Python
|
gpl-2.0
| 828 | 0.001208 |
import os
import pandas as pd
from bs4 import BeautifulSoup
count = pd.DataFrame(columns = ['filename', 'count'])
for folder, subs, files in os.walk('data/xml'):
for filename in files:
        try:
            if ('.xml' in filename) and (filename[0] != '.'):
                # join the <token> elements of the xml into one plain-text file
                with open(os.path.join(folder, filename)) as f:
                    soup = BeautifulSoup(f.read())
                tokens = soup.findAll('token')
                tokens_arr = [token.text for token in tokens]
                text = ' '.join(tokens_arr)
                with open('data/text/' + filename, 'w') as f:
                    f.write(text)
        except Exception as e:
            print(e)
            continue
|
fahadsultan/CausalRelations
|
to_txt.py
|
Python
|
apache-2.0
| 551 | 0.039927 |
#!/usr/bin/env python
"""
Jedi EPC server.
Copyright (C) 2012 Takafumi Arakaki
Author: Takafumi Arakaki <aka.tkf at gmail.com>
This file is NOT part of GNU Emacs.
Jedi EPC server is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
Jedi EPC server is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Jedi EPC server.
If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import glob
import itertools
import logging
import logging.handlers
import os
import re
import site
import sys
from collections import namedtuple
import jedi
import jedi.api
import epc
import epc.server
import sexpdata
logger = logging.getLogger('jediepcserver')
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
description=__doc__)
parser.add_argument(
'--address', default='localhost')
parser.add_argument(
'--port', default=0, type=int)
parser.add_argument(
'--port-file', '-f', default='-', type=argparse.FileType('wt'),
help='file to write port on. default is stdout.')
parser.add_argument(
'--sys-path', '-p', default=[], action='append',
help='paths to be inserted at the top of `sys.path`.')
parser.add_argument(
'--sys-path-append', default=[], action='append',
help='paths to be appended at the end of `sys.path`.')
parser.add_argument(
'--virtual-env', '-v', default=[], action='append',
help='paths to be used as if VIRTUAL_ENV is set to it.')
parser.add_argument(
'--log', help='Save server log to this file.')
parser.add_argument(
'--log-level',
choices=['CRITICAL', 'ERROR', 'WARN', 'INFO', 'DEBUG'],
help='Logging level for log file.')
parser.add_argument(
'--log-rotate-max-size', default=0, type=int,
help='Rotate log file after it reaches this size',
)
parser.add_argument(
'--log-rotate-max-count', default=3, type=int,
help='Max number of log rotations before removal',
)
parser.add_argument(
'--log-traceback', action='store_true', default=False,
help='Include traceback in logging output.')
parser.add_argument(
'--pdb', dest='debugger', const='pdb', action='store_const',
help='start pdb when error occurs.')
parser.add_argument(
'--ipdb', dest='debugger', const='ipdb', action='store_const',
help='start ipdb when error occurs.')
PY3 = (sys.version_info[0] >= 3)
NEED_ENCODE = not PY3
LogSettings = namedtuple(
'LogSettings',
[
'log_file',
'log_level',
'log_rotate_max_size',
'log_rotate_max_count',
],
)
try:
jedi.create_environment
except AttributeError:
jedi_create_environment = None
else:
_cached_jedi_environments = {}
def jedi_create_environment(venv, safe=False):
"""Cache jedi environments to avoid startup cost."""
try:
return _cached_jedi_environments[venv]
except KeyError:
logger.info('Creating jedi environment: %s', venv)
if venv is None:
jedienv = jedi.api.environment.get_default_environment()
else:
jedienv = jedi.create_environment(venv, safe=safe)
_cached_jedi_environments[venv] = jedienv
return jedienv
def get_venv_sys_path(venv):
if jedi_create_environment is not None:
return jedi_create_environment(venv).get_sys_path()
from jedi.evaluate.sys_path import get_venv_path
return get_venv_path(venv)
class JediEPCHandler(object):
def __init__(self, sys_path=(), virtual_envs=(), sys_path_append=()):
self.script_kwargs = self._get_script_path_kwargs(
sys_path=sys_path,
virtual_envs=virtual_envs,
sys_path_append=sys_path_append,
)
def get_sys_path(self):
environment = self.script_kwargs.get('environment')
if environment is not None:
return environment.get_sys_path()
sys_path = self.script_kwargs.get('sys_path')
if sys_path is not None:
return sys_path
return sys.path
@classmethod
def _get_script_path_kwargs(cls, sys_path, virtual_envs, sys_path_append):
result = {}
if jedi_create_environment:
            # Need to specify some environment explicitly to work around
            # https://github.com/davidhalter/jedi/issues/1242. Otherwise jedi
            # will create a lot of child processes.
if virtual_envs:
primary_env, virtual_envs = virtual_envs[0], virtual_envs[1:]
primary_env = path_expand_vars_and_user(primary_env)
else:
primary_env = None
try:
result['environment'] = jedi_create_environment(primary_env)
except Exception:
logger.warning(
'Cannot create environment for %r', primary_env, exc_info=1
)
if primary_env is not None:
result['environment'] = jedi_create_environment(None)
if not sys_path and not virtual_envs and not sys_path_append:
# No additional path customizations.
return result
# Either multiple environments or custom sys_path extensions are
# specified, or jedi version doesn't support environments.
final_sys_path = []
final_sys_path.extend(path_expand_vars_and_user(p) for p in sys_path)
for p in virtual_envs:
final_sys_path.extend(get_venv_sys_path(path_expand_vars_and_user(p)))
final_sys_path.extend(
path_expand_vars_and_user(p) for p in sys_path_append
)
dupes = set()
def not_seen_yet(val):
if val in dupes:
return False
dupes.add(val)
return True
result['sys_path'] = [p for p in final_sys_path if not_seen_yet(p)]
return result
def jedi_script(self, source, line, column, source_path):
if NEED_ENCODE:
source = source.encode('utf-8')
source_path = source_path and source_path.encode('utf-8')
return jedi.Script(
source, line, column, source_path or '', **self.script_kwargs
)
def complete(self, *args):
def _wrap_completion_result(comp):
try:
docstr = comp.docstring()
except Exception:
logger.warning(
"Cannot get docstring for completion %s", comp, exc_info=1
)
docstr = ""
return dict(
word=comp.name,
doc=docstr,
description=candidates_description(comp),
symbol=candidate_symbol(comp),
)
return [
_wrap_completion_result(comp)
for comp in self.jedi_script(*args).completions()
]
def get_in_function_call(self, *args):
sig = self.jedi_script(*args).call_signatures()
call_def = sig[0] if sig else None
if not call_def:
return []
return dict(
            # p.description should do the job. But jedi-vim uses replace,
            # so follow what jedi-vim does...
params=[PARAM_PREFIX_RE.sub('', p.description).replace('\n', '')
for p in call_def.params],
index=call_def.index,
call_name=call_def.name,
)
def _goto(self, method, *args):
"""
Helper function for `goto_assignments` and `usages`.
:arg method: `jedi.Script.goto_assignments` or `jedi.Script.usages`
:arg args: Arguments to `jedi_script`
"""
# `definitions` is a list. Each element is an instances of
# `jedi.api_classes.BaseOutput` subclass, i.e.,
# `jedi.api_classes.RelatedName` or `jedi.api_classes.Definition`.
definitions = method(self.jedi_script(*args))
return [dict(
column=d.column,
line_nr=d.line,
module_path=d.module_path if d.module_path != '__builtin__' else [],
module_name=d.module_name,
description=d.description,
) for d in definitions]
def goto(self, *args):
return self._goto(jedi.Script.goto_assignments, *args)
def related_names(self, *args):
return self._goto(jedi.Script.usages, *args)
def get_definition(self, *args):
definitions = self.jedi_script(*args).goto_definitions()
return [definition_to_dict(d) for d in definitions]
def defined_names(self, *args):
        # XXX: there's a bug in Jedi that returns definitions from inside
# classes or functions even though all_scopes=False is set by
# default. Hence some additional filtering is in order.
#
# See https://github.com/davidhalter/jedi/issues/1202
top_level_names = [
defn
for defn in jedi.api.names(*args)
if defn.parent().type == 'module'
]
return list(map(get_names_recursively, top_level_names))
def get_jedi_version(self):
return [dict(
name=module.__name__,
file=getattr(module, '__file__', []),
version=get_module_version(module) or [],
) for module in [sys, jedi, epc, sexpdata]]
def candidate_symbol(comp):
"""
Return a character representing completion type.
:type comp: jedi.api.Completion
:arg comp: A completion object returned by `jedi.Script.completions`.
"""
try:
return comp.type[0].lower()
except (AttributeError, TypeError):
return '?'
def candidates_description(comp):
"""
Return `comp.description` in an appropriate format.
    * Avoid returning the string 'None'.
* Strip off all newlines. This is required for using
`comp.description` as candidate summary.
"""
desc = comp.description
return _WHITESPACES_RE.sub(' ', desc) if desc and desc != 'None' else ''
_WHITESPACES_RE = re.compile(r'\s+')
PARAM_PREFIX_RE = re.compile(r'^param\s+')
"""RE to strip unwanted "param " prefix returned by param.description."""
def definition_to_dict(d):
return dict(
doc=d.docstring(),
description=d.description,
desc_with_module=d.desc_with_module,
line_nr=d.line,
column=d.column,
module_path=d.module_path,
name=getattr(d, 'name', []),
full_name=getattr(d, 'full_name', []),
type=getattr(d, 'type', []),
)
def get_names_recursively(definition, parent=None):
"""
Fetch interesting defined names in sub-scopes under `definition`.
    :type definition: jedi.api_classes.Definition
"""
d = definition_to_dict(definition)
try:
d['local_name'] = parent['local_name'] + '.' + d['name']
except (AttributeError, TypeError):
d['local_name'] = d['name']
if definition.type == 'class':
ds = definition.defined_names()
return [d] + [get_names_recursively(c, d) for c in ds]
else:
return [d]
def get_module_version(module):
notfound = object()
for key in ['__version__', 'version']:
version = getattr(module, key, notfound)
if version is not notfound:
return version
try:
from pkg_resources import get_distribution, DistributionNotFound
try:
return get_distribution(module.__name__).version
except DistributionNotFound:
pass
except ImportError:
pass
def path_expand_vars_and_user(p):
return os.path.expandvars(os.path.expanduser(p))
def configure_logging(log_settings):
"""
:type log_settings: LogSettings
"""
if not log_settings.log_file:
return
fmter = logging.Formatter('%(asctime)s:' + logging.BASIC_FORMAT)
if log_settings.log_rotate_max_size > 0:
handler = logging.handlers.RotatingFileHandler(
filename=log_settings.log_file,
mode='w',
maxBytes=log_settings.log_rotate_max_size,
backupCount=log_settings.log_rotate_max_count,
)
else:
handler = logging.FileHandler(filename=log_settings.log_file, mode='w')
handler.setFormatter(fmter)
if log_settings.log_level:
logging.root.setLevel(log_settings.log_level.upper())
logging.root.addHandler(handler)
def jedi_epc_server(
address='localhost',
port=0,
port_file=sys.stdout,
sys_path=[],
virtual_env=[],
sys_path_append=[],
debugger=None,
log_traceback=None,
):
"""Start EPC server.
:type log_settings: LogSettings
"""
logger.debug(
'jedi_epc_server: sys_path=%r virtual_env=%r sys_path_append=%r',
sys_path, virtual_env, sys_path_append,
)
if not virtual_env and os.getenv('VIRTUAL_ENV'):
logger.debug(
'Taking virtual env from VIRTUAL_ENV: %r',
os.environ['VIRTUAL_ENV'],
)
virtual_env = [os.environ['VIRTUAL_ENV']]
handler = JediEPCHandler(
sys_path=sys_path,
virtual_envs=virtual_env,
sys_path_append=sys_path_append,
)
logger.debug(
'Starting Jedi EPC server with the following sys.path: %r',
handler.get_sys_path(),
)
server = epc.server.EPCServer((address, port))
server.register_function(handler.complete)
server.register_function(handler.get_in_function_call)
server.register_function(handler.goto)
server.register_function(handler.related_names)
server.register_function(handler.get_definition)
server.register_function(handler.defined_names)
server.register_function(handler.get_jedi_version)
@server.register_function
def toggle_log_traceback():
server.log_traceback = not server.log_traceback
return server.log_traceback
port_file.write(str(server.server_address[1])) # needed for Emacs client
port_file.write("\n")
port_file.flush()
if port_file is not sys.stdout:
port_file.close()
    # This is not a supported Python-EPC API, but I am using this for
# backward compatibility for Python-EPC < 0.0.4. In the future,
# it should be passed to the constructor.
server.log_traceback = bool(log_traceback)
if debugger:
server.set_debugger(debugger)
handler = logging.StreamHandler()
fmter = logging.Formatter('%(asctime)s:' + logging.BASIC_FORMAT)
handler.setFormatter(fmter)
handler.setLevel(logging.DEBUG)
server.logger.addHandler(handler)
server.logger.setLevel(logging.DEBUG)
return server
# def add_virtualenv_path(venv):
# """Add virtualenv's site-packages to `sys.path`."""
# venv = os.path.abspath(venv)
# paths = glob.glob(os.path.join(
# venv, 'lib', 'python*', 'site-packages'))
# if not paths:
# raise ValueError('Invalid venv: no site-packages found: %s' % venv)
# for path in paths:
# site.addsitedir(path)
def main(args=None):
ns = parser.parse_args(args)
ns_vars = vars(ns).copy()
log_settings = LogSettings(
log_file=ns_vars.pop('log'),
log_level=ns_vars.pop('log_level'),
log_rotate_max_size=ns_vars.pop('log_rotate_max_size'),
log_rotate_max_count=ns_vars.pop('log_rotate_max_count'),
)
configure_logging(log_settings)
server = jedi_epc_server(**ns_vars)
server.serve_forever()
server.logger.info('exit')
if __name__ == '__main__':
main()
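# Example invocation (illustrative; uses only flags defined by the parser
# above): start the server on a fixed port and write that port to a file
# an Emacs client can read.
#
#     python jediepcserver.py --port 5678 --port-file /tmp/jedi-port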
|
kyon-bll/.dotfiles
|
.emacs.d/elpa/jedi-core-20191011.1750/jediepcserver.py
|
Python
|
mit
| 15,847 | 0.000252 |
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import urllib.parse
import requests
from testutils import prefix, api_v0
HOUR = 60 * 60
DAY = HOUR * 24
@prefix('test_v0_schedules')
def test_api_v0_schedules(team, roster, role):
tuesday9am = 2 * DAY + 9 * HOUR
tuesday9pm = tuesday9am + 12 * HOUR
wednesday9am = tuesday9pm + 12 * HOUR
wednesday9pm = wednesday9am + 12 * HOUR
team_name = team.create()
team_name_2 = team.create()
roster_name = roster.create(team_name)
roster_name_2 = roster.create(team_name_2)
role_name = role.create()
role_name_2 = role.create()
# test create schedule
events = [{'start': tuesday9am, 'duration': 12 * HOUR},
{'start': tuesday9pm, 'duration': 12 * HOUR},
{'start': wednesday9am, 'duration': 12 * HOUR},
{'start': wednesday9pm, 'duration': 12 * HOUR}]
re = requests.post(api_v0('teams/%s/rosters/%s/schedules' % (team_name, roster_name)),
json={
'role': role_name,
'events': events,
'advanced_mode': 1
})
assert re.status_code == 201
schedule_id = str(re.json()['id'])
# verify schedule created properly
re = requests.get(api_v0('teams/%s/rosters/%s/schedules' % (team_name, roster_name)))
assert re.status_code == 200
data = re.json()
assert len(data) == 1
schedule = data[0]
assert schedule['role'] == role_name
# check consecutive events have been merged
assert len(schedule['events']) == 1
assert schedule['events'][0]['start'] == tuesday9am
assert schedule['events'][0]['duration'] == 48 * HOUR
assert schedule['advanced_mode'] == 1
# test 'schedule' endpoint
re = requests.get(api_v0('schedules/%s' % (schedule_id)))
assert re.status_code == 200
assert re.json() == data[0]
updated_events = [{'start': 0, 'duration': 100}, {'start': 150, 'duration': 200}]
# verify schedule updates properly
re = requests.put(api_v0('schedules/' + schedule_id),
json={'role': role_name_2,
'team': team_name_2,
'roster': roster_name_2,
'auto_populate_threshold': 28,
'events': updated_events,
'advanced_mode': 1})
assert re.status_code == 200
re = requests.get(api_v0('teams/%s/rosters/%s/schedules' % (team_name_2, roster_name_2)))
assert re.status_code == 200
data = re.json()
assert len(data) == 1
schedule = data[0]
assert schedule['roster'] == roster_name_2
assert schedule['role'] == role_name_2
assert schedule['auto_populate_threshold'] == 28
assert schedule['events'] == updated_events
assert schedule['advanced_mode'] == 1
re = requests.put(api_v0('schedules/' + schedule_id), json={'team': team_name, 'roster': roster_name})
assert re.status_code == 200
# test delete schedule
re = requests.delete(api_v0('schedules/' + schedule_id))
assert re.status_code == 200
# verify schedule was deleted
re = requests.get(api_v0('teams/%s/rosters/%s/schedules' % (team_name_2, roster_name_2)))
assert re.status_code == 200
data = re.json()
assert data == []
@prefix('test_v0_advanced_schedule')
def test_api_v0_advanced_schedule(team, roster, role, schedule):
team_name = team.create()
roster_name = roster.create(team_name)
role_name = role.create()
schedule_id = schedule.create(team_name,
roster_name,
{'role': role_name,
'events': [{'start': 0, 'duration': 100},
{'start': 200, 'duration': 300}],
'advanced_mode': 1})
# check invalid schedule updates
re = requests.put(api_v0('schedules/%d' % schedule_id), json={'events': [{'start': 0, 'duration': 100},
{'start': 150, 'duration': 300}],
'advanced_mode': 0})
assert re.status_code == 400
re = requests.put(api_v0('schedules/%d' % schedule_id), json={'advanced_mode': 0})
assert re.status_code == 400
@prefix('test_v0_invalid_schedule_event')
def test_api_v0_invalid_schedule_event(team, roster, role, schedule):
team_name = team.create()
roster_name = roster.create(team_name)
role_name = role.create()
api_url = api_v0('teams/%s/rosters/%s/schedules' % (team_name, roster_name))
re = requests.post(api_url, json={
'role': role_name,
'events': [{'duration': 100},
{'start': 150, 'duration': 300}],
'advanced_mode': 1
})
assert re.status_code == 400
re = requests.post(api_url, json={
'role': role_name,
'events': [{'start': 150}],
'advanced_mode': 1
})
assert re.status_code == 400
re = requests.post(api_url, json={
'role': role_name,
'events': [{'start': 150, 'duration': 300}],
'advanced_mode': 0
})
assert re.status_code == 400
re = requests.post(api_url, json={
'role': role_name,
'events': 7 * [{'start': 150, 'duration': 300}],
'advanced_mode': 0
})
assert re.status_code == 400
@prefix('test_v0_schedules_spaces')
def test_api_v0_schedules_with_spaces_in_roster_name(team):
team_name = 'test_v0 spaces team foo'
roster_name = 'test_v0 spaces roster foo'
re = requests.post(api_v0('teams'), json={'name': team_name, 'scheduling_timezone': 'UTC'})
assert re.status_code == 201
team.mark_for_cleaning(team_name)
re = requests.post(api_v0('teams/%s/rosters' % team_name),
json={'name': roster_name})
assert re.status_code == 201
re = requests.get(api_v0('teams/%s/rosters/%s/schedules' %
(team_name, urllib.parse.quote(roster_name, safe=''))))
assert re.status_code == 200
|
diegocepedaw/oncall
|
e2e/test_schedules.py
|
Python
|
bsd-2-clause
| 6,315 | 0.002375 |
# Copyright (c) 2010 ActiveState Software Inc. All rights reserved.
"""
pypm.common.util
~~~~~~~~~~~~~~~~
Assorted utility code
"""
import os
from os import path as P
import sys
import re
from contextlib import contextmanager
import logging
import time
import textwrap
from datetime import datetime
from pkg_resources import Requirement
from pkg_resources import resource_filename
import six
import pypm
from zclockfile import LockFile
LOG = logging.getLogger(__name__)
# Language/library utilities
#####################################################################
def wrapped(txt, prefix='', **options):
"""Return wrapped text suitable for printing to terminal"""
MAX_WIDTH=70 # textwrap.wrap's default
return '\n'.join([
'{0}{1}'.format(prefix, line)
for line in textwrap.wrap(txt, width=MAX_WIDTH-len(prefix), **options)])
def lazyproperty(func):
"""A property decorator for lazy evaluation"""
cache = {}
def _get(self):
"""Return the property value from cache once it is calculated"""
try:
return cache[self]
except KeyError:
cache[self] = value = func(self)
return value
return property(_get)
def memoize(fn):
"""Memoize functions that take simple arguments
    The arguments of the memoized function must be hashable.
    Keyword arguments are not supported.
"""
memo = {}
def wrapper(*args):
key = tuple(args)
if key not in memo:
memo[key] = fn(*args)
return memo[key]
return wrapper
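# A short doctest-style illustration of `memoize` (added example, not from
# the original source):
#
#     >>> @memoize
#     ... def double(x):
#     ...     return x * 2
#     >>> double(21)
#     42
#     >>> double(21)  # second call with the same args hits the cache
#     42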
class ConfigParserNamedLists(object):
"""Parse a named mapping from the configuration file.
Example input (config file):
[packages]
free = http://pypm-free.as.com
be = http://pypm-be.as.com
staging = http://pypm-staging.as.com
default = be free
QA = staging default
What this class produces (self.mapping):
{
'free': [factory('free', 'http://pypm-free.as.com')],
'be': [factory('be', 'http://pypm-be.as.com')],
'staging': [factory('staging', 'http://pypm-staging.as.com')],
'default': [factory('be', 'http://pypm-be.as.com'),
factory('free', 'http://pypm-free.as.com')],
'QA': [factory('staging', 'http://pypm-staging.as.com'),
factory('be', 'http://pypm-be.as.com'),
factory('free', 'http://pypm-free.as.com')],
}
"""
    VALUE_SEP = re.compile(r'[\s,]+')
def __init__(self, option_items, factory, is_sentinel):
"""
- option_items: ConfigParser.items('yoursection')
- factory: a function that produces the value object
        - is_sentinel: a function that returns True for sentinels
"""
self.option_items = option_items
self.factory = factory
self.is_sentinel = is_sentinel
self.mapping = {}
self._init()
def _init(self):
for name, value in self.option_items:
if name in self.mapping:
raise ValueError('duplicate option key found: {0}'.format(name))
else:
self.mapping[name] = value
# substitute references
_processed = set()
for name in self.mapping:
self._expand_rvalue(name, _processed)
def _expand_rvalue(self, name, processed):
if name in processed:
return
value = self.mapping[name]
if isinstance(value, list):
processed.add(name)
return
if name not in self.mapping:
raise ValueError('unknown option reference: {0}'.format(name))
if self.is_sentinel(value):
self.mapping[name] = [self.factory(name, value)]
else:
self.mapping[name] = []
for part in self.VALUE_SEP.split(value):
self._expand_rvalue(part, processed)
self.mapping[name].extend(self.mapping[part])
# System routines
######################################################################
@contextmanager
def locked(lockfile):
"""'with' context to lock a file"""
lock = LockFile(lockfile)
try:
yield
finally:
lock.close()
@contextmanager
def dlocked(directory):
"""Lock based on a directory
    You need this function if you do not want more than one process to be
    operating on a directory at the same time
"""
if not P.exists(directory):
os.makedirs(directory)
lockfile = P.join(directory, '.lock')
with locked(lockfile):
yield
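# A short usage sketch for the locking helpers (added illustration; the
# directory name is arbitrary): only one process at a time may run the body
# for the same directory.
#
#     with dlocked('/tmp/pypm-cache'):
#         pass  # exclusive work on /tmp/pypm-cache happens here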
def get_user_agent(default):
"""Return an user agent string representing PyPM
Retain the default user-agent for backward-compat
"""
return '{0} (PyPM {1.__version__})'.format(default, pypm)
# Path routines
# ########################################################################
def existing(path):
"""Return path, but assert its presence first"""
assert isinstance(path, (six.string_types, six.text_type)), \
'not of string type: %s <%s>' % (path, type(path))
assert P.exists(path), 'file/directory not found: %s' % path
return path
def concise_path(pth):
"""Return a concise, but human-understandable, version of ``pth``
    Compresses the user home directory (to ``~``) and %APPDATA%
"""
aliases = [
('%APPDATA%', os.getenv('APPDATA', None)),
('~', P.expanduser('~')),
]
for alias, pthval in aliases:
if pthval and pth.startswith(pthval):
return P.join(alias, P.relpath(pth, pthval))
return pth
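# Illustrative behaviour of concise_path (added example): on a machine whose
# home directory is /home/bob,
#
#     >>> concise_path('/home/bob/projects/pypm')
#     '~/projects/pypm'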
def abs2rel(absolute_path):
    """Convert an absolute path to a relative path, assuming the topmost
    directory is the base dir.
    >>> abs2rel('/opt/ActivePython/')
    'opt/ActivePython/'
    >>> abs2rel('/opt/ActivePython')
    'opt/ActivePython'
    """
    assert os.path.isabs(absolute_path), \
        '`%s` is not an absolute path' % absolute_path
if sys.platform.startswith('win'):
assert absolute_path[1:3] == ':\\'
return absolute_path[3:] # remove the DRIVE
else:
assert absolute_path[0] == '/'
return absolute_path[1:] # remove the '/'
def url_join(url, components):
"""Join URL components .. always with a forward slash"""
assert type(components) is list
assert '\\' not in url, \
'URL is not supposed to contain backslashes. Is this windows path? '+url
return url + '/' + '/'.join(components)
def path_to_url(path):
"""Convert local path to remote url
"""
if sys.platform.startswith('win'):
assert '/' not in path, \
'windows path cannot contain forward slash: '+path
drive, path = os.path.splitdrive(path)
return url_join('file:///' + drive,
path.split('\\'))
else:
return 'file://' + P.abspath(path)
def pypm_file(*paths):
"""Return absolute path to a file residing inside the pypm package using
pkg_resources API"""
return resource_filename(Requirement.parse('pypm'), P.join(*paths))
class BareDateTime(six.text_type):
"""Wrapper around the DateTime object with our own standard string
representation
"""
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S"
FORMAT = DATE_FORMAT + ' ' + TIME_FORMAT
@classmethod
def to_string(cls, dt):
"""Convert the datetime object `dt` to a string
with format as defind by this class
"""
return dt.strftime(cls.FORMAT)
@classmethod
def to_datetime(cls, dt_string):
"""Convert dt_string, formatted by `to_string()` method above"""
ts = time.mktime(time.strptime(dt_string, cls.FORMAT))
return datetime.fromtimestamp(ts)
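# Illustrative round trip (added example): to_datetime inverts to_string for
# second-resolution timestamps.
#
#     >>> s = BareDateTime.to_string(datetime(2010, 1, 2, 3, 4, 5))
#     >>> s
#     '2010-01-02 03:04:05'
#     >>> BareDateTime.to_datetime(s)
#     datetime.datetime(2010, 1, 2, 3, 4, 5)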
|
igemsoftware/SYSU-Software2013
|
project/Python27_32/Lib/site-packages/pypm/common/util.py
|
Python
|
mit
| 7,802 | 0.003204 |
from typing import Any
# Copyright (c) 2016 Google Inc. (under http://www.apache.org/licenses/LICENSE-2.0)
# If not annotate_pep484, info in pyi files is augmented with heuristics to decide if un-annotated
# arguments are "Any" or "" (like "self")
class B(object):
def __init__(self):
pass
def f(self, x):
# type: (e1) -> None
pass
class C(object):
def __init__(self, x):
# type: (e2) -> None
pass
@staticmethod
def f2():
pass
@staticmethod
def f3(x, y):
# type: (Any, e3) -> None
pass
@classmethod
def f4(cls):
pass
|
google/merge_pyi
|
testdata/heuristics.comment.py
|
Python
|
apache-2.0
| 631 | 0.006339 |
# Copyright: (c) 2018, Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
"""
Context of the running Ansible.
In the future we *may* create Context objects to allow running multiple Ansible plays in parallel
with different contexts but that is currently out of scope as the Ansible library is just for
running the ansible command line tools.
These APIs are still in flux so do not use them unless you are willing to update them with every Ansible release
"""
from ansible.module_utils.common._collections_compat import Mapping, Set
from ansible.module_utils.common.collections import is_sequence
from ansible.utils.context_objects import CLIArgs, GlobalCLIArgs
__all__ = ('CLIARGS',)
# Note: this is not the singleton version. The Singleton is only created once the program has
# actually parsed the args
CLIARGS = CLIArgs({})
# This should be called immediately after cli_args are processed (parsed, validated, and any
# normalization performed on them). No other code should call it
def _init_global_context(cli_args):
"""Initialize the global context objects"""
global CLIARGS
CLIARGS = GlobalCLIArgs.from_options(cli_args)
def cliargs_deferred_get(key, default=None, shallowcopy=False):
"""Closure over getting a key from CLIARGS with shallow copy functionality
Primarily used in ``FieldAttribute`` where we need to defer setting the default
until after the CLI arguments have been parsed
This function is not directly bound to ``CliArgs`` so that it works with
``CLIARGS`` being replaced
"""
def inner():
value = CLIARGS.get(key, default=default)
if not shallowcopy:
return value
elif is_sequence(value):
return value[:]
elif isinstance(value, (Mapping, Set)):
return value.copy()
return value
return inner
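# A short usage sketch (added illustration; 'tags' and parsed_cli_args are
# hypothetical): build the getter early, call it only after the CLI args
# have been parsed.
#
#     get_tags = cliargs_deferred_get('tags', default=[], shallowcopy=True)
#     _init_global_context(parsed_cli_args)
#     tags = get_tags()  # reads CLIARGS['tags'], returning a shallow copy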
|
vmindru/ansible
|
lib/ansible/context.py
|
Python
|
gpl-3.0
| 2,049 | 0.003416 |
from django.test.testcases import SimpleTestCase
from corehq.apps.app_manager.const import APP_V2
from corehq.apps.app_manager.models import Application, Module, OpenCaseAction, ParentSelect, OpenSubCaseAction, \
AdvancedModule, LoadUpdateAction, AdvancedOpenCaseAction
from mock import patch
class CaseMetaTest(SimpleTestCase):
def setUp(self):
self.is_usercase_in_use_patch = patch('corehq.apps.app_manager.models.is_usercase_in_use')
self.is_usercase_in_use_mock = self.is_usercase_in_use_patch.start()
def tearDown(self):
self.is_usercase_in_use_patch.stop()
def _make_module(self, app, module_id, case_type):
m = app.add_module(Module.new_module('Module{}'.format(module_id), lang='en'))
m.case_type = case_type
mf = app.new_form(module_id, 'form {}'.format(case_type), lang='en')
mf.actions.open_case = OpenCaseAction(name_path="/data/question1", external_id=None)
mf.actions.open_case.condition.type = 'always'
return m
def test_hierarchy(self):
app, expected_hierarchy = self.get_test_app()
meta = app.get_case_metadata()
self.assertDictEqual(meta.type_hierarchy, expected_hierarchy)
def get_test_app(self):
app = Application.new_app('domain', 'New App', APP_V2)
app.version = 1
m0 = self._make_module(app, 0, 'parent')
m0.get_form(0).actions.subcases.append(OpenSubCaseAction(
case_type='child',
reference_id='parent'
))
m1 = self._make_module(app, 1, 'child')
m1.get_form(0).actions.subcases.append(OpenSubCaseAction(
case_type='grand child',
reference_id='parent'
))
m2 = self._make_module(app, 2, 'grand child')
m3 = app.add_module(AdvancedModule.new_module('Module3', lang='en'))
m3.case_type = 'other grand child'
m3f0 = m3.new_form('other form', 'en')
m3f0.actions.load_update_cases.append(LoadUpdateAction(
case_type='child',
case_tag='child'))
m3f0.actions.open_cases.append(AdvancedOpenCaseAction(
name_path='/data/question1',
case_type='other grand child',
parent_tag='child'
))
m3f0.actions.open_cases[0].open_condition.type = 'always'
m2.parent_select = ParentSelect(active=True, module_id=m1.unique_id)
m1.parent_select = ParentSelect(active=True, module_id=m0.unique_id)
expected_hierarchy = {
'parent': {
'child': {
'grand child': {},
'other grand child': {}
}
}
}
return app, expected_hierarchy
|
puttarajubr/commcare-hq
|
corehq/apps/app_manager/tests/test_case_meta.py
|
Python
|
bsd-3-clause
| 2,721 | 0.00147 |
import aifc
import sndhdr
import utils
class Aiff:
    """Thin wrapper around the stdlib aifc module: reads an AIFF file into
    a (frames, channels) sample array and can write it back out."""
def __init__(self, filename):
assert sndhdr.what(filename).filetype == 'aiff'
x = aifc.open(filename)
data = x.readframes(x.getnframes())
self.nchannels = x.getnchannels()
self.sampwidth = x.getsampwidth()
self.framerate = x.getframerate()
self.sig = utils.from_buffer(data).reshape(-1, x.getnchannels())
def save(self, filename):
y = aifc.open(filename, 'wb')
y.setnchannels(self.nchannels)
y.setsampwidth(self.sampwidth)
y.setframerate(self.framerate)
y.writeframes(self.sig.flatten().tobytes())
def save_channel(self, filename, channel):
y = aifc.open(filename, 'wb')
y.setnchannels(1)
y.setsampwidth(self.sampwidth)
y.setframerate(self.framerate)
y.writeframes(self.sig[:, channel].flatten().tobytes())
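# A minimal usage sketch (added illustration; the file names are assumptions):
# load a stereo AIFF, write a copy, then write the left channel on its own.
#
#     a = Aiff('stereo.aiff')
#     a.save('copy.aiff')
#     a.save_channel('left.aiff', 0)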
|
viniciusd/DCO1008---Digital-Signal-Processing
|
projeto3/aiff.py
|
Python
|
mit
| 913 | 0.001095 |
from pythonforandroid.recipe import PythonRecipe
class Asn1cryptoRecipe(PythonRecipe):
name = 'asn1crypto'
version = '0.23.0'
url = 'https://pypi.python.org/packages/31/53/8bca924b30cb79d6d70dbab6a99e8731d1e4dd3b090b7f3d8412a8d8ffbc/asn1crypto-0.23.0.tar.gz#md5=97d54665c397b72b165768398dfdd876'
depends = ['python2', 'setuptools']
call_hostpython_via_targetpython = False
recipe = Asn1cryptoRecipe()
|
wexi/python-for-android
|
pythonforandroid/recipes/asn1crypto/__init__.py
|
Python
|
mit
| 426 | 0.002347 |
# -*- coding: utf-8 -*-
"""Video is an ungraded XModule for supporting video content.
It's a new, improved video module which supports additional features:
- Can play non-YouTube video sources via in-browser HTML5 video player.
- YouTube defaults to HTML5 mode from the start.
- Speed changes in both YouTube and non-YouTube videos happen via
in-browser HTML5 video method (when in HTML5 mode).
- Navigational subtitles can be disabled altogether via an attribute
in XML.
Examples of html5 videos for manual testing:
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.mp4
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.webm
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.ogv
"""
from __future__ import absolute_import
import copy
import json
import logging
from collections import OrderedDict, defaultdict
from operator import itemgetter
import six
from django.conf import settings
from lxml import etree
from opaque_keys.edx.locator import AssetLocator
from web_fragments.fragment import Fragment
from xblock.completable import XBlockCompletionMode
from xblock.core import XBlock
from xblock.fields import ScopeIds
from xblock.runtime import KvsFieldData
from openedx.core.djangoapps.video_config.models import HLSPlaybackEnabledFlag, CourseYoutubeBlockedFlag
from openedx.core.djangoapps.video_pipeline.config.waffle import DEPRECATE_YOUTUBE, waffle_flags
from openedx.core.lib.cache_utils import request_cached
from openedx.core.lib.license import LicenseMixin
from xmodule.contentstore.content import StaticContent
from xmodule.editing_module import EditingMixin, TabsEditingMixin
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.inheritance import InheritanceKeyValueStore, own_metadata
from xmodule.raw_module import EmptyDataRawMixin
from xmodule.validation import StudioValidation, StudioValidationMessage
from xmodule.util.xmodule_django import add_webpack_to_fragment
from xmodule.video_module import manage_video_subtitles_save
from xmodule.x_module import (
PUBLIC_VIEW, STUDENT_VIEW,
HTMLSnippet, ResourceTemplates, shim_xmodule_js,
XModuleMixin, XModuleToXBlockMixin, XModuleDescriptorToXBlockMixin,
)
from xmodule.xml_module import XmlMixin, deserialize_field, is_pointer_tag, name_to_pathname
from .bumper_utils import bumperize
from .transcripts_utils import (
Transcript,
VideoTranscriptsMixin,
clean_video_id,
get_html5_ids,
get_transcript_for_video,
subs_filename
)
from .video_handlers import VideoStudentViewHandlers, VideoStudioViewHandlers
from .video_utils import create_youtube_string, format_xml_exception_message, get_poster, rewrite_video_url
from .video_xfields import VideoFields
# The following import/except block for edxval is temporary measure until
# edxval is a proper XBlock Runtime Service.
#
# Here's the deal: the VideoBlock should be able to take advantage of edx-val
# (https://github.com/edx/edx-val) to figure out what URL to give for video
# resources that have an edx_video_id specified. edx-val is a Django app, and
# including it causes tests to fail because we run common/lib tests standalone
# without Django dependencies. The alternatives seem to be:
#
# 1. Move VideoBlock out of edx-platform.
# 2. Accept the Django dependency in common/lib.
# 3. Try to import, catch the exception on failure, and check for the existence
# of edxval_api before invoking it in the code.
# 4. Make edxval an XBlock Runtime Service
#
# (1) is a longer term goal. VideoBlock should be made into an XBlock and
# extracted from edx-platform entirely. But that's expensive to do because of
# the various dependencies (like templates). Need to sort this out.
# (2) is explicitly discouraged.
# (3) is what we're doing today. The code is still functional when called within
# the context of the LMS, but does not cause failure on import when running
# standalone tests. Most VideoBlock tests tend to be in the LMS anyway,
# probably for historical reasons, so we're not making things notably worse.
# (4) is one of the next items on the backlog for edxval, and should get rid
# of this particular import silliness. It's just that I haven't made one before,
# and I was worried about trying it with my deadline constraints.
try:
import edxval.api as edxval_api
except ImportError:
edxval_api = None
try:
from branding.models import BrandingInfoConfig
except ImportError:
BrandingInfoConfig = None
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
EXPORT_IMPORT_COURSE_DIR = u'course'
EXPORT_IMPORT_STATIC_DIR = u'static'
@XBlock.wants('settings', 'completion', 'i18n', 'request_cache')
class VideoBlock(
VideoFields, VideoTranscriptsMixin, VideoStudioViewHandlers, VideoStudentViewHandlers,
TabsEditingMixin, EmptyDataRawMixin, XmlMixin, EditingMixin,
XModuleDescriptorToXBlockMixin, XModuleToXBlockMixin, HTMLSnippet, ResourceTemplates, XModuleMixin,
LicenseMixin):
"""
XML source example:
<video show_captions="true"
youtube="0.75:jNCf2gIqpeE,1.0:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg"
url_name="lecture_21_3" display_name="S19V3: Vacancies"
>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.mp4"/>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.webm"/>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.ogv"/>
</video>
"""
has_custom_completion = True
completion_mode = XBlockCompletionMode.COMPLETABLE
video_time = 0
icon_class = 'video'
show_in_read_only_mode = True
tabs = [
{
'name': _("Basic"),
'template': "video/transcripts.html",
'current': True
},
{
'name': _("Advanced"),
'template': "tabs/metadata-edit-tab.html"
}
]
uses_xmodule_styles_setup = True
requires_per_student_anonymous_id = True
def get_transcripts_for_student(self, transcripts):
"""Return transcript information necessary for rendering the XModule student view.
This is more or less a direct extraction from `get_html`.
Args:
transcripts (dict): A dict with all transcripts and a sub.
Returns:
Tuple of (track_url, transcript_language, sorted_languages)
track_url -> subtitle download url
transcript_language -> default transcript language
sorted_languages -> dictionary of available transcript languages
"""
track_url = None
sub, other_lang = transcripts["sub"], transcripts["transcripts"]
if self.download_track:
if self.track:
track_url = self.track
elif sub or other_lang:
track_url = self.runtime.handler_url(self, 'transcript', 'download').rstrip('/?')
transcript_language = self.get_default_transcript_language(transcripts)
native_languages = {lang: label for lang, label in settings.LANGUAGES if len(lang) == 2}
languages = {
lang: native_languages.get(lang, display)
for lang, display in settings.ALL_LANGUAGES
if lang in other_lang
}
if not other_lang or (other_lang and sub):
languages['en'] = 'English'
# OrderedDict for easy testing of rendered context in tests
sorted_languages = sorted(list(languages.items()), key=itemgetter(1))
sorted_languages = OrderedDict(sorted_languages)
return track_url, transcript_language, sorted_languages
@property
def youtube_deprecated(self):
"""
Return True if youtube is deprecated and hls as primary playback is enabled else False
"""
# Return False if `hls` playback feature is disabled.
if not HLSPlaybackEnabledFlag.feature_enabled(self.location.course_key):
return False
# check if youtube has been deprecated and hls as primary playback
# is enabled for this course
return waffle_flags()[DEPRECATE_YOUTUBE].is_enabled(self.location.course_key)
def youtube_disabled_for_course(self):
if not self.location.context_key.is_course:
return False # Only courses have this flag
        return CourseYoutubeBlockedFlag.feature_enabled(self.location.course_key)
def prioritize_hls(self, youtube_streams, html5_sources):
"""
        Decide whether hls can be prioritized as the primary playback or not.
        If both youtube and hls sources are present, decide based on the
        deprecation flag; if only one of them is present, play whichever exists.
"""
yt_present = bool(youtube_streams.strip()) if youtube_streams else False
hls_present = any(source for source in html5_sources if source.strip().endswith('.m3u8'))
if yt_present and hls_present:
return self.youtube_deprecated
return False
def student_view(self, _context):
"""
Return the student view.
"""
fragment = Fragment(self.get_html())
add_webpack_to_fragment(fragment, 'VideoBlockPreview')
shim_xmodule_js(fragment, 'Video')
return fragment
def author_view(self, context):
"""
Renders the Studio preview view.
"""
return self.student_view(context)
def studio_view(self, _context):
"""
Return the studio view.
"""
fragment = Fragment(
self.system.render_template(self.mako_template, self.get_context())
)
add_webpack_to_fragment(fragment, 'VideoBlockStudio')
shim_xmodule_js(fragment, 'TabsEditingDescriptor')
return fragment
def public_view(self, context):
"""
Returns a fragment that contains the html for the public view
"""
return Fragment(self.get_html(view=PUBLIC_VIEW))
def get_html(self, view=STUDENT_VIEW):
track_status = (self.download_track and self.track)
transcript_download_format = self.transcript_download_format if not track_status else None
sources = [source for source in self.html5_sources if source]
download_video_link = None
branding_info = None
youtube_streams = ""
video_duration = None
video_status = None
# Determine if there is an alternative source for this video
# based on user locale. This exists to support cases where
# we leverage a geography specific CDN, like China.
default_cdn_url = getattr(settings, 'VIDEO_CDN_URL', {}).get('default')
cdn_url = getattr(settings, 'VIDEO_CDN_URL', {}).get(self.system.user_location, default_cdn_url)
# If we have an edx_video_id, we prefer its values over what we store
# internally for download links (source, html5_sources) and the youtube
# stream.
if self.edx_video_id and edxval_api:
try:
val_profiles = ["youtube", "desktop_webm", "desktop_mp4"]
if HLSPlaybackEnabledFlag.feature_enabled(self.course_id):
val_profiles.append('hls')
                # strip edx_video_id to prevent a ValVideoNotFoundError if unwanted spaces are present. TNL-5769
val_video_urls = edxval_api.get_urls_for_profiles(self.edx_video_id.strip(), val_profiles)
# VAL will always give us the keys for the profiles we asked for, but
# if it doesn't have an encoded video entry for that Video + Profile, the
# value will map to `None`
# add the non-youtube urls to the list of alternative sources
# use the last non-None non-youtube non-hls url as the link to download the video
for url in [val_video_urls[p] for p in val_profiles if p != "youtube"]:
if url:
if url not in sources:
sources.append(url)
# don't include hls urls for download
if self.download_video and not url.endswith('.m3u8'):
# function returns None when the url cannot be re-written
rewritten_link = rewrite_video_url(cdn_url, url)
if rewritten_link:
download_video_link = rewritten_link
else:
download_video_link = url
# set the youtube url
if val_video_urls["youtube"]:
youtube_streams = "1.00:{}".format(val_video_urls["youtube"])
# get video duration
video_data = edxval_api.get_video_info(self.edx_video_id.strip())
video_duration = video_data.get('duration')
video_status = video_data.get('status')
except (edxval_api.ValInternalError, edxval_api.ValVideoNotFoundError):
# VAL raises this exception if it can't find data for the edx video ID. This can happen if the
# course data is ported to a machine that does not have the VAL data. So for now, pass on this
# exception and fallback to whatever we find in the VideoBlock.
log.warning("Could not retrieve information from VAL for edx Video ID: %s.", self.edx_video_id)
# If the user comes from China use China CDN for html5 videos.
# 'CN' is China ISO 3166-1 country code.
# Video caching is disabled for Studio. User_location is always None in Studio.
# CountryMiddleware disabled for Studio.
if getattr(self, 'video_speed_optimizations', True) and cdn_url:
branding_info = BrandingInfoConfig.get_config().get(self.system.user_location)
if self.edx_video_id and edxval_api and video_status != u'external':
for index, source_url in enumerate(sources):
new_url = rewrite_video_url(cdn_url, source_url)
if new_url:
sources[index] = new_url
# If there was no edx_video_id, or if there was no download specified
# for it, we fall back on whatever we find in the VideoBlock.
if not download_video_link and self.download_video:
if self.html5_sources:
download_video_link = self.html5_sources[0]
# don't give the option to download HLS video urls
if download_video_link and download_video_link.endswith('.m3u8'):
download_video_link = None
transcripts = self.get_transcripts_info()
track_url, transcript_language, sorted_languages = self.get_transcripts_for_student(transcripts=transcripts)
cdn_eval = False
cdn_exp_group = None
if self.youtube_disabled_for_course():
self.youtube_streams = ''
else:
self.youtube_streams = youtube_streams or create_youtube_string(self) # pylint: disable=W0201
settings_service = self.runtime.service(self, 'settings')
poster = None
if edxval_api and self.edx_video_id:
poster = edxval_api.get_course_video_image_url(
course_id=self.runtime.course_id.for_branch(None),
edx_video_id=self.edx_video_id.strip()
)
completion_service = self.runtime.service(self, 'completion')
if completion_service:
completion_enabled = completion_service.completion_tracking_enabled()
else:
completion_enabled = False
# This is the setting that controls whether the autoadvance button will be visible, not whether the
# video will autoadvance or not.
# For autoadvance controls to be shown, both the feature flag and the course setting must be true.
# This allows to enable the feature for certain courses only.
autoadvance_enabled = settings.FEATURES.get('ENABLE_AUTOADVANCE_VIDEOS', False) and \
getattr(self, 'video_auto_advance', False)
# This is the current status of auto-advance (not the control visibility).
# But when controls aren't visible we force it to off. The student might have once set the preference to
# true, but now staff or admin have hidden the autoadvance button and the student won't be able to disable
# it anymore; therefore we force-disable it in this case (when controls aren't visible).
autoadvance_this_video = self.auto_advance and autoadvance_enabled
metadata = {
'saveStateEnabled': view != PUBLIC_VIEW,
'saveStateUrl': self.ajax_url + '/save_user_state',
'autoplay': settings.FEATURES.get('AUTOPLAY_VIDEOS', False),
'streams': self.youtube_streams,
'sources': sources,
'poster': poster,
'duration': video_duration,
# This won't work when we move to data that
# isn't on the filesystem
'captionDataDir': getattr(self, 'data_dir', None),
'showCaptions': json.dumps(self.show_captions),
'generalSpeed': self.global_speed,
'speed': self.speed,
'autoAdvance': autoadvance_this_video,
'savedVideoPosition': self.saved_video_position.total_seconds(),
'start': self.start_time.total_seconds(),
'end': self.end_time.total_seconds(),
'completionEnabled': completion_enabled,
'completionPercentage': settings.COMPLETION_VIDEO_COMPLETE_PERCENTAGE,
'transcriptLanguage': transcript_language,
'transcriptLanguages': sorted_languages,
'ytTestTimeout': settings.YOUTUBE['TEST_TIMEOUT'],
'ytApiUrl': settings.YOUTUBE['API'],
'lmsRootURL': settings.LMS_ROOT_URL,
'transcriptTranslationUrl': self.runtime.handler_url(
self, 'transcript', 'translation/__lang__'
).rstrip('/?'),
'transcriptAvailableTranslationsUrl': self.runtime.handler_url(
self, 'transcript', 'available_translations'
).rstrip('/?'),
'publishCompletionUrl': self.runtime.handler_url(self, 'publish_completion', '').rstrip('?'),
# For now, the option "data-autohide-html5" is hard coded. This option
# either enables or disables autohiding of controls and captions on mouse
# inactivity. If set to true, controls and captions will autohide for
# HTML5 sources (non-YouTube) after a period of mouse inactivity over the
# whole video. When the mouse moves (or a key is pressed while any part of
# the video player is focused), the captions and controls will be shown
# once again.
#
# There is no option in the "Advanced Editor" to set this option. However,
# this option will have an effect if changed to "True". The code on
# front-end exists.
'autohideHtml5': False,
# This is the server's guess at whether youtube is available for
# this user, based on what was recorded the last time we saw the
# user, and defaulting to True.
'recordedYoutubeIsAvailable': self.youtube_is_available,
'prioritizeHls': self.prioritize_hls(self.youtube_streams, sources),
}
bumperize(self)
context = {
'autoadvance_enabled': autoadvance_enabled,
'bumper_metadata': json.dumps(self.bumper['metadata']), # pylint: disable=E1101
'metadata': json.dumps(OrderedDict(metadata)),
'poster': json.dumps(get_poster(self)),
'branding_info': branding_info,
'cdn_eval': cdn_eval,
'cdn_exp_group': cdn_exp_group,
'id': self.location.html_id(),
'display_name': self.display_name_with_default,
'handout': self.handout,
'download_video_link': download_video_link,
'track': track_url,
'transcript_download_format': transcript_download_format,
'transcript_download_formats_list': self.fields['transcript_download_format'].values,
'license': getattr(self, "license", None),
}
return self.system.render_template('video.html', context)
def validate(self):
"""
Validates the state of this Video XBlock instance. This
is the override of the general XBlock method, and it will also ask
its superclass to validate.
"""
validation = super(VideoBlock, self).validate()
if not isinstance(validation, StudioValidation):
validation = StudioValidation.copy(validation)
no_transcript_lang = []
for lang_code, transcript in self.transcripts.items():
if not transcript:
no_transcript_lang.append([label for code, label in settings.ALL_LANGUAGES if code == lang_code][0])
if no_transcript_lang:
ungettext = self.runtime.service(self, "i18n").ungettext
validation.set_summary(
StudioValidationMessage(
StudioValidationMessage.WARNING,
ungettext(
'There is no transcript file associated with the {lang} language.',
'There are no transcript files associated with the {lang} languages.',
len(no_transcript_lang)
).format(lang=', '.join(no_transcript_lang))
)
)
return validation
def editor_saved(self, user, old_metadata, old_content):
"""
        Used to update video values when `self` is saved from the CMS.
        old_metadata: dict of the fields of `self` with scope=settings that were explicitly set by the user.
        old_content: same as `old_metadata`, but for scope=content.
        Due to the code flow in item.py::_save_item, by the time this function is called,
        the fields of the `self` instance have already been updated, but not yet saved.
        To obtain the values that were changed by user input,
        compare own_metadata(self) with old_metadata.
        The video player has two tabs, and because of the syncing between tabs,
        metadata from the Basic tab is always sent the first time the video player is edited and saved, for example:
        {'youtube_id_1_0': u'3_yD_cEKoCk', 'display_name': u'Video', 'sub': u'3_yD_cEKoCk', 'html5_sources': []},
        so these fields will always be present in old_metadata after the first save. This should be fixed.
        On subsequent save requests html5_sources are always sent too, regardless of whether the user changed them.
        That means html5_sources is always in the list of changed fields (the `metadata` param in save_item).
        This should be fixed too.
"""
metadata_was_changed_by_user = old_metadata != own_metadata(self)
        # There is an edge case when old_metadata and own_metadata are the same and we are importing a transcript
        # from youtube: there is a syncing issue where html5 subs are not synced with the youtube sub. We can make
        # the sync better by checking whether a transcript is present for the video; if the transcript for any of
        # the html5_ids is missing, trigger manage_video_subtitles_save to create the missing transcript for that
        # particular html5_id.
if not metadata_was_changed_by_user and self.sub and hasattr(self, 'html5_sources'):
html5_ids = get_html5_ids(self.html5_sources)
for subs_id in html5_ids:
try:
Transcript.asset(self.location, subs_id)
except NotFoundError:
                    # If a transcript does not exist for a particular html5_id then there is no need to check the
                    # other html5_ids, because we have to create a new transcript for the missing html5_id by
                    # turning on the metadata_was_changed_by_user flag.
metadata_was_changed_by_user = True
break
if metadata_was_changed_by_user:
self.edx_video_id = self.edx_video_id and self.edx_video_id.strip()
# We want to override `youtube_id_1_0` with val youtube profile in the first place when someone adds/edits
# an `edx_video_id` or its underlying YT val profile. Without this, override will only happen when a user
# saves the video second time. This is because of the syncing of basic and advanced video settings which
# also syncs val youtube id from basic tab's `Video Url` to advanced tab's `Youtube ID`.
if self.edx_video_id and edxval_api:
val_youtube_id = edxval_api.get_url_for_profile(self.edx_video_id, 'youtube')
if val_youtube_id and self.youtube_id_1_0 != val_youtube_id:
self.youtube_id_1_0 = val_youtube_id
manage_video_subtitles_save(
self,
user,
old_metadata if old_metadata else None,
generate_translation=True
)
def save_with_metadata(self, user):
"""
        Save module with updated metadata to database.
"""
self.save()
self.runtime.modulestore.update_item(self, user.id)
@property
def editable_metadata_fields(self):
editable_fields = super(VideoBlock, self).editable_metadata_fields
settings_service = self.runtime.service(self, 'settings')
if settings_service:
xb_settings = settings_service.get_settings_bucket(self)
if not xb_settings.get("licensing_enabled", False) and "license" in editable_fields:
del editable_fields["license"]
# Default Timed Transcript a.k.a `sub` has been deprecated and end users shall
# not be able to modify it.
editable_fields.pop('sub')
languages = [{'label': label, 'code': lang} for lang, label in settings.ALL_LANGUAGES]
languages.sort(key=lambda l: l['label'])
editable_fields['transcripts']['custom'] = True
editable_fields['transcripts']['languages'] = languages
editable_fields['transcripts']['type'] = 'VideoTranslations'
        # We need to send ajax requests to show transcript status
        # whenever edx_video_id changes on the frontend. That's why we
        # change the type to `VideoID` so that a specific
        # Backbone.js view can handle it.
editable_fields['edx_video_id']['type'] = 'VideoID'
# construct transcripts info and also find if `en` subs exist
transcripts_info = self.get_transcripts_info()
possible_sub_ids = [self.sub, self.youtube_id_1_0] + get_html5_ids(self.html5_sources)
for sub_id in possible_sub_ids:
try:
get_transcript_for_video(
self.location,
subs_id=sub_id,
file_name=sub_id,
language=u'en'
)
transcripts_info['transcripts'] = dict(transcripts_info['transcripts'], en=sub_id)
break
except NotFoundError:
continue
editable_fields['transcripts']['value'] = transcripts_info['transcripts']
editable_fields['transcripts']['urlRoot'] = self.runtime.handler_url(
self,
'studio_transcript',
'translation'
).rstrip('/?')
editable_fields['handout']['type'] = 'FileUploader'
return editable_fields
@classmethod
def parse_xml_new_runtime(cls, node, runtime, keys):
"""
Implement the video block's special XML parsing requirements for the
new runtime only. For all other runtimes, use the existing XModule-style
methods like .from_xml().
"""
video_block = runtime.construct_xblock_from_class(cls, keys)
field_data = cls.parse_video_xml(node)
for key, val in field_data.items():
setattr(video_block, key, cls.fields[key].from_json(val))
# Update VAL with info extracted from `xml_object`
video_block.edx_video_id = video_block.import_video_info_into_val(
node,
runtime.resources_fs,
keys.usage_id.context_key,
)
return video_block
@classmethod
def from_xml(cls, xml_data, system, id_generator):
"""
Creates an instance of this descriptor from the supplied xml_data.
This may be overridden by subclasses
xml_data: A string of xml that will be translated into data and children for
this module
system: A DescriptorSystem for interacting with external resources
id_generator is used to generate course-specific urls and identifiers
"""
xml_object = etree.fromstring(xml_data)
url_name = xml_object.get('url_name', xml_object.get('slug'))
block_type = 'video'
definition_id = id_generator.create_definition(block_type, url_name)
usage_id = id_generator.create_usage(definition_id)
if is_pointer_tag(xml_object):
filepath = cls._format_filepath(xml_object.tag, name_to_pathname(url_name))
xml_object = cls.load_file(filepath, system.resources_fs, usage_id)
system.parse_asides(xml_object, definition_id, usage_id, id_generator)
field_data = cls.parse_video_xml(xml_object, id_generator)
kvs = InheritanceKeyValueStore(initial_values=field_data)
field_data = KvsFieldData(kvs)
video = system.construct_xblock_from_class(
cls,
# We're loading a descriptor, so student_id is meaningless
# We also don't have separate notions of definition and usage ids yet,
# so we use the location for both
ScopeIds(None, block_type, definition_id, usage_id),
field_data,
)
# Update VAL with info extracted from `xml_object`
video.edx_video_id = video.import_video_info_into_val(
xml_object,
system.resources_fs,
getattr(id_generator, 'target_course_id', None)
)
return video
def definition_to_xml(self, resource_fs):
"""
Returns an xml string representing this module.
"""
xml = etree.Element('video')
youtube_string = create_youtube_string(self)
# Mild workaround to ensure that tests pass -- if a field
# is set to its default value, we don't need to write it out.
if youtube_string and youtube_string != '1.00:3_yD_cEKoCk':
xml.set('youtube', six.text_type(youtube_string))
xml.set('url_name', self.url_name)
attrs = {
'display_name': self.display_name,
'show_captions': json.dumps(self.show_captions),
'start_time': self.start_time,
'end_time': self.end_time,
'sub': self.sub,
'download_track': json.dumps(self.download_track),
'download_video': json.dumps(self.download_video),
}
for key, value in attrs.items():
# Mild workaround to ensure that tests pass -- if a field
# is set to its default value, we don't write it out.
if value:
if key in self.fields and self.fields[key].is_set_on(self):
try:
xml.set(key, six.text_type(value))
except UnicodeDecodeError:
exception_message = format_xml_exception_message(self.location, key, value)
log.exception(exception_message)
# If exception is UnicodeDecodeError set value using unicode 'utf-8' scheme.
log.info("Setting xml value using 'utf-8' scheme.")
xml.set(key, six.text_type(value, 'utf-8'))
except ValueError:
exception_message = format_xml_exception_message(self.location, key, value)
log.exception(exception_message)
raise
for source in self.html5_sources:
ele = etree.Element('source')
ele.set('src', source)
xml.append(ele)
if self.track:
ele = etree.Element('track')
ele.set('src', self.track)
xml.append(ele)
if self.handout:
ele = etree.Element('handout')
ele.set('src', self.handout)
xml.append(ele)
transcripts = {}
if self.transcripts is not None:
transcripts.update(self.transcripts)
edx_video_id = clean_video_id(self.edx_video_id)
if edxval_api and edx_video_id:
try:
# Create static dir if not created earlier.
resource_fs.makedirs(EXPORT_IMPORT_STATIC_DIR, recreate=True)
# Backward compatible exports
# edxval exports new transcripts into the course OLX and returns a transcript
# files map so that it can also be rewritten in old transcript metadata fields
# (i.e. `self.transcripts`) on import and older open-releases (<= ginkgo),
# who do not have deprecated contentstore yet, can also import and use new-style
# transcripts into their openedX instances.
exported_metadata = edxval_api.export_to_xml(
video_id=edx_video_id,
resource_fs=resource_fs,
static_dir=EXPORT_IMPORT_STATIC_DIR,
course_id=six.text_type(self.runtime.course_id.for_branch(None))
)
# Update xml with edxval metadata
xml.append(exported_metadata['xml'])
                # We don't need sub if an English transcript
                # is also in the new transcripts.
new_transcripts = exported_metadata['transcripts']
transcripts.update(new_transcripts)
if new_transcripts.get('en'):
xml.set('sub', '')
# Update `transcripts` attribute in the xml
xml.set('transcripts', json.dumps(transcripts))
except edxval_api.ValVideoNotFoundError:
pass
# Sorting transcripts for easy testing of resulting xml
for transcript_language in sorted(transcripts.keys()):
ele = etree.Element('transcript')
ele.set('language', transcript_language)
ele.set('src', transcripts[transcript_language])
xml.append(ele)
# handle license specifically
self.add_license_to_xml(xml)
return xml
def create_youtube_url(self, youtube_id):
"""
Args:
youtube_id: The ID of the video to create a link for
Returns:
A full youtube url to the video whose ID is passed in
"""
if youtube_id:
return u'https://www.youtube.com/watch?v={0}'.format(youtube_id)
else:
return u''
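    # For illustration, using the module's own default sample ID:
    #   self.create_youtube_url('3_yD_cEKoCk')
    #   # -> u'https://www.youtube.com/watch?v=3_yD_cEKoCk'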
def get_context(self):
"""
Extend context by data for transcript basic tab.
"""
_context = super(VideoBlock, self).get_context()
metadata_fields = copy.deepcopy(self.editable_metadata_fields)
display_name = metadata_fields['display_name']
video_url = metadata_fields['html5_sources']
video_id = metadata_fields['edx_video_id']
youtube_id_1_0 = metadata_fields['youtube_id_1_0']
def get_youtube_link(video_id):
"""
Returns the fully-qualified YouTube URL for the given video identifier
"""
# First try a lookup in VAL. If we have a YouTube entry there, it overrides the
# one passed in.
if self.edx_video_id and edxval_api:
val_youtube_id = edxval_api.get_url_for_profile(self.edx_video_id, "youtube")
if val_youtube_id:
video_id = val_youtube_id
return self.create_youtube_url(video_id)
_ = self.runtime.service(self, "i18n").ugettext
video_url.update({
'help': _('The URL for your video. This can be a YouTube URL or a link to an .mp4, .ogg, or .webm video file hosted elsewhere on the Internet.'), # pylint: disable=line-too-long
'display_name': _('Default Video URL'),
'field_name': 'video_url',
'type': 'VideoList',
'default_value': [get_youtube_link(youtube_id_1_0['default_value'])]
})
source_url = self.create_youtube_url(youtube_id_1_0['value'])
# First try a lookup in VAL. If any video encoding is found given the video id then
# override the source_url with it.
if self.edx_video_id and edxval_api:
val_profiles = ['youtube', 'desktop_webm', 'desktop_mp4']
if HLSPlaybackEnabledFlag.feature_enabled(self.runtime.course_id.for_branch(None)):
val_profiles.append('hls')
# Get video encodings for val profiles.
val_video_encodings = edxval_api.get_urls_for_profiles(self.edx_video_id, val_profiles)
# VAL's youtube source has greater priority over external youtube source.
if val_video_encodings.get('youtube'):
source_url = self.create_youtube_url(val_video_encodings['youtube'])
            # If no youtube source is provided externally or in VAL, update source_url in priority order: hls > mp4 > webm
if not source_url:
if val_video_encodings.get('hls'):
source_url = val_video_encodings['hls']
elif val_video_encodings.get('desktop_mp4'):
source_url = val_video_encodings['desktop_mp4']
elif val_video_encodings.get('desktop_webm'):
source_url = val_video_encodings['desktop_webm']
# Only add if html5 sources do not already contain source_url.
if source_url and source_url not in video_url['value']:
video_url['value'].insert(0, source_url)
metadata = {
'display_name': display_name,
'video_url': video_url,
'edx_video_id': video_id
}
_context.update({'transcripts_basic_tab_metadata': metadata})
return _context
@classmethod
def _parse_youtube(cls, data):
"""
Parses a string of Youtube IDs such as "1.0:AXdE34_U,1.5:VO3SxfeD"
into a dictionary. Necessary for backwards compatibility with
XML-based courses.
"""
ret = {'0.75': '', '1.00': '', '1.25': '', '1.50': ''}
videos = data.split(',')
for video in videos:
pieces = video.split(':')
try:
speed = '%.2f' % float(pieces[0]) # normalize speed
# Handle the fact that youtube IDs got double-quoted for a period of time.
# Note: we pass in "VideoFields.youtube_id_1_0" so we deserialize as a String--
# it doesn't matter what the actual speed is for the purposes of deserializing.
youtube_id = deserialize_field(cls.youtube_id_1_0, pieces[1])
ret[speed] = youtube_id
except (ValueError, IndexError):
log.warning('Invalid YouTube ID: %s', video)
return ret
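    # A quick sketch using the docstring's own sample input:
    #   VideoBlock._parse_youtube('1.0:AXdE34_U,1.5:VO3SxfeD')
    #   # -> {'0.75': '', '1.00': 'AXdE34_U', '1.25': '', '1.50': 'VO3SxfeD'}
    # Speeds are normalized to two decimals; unmentioned speeds stay empty.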
@classmethod
def parse_video_xml(cls, xml, id_generator=None):
"""
Parse video fields out of xml_data. The fields are set if they are
present in the XML.
Arguments:
id_generator is used to generate course-specific urls and identifiers
"""
if isinstance(xml, six.string_types):
xml = etree.fromstring(xml)
field_data = {}
# Convert between key types for certain attributes --
# necessary for backwards compatibility.
conversions = {
# example: 'start_time': cls._example_convert_start_time
}
# Convert between key names for certain attributes --
# necessary for backwards compatibility.
compat_keys = {
'from': 'start_time',
'to': 'end_time'
}
sources = xml.findall('source')
if sources:
field_data['html5_sources'] = [ele.get('src') for ele in sources]
track = xml.find('track')
if track is not None:
field_data['track'] = track.get('src')
handout = xml.find('handout')
if handout is not None:
field_data['handout'] = handout.get('src')
transcripts = xml.findall('transcript')
if transcripts:
field_data['transcripts'] = {tr.get('language'): tr.get('src') for tr in transcripts}
for attr, value in xml.items():
if attr in compat_keys:
attr = compat_keys[attr]
if attr in cls.metadata_to_strip + ('url_name', 'name'):
continue
if attr == 'youtube':
speeds = cls._parse_youtube(value)
for speed, youtube_id in speeds.items():
# should have made these youtube_id_1_00 for
# cleanliness, but hindsight doesn't need glasses
normalized_speed = speed[:-1] if speed.endswith('0') else speed
# If the user has specified html5 sources, make sure we don't use the default video
if youtube_id != '' or 'html5_sources' in field_data:
field_data['youtube_id_{0}'.format(normalized_speed.replace('.', '_'))] = youtube_id
elif attr in conversions:
field_data[attr] = conversions[attr](value)
elif attr not in cls.fields:
field_data.setdefault('xml_attributes', {})[attr] = value
else:
# We export values with json.dumps (well, except for Strings, but
# for about a month we did it for Strings also).
field_data[attr] = deserialize_field(cls.fields[attr], value)
course_id = getattr(id_generator, 'target_course_id', None)
# Update the handout location with current course_id
if 'handout' in list(field_data.keys()) and course_id:
handout_location = StaticContent.get_location_from_path(field_data['handout'])
if isinstance(handout_location, AssetLocator):
handout_new_location = StaticContent.compute_location(course_id, handout_location.path)
field_data['handout'] = StaticContent.serialize_asset_key_with_slash(handout_new_location)
# For backwards compatibility: Add `source` if XML doesn't have `download_video`
# attribute.
if 'download_video' not in field_data and sources:
field_data['source'] = field_data['html5_sources'][0]
# For backwards compatibility: if XML doesn't have `download_track` attribute,
# it means that it is an old format. So, if `track` has some value,
# `download_track` needs to have value `True`.
if 'download_track' not in field_data and track is not None:
field_data['download_track'] = True
# load license if it exists
field_data = LicenseMixin.parse_license_from_xml(field_data, xml)
return field_data
def import_video_info_into_val(self, xml, resource_fs, course_id):
"""
Import parsed video info from `xml` into edxval.
Arguments:
xml (lxml object): xml representation of video to be imported.
resource_fs (OSFS): Import file system.
course_id (str): course id
"""
edx_video_id = clean_video_id(self.edx_video_id)
        # Create video_asset if not already present.
video_asset_elem = xml.find('video_asset')
if video_asset_elem is None:
video_asset_elem = etree.Element('video_asset')
# This will be a dict containing the list of names of the external transcripts.
# Example:
# {
# 'en': ['The_Flash.srt', 'Harry_Potter.srt'],
# 'es': ['Green_Arrow.srt']
# }
external_transcripts = defaultdict(list)
        # Add transcripts from the self.sub and self.youtube_id_1_0 fields.
external_transcripts['en'] = [
subs_filename(transcript, 'en')
for transcript in [self.sub, self.youtube_id_1_0] if transcript
]
for language_code, transcript in self.transcripts.items():
external_transcripts[language_code].append(transcript)
if edxval_api:
edx_video_id = edxval_api.import_from_xml(
video_asset_elem,
edx_video_id,
resource_fs,
EXPORT_IMPORT_STATIC_DIR,
external_transcripts,
course_id=course_id
)
return edx_video_id
def index_dictionary(self):
xblock_body = super(VideoBlock, self).index_dictionary()
video_body = {
"display_name": self.display_name,
}
def _update_transcript_for_index(language=None):
""" Find video transcript - if not found, don't update index """
try:
transcripts = self.get_transcripts_info()
transcript = self.get_transcript(
transcripts, transcript_format='txt', lang=language
)[0].replace("\n", " ")
transcript_index_name = "transcript_{}".format(language if language else self.transcript_language)
video_body.update({transcript_index_name: transcript})
except NotFoundError:
pass
if self.sub:
_update_transcript_for_index()
# Check to see if there are transcripts in other languages besides default transcript
if self.transcripts:
for language in self.transcripts.keys():
_update_transcript_for_index(language)
if "content" in xblock_body:
xblock_body["content"].update(video_body)
else:
xblock_body["content"] = video_body
xblock_body["content_type"] = "Video"
return xblock_body
@property
def request_cache(self):
"""
Returns the request_cache from the runtime.
"""
return self.runtime.service(self, "request_cache")
@classmethod
@request_cached(
request_cache_getter=lambda args, kwargs: args[1],
)
def get_cached_val_data_for_course(cls, request_cache, video_profile_names, course_id):
"""
Returns the VAL data for the requested video profiles for the given course.
"""
return edxval_api.get_video_info_for_course_and_profiles(six.text_type(course_id), video_profile_names)
def student_view_data(self, context=None):
"""
Returns a JSON representation of the student_view of this XModule.
The contract of the JSON content is between the caller and the particular XModule.
"""
context = context or {}
# If the "only_on_web" field is set on this video, do not return the rest of the video's data
        # in this json view, since this video is to be accessed only through its web view.
if self.only_on_web:
return {"only_on_web": True}
encoded_videos = {}
val_video_data = {}
all_sources = self.html5_sources or []
# Check in VAL data first if edx_video_id exists
if self.edx_video_id:
video_profile_names = context.get("profiles", ["mobile_low"])
if HLSPlaybackEnabledFlag.feature_enabled(self.location.course_key) and 'hls' not in video_profile_names:
video_profile_names.append('hls')
# get and cache bulk VAL data for course
val_course_data = self.get_cached_val_data_for_course(
self.request_cache,
video_profile_names,
self.location.course_key,
)
val_video_data = val_course_data.get(self.edx_video_id, {})
# Get the encoded videos if data from VAL is found
if val_video_data:
encoded_videos = val_video_data.get('profiles', {})
# If information for this edx_video_id is not found in the bulk course data, make a
# separate request for this individual edx_video_id, unless cache misses are disabled.
# This is useful/required for videos that don't have a course designated, such as the introductory video
# that is shared across many courses. However, this results in a separate database request so watch
# out for any performance hit if many such videos exist in a course. Set the 'allow_cache_miss' parameter
# to False to disable this fall back.
elif context.get("allow_cache_miss", "True").lower() == "true":
try:
val_video_data = edxval_api.get_video_info(self.edx_video_id)
# Unfortunately, the VAL API is inconsistent in how it returns the encodings, so remap here.
for enc_vid in val_video_data.pop('encoded_videos'):
if enc_vid['profile'] in video_profile_names:
encoded_videos[enc_vid['profile']] = {key: enc_vid[key] for key in ["url", "file_size"]}
except edxval_api.ValVideoNotFoundError:
pass
# Fall back to other video URLs in the video module if not found in VAL
if not encoded_videos:
if all_sources:
encoded_videos["fallback"] = {
"url": all_sources[0],
"file_size": 0, # File size is unknown for fallback URLs
}
            # Include a youtube link if there is no encoding for mobile, i.e. only a fallback URL or no encodings
            # at all. We include a fallback URL for older versions of the mobile app that can't handle YouTube urls.
if self.youtube_id_1_0:
encoded_videos["youtube"] = {
"url": self.create_youtube_url(self.youtube_id_1_0),
"file_size": 0, # File size is not relevant for external link
}
available_translations = self.available_translations(self.get_transcripts_info())
transcripts = {
lang: self.runtime.handler_url(self, 'transcript', 'download', query="lang=" + lang, thirdparty=True)
for lang in available_translations
}
return {
"only_on_web": self.only_on_web,
"duration": val_video_data.get('duration', None),
"transcripts": transcripts,
"encoded_videos": encoded_videos,
"all_sources": all_sources,
}
|
ESOedX/edx-platform
|
common/lib/xmodule/xmodule/video_module/video_module.py
|
Python
|
agpl-3.0
| 51,311 | 0.002904 |
"""Provide functionality to keep track of devices."""
import asyncio
import voluptuous as vol
from homeassistant.loader import bind_hass
from homeassistant.components import group
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import GPSType, ConfigType, HomeAssistantType
from homeassistant.helpers.event import async_track_utc_time_change
from homeassistant.const import ATTR_GPS_ACCURACY, STATE_HOME
from . import legacy, setup
from .config_entry import ( # noqa # pylint: disable=unused-import
async_setup_entry,
async_unload_entry,
)
from .legacy import DeviceScanner # noqa # pylint: disable=unused-import
from .const import (
ATTR_ATTRIBUTES,
ATTR_BATTERY,
ATTR_CONSIDER_HOME,
ATTR_DEV_ID,
ATTR_GPS,
ATTR_HOST_NAME,
ATTR_LOCATION_NAME,
ATTR_MAC,
ATTR_SOURCE_TYPE,
CONF_AWAY_HIDE,
CONF_CONSIDER_HOME,
CONF_NEW_DEVICE_DEFAULTS,
CONF_SCAN_INTERVAL,
CONF_TRACK_NEW,
DEFAULT_AWAY_HIDE,
DEFAULT_CONSIDER_HOME,
DEFAULT_TRACK_NEW,
DOMAIN,
PLATFORM_TYPE_LEGACY,
SOURCE_TYPE_BLUETOOTH_LE,
SOURCE_TYPE_BLUETOOTH,
SOURCE_TYPE_GPS,
SOURCE_TYPE_ROUTER,
)
ENTITY_ID_ALL_DEVICES = group.ENTITY_ID_FORMAT.format("all_devices")
SERVICE_SEE = "see"
SOURCE_TYPES = (
SOURCE_TYPE_GPS,
SOURCE_TYPE_ROUTER,
SOURCE_TYPE_BLUETOOTH,
SOURCE_TYPE_BLUETOOTH_LE,
)
NEW_DEVICE_DEFAULTS_SCHEMA = vol.Any(
None,
vol.Schema(
{
vol.Optional(CONF_TRACK_NEW, default=DEFAULT_TRACK_NEW): cv.boolean,
vol.Optional(CONF_AWAY_HIDE, default=DEFAULT_AWAY_HIDE): cv.boolean,
}
),
)
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_SCAN_INTERVAL): cv.time_period,
vol.Optional(CONF_TRACK_NEW): cv.boolean,
vol.Optional(CONF_CONSIDER_HOME, default=DEFAULT_CONSIDER_HOME): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_NEW_DEVICE_DEFAULTS, default={}): NEW_DEVICE_DEFAULTS_SCHEMA,
}
)
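# For reference, a configuration.yaml platform entry accepted by this schema
# could look roughly like the sketch below. The key names assume the usual
# values of the CONF_* constants imported from .const, and the platform name
# is purely illustrative:
#
#   device_tracker:
#     - platform: nmap_tracker
#       consider_home: 180
#       new_device_defaults:
#         track_new_devices: true
#         hide_if_away: false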
PLATFORM_SCHEMA_BASE = cv.PLATFORM_SCHEMA_BASE.extend(PLATFORM_SCHEMA.schema)
SERVICE_SEE_PAYLOAD_SCHEMA = vol.Schema(
vol.All(
cv.has_at_least_one_key(ATTR_MAC, ATTR_DEV_ID),
{
ATTR_MAC: cv.string,
ATTR_DEV_ID: cv.string,
ATTR_HOST_NAME: cv.string,
ATTR_LOCATION_NAME: cv.string,
ATTR_GPS: cv.gps,
ATTR_GPS_ACCURACY: cv.positive_int,
ATTR_BATTERY: cv.positive_int,
ATTR_ATTRIBUTES: dict,
ATTR_SOURCE_TYPE: vol.In(SOURCE_TYPES),
ATTR_CONSIDER_HOME: cv.time_period,
# Temp workaround for iOS app introduced in 0.65
vol.Optional("battery_status"): str,
vol.Optional("hostname"): str,
},
)
)
@bind_hass
def is_on(hass: HomeAssistantType, entity_id: str = None):
"""Return the state if any or a specified device is home."""
entity = entity_id or ENTITY_ID_ALL_DEVICES
return hass.states.is_state(entity, STATE_HOME)
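# For illustration (the entity id is hypothetical):
#   is_on(hass, "device_tracker.my_phone") is True while that entity's
#   state is STATE_HOME ("home").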
def see(
hass: HomeAssistantType,
mac: str = None,
dev_id: str = None,
host_name: str = None,
location_name: str = None,
gps: GPSType = None,
gps_accuracy=None,
battery: int = None,
attributes: dict = None,
):
"""Call service to notify you see device."""
data = {
key: value
for key, value in (
(ATTR_MAC, mac),
(ATTR_DEV_ID, dev_id),
(ATTR_HOST_NAME, host_name),
(ATTR_LOCATION_NAME, location_name),
(ATTR_GPS, gps),
(ATTR_GPS_ACCURACY, gps_accuracy),
(ATTR_BATTERY, battery),
)
if value is not None
}
if attributes:
data[ATTR_ATTRIBUTES] = attributes
hass.services.call(DOMAIN, SERVICE_SEE, data)
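# A minimal usage sketch (device id, coordinates and battery level are all
# illustrative values):
#
#   see(hass, dev_id="my_phone", gps=(52.37, 4.89), gps_accuracy=20, battery=71)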
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Set up the device tracker."""
tracker = await legacy.get_tracker(hass, config)
legacy_platforms = await setup.async_extract_config(hass, config)
setup_tasks = [
legacy_platform.async_setup_legacy(hass, tracker)
for legacy_platform in legacy_platforms
]
if setup_tasks:
await asyncio.wait(setup_tasks)
tracker.async_setup_group()
async def async_platform_discovered(p_type, info):
"""Load a platform."""
platform = await setup.async_create_platform_type(hass, config, p_type, {})
if platform is None or platform.type != PLATFORM_TYPE_LEGACY:
return
await platform.async_setup_legacy(hass, tracker, info)
discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)
# Clean up stale devices
async_track_utc_time_change(
hass, tracker.async_update_stale, second=range(0, 60, 5)
)
async def async_see_service(call):
"""Service to see a device."""
# Temp workaround for iOS, introduced in 0.65
data = dict(call.data)
data.pop("hostname", None)
data.pop("battery_status", None)
await tracker.async_see(**data)
hass.services.async_register(
DOMAIN, SERVICE_SEE, async_see_service, SERVICE_SEE_PAYLOAD_SCHEMA
)
# restore
await tracker.async_setup_tracked_device()
return True
|
Cinntax/home-assistant
|
homeassistant/components/device_tracker/__init__.py
|
Python
|
apache-2.0
| 5,372 | 0.000931 |
import logging
import os
from tempfile import mkdtemp
from .repositories import Repository, AuthenticatedRepository
log = logging.getLogger(__name__)
class RepoManager(object):
"""
Manages creation and deletion of `Repository` objects.
"""
to_cleanup = {}
def __init__(self, authenticated=False, cache_directory=None,
tools=None, executor=None, shallow_clone=False):
self.should_cleanup = cache_directory is None
self.authenticated = authenticated
self.cache_directory = cache_directory
self.tools = tools or []
self.executor = executor
        self.shallow_clone = shallow_clone  # name matches the attribute read in set_up_clone()
def get_repo_class(self):
if self.authenticated:
return AuthenticatedRepository
return Repository
def clone_dir(self, repo_name):
dired_repo_name = repo_name.replace('/', '__')
if not self.cache_directory:
dirname = mkdtemp(suffix=dired_repo_name)
else:
dirname = os.path.abspath("%s/%s" % (
self.cache_directory, dired_repo_name))
return dirname
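    # For illustration (repo name hypothetical): clone_dir("acme/widgets")
    # returns "<cache_directory>/acme__widgets", or a fresh temp dir suffixed
    # with "acme__widgets" when no cache directory was configured.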
def fetch(self, dirname, remote_name, ref):
log.debug("Fetching %s %s", remote_name, ref)
self.executor("cd %s && git fetch --depth=1 %s %s" % (dirname,
remote_name,
ref))
def pull(self, dirname):
log.debug("Pulling all %s", dirname)
self.executor("cd %s && git pull --all" % dirname)
def add_remote(self, dirname, name, url):
log.debug("Adding remote %s url: %s", name, url)
self.executor("cd %s && git remote add %s %s" % (dirname,
name,
url))
def set_up_clone(self, repo_name, remote_repo):
"""Sets up the working directory and returns a tuple of
        (dirname, repo)"""
dirname = self.clone_dir(repo_name)
self.to_cleanup[repo_name] = dirname
klass = self.get_repo_class()
repo = klass(repo_name,
dirname,
self.tools,
self.executor,
shallow=self.shallow_clone)
return (dirname, repo)
def clone_repo(self, repo_name, remote_repo, ref):
"""Clones the given repo and returns the Repository object."""
self.shallow_clone = False
dirname, repo = self.set_up_clone(repo_name, remote_repo)
if os.path.isdir("%s/.git" % dirname):
log.debug("Updating %s to %s", repo.download_location, dirname)
self.executor(
"cd %s && git checkout master" % dirname)
self.pull(dirname)
else:
log.debug("Cloning %s to %s", repo.download_location, dirname)
self.executor(
"git clone %s %s" % (repo.download_location, dirname))
if remote_repo is not None:
log.debug("Pulling remote branch from %s", remote_repo.url)
self.add_remote(dirname,
remote_repo.name,
remote_repo.url)
self.pull(dirname)
return repo
def cleanup(self):
if self.should_cleanup:
for repo_dir in self.to_cleanup.values():
log.debug("Cleaning up %s", repo_dir)
self.executor('rm -rf %s' % repo_dir)
class ShallowRepoManager(RepoManager):
def __init__(self, *args, **kwargs):
super(ShallowRepoManager, self).__init__(*args, **kwargs)
def clone_repo(self, repo_name, remote_repo, ref):
self.shallow_clone = True
dirname, repo = self.set_up_clone(repo_name, remote_repo)
remote_name = 'origin'
log.debug("Shallow cloning.")
download_location = repo.download_location
log.debug("Creating stub git repo at %s" % (dirname))
self.executor("mkdir -p %s" % (dirname, ))
self.executor("cd %s && git init" % (dirname, ))
log.debug("Adding origin repo %s " % (download_location))
self.add_remote(dirname, 'origin', download_location)
if remote_repo:
self.add_remote(dirname, remote_repo.name, remote_repo.url)
remote_name = remote_repo.name
self.fetch(dirname, 'origin', 'HEAD')
self.fetch(dirname, remote_name, ref)
return repo
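# A rough usage sketch (names are illustrative; `run` stands in for whatever
# shell-executing callable the caller supplies as `executor`):
#
#   manager = ShallowRepoManager(executor=run, cache_directory="/tmp/repos")
#   repo = manager.clone_repo("acme/widgets", remote_repo=None, ref="abc123")
#   manager.cleanup()  # no-op here, since a cache_directory was given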
|
richtier/imhotep
|
imhotep/repomanagers.py
|
Python
|
mit
| 4,477 | 0 |
"""Default website configurations, used only for testing.
"""
from donut import environment
# Public Test Database
TEST = environment.Environment(
db_hostname="localhost",
db_name="donut_test",
db_user="donut_test",
db_password="public",
debug=True,
testing=True,
secret_key="1234567890",
imgur_api={
"id": "b579f690cacf867",
"secret": "****************************************"
},
restricted_ips=r"127\.0\.0\.1")
|
ASCIT/donut-python
|
donut/default_config.py
|
Python
|
mit
| 472 | 0 |
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2020 Richard Hull and contributors
# See LICENSE.rst for details.
"""
Simplified sprite animation framework.
.. note:: This module is an evolving "work-in-progress" and should be treated
as such until such time as this notice disappears.
"""
from time import sleep, perf_counter
from PIL import Image
class dict_wrapper(object):
"""
Helper class to turn dictionaries into objects.
"""
def __init__(self, d):
for a, b in d.items():
if isinstance(b, (list, tuple)):
setattr(self, a, [dict_wrapper(x) if isinstance(x, dict) else x for x in b])
else:
setattr(self, a, dict_wrapper(b) if isinstance(b, dict) else b)
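# For illustration (values hypothetical):
#   dict_wrapper({"width": 64, "origin": {"x": 2}}).origin.x == 2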
class spritesheet(object):
"""
A sprite sheet is a series of images (usually animation frames) combined
into a larger image. A dictionary is usually spread into the object
constructor parameters with the following top-level attributes:
:param image: A path to a sprite map image.
:type image: str
:param frames: A dictionary of settings that defines how to extract
individual frames from the supplied image, as follows
- ``width`` & ``height`` are required and specify the dimensions of
the frames
- ``regX`` & ``regY`` indicate the registration point or "origin" of
the frames
- ``count`` allows you to specify the total number of frames in the
spritesheet; if omitted, this will be calculated based on the
dimensions of the source images and the frames. Frames will be
assigned indexes based on their position in the source images
(left to right, top to bottom).
:type frames: dict
:param animations: A dictionary of key/value pairs where the key is the
        name of the animation sequence, and the value is the group of settings
        that defines the animation sequence, as follows:
        - ``frames`` is a list of frames to show in sequence. Usually this
          consists of frame numbers, but it can refer to other animation
sequences (which are handled much like a subroutine call).
- ``speed`` determines how quickly the animation frames are cycled
through compared to the how often the animation sequence yields.
- ``next`` is optional, but if supplied, determines what happens when
the animation sequence is exhausted. Typically this can be used to
self-reference, so that it forms an infinite loop, but can hand off
to any other animation sequence.
:type animations: dict
Loosely based on https://www.createjs.com/docs/easeljs/classes/SpriteSheet.html
"""
def __init__(self, image, frames, animations):
with open(image, 'rb') as fp:
self.image = Image.open(fp)
self.image.load()
self.frames = dict_wrapper(frames)
self.animations = dict_wrapper(animations)
# Reframe the sprite map in terms of the registration point (if set)
regX = self.frames.regX if hasattr(self.frames, "regX") else 0
regY = self.frames.regY if hasattr(self.frames, "regY") else 0
self.image = self.image.crop((regX, regY, self.image.width - regX, self.image.height - regY))
self.width, self.height = self.image.size
assert(self.width % self.frames.width == 0)
assert(self.height % self.frames.height == 0)
self.frames.size = (self.frames.width, self.frames.height)
if not hasattr(self.frames, 'count'):
self.frames.count = (self.width * self.height) // (self.frames.width * self.frames.height)
self.cache = {}
def __getitem__(self, frame_index):
"""
Returns (and caches) the frame for the given index.
:param frame_index: The index of the frame.
:type frame_index: int
:returns: A Pillow image cropped from the main image corresponding to
the given frame index.
:raises TypeError: if the ``frame_index`` is not numeric
:raises IndexError: if the ``frame_index`` is less than zero or more
than the largest frame.
"""
if not isinstance(frame_index, int):
raise TypeError("frame index must be numeric")
        if frame_index < 0 or frame_index >= self.frames.count:
raise IndexError("frame index out of range")
cached_frame = self.cache.get(frame_index)
if cached_frame is None:
offset = frame_index * self.frames.width
left = offset % self.width
top = (offset // self.width) * self.frames.height
right = left + self.frames.width
bottom = top + self.frames.height
bounds = [left, top, right, bottom]
cached_frame = self.image.crop(bounds)
self.cache[frame_index] = cached_frame
return cached_frame
def __len__(self):
"""
The number of frames in the sprite sheet
"""
return self.frames.count
def animate(self, seq_name):
"""
Returns a generator which "executes" an animation sequence for the given
``seq_name``, inasmuch as the next frame for the given animation is
yielded when requested.
:param seq_name: The name of a previously defined animation sequence.
:type seq_name: str
:returns: A generator that yields all frames from the animation
sequence.
:raises AttributeError: If the ``seq_name`` is unknown.
"""
while True:
index = 0
anim = getattr(self.animations, seq_name)
speed = anim.speed if hasattr(anim, "speed") else 1
num_frames = len(anim.frames)
while index < num_frames:
frame = anim.frames[int(index)]
index += speed
if isinstance(frame, int):
yield self[frame]
else:
for subseq_frame in self.animate(frame):
yield subseq_frame
if not hasattr(anim, "next"):
break
seq_name = anim.next
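# A construction sketch matching the class docstring above (the image path,
# frame geometry and animation table are all hypothetical):
#
#   sheet = spritesheet(
#       image="runner.png",
#       frames={"width": 64, "height": 64, "regX": 0, "regY": 0, "count": 8},
#       animations={"run": {"frames": [0, 1, 2, 3, 4, 5, 6, 7],
#                           "speed": 0.5, "next": "run"}})
#   frame0 = sheet[0]              # Pillow image cropped for frame 0
#   frames = sheet.animate("run")  # infinite generator ("next" loops back)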
class framerate_regulator(object):
"""
Implements a variable sleep mechanism to give the appearance of a consistent
frame rate. Using a fixed-time sleep will cause animations to be jittery
(looking like they are speeding up or slowing down, depending on what other
work is occurring), whereas this class keeps track of when the last time the
``sleep()`` method was called, and calculates a sleep period to smooth out
the jitter.
:param fps: The desired frame rate, expressed numerically in
frames-per-second. By default, this is set at 16.67, to give a frame
render time of approximately 60ms. This can be overridden as necessary,
and if no FPS limiting is required, the ``fps`` can be set to zero.
:type fps: float
"""
def __init__(self, fps=16.67):
if fps == 0:
fps = -1
self.max_sleep_time = 1.0 / fps
self.total_transit_time = 0
self.called = 0
self.start_time = None
self.last_time = None
def __enter__(self):
self.enter_time = perf_counter()
if not self.start_time:
self.start_time = self.enter_time
self.last_time = self.enter_time
return self
def __exit__(self, *args):
"""
Sleeps for a variable amount of time (dependent on when it was last
called), to give a consistent frame rate. If it cannot meet the desired
frame rate (i.e. too much time has occurred since the last call), then
it simply exits without blocking.
"""
self.called += 1
self.total_transit_time += perf_counter() - self.enter_time
if self.max_sleep_time >= 0:
elapsed = perf_counter() - self.last_time
sleep_for = self.max_sleep_time - elapsed
if sleep_for > 0:
sleep(sleep_for)
self.last_time = perf_counter()
def effective_FPS(self):
"""
Calculates the effective frames-per-second - this should largely
correlate to the desired FPS supplied in the constructor, but no
guarantees are given.
:returns: The effective frame rate.
:rtype: float
"""
if self.start_time is None:
self.start_time = 0
elapsed = perf_counter() - self.start_time
return self.called / elapsed
def average_transit_time(self):
"""
Calculates the average transit time between the enter and exit methods,
and return the time in milliseconds.
:returns: The average transit in milliseconds.
:rtype: float
"""
return self.total_transit_time * 1000.0 / self.called
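# A minimal pacing sketch (assuming a 30 FPS target; draw() is a hypothetical
# stand-in for the caller's per-frame work):
#
#   regulator = framerate_regulator(fps=30)
#   for frame in sheet.animate("run"):   # e.g. the spritesheet sketched above
#       with regulator:
#           draw(frame)                  # sleeps as needed to hold ~30 FPS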
|
rm-hull/luma.core
|
luma/core/sprite_system.py
|
Python
|
mit
| 8,896 | 0.000787 |
#------------------------------------------------------------------------------
# pycparser: c_parser.py
#
# CParser class: Parser and AST builder for the C language
#
# Copyright (C) 2008-2013, Eli Bendersky
# License: BSD
#------------------------------------------------------------------------------
import re
from .ply import yacc
from . import c_ast
from .c_lexer import CLexer
from .plyparser import PLYParser, Coord, ParseError
from .ast_transforms import fix_switch_cases
class CParser(PLYParser):
def __init__(
self,
lex_optimize=True,
lextab='cffi._pycparser.lextab',
yacc_optimize=True,
yacctab='cffi._pycparser.yacctab',
yacc_debug=False):
""" Create a new CParser.
Some arguments for controlling the debug/optimization
level of the parser are provided. The defaults are
tuned for release/performance mode.
The simple rules for using them are:
*) When tweaking CParser/CLexer, set these to False
*) When releasing a stable parser, set to True
lex_optimize:
Set to False when you're modifying the lexer.
Otherwise, changes in the lexer won't be used, if
some lextab.py file exists.
When releasing with a stable lexer, set to True
to save the re-generation of the lexer table on
each run.
lextab:
Points to the lex table that's used for optimized
mode. Only if you're modifying the lexer and want
some tests to avoid re-generating the table, make
this point to a local lex table file (that's been
earlier generated with lex_optimize=True)
yacc_optimize:
Set to False when you're modifying the parser.
Otherwise, changes in the parser won't be used, if
some parsetab.py file exists.
When releasing with a stable parser, set to True
to save the re-generation of the parser table on
each run.
yacctab:
Points to the yacc table that's used for optimized
mode. Only if you're modifying the parser, make
this point to a local yacc table file
yacc_debug:
Generate a parser.out file that explains how yacc
built the parsing table from the grammar.
"""
self.clex = CLexer(
error_func=self._lex_error_func,
on_lbrace_func=self._lex_on_lbrace_func,
on_rbrace_func=self._lex_on_rbrace_func,
type_lookup_func=self._lex_type_lookup_func)
self.clex.build(
optimize=lex_optimize,
lextab=lextab)
self.tokens = self.clex.tokens
rules_with_opt = [
'abstract_declarator',
'assignment_expression',
'declaration_list',
'declaration_specifiers',
'designation',
'expression',
'identifier_list',
'init_declarator_list',
'parameter_type_list',
'specifier_qualifier_list',
'block_item_list',
'type_qualifier_list',
'struct_declarator_list'
]
for rule in rules_with_opt:
self._create_opt_rule(rule)
self.cparser = yacc.yacc(
module=self,
start='translation_unit_or_empty',
debug=yacc_debug,
optimize=yacc_optimize,
tabmodule=yacctab)
# Stack of scopes for keeping track of symbols. _scope_stack[-1] is
# the current (topmost) scope. Each scope is a dictionary that
# specifies whether a name is a type. If _scope_stack[n][name] is
# True, 'name' is currently a type in the scope. If it's False,
# 'name' is used in the scope but not as a type (for instance, if we
# saw: int name;
# If 'name' is not a key in _scope_stack[n] then 'name' was not defined
# in this scope at all.
self._scope_stack = [dict()]
# Keeps track of the last token given to yacc (the lookahead token)
self._last_yielded_token = None
def parse(self, text, filename='', debuglevel=0):
""" Parses C code and returns an AST.
text:
A string containing the C source code
filename:
Name of the file being parsed (for meaningful
error messages)
debuglevel:
Debug level to yacc
"""
self.clex.filename = filename
self.clex.reset_lineno()
self._scope_stack = [dict()]
self._last_yielded_token = None
return self.cparser.parse(
input=text,
lexer=self.clex,
debug=debuglevel)
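    # A minimal usage sketch (the C snippet is illustrative):
    #   parser = CParser()
    #   ast = parser.parse("int x = 3;", filename="<stdin>")
    #   ast.show()  # a c_ast.FileAST node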
######################-- PRIVATE --######################
def _push_scope(self):
self._scope_stack.append(dict())
def _pop_scope(self):
assert len(self._scope_stack) > 1
self._scope_stack.pop()
def _add_typedef_name(self, name, coord):
""" Add a new typedef name (ie a TYPEID) to the current scope
"""
if not self._scope_stack[-1].get(name, True):
self._parse_error(
"Typedef %r previously declared as non-typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = True
def _add_identifier(self, name, coord):
""" Add a new object, function, or enum member name (ie an ID) to the
current scope
"""
if self._scope_stack[-1].get(name, False):
self._parse_error(
"Non-typedef %r previously declared as typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = False
def _is_type_in_scope(self, name):
""" Is *name* a typedef-name in the current scope?
"""
for scope in reversed(self._scope_stack):
# If name is an identifier in this scope it shadows typedefs in
# higher scopes.
in_scope = scope.get(name)
if in_scope is not None: return in_scope
return False
def _lex_error_func(self, msg, line, column):
self._parse_error(msg, self._coord(line, column))
def _lex_on_lbrace_func(self):
self._push_scope()
def _lex_on_rbrace_func(self):
self._pop_scope()
def _lex_type_lookup_func(self, name):
""" Looks up types that were previously defined with
typedef.
Passed to the lexer for recognizing identifiers that
are types.
"""
is_type = self._is_type_in_scope(name)
return is_type
def _get_yacc_lookahead_token(self):
""" We need access to yacc's lookahead token in certain cases.
This is the last token yacc requested from the lexer, so we
ask the lexer.
"""
return self.clex.last_token
# To understand what's going on here, read sections A.8.5 and
# A.8.6 of K&R2 very carefully.
#
# A C type consists of a basic type declaration, with a list
# of modifiers. For example:
#
# int *c[5];
#
# The basic declaration here is 'int c', and the pointer and
# the array are the modifiers.
#
# Basic declarations are represented by TypeDecl (from module
# c_ast) and the modifiers are FuncDecl, PtrDecl and
# ArrayDecl.
#
# The standard states that whenever a new modifier is parsed,
# it should be added to the end of the list of modifiers. For
# example:
#
# K&R2 A.8.6.2: Array Declarators
#
# In a declaration T D where D has the form
# D1 [constant-expression-opt]
# and the type of the identifier in the declaration T D1 is
# "type-modifier T", the type of the
# identifier of D is "type-modifier array of T"
#
# This is what this method does. The declarator it receives
# can be a list of declarators ending with TypeDecl. It
# tacks the modifier to the end of this list, just before
# the TypeDecl.
#
# Additionally, the modifier may be a list itself. This is
# useful for pointers, that can come as a chain from the rule
# p_pointer. In this case, the whole modifier list is spliced
# into the new location.
#
def _type_modify_decl(self, decl, modifier):
""" Tacks a type modifier on a declarator, and returns
the modified declarator.
Note: the declarator and modifier may be modified
"""
#~ print '****'
#~ decl.show(offset=3)
#~ modifier.show(offset=3)
#~ print '****'
modifier_head = modifier
modifier_tail = modifier
# The modifier may be a nested list. Reach its tail.
#
while modifier_tail.type:
modifier_tail = modifier_tail.type
# If the decl is a basic type, just tack the modifier onto
# it
#
if isinstance(decl, c_ast.TypeDecl):
modifier_tail.type = decl
return modifier
else:
# Otherwise, the decl is a list of modifiers. Reach
# its tail and splice the modifier onto the tail,
# pointing to the underlying basic type.
#
decl_tail = decl
while not isinstance(decl_tail.type, c_ast.TypeDecl):
decl_tail = decl_tail.type
modifier_tail.type = decl_tail.type
decl_tail.type = modifier_head
return decl
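    # Roughly, for the "int *c[5];" example above: the basic declaration
    # TypeDecl('c') first gains the ArrayDecl modifier, and then the PtrDecl
    # chain is spliced in just before the TypeDecl, yielding
    # ArrayDecl(PtrDecl(TypeDecl('c'))) -- i.e. "array of pointer to int"
    # once the basic type is attached.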
# Due to the order in which declarators are constructed,
# they have to be fixed in order to look like a normal AST.
#
# When a declaration arrives from syntax construction, it has
# these problems:
# * The innermost TypeDecl has no type (because the basic
# type is only known at the uppermost declaration level)
# * The declaration has no variable name, since that is saved
# in the innermost TypeDecl
# * The typename of the declaration is a list of type
# specifiers, and not a node. Here, basic identifier types
# should be separated from more complex types like enums
# and structs.
#
# This method fixes these problems.
#
def _fix_decl_name_type(self, decl, typename):
""" Fixes a declaration. Modifies decl.
"""
# Reach the underlying basic type
#
type = decl
while not isinstance(type, c_ast.TypeDecl):
type = type.type
decl.name = type.declname
type.quals = decl.quals
# The typename is a list of types. If any type in this
# list isn't an IdentifierType, it must be the only
# type in the list (it's illegal to declare "int enum ..")
# If all the types are basic, they're collected in the
# IdentifierType holder.
#
for tn in typename:
if not isinstance(tn, c_ast.IdentifierType):
if len(typename) > 1:
self._parse_error(
"Invalid multiple types specified", tn.coord)
else:
type.type = tn
return decl
if not typename:
# Functions default to returning int
#
if not isinstance(decl.type, c_ast.FuncDecl):
self._parse_error(
"Missing type in declaration", decl.coord)
type.type = c_ast.IdentifierType(
['int'],
coord=decl.coord)
else:
# At this point, we know that typename is a list of IdentifierType
# nodes. Concatenate all the names into a single list.
#
type.type = c_ast.IdentifierType(
[name for id in typename for name in id.names],
coord=typename[0].coord)
return decl
def _add_declaration_specifier(self, declspec, newspec, kind):
""" Declaration specifiers are represented by a dictionary
with the entries:
* qual: a list of type qualifiers
* storage: a list of storage type qualifiers
* type: a list of type specifiers
* function: a list of function specifiers
This method is given a declaration specifier, and a
new specifier of a given kind.
Returns the declaration specifier, with the new
specifier incorporated.
"""
spec = declspec or dict(qual=[], storage=[], type=[], function=[])
spec[kind].insert(0, newspec)
return spec
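    # For illustration: starting from declspec=None, adding 'const' as 'qual'
    # and then an IdentifierType(['int']) as 'type' gives
    #   {'qual': ['const'], 'storage': [], 'type': [<int>], 'function': []}
    # with each new specifier inserted at the front of its list.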
def _build_declarations(self, spec, decls, typedef_namespace=False):
""" Builds a list of declarations all sharing the given specifiers.
If typedef_namespace is true, each declared name is added
to the "typedef namespace", which also includes objects,
functions, and enum constants.
"""
is_typedef = 'typedef' in spec['storage']
declarations = []
# Bit-fields are allowed to be unnamed.
#
if decls[0].get('bitsize') is not None:
pass
# When redeclaring typedef names as identifiers in inner scopes, a
# problem can occur where the identifier gets grouped into
# spec['type'], leaving decl as None. This can only occur for the
# first declarator.
#
elif decls[0]['decl'] is None:
if len(spec['type']) < 2 or len(spec['type'][-1].names) != 1 or \
not self._is_type_in_scope(spec['type'][-1].names[0]):
coord = '?'
for t in spec['type']:
if hasattr(t, 'coord'):
coord = t.coord
break
self._parse_error('Invalid declaration', coord)
# Make this look as if it came from "direct_declarator:ID"
decls[0]['decl'] = c_ast.TypeDecl(
declname=spec['type'][-1].names[0],
type=None,
quals=None,
coord=spec['type'][-1].coord)
# Remove the "new" type's name from the end of spec['type']
del spec['type'][-1]
# A similar problem can occur where the declaration ends up looking
# like an abstract declarator. Give it a name if this is the case.
#
elif not isinstance(decls[0]['decl'],
(c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
decls_0_tail = decls[0]['decl']
while not isinstance(decls_0_tail, c_ast.TypeDecl):
decls_0_tail = decls_0_tail.type
if decls_0_tail.declname is None:
decls_0_tail.declname = spec['type'][-1].names[0]
del spec['type'][-1]
for decl in decls:
assert decl['decl'] is not None
if is_typedef:
declaration = c_ast.Typedef(
name=None,
quals=spec['qual'],
storage=spec['storage'],
type=decl['decl'],
coord=decl['decl'].coord)
else:
declaration = c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=decl['decl'],
init=decl.get('init'),
bitsize=decl.get('bitsize'),
coord=decl['decl'].coord)
if isinstance(declaration.type,
(c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
fixed_decl = declaration
else:
fixed_decl = self._fix_decl_name_type(declaration, spec['type'])
# Add the type name defined by typedef to a
# symbol table (for usage in the lexer)
#
if typedef_namespace:
if is_typedef:
self._add_typedef_name(fixed_decl.name, fixed_decl.coord)
else:
self._add_identifier(fixed_decl.name, fixed_decl.coord)
declarations.append(fixed_decl)
return declarations
def _build_function_definition(self, spec, decl, param_decls, body):
""" Builds a function definition.
"""
assert 'typedef' not in spec['storage']
declaration = self._build_declarations(
spec=spec,
decls=[dict(decl=decl, init=None)],
typedef_namespace=True)[0]
return c_ast.FuncDef(
decl=declaration,
param_decls=param_decls,
body=body,
coord=decl.coord)
def _select_struct_union_class(self, token):
""" Given a token (either STRUCT or UNION), selects the
appropriate AST class.
"""
if token == 'struct':
return c_ast.Struct
else:
return c_ast.Union
##
## Precedence and associativity of operators
##
precedence = (
('left', 'LOR'),
('left', 'LAND'),
('left', 'OR'),
('left', 'XOR'),
('left', 'AND'),
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'RSHIFT', 'LSHIFT'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE', 'MOD')
)
##
## Grammar productions
## Implementation of the BNF defined in K&R2 A.13
##
# Wrapper around a translation unit, to allow for empty input.
# Not strictly part of the C99 Grammar, but useful in practice.
#
def p_translation_unit_or_empty(self, p):
""" translation_unit_or_empty : translation_unit
| empty
"""
if p[1] is None:
p[0] = c_ast.FileAST([])
else:
p[0] = c_ast.FileAST(p[1])
def p_translation_unit_1(self, p):
""" translation_unit : external_declaration
"""
# Note: external_declaration is already a list
#
p[0] = p[1]
def p_translation_unit_2(self, p):
""" translation_unit : translation_unit external_declaration
"""
if p[2] is not None:
p[1].extend(p[2])
p[0] = p[1]
# Declarations always come as lists (because they can be
# several in one line), so we wrap the function definition
# into a list as well, to make the return value of
# external_declaration homogenous.
#
def p_external_declaration_1(self, p):
""" external_declaration : function_definition
"""
p[0] = [p[1]]
def p_external_declaration_2(self, p):
""" external_declaration : declaration
"""
p[0] = p[1]
def p_external_declaration_3(self, p):
""" external_declaration : pp_directive
"""
p[0] = p[1]
def p_external_declaration_4(self, p):
""" external_declaration : SEMI
"""
p[0] = None
def p_pp_directive(self, p):
""" pp_directive : PPHASH
"""
self._parse_error('Directives not supported yet',
self._coord(p.lineno(1)))
# In function definitions, the declarator can be followed by
    # a declaration list, for old "K&R style" function definitions.
#
def p_function_definition_1(self, p):
""" function_definition : declarator declaration_list_opt compound_statement
"""
# no declaration specifiers - 'int' becomes the default type
spec = dict(
qual=[],
storage=[],
type=[c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))],
function=[])
p[0] = self._build_function_definition(
spec=spec,
decl=p[1],
param_decls=p[2],
body=p[3])
def p_function_definition_2(self, p):
""" function_definition : declaration_specifiers declarator declaration_list_opt compound_statement
"""
spec = p[1]
p[0] = self._build_function_definition(
spec=spec,
decl=p[2],
param_decls=p[3],
body=p[4])
def p_statement(self, p):
""" statement : labeled_statement
| expression_statement
| compound_statement
| selection_statement
| iteration_statement
| jump_statement
"""
p[0] = p[1]
# In C, declarations can come several in a line:
# int x, *px, romulo = 5;
#
# However, for the AST, we will split them to separate Decl
# nodes.
#
# This rule splits its declarations and always returns a list
# of Decl nodes, even if it's one element long.
#
def p_decl_body(self, p):
""" decl_body : declaration_specifiers init_declarator_list_opt
"""
spec = p[1]
# p[2] (init_declarator_list_opt) is either a list or None
#
if p[2] is None:
# By the standard, you must have at least one declarator unless
# declaring a structure tag, a union tag, or the members of an
# enumeration.
#
ty = spec['type']
s_u_or_e = (c_ast.Struct, c_ast.Union, c_ast.Enum)
if len(ty) == 1 and isinstance(ty[0], s_u_or_e):
decls = [c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=ty[0],
init=None,
bitsize=None,
coord=ty[0].coord)]
# However, this case can also occur on redeclared identifiers in
# an inner scope. The trouble is that the redeclared type's name
# gets grouped into declaration_specifiers; _build_declarations
# compensates for this.
#
else:
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=None, init=None)],
typedef_namespace=True)
else:
decls = self._build_declarations(
spec=spec,
decls=p[2],
typedef_namespace=True)
p[0] = decls
# The declaration has been split to a decl_body sub-rule and
# SEMI, because having them in a single rule created a problem
# for defining typedefs.
#
# If a typedef line was directly followed by a line using the
# type defined with the typedef, the type would not be
# recognized. This is because to reduce the declaration rule,
# the parser's lookahead asked for the token after SEMI, which
# was the type from the next line, and the lexer had no chance
# to see the updated type symbol table.
#
# Splitting solves this problem, because after seeing SEMI,
# the parser reduces decl_body, which actually adds the new
# type into the table to be seen by the lexer before the next
# line is reached.
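    #
    # For example (illustrative input exercising this):
    #   typedef int T;
    #   T x;
    # Shifting the first SEMI reduces decl_body, which registers T as a
    # type name, so the lexer can emit the second T as TYPEID rather
    # than ID.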
def p_declaration(self, p):
""" declaration : decl_body SEMI
"""
p[0] = p[1]
# Since each declaration is a list of declarations, this
# rule will combine all the declarations and return a single
# list
#
def p_declaration_list(self, p):
""" declaration_list : declaration
| declaration_list declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
def p_declaration_specifiers_1(self, p):
""" declaration_specifiers : type_qualifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_declaration_specifiers_2(self, p):
""" declaration_specifiers : type_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
def p_declaration_specifiers_3(self, p):
""" declaration_specifiers : storage_class_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'storage')
def p_declaration_specifiers_4(self, p):
""" declaration_specifiers : function_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'function')
def p_storage_class_specifier(self, p):
""" storage_class_specifier : AUTO
| REGISTER
| STATIC
| EXTERN
| TYPEDEF
"""
p[0] = p[1]
def p_function_specifier(self, p):
""" function_specifier : INLINE
"""
p[0] = p[1]
def p_type_specifier_1(self, p):
""" type_specifier : VOID
| _BOOL
| CHAR
| SHORT
| INT
| LONG
| FLOAT
| DOUBLE
| _COMPLEX
| SIGNED
| UNSIGNED
"""
p[0] = c_ast.IdentifierType([p[1]], coord=self._coord(p.lineno(1)))
def p_type_specifier_2(self, p):
""" type_specifier : typedef_name
| enum_specifier
| struct_or_union_specifier
"""
p[0] = p[1]
def p_type_qualifier(self, p):
""" type_qualifier : CONST
| RESTRICT
| VOLATILE
"""
p[0] = p[1]
def p_init_declarator_list_1(self, p):
""" init_declarator_list : init_declarator
| init_declarator_list COMMA init_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# If the code is declaring a variable that was declared a typedef in an
# outer scope, yacc will think the name is part of declaration_specifiers,
# not init_declarator, and will then get confused by EQUALS. Pass None
# up in place of declarator, and handle this at a higher level.
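    #
    # For example (illustrative input for this case):
    #   typedef char T;
    #   void f(void) { int T = 5; }
    # Inside f, "T" lexes as TYPEID and is grouped into the
    # declaration_specifiers of "int T", so this rule sees only "= 5".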
#
def p_init_declarator_list_2(self, p):
""" init_declarator_list : EQUALS initializer
"""
p[0] = [dict(decl=None, init=p[2])]
# Similarly, if the code contains duplicate typedefs of, for example,
# array types, the array portion will appear as an abstract declarator.
#
def p_init_declarator_list_3(self, p):
""" init_declarator_list : abstract_declarator
"""
p[0] = [dict(decl=p[1], init=None)]
# Returns a {decl=<declarator> : init=<initializer>} dictionary
# If there's no initializer, uses None
#
def p_init_declarator(self, p):
""" init_declarator : declarator
| declarator EQUALS initializer
"""
p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
def p_specifier_qualifier_list_1(self, p):
""" specifier_qualifier_list : type_qualifier specifier_qualifier_list_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_specifier_qualifier_list_2(self, p):
""" specifier_qualifier_list : type_specifier specifier_qualifier_list_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
# TYPEID is allowed here (and in other struct/enum related tag names), because
# struct/enum tags reside in their own namespace and can be named the same as types
#
def p_struct_or_union_specifier_1(self, p):
""" struct_or_union_specifier : struct_or_union ID
| struct_or_union TYPEID
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=p[2],
decls=None,
coord=self._coord(p.lineno(2)))
def p_struct_or_union_specifier_2(self, p):
""" struct_or_union_specifier : struct_or_union brace_open struct_declaration_list brace_close
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=None,
decls=p[3],
coord=self._coord(p.lineno(2)))
def p_struct_or_union_specifier_3(self, p):
""" struct_or_union_specifier : struct_or_union ID brace_open struct_declaration_list brace_close
| struct_or_union TYPEID brace_open struct_declaration_list brace_close
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=p[2],
decls=p[4],
coord=self._coord(p.lineno(2)))
def p_struct_or_union(self, p):
""" struct_or_union : STRUCT
| UNION
"""
p[0] = p[1]
# Combine all declarations into a single list
#
def p_struct_declaration_list(self, p):
""" struct_declaration_list : struct_declaration
| struct_declaration_list struct_declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
def p_struct_declaration_1(self, p):
""" struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI
"""
spec = p[1]
assert 'typedef' not in spec['storage']
if p[2] is not None:
decls = self._build_declarations(
spec=spec,
decls=p[2])
elif len(spec['type']) == 1:
# Anonymous struct/union, gcc extension, C1x feature.
# Although the standard only allows structs/unions here, I see no
# reason to disallow other types since some compilers have typedefs
# here, and pycparser isn't about rejecting all invalid code.
#
node = spec['type'][0]
if isinstance(node, c_ast.Node):
decl_type = node
else:
decl_type = c_ast.IdentifierType(node)
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=decl_type)])
else:
# Structure/union members can have the same names as typedefs.
# The trouble is that the member's name gets grouped into
# specifier_qualifier_list; _build_declarations compensates.
#
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=None, init=None)])
p[0] = decls
def p_struct_declaration_2(self, p):
""" struct_declaration : specifier_qualifier_list abstract_declarator SEMI
"""
# "Abstract declarator?!", you ask? Structure members can have the
# same names as typedefs. The trouble is that the member's name gets
# grouped into specifier_qualifier_list, leaving any remainder to
# appear as an abstract declarator, as in:
# typedef int Foo;
# struct { Foo Foo[3]; };
#
p[0] = self._build_declarations(
spec=p[1],
decls=[dict(decl=p[2], init=None)])
def p_struct_declarator_list(self, p):
""" struct_declarator_list : struct_declarator
| struct_declarator_list COMMA struct_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# struct_declarator passes up a dict with the keys: decl (for
# the underlying declarator) and bitsize (for the bitsize)
#
def p_struct_declarator_1(self, p):
""" struct_declarator : declarator
"""
p[0] = {'decl': p[1], 'bitsize': None}
def p_struct_declarator_2(self, p):
""" struct_declarator : declarator COLON constant_expression
| COLON constant_expression
"""
if len(p) > 3:
p[0] = {'decl': p[1], 'bitsize': p[3]}
else:
p[0] = {'decl': c_ast.TypeDecl(None, None, None), 'bitsize': p[2]}
def p_enum_specifier_1(self, p):
""" enum_specifier : ENUM ID
| ENUM TYPEID
"""
p[0] = c_ast.Enum(p[2], None, self._coord(p.lineno(1)))
def p_enum_specifier_2(self, p):
""" enum_specifier : ENUM brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(None, p[3], self._coord(p.lineno(1)))
def p_enum_specifier_3(self, p):
""" enum_specifier : ENUM ID brace_open enumerator_list brace_close
| ENUM TYPEID brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(p[2], p[4], self._coord(p.lineno(1)))
def p_enumerator_list(self, p):
""" enumerator_list : enumerator
| enumerator_list COMMA
| enumerator_list COMMA enumerator
"""
if len(p) == 2:
p[0] = c_ast.EnumeratorList([p[1]], p[1].coord)
elif len(p) == 3:
p[0] = p[1]
else:
p[1].enumerators.append(p[3])
p[0] = p[1]
def p_enumerator(self, p):
""" enumerator : ID
| ID EQUALS constant_expression
"""
if len(p) == 2:
enumerator = c_ast.Enumerator(
p[1], None,
self._coord(p.lineno(1)))
else:
enumerator = c_ast.Enumerator(
p[1], p[3],
self._coord(p.lineno(1)))
self._add_identifier(enumerator.name, enumerator.coord)
p[0] = enumerator
def p_declarator_1(self, p):
""" declarator : direct_declarator
"""
p[0] = p[1]
def p_declarator_2(self, p):
""" declarator : pointer direct_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
# Since it's impossible for a type to be specified after a pointer, assume
# it's intended to be the name for this declaration. _add_identifier will
# raise an error if this TYPEID can't be redeclared.
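    #
    # For example (illustrative):
    #   typedef char T;
    #   void f(void) { int *T; }  /* T reused as a pointer variable name */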
#
def p_declarator_3(self, p):
""" declarator : pointer TYPEID
"""
decl = c_ast.TypeDecl(
declname=p[2],
type=None,
quals=None,
coord=self._coord(p.lineno(2)))
p[0] = self._type_modify_decl(decl, p[1])
def p_direct_declarator_1(self, p):
""" direct_declarator : ID
"""
p[0] = c_ast.TypeDecl(
declname=p[1],
type=None,
quals=None,
coord=self._coord(p.lineno(1)))
def p_direct_declarator_2(self, p):
""" direct_declarator : LPAREN declarator RPAREN
"""
p[0] = p[2]
def p_direct_declarator_3(self, p):
""" direct_declarator : direct_declarator LBRACKET assignment_expression_opt RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=p[3],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
# Special for VLAs
#
def p_direct_declarator_4(self, p):
""" direct_declarator : direct_declarator LBRACKET TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_declarator_5(self, p):
""" direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN
| direct_declarator LPAREN identifier_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
# To see why _get_yacc_lookahead_token is needed, consider:
# typedef char TT;
# void foo(int TT) { TT = 10; }
# Outside the function, TT is a typedef, but inside (starting and
# ending with the braces) it's a parameter. The trouble begins with
# yacc's lookahead token. We don't know if we're declaring or
# defining a function until we see LBRACE, but if we wait for yacc to
# trigger a rule on that token, then TT will have already been read
# and incorrectly interpreted as TYPEID. We need to add the
# parameters to the scope the moment the lexer sees LBRACE.
#
if self._get_yacc_lookahead_token().type == "LBRACE":
if func.args is not None:
for param in func.args.params:
if isinstance(param, c_ast.EllipsisParam): break
self._add_identifier(param.name, param.coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_pointer(self, p):
""" pointer : TIMES type_qualifier_list_opt
| TIMES type_qualifier_list_opt pointer
"""
coord = self._coord(p.lineno(1))
p[0] = c_ast.PtrDecl(
quals=p[2] or [],
type=p[3] if len(p) > 3 else None,
coord=coord)
def p_type_qualifier_list(self, p):
""" type_qualifier_list : type_qualifier
| type_qualifier_list type_qualifier
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_parameter_type_list(self, p):
""" parameter_type_list : parameter_list
| parameter_list COMMA ELLIPSIS
"""
if len(p) > 2:
p[1].params.append(c_ast.EllipsisParam(self._coord(p.lineno(3))))
p[0] = p[1]
def p_parameter_list(self, p):
""" parameter_list : parameter_declaration
| parameter_list COMMA parameter_declaration
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_parameter_declaration_1(self, p):
""" parameter_declaration : declaration_specifiers declarator
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))]
p[0] = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2])])[0]
def p_parameter_declaration_2(self, p):
""" parameter_declaration : declaration_specifiers abstract_declarator_opt
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))]
# Parameters can have the same names as typedefs. The trouble is that
# the parameter's name gets grouped into declaration_specifiers, making
# it look like an old-style declaration; compensate.
#
if len(spec['type']) > 1 and len(spec['type'][-1].names) == 1 and \
self._is_type_in_scope(spec['type'][-1].names[0]):
decl = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2], init=None)])[0]
# This truly is an old-style parameter declaration
#
else:
decl = c_ast.Typename(
quals=spec['qual'],
type=p[2] or c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(2)))
typename = spec['type']
decl = self._fix_decl_name_type(decl, typename)
p[0] = decl
def p_identifier_list(self, p):
""" identifier_list : identifier
| identifier_list COMMA identifier
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_initializer_1(self, p):
""" initializer : assignment_expression
"""
p[0] = p[1]
def p_initializer_2(self, p):
""" initializer : brace_open initializer_list brace_close
| brace_open initializer_list COMMA brace_close
"""
p[0] = p[2]
def p_initializer_list(self, p):
""" initializer_list : designation_opt initializer
| initializer_list COMMA designation_opt initializer
"""
if len(p) == 3: # single initializer
init = p[2] if p[1] is None else c_ast.NamedInitializer(p[1], p[2])
p[0] = c_ast.InitList([init], p[2].coord)
else:
init = p[4] if p[3] is None else c_ast.NamedInitializer(p[3], p[4])
p[1].exprs.append(init)
p[0] = p[1]
def p_designation(self, p):
""" designation : designator_list EQUALS
"""
p[0] = p[1]
# Designators are represented as a list of nodes, in the order in which
# they're written in the code.
#
def p_designator_list(self, p):
""" designator_list : designator
| designator_list designator
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_designator(self, p):
""" designator : LBRACKET constant_expression RBRACKET
| PERIOD identifier
"""
p[0] = p[2]
def p_type_name(self, p):
""" type_name : specifier_qualifier_list abstract_declarator_opt
"""
#~ print '=========='
#~ print p[1]
#~ print p[2]
#~ print p[2].children()
#~ print '=========='
typename = c_ast.Typename(
quals=p[1]['qual'],
type=p[2] or c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(2)))
p[0] = self._fix_decl_name_type(typename, p[1]['type'])
def p_abstract_declarator_1(self, p):
""" abstract_declarator : pointer
"""
dummytype = c_ast.TypeDecl(None, None, None)
p[0] = self._type_modify_decl(
decl=dummytype,
modifier=p[1])
def p_abstract_declarator_2(self, p):
""" abstract_declarator : pointer direct_abstract_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
def p_abstract_declarator_3(self, p):
""" abstract_declarator : direct_abstract_declarator
"""
p[0] = p[1]
# Creating and using direct_abstract_declarator_opt here
# instead of listing both direct_abstract_declarator and the
# lack of it in the beginning of _1 and _2 caused two
# shift/reduce errors.
#
def p_direct_abstract_declarator_1(self, p):
""" direct_abstract_declarator : LPAREN abstract_declarator RPAREN """
p[0] = p[2]
def p_direct_abstract_declarator_2(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET assignment_expression_opt RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=p[3],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_3(self, p):
""" direct_abstract_declarator : LBRACKET assignment_expression_opt RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
dim=p[2],
coord=self._coord(p.lineno(1)))
def p_direct_abstract_declarator_4(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_5(self, p):
""" direct_abstract_declarator : LBRACKET TIMES RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
            dim=c_ast.ID(p[2], self._coord(p.lineno(2))),
coord=self._coord(p.lineno(1)))
def p_direct_abstract_declarator_6(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_direct_abstract_declarator_7(self, p):
""" direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN
"""
p[0] = c_ast.FuncDecl(
args=p[2],
type=c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(1)))
# declaration is a list, statement isn't. To make it consistent, block_item
# will always be a list
#
def p_block_item(self, p):
""" block_item : declaration
| statement
"""
p[0] = p[1] if isinstance(p[1], list) else [p[1]]
# Since we made block_item a list, this just combines lists
#
def p_block_item_list(self, p):
""" block_item_list : block_item
| block_item_list block_item
"""
# Empty block items (plain ';') produce [None], so ignore them
p[0] = p[1] if (len(p) == 2 or p[2] == [None]) else p[1] + p[2]
def p_compound_statement_1(self, p):
""" compound_statement : brace_open block_item_list_opt brace_close """
p[0] = c_ast.Compound(
block_items=p[2],
coord=self._coord(p.lineno(1)))
def p_labeled_statement_1(self, p):
""" labeled_statement : ID COLON statement """
p[0] = c_ast.Label(p[1], p[3], self._coord(p.lineno(1)))
def p_labeled_statement_2(self, p):
""" labeled_statement : CASE constant_expression COLON statement """
p[0] = c_ast.Case(p[2], [p[4]], self._coord(p.lineno(1)))
def p_labeled_statement_3(self, p):
""" labeled_statement : DEFAULT COLON statement """
p[0] = c_ast.Default([p[3]], self._coord(p.lineno(1)))
def p_selection_statement_1(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement """
p[0] = c_ast.If(p[3], p[5], None, self._coord(p.lineno(1)))
def p_selection_statement_2(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement ELSE statement """
p[0] = c_ast.If(p[3], p[5], p[7], self._coord(p.lineno(1)))
def p_selection_statement_3(self, p):
""" selection_statement : SWITCH LPAREN expression RPAREN statement """
p[0] = fix_switch_cases(
c_ast.Switch(p[3], p[5], self._coord(p.lineno(1))))
def p_iteration_statement_1(self, p):
""" iteration_statement : WHILE LPAREN expression RPAREN statement """
p[0] = c_ast.While(p[3], p[5], self._coord(p.lineno(1)))
def p_iteration_statement_2(self, p):
""" iteration_statement : DO statement WHILE LPAREN expression RPAREN SEMI """
p[0] = c_ast.DoWhile(p[5], p[2], self._coord(p.lineno(1)))
def p_iteration_statement_3(self, p):
""" iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN statement """
p[0] = c_ast.For(p[3], p[5], p[7], p[9], self._coord(p.lineno(1)))
def p_iteration_statement_4(self, p):
""" iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN statement """
p[0] = c_ast.For(c_ast.DeclList(p[3]), p[4], p[6], p[8], self._coord(p.lineno(1)))
def p_jump_statement_1(self, p):
""" jump_statement : GOTO ID SEMI """
p[0] = c_ast.Goto(p[2], self._coord(p.lineno(1)))
def p_jump_statement_2(self, p):
""" jump_statement : BREAK SEMI """
p[0] = c_ast.Break(self._coord(p.lineno(1)))
def p_jump_statement_3(self, p):
""" jump_statement : CONTINUE SEMI """
p[0] = c_ast.Continue(self._coord(p.lineno(1)))
def p_jump_statement_4(self, p):
""" jump_statement : RETURN expression SEMI
| RETURN SEMI
"""
p[0] = c_ast.Return(p[2] if len(p) == 4 else None, self._coord(p.lineno(1)))
def p_expression_statement(self, p):
""" expression_statement : expression_opt SEMI """
if p[1] is None:
p[0] = c_ast.EmptyStatement(self._coord(p.lineno(1)))
else:
p[0] = p[1]
def p_expression(self, p):
""" expression : assignment_expression
| expression COMMA assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
if not isinstance(p[1], c_ast.ExprList):
p[1] = c_ast.ExprList([p[1]], p[1].coord)
p[1].exprs.append(p[3])
p[0] = p[1]
def p_typedef_name(self, p):
""" typedef_name : TYPEID """
p[0] = c_ast.IdentifierType([p[1]], coord=self._coord(p.lineno(1)))
def p_assignment_expression(self, p):
""" assignment_expression : conditional_expression
| unary_expression assignment_operator assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.Assignment(p[2], p[1], p[3], p[1].coord)
# K&R2 defines these as many separate rules, to encode
    # precedence and associativity. Why work hard? I'll just use
    # the built-in precedence/associativity specification feature
# of PLY. (see precedence declaration above)
#
def p_assignment_operator(self, p):
""" assignment_operator : EQUALS
| XOREQUAL
| TIMESEQUAL
| DIVEQUAL
| MODEQUAL
| PLUSEQUAL
| MINUSEQUAL
| LSHIFTEQUAL
| RSHIFTEQUAL
| ANDEQUAL
| OREQUAL
"""
p[0] = p[1]
def p_constant_expression(self, p):
""" constant_expression : conditional_expression """
p[0] = p[1]
def p_conditional_expression(self, p):
""" conditional_expression : binary_expression
| binary_expression CONDOP expression COLON conditional_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.TernaryOp(p[1], p[3], p[5], p[1].coord)
def p_binary_expression(self, p):
""" binary_expression : cast_expression
| binary_expression TIMES binary_expression
| binary_expression DIVIDE binary_expression
| binary_expression MOD binary_expression
| binary_expression PLUS binary_expression
| binary_expression MINUS binary_expression
| binary_expression RSHIFT binary_expression
| binary_expression LSHIFT binary_expression
| binary_expression LT binary_expression
| binary_expression LE binary_expression
| binary_expression GE binary_expression
| binary_expression GT binary_expression
| binary_expression EQ binary_expression
| binary_expression NE binary_expression
| binary_expression AND binary_expression
| binary_expression OR binary_expression
| binary_expression XOR binary_expression
| binary_expression LAND binary_expression
| binary_expression LOR binary_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.BinaryOp(p[2], p[1], p[3], p[1].coord)
def p_cast_expression_1(self, p):
""" cast_expression : unary_expression """
p[0] = p[1]
def p_cast_expression_2(self, p):
""" cast_expression : LPAREN type_name RPAREN cast_expression """
p[0] = c_ast.Cast(p[2], p[4], self._coord(p.lineno(1)))
def p_unary_expression_1(self, p):
""" unary_expression : postfix_expression """
p[0] = p[1]
def p_unary_expression_2(self, p):
""" unary_expression : PLUSPLUS unary_expression
| MINUSMINUS unary_expression
| unary_operator cast_expression
"""
p[0] = c_ast.UnaryOp(p[1], p[2], p[2].coord)
def p_unary_expression_3(self, p):
""" unary_expression : SIZEOF unary_expression
| SIZEOF LPAREN type_name RPAREN
"""
p[0] = c_ast.UnaryOp(
p[1],
p[2] if len(p) == 3 else p[3],
self._coord(p.lineno(1)))
def p_unary_operator(self, p):
""" unary_operator : AND
| TIMES
| PLUS
| MINUS
| NOT
| LNOT
"""
p[0] = p[1]
def p_postfix_expression_1(self, p):
""" postfix_expression : primary_expression """
p[0] = p[1]
def p_postfix_expression_2(self, p):
""" postfix_expression : postfix_expression LBRACKET expression RBRACKET """
p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord)
def p_postfix_expression_3(self, p):
""" postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN
| postfix_expression LPAREN RPAREN
"""
p[0] = c_ast.FuncCall(p[1], p[3] if len(p) == 5 else None, p[1].coord)
def p_postfix_expression_4(self, p):
""" postfix_expression : postfix_expression PERIOD ID
| postfix_expression PERIOD TYPEID
| postfix_expression ARROW ID
| postfix_expression ARROW TYPEID
"""
field = c_ast.ID(p[3], self._coord(p.lineno(3)))
p[0] = c_ast.StructRef(p[1], p[2], field, p[1].coord)
def p_postfix_expression_5(self, p):
""" postfix_expression : postfix_expression PLUSPLUS
| postfix_expression MINUSMINUS
"""
p[0] = c_ast.UnaryOp('p' + p[2], p[1], p[1].coord)
def p_postfix_expression_6(self, p):
""" postfix_expression : LPAREN type_name RPAREN brace_open initializer_list brace_close
| LPAREN type_name RPAREN brace_open initializer_list COMMA brace_close
"""
p[0] = c_ast.CompoundLiteral(p[2], p[5])
def p_primary_expression_1(self, p):
""" primary_expression : identifier """
p[0] = p[1]
def p_primary_expression_2(self, p):
""" primary_expression : constant """
p[0] = p[1]
def p_primary_expression_3(self, p):
""" primary_expression : unified_string_literal
| unified_wstring_literal
"""
p[0] = p[1]
def p_primary_expression_4(self, p):
""" primary_expression : LPAREN expression RPAREN """
p[0] = p[2]
def p_argument_expression_list(self, p):
""" argument_expression_list : assignment_expression
| argument_expression_list COMMA assignment_expression
"""
if len(p) == 2: # single expr
p[0] = c_ast.ExprList([p[1]], p[1].coord)
else:
p[1].exprs.append(p[3])
p[0] = p[1]
def p_identifier(self, p):
""" identifier : ID """
p[0] = c_ast.ID(p[1], self._coord(p.lineno(1)))
def p_constant_1(self, p):
""" constant : INT_CONST_DEC
| INT_CONST_OCT
| INT_CONST_HEX
"""
p[0] = c_ast.Constant(
'int', p[1], self._coord(p.lineno(1)))
def p_constant_2(self, p):
""" constant : FLOAT_CONST
| HEX_FLOAT_CONST
"""
p[0] = c_ast.Constant(
'float', p[1], self._coord(p.lineno(1)))
def p_constant_3(self, p):
""" constant : CHAR_CONST
| WCHAR_CONST
"""
p[0] = c_ast.Constant(
'char', p[1], self._coord(p.lineno(1)))
# The "unified" string and wstring literal rules are for supporting
# concatenation of adjacent string literals.
# I.e. "hello " "world" is seen by the C compiler as a single string literal
# with the value "hello world"
#
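    # The concatenation branch below drops the closing quote of the
    # accumulated literal and the opening quote of the new one:
    # '"hello "' + '"world"' -> '"hello world"'.
    #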
def p_unified_string_literal(self, p):
""" unified_string_literal : STRING_LITERAL
| unified_string_literal STRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._coord(p.lineno(1)))
else:
p[1].value = p[1].value[:-1] + p[2][1:]
p[0] = p[1]
def p_unified_wstring_literal(self, p):
""" unified_wstring_literal : WSTRING_LITERAL
| unified_wstring_literal WSTRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._coord(p.lineno(1)))
else:
            p[1].value = p[1].value.rstrip()[:-1] + p[2][2:]
p[0] = p[1]
def p_brace_open(self, p):
""" brace_open : LBRACE
"""
p[0] = p[1]
def p_brace_close(self, p):
""" brace_close : RBRACE
"""
p[0] = p[1]
def p_empty(self, p):
'empty : '
p[0] = None
def p_error(self, p):
# If error recovery is added here in the future, make sure
# _get_yacc_lookahead_token still works!
#
if p:
self._parse_error(
'before: %s' % p.value,
self._coord(lineno=p.lineno,
column=self.clex.find_tok_column(p)))
else:
self._parse_error('At end of input', '')
#------------------------------------------------------------------------------
if __name__ == "__main__":
import pprint
import time, sys
#t1 = time.time()
#parser = CParser(lex_optimize=True, yacc_debug=True, yacc_optimize=False)
#sys.write(time.time() - t1)
#buf = '''
#int (*k)(int);
#'''
## set debuglevel to 2 for debugging
#t = parser.parse(buf, 'x.c', debuglevel=0)
#t.show(showcoord=True)
|
BartoszCichecki/onlinepython
|
onlinepython/pypy-2.4.0-win32/lib_pypy/cffi/_pycparser/c_parser.py
|
Python
|
gpl-2.0
| 59,384 | 0.001381 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for activity module."""
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import naming
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import annos
from tensorflow.python.platform import test
QN = qual_names.QN
NodeAnno = annos.NodeAnno
global_a = 7
global_b = 17
class ScopeTest(test.TestCase):
def assertMissing(self, qn, scope):
self.assertNotIn(qn, scope.read)
self.assertNotIn(qn, scope.modified)
def assertReadOnly(self, qn, scope):
self.assertIn(qn, scope.read)
self.assertNotIn(qn, scope.modified)
def assertWriteOnly(self, qn, scope):
self.assertNotIn(qn, scope.read)
self.assertIn(qn, scope.modified)
def assertReadWrite(self, qn, scope):
self.assertIn(qn, scope.read)
self.assertIn(qn, scope.modified)
def test_copy_from(self):
scope = activity.Scope(None)
scope.modified.add(QN('foo'))
other = activity.Scope(None)
other.copy_from(scope)
self.assertWriteOnly(QN('foo'), other)
scope.modified.add(QN('bar'))
scope.copy_from(other)
self.assertMissing(QN('bar'), scope)
def test_merge_from(self):
scope = activity.Scope(None)
other = activity.Scope(None)
for col in (scope.modified, scope.read, scope.bound, scope.deleted):
col.add(QN('foo'))
for col in (other.modified, other.read, other.bound, other.deleted):
col.add(QN('foo'))
col.add(QN('bar'))
scope.merge_from(other)
self.assertReadWrite(QN('foo'), scope)
self.assertReadWrite(QN('bar'), scope)
self.assertIn(QN('foo'), scope.bound)
self.assertIn(QN('bar'), scope.bound)
self.assertIn(QN('foo'), scope.deleted)
self.assertIn(QN('bar'), scope.deleted)
def test_copy_of(self):
scope = activity.Scope(None)
scope.read.add(QN('foo'))
other = activity.Scope.copy_of(scope)
self.assertReadOnly(QN('foo'), other)
child_scope = activity.Scope(scope)
child_scope.read.add(QN('bar'))
other = activity.Scope.copy_of(child_scope)
self.assertReadOnly(QN('bar'), other)
def test_referenced(self):
scope = activity.Scope(None)
scope.read.add(QN('a'))
child = activity.Scope(scope)
child.read.add(QN('b'))
child2 = activity.Scope(child, isolated=False)
child2.read.add(QN('c'))
child2.finalize()
child.finalize()
scope.finalize()
self.assertIn(QN('c'), child2.referenced)
self.assertIn(QN('b'), child2.referenced)
self.assertIn(QN('a'), child2.referenced)
self.assertIn(QN('c'), child.referenced)
self.assertIn(QN('b'), child.referenced)
self.assertIn(QN('a'), child.referenced)
class ActivityAnalyzerTestBase(test.TestCase):
def _parse_and_analyze(self, test_fn):
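    # Parses test_fn's source, resolves qualified names, and runs activity
    # analysis so that the nodes carry the scope annotations asserted below.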
# TODO(mdan): Use a custom FunctionTransformer here.
node, source = parser.parse_entity(test_fn, future_features=())
entity_info = transformer.EntityInfo(
name=test_fn.__name__,
source_code=source,
source_file=None,
future_features=(),
namespace={})
node = qual_names.resolve(node)
namer = naming.Namer({})
ctx = transformer.Context(entity_info, namer, None)
node = activity.resolve(node, ctx)
return node, entity_info
def assertSymbolSetsAre(self, expected, actual, name):
expected = set(expected)
actual = set(str(s) for s in actual)
self.assertSetEqual(
expected, actual, 'for symbol set: %s\n'
' Expected: %s\n'
' Got: %s\n'
' Missing: %s\n'
' Extra: %s\n' % (name.upper(), expected, actual,
expected - actual, actual - expected))
def assertScopeIs(self, scope, used, modified):
"""Assert the scope contains specific used, modified & created variables."""
self.assertSymbolSetsAre(used, scope.read, 'read')
self.assertSymbolSetsAre(modified, scope.modified, 'modified')
class ActivityAnalyzerTest(ActivityAnalyzerTestBase):
def test_import(self):
def test_fn():
import a, b.x, y as c, z.u as d # pylint:disable=g-multiple-import,g-import-not-at-top,unused-variable
node, _ = self._parse_and_analyze(test_fn)
scope = anno.getanno(node.body[0], anno.Static.SCOPE)
self.assertScopeIs(scope, (), ('a', 'b', 'c', 'd'))
def test_import_from(self):
def test_fn():
from x import a # pylint:disable=g-import-not-at-top,unused-variable
from y import z as b # pylint:disable=g-import-not-at-top,unused-variable
node, _ = self._parse_and_analyze(test_fn)
scope = anno.getanno(node.body[0], anno.Static.SCOPE)
self.assertScopeIs(scope, (), ('a',))
scope = anno.getanno(node.body[1], anno.Static.SCOPE)
self.assertScopeIs(scope, (), ('b',))
def test_print_statement(self):
def test_fn(a):
b = 0
c = 1
print(a, b)
return c
node, _ = self._parse_and_analyze(test_fn)
print_node = node.body[2]
if isinstance(print_node, gast.Print):
# Python 2
print_args_scope = anno.getanno(print_node, NodeAnno.ARGS_SCOPE)
else:
# Python 3
assert isinstance(print_node, gast.Expr)
# The call node should be the one being annotated.
print_node = print_node.value
print_args_scope = anno.getanno(print_node, NodeAnno.ARGS_SCOPE)
# We basically need to detect which variables are captured by the call
# arguments.
self.assertScopeIs(print_args_scope, ('a', 'b'), ())
def test_call_args(self):
def test_fn(a):
b = 0
c = 1
foo(a, b) # pylint:disable=undefined-variable
return c
node, _ = self._parse_and_analyze(test_fn)
call_node = node.body[2].value
# We basically need to detect which variables are captured by the call
# arguments.
self.assertScopeIs(
anno.getanno(call_node, NodeAnno.ARGS_SCOPE), ('a', 'b'), ())
def test_call_args_attributes(self):
def foo(*_):
pass
def test_fn(a):
a.c = 0
foo(a.b, a.c)
return a.d
node, _ = self._parse_and_analyze(test_fn)
call_node = node.body[1].value
self.assertScopeIs(
anno.getanno(call_node, NodeAnno.ARGS_SCOPE), ('a', 'a.b', 'a.c'), ())
def test_call_args_subscripts(self):
def foo(*_):
pass
def test_fn(a):
b = 1
c = 2
foo(a[0], a[b])
return a[c]
node, _ = self._parse_and_analyze(test_fn)
call_node = node.body[2].value
self.assertScopeIs(
anno.getanno(call_node, NodeAnno.ARGS_SCOPE),
('a', 'a[0]', 'a[b]', 'b'), ())
def test_while(self):
def test_fn(a):
b = a
while b > 0:
c = b
b -= 1
return b, c
node, _ = self._parse_and_analyze(test_fn)
while_node = node.body[1]
self.assertScopeIs(
anno.getanno(while_node, NodeAnno.BODY_SCOPE), ('b',), ('b', 'c'))
self.assertScopeIs(
anno.getanno(while_node, NodeAnno.BODY_SCOPE).parent, ('a', 'b', 'c'),
('b', 'c'))
self.assertScopeIs(
anno.getanno(while_node, NodeAnno.COND_SCOPE), ('b',), ())
def test_for(self):
def test_fn(a):
b = a
for _ in a:
c = b
b -= 1
return b, c
node, _ = self._parse_and_analyze(test_fn)
for_node = node.body[1]
self.assertScopeIs(
        anno.getanno(for_node, NodeAnno.ITERATE_SCOPE), (), ('_',))
self.assertScopeIs(
anno.getanno(for_node, NodeAnno.BODY_SCOPE), ('b',), ('b', 'c'))
self.assertScopeIs(
anno.getanno(for_node, NodeAnno.BODY_SCOPE).parent, ('a', 'b', 'c'),
('b', 'c', '_'))
def test_if(self):
def test_fn(x):
if x > 0:
x = -x
y = 2 * x
z = -y
else:
x = 2 * x
y = -x
u = -y
return z, u
node, _ = self._parse_and_analyze(test_fn)
if_node = node.body[0]
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.BODY_SCOPE), ('x', 'y'), ('x', 'y', 'z'))
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.BODY_SCOPE).parent, ('x', 'y', 'z', 'u'),
('x', 'y', 'z', 'u'))
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.ORELSE_SCOPE), ('x', 'y'),
('x', 'y', 'u'))
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.ORELSE_SCOPE).parent,
('x', 'y', 'z', 'u'), ('x', 'y', 'z', 'u'))
def test_if_attributes(self):
def test_fn(a):
if a > 0:
a.b = -a.c
d = 2 * a
else:
a.b = a.c
d = 1
return d
node, _ = self._parse_and_analyze(test_fn)
if_node = node.body[0]
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.BODY_SCOPE), ('a', 'a.c'), ('a.b', 'd'))
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.ORELSE_SCOPE), ('a', 'a.c'),
('a.b', 'd'))
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.BODY_SCOPE).parent, ('a', 'a.c', 'd'),
('a.b', 'd'))
def test_if_subscripts(self):
def test_fn(a, b, c, e):
if a > 0:
a[b] = -a[c]
d = 2 * a
else:
a[0] = e
d = 1
return d
node, _ = self._parse_and_analyze(test_fn)
if_node = node.body[0]
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.BODY_SCOPE), ('a', 'b', 'c', 'a[c]'),
('a[b]', 'd'))
# TODO(mdan): Should subscript writes (a[0] = 1) be considered to read "a"?
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.ORELSE_SCOPE), ('a', 'e'), ('a[0]', 'd'))
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.ORELSE_SCOPE).parent,
('a', 'b', 'c', 'd', 'e', 'a[c]'), ('d', 'a[b]', 'a[0]'))
def test_nested_if(self):
def test_fn(b):
if b > 0:
if b < 5:
a = b
else:
a = b * b
return a
node, _ = self._parse_and_analyze(test_fn)
inner_if_node = node.body[0].body[0]
self.assertScopeIs(
anno.getanno(inner_if_node, NodeAnno.BODY_SCOPE), ('b',), ('a',))
self.assertScopeIs(
anno.getanno(inner_if_node, NodeAnno.ORELSE_SCOPE), ('b',), ('a',))
def test_nested_function(self):
def test_fn(a):
def f(x):
y = x * x
return y
return f(a)
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'f'), ('f',))
fn_def_node = node.body[0]
scope = anno.getanno(fn_def_node, anno.Static.SCOPE)
    self.assertScopeIs(scope, (), ('f',))
scope = anno.getanno(fn_def_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('x', 'y'), ('y',))
scope = anno.getanno(fn_def_node, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('x', 'y'), ('y',))
self.assertSymbolSetsAre(('x', 'y'), scope.bound, 'BOUND')
def test_nested_lambda(self):
def test_fn(a):
return lambda x: (x * a)
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a',), ())
return_node = node.body[0]
scope = anno.getanno(return_node, anno.Static.SCOPE)
self.assertScopeIs(scope, ('a',), ())
lam_def_node = return_node.value
scope = anno.getanno(lam_def_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'x'), ())
scope = anno.getanno(lam_def_node, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'x'), ())
self.assertSymbolSetsAre(('x',), scope.bound, 'BOUND')
def test_nested_function_arg_defaults(self):
def test_fn(a):
def f(x=a):
y = x * x
return y
return f(a)
node, _ = self._parse_and_analyze(test_fn)
fn_def_node = node.body[0]
self.assertScopeIs(
anno.getanno(fn_def_node, anno.Static.SCOPE), ('a',), ('f',))
scope = anno.getanno(fn_def_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('x', 'y'), ('y',))
scope = anno.getanno(fn_def_node, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('x', 'y'), ('y',))
self.assertSymbolSetsAre(('x', 'y'), scope.bound, 'BOUND')
def test_constructor_attributes(self):
class TestClass(object):
def __init__(self, a):
self.b = a
self.b.c = 1
node, _ = self._parse_and_analyze(TestClass)
init_node = node.body[0]
self.assertScopeIs(
anno.getanno(init_node, NodeAnno.BODY_SCOPE), ('self', 'a', 'self.b'),
('self', 'self.b', 'self.b.c'))
def test_aug_assign_subscripts(self):
def test_fn(a):
a[0] += 1
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
self.assertScopeIs(
anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('a', 'a[0]'), ('a[0]',))
def test_return_vars_are_read(self):
def test_fn(a, b, c): # pylint: disable=unused-argument
return c
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
self.assertScopeIs(anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('c',), ())
self.assertScopeIs(
anno.getanno(node.body[0], anno.Static.SCOPE), ('c',), ())
def test_raise_names_are_read(self):
def test_fn(a, b, c): # pylint: disable=unused-argument
raise b
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
self.assertScopeIs(anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('b',), ())
self.assertScopeIs(
anno.getanno(node.body[0], anno.Static.SCOPE), ('b',), ())
def test_except_exposes_names(self):
def test_fn(a, b, c): # pylint: disable=unused-argument
try:
pass
except: # pylint: disable=bare-except
b = c
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
self.assertScopeIs(
anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('c',), ('b',))
def test_except_hides_exception_var_name(self):
def test_fn(a, b, c): # pylint: disable=unused-argument
try:
pass
except a as e:
b = e
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
self.assertScopeIs(
anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('a',), ('b',))
def test_aug_assign(self):
def test_fn(a, b):
a += b
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
self.assertScopeIs(
        anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('a', 'b'), ('a',))
def test_aug_assign_rvalues(self):
a = dict(bar=3)
def foo():
return a
def test_fn(x):
foo()['bar'] += x
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
self.assertScopeIs(
anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('foo', 'x'), ())
def test_lambda(self):
def test_fn(a, b):
return lambda: (a + b)
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b'), ())
lam_def_node = node.body[0].value
scope = anno.getanno(lam_def_node, anno.Static.SCOPE)
self.assertScopeIs(scope, (), ())
scope = anno.getanno(lam_def_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b'), ())
scope = anno.getanno(lam_def_node, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b'), ())
self.assertSymbolSetsAre((), scope.bound, 'BOUND')
scope = anno.getanno(lam_def_node.args, anno.Static.SCOPE)
self.assertSymbolSetsAre((), scope.params.keys(), 'lambda params')
def test_lambda_params_args(self):
def test_fn(a, b): # pylint: disable=unused-argument
return lambda a: a + b
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
# Note: `a` in `a + b` is not "read" here because it's hidden by the `a`
# argument.
self.assertScopeIs(scope, ('b',), ())
lam_def_node = node.body[0].value
scope = anno.getanno(lam_def_node, anno.Static.SCOPE)
self.assertScopeIs(scope, (), ())
scope = anno.getanno(lam_def_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b'), ())
scope = anno.getanno(lam_def_node, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b'), ())
self.assertSymbolSetsAre(('a',), scope.bound, 'BOUND')
scope = anno.getanno(lam_def_node.args, anno.Static.SCOPE)
self.assertSymbolSetsAre(('a',), scope.params.keys(), 'lambda params')
def test_lambda_params_arg_defaults(self):
def test_fn(a, b, c): # pylint: disable=unused-argument
return lambda b=c: a + b
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
# Note: `b` is not "read" here because it's hidden by the argument.
self.assertScopeIs(scope, ('a', 'c'), ())
lam_def_node = node.body[0].value
scope = anno.getanno(lam_def_node, anno.Static.SCOPE)
self.assertScopeIs(scope, ('c',), ())
scope = anno.getanno(lam_def_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b'), ())
scope = anno.getanno(lam_def_node, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b'), ())
self.assertSymbolSetsAre(('b',), scope.bound, 'BOUND')
scope = anno.getanno(lam_def_node.args, anno.Static.SCOPE)
self.assertSymbolSetsAre(('b',), scope.params.keys(), 'lambda params')
def test_lambda_complex(self):
def test_fn(a, b, c, d, e): # pylint: disable=unused-argument
a = (lambda a, b, c=e: a + b + c)(d, 1, 2) + b
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('d', 'b', 'e'), ('a',))
lam_def_node = node.body[0].value.left.func
scope = anno.getanno(lam_def_node, anno.Static.SCOPE)
self.assertScopeIs(scope, ('e',), ())
scope = anno.getanno(lam_def_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b', 'c'), ())
scope = anno.getanno(lam_def_node, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b', 'c'), ())
self.assertSymbolSetsAre(('a', 'b', 'c'), scope.bound, 'BOUND')
scope = anno.getanno(lam_def_node.args, anno.Static.SCOPE)
self.assertSymbolSetsAre(
('a', 'b', 'c'), scope.params.keys(), 'lambda params')
def test_lambda_nested(self):
def test_fn(a, b, c, d, e, f): # pylint: disable=unused-argument
a = lambda a, b: d(lambda b=f: a + b + c) # pylint: disable=undefined-variable
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('d', 'c', 'f'), ('a',))
outer_lam_def = node.body[0].value
scope = anno.getanno(outer_lam_def, anno.Static.SCOPE)
self.assertScopeIs(scope, (), ())
scope = anno.getanno(outer_lam_def, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('d', 'f', 'a', 'c'), ())
scope = anno.getanno(outer_lam_def, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('d', 'f', 'a', 'c'), ())
self.assertSymbolSetsAre(('a', 'b'), scope.bound, 'BOUND')
scope = anno.getanno(outer_lam_def.args, anno.Static.SCOPE)
self.assertSymbolSetsAre(('a', 'b'), scope.params.keys(), 'lambda params')
inner_lam_def = outer_lam_def.body.args[0]
scope = anno.getanno(inner_lam_def, anno.Static.SCOPE)
self.assertScopeIs(scope, ('f',), ())
scope = anno.getanno(inner_lam_def, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b', 'c'), ())
scope = anno.getanno(inner_lam_def, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b', 'c'), ())
self.assertSymbolSetsAre(('b',), scope.bound, 'BOUND')
scope = anno.getanno(inner_lam_def.args, anno.Static.SCOPE)
self.assertSymbolSetsAre(('b',), scope.params.keys(), 'lambda params')
def test_comprehension_targets_are_isolated(self):
def test_fn(a):
b = {c for c in a} # pylint:disable=unused-variable
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(body_scope, ('a',), ('b',))
def test_comprehension_targets_are_isolated_list_function_w_generator(self):
def test_fn(a):
b = list(c for c in a) # pylint:disable=unused-variable
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(body_scope, ('a', 'list'), ('b',))
def test_list_comprehension_targets_are_sometimes_isolated(self):
def test_fn(a):
b = [c for c in a] # pylint:disable=unused-variable
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(body_scope, ('a',), ('b',))
def test_comprehension_targets_are_isolated_in_augassign(self):
def test_fn(a, b):
b += [c for c in a] # pylint:disable=unused-variable
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(body_scope, ('a', 'b'), ('b',))
def test_comprehension_generator_order(self):
def test_fn(a, b, c): # pylint:disable=unused-argument
e = {d: (a, b) for (a, b) in c for d in b} # pylint:disable=unused-variable,g-complex-comprehension
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(body_scope, ('c',), ('e',))
def test_global_symbol(self):
def test_fn(c):
global global_a
global global_b
global_a = global_b + c
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(body_scope, ('global_a', 'global_b', 'c'), ('global_a',))
self.assertSetEqual(body_scope.globals, set(
(QN('global_a'), QN('global_b'))))
global_a_scope = anno.getanno(fn_node.body[0], anno.Static.SCOPE)
self.assertScopeIs(global_a_scope, ('global_a',), ())
def test_class_definition_basic(self):
def test_fn(a, b):
class C(a(b)):
d = 1
return C
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(body_scope, ('a', 'b', 'C'), ('C',))
def test_class_definition_isolates_method_writes(self):
def test_fn(a, b, c):
class C(a(b)):
d = 1
def e(self):
f = c + 1
return f
return C
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(body_scope, ('a', 'b', 'C', 'c'), ('C',))
if __name__ == '__main__':
test.main()
|
tensorflow/tensorflow
|
tensorflow/python/autograph/pyct/static_analysis/activity_test.py
|
Python
|
apache-2.0
| 23,693 | 0.006078 |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import emission.core.wrapper.transition as et
import emission.net.usercache.formatters.common as fc
import attrdict as ad
state_map = {
"STATE_START": et.State.START,
"STATE_WAITING_FOR_TRIP_START": et.State.WAITING_FOR_TRIP_START,
"STATE_ONGOING_TRIP": et.State.ONGOING_TRIP,
"STATE_TRACKING_STOPPED": et.State.TRACKING_STOPPED
}
transition_map = {
"booted": et.TransitionType.BOOTED,
"T_INITIALIZE": et.TransitionType.INITIALIZE,
"T_INIT_COMPLETE": et.TransitionType.INIT_COMPLETE,
"T_EXITED_GEOFENCE": et.TransitionType.EXITED_GEOFENCE,
"T_TRIP_STARTED": et.TransitionType.TRIP_STARTED,
"T_RECEIVED_SILENT_PUSH": et.TransitionType.RECEIVED_SILENT_PUSH,
"T_TRIP_END_DETECTED": et.TransitionType.TRIP_END_DETECTED,
"T_TRIP_RESTARTED": et.TransitionType.TRIP_RESTARTED,
"T_END_TRIP_TRACKING": et.TransitionType.END_TRIP_TRACKING,
"T_DATA_PUSHED": et.TransitionType.DATA_PUSHED,
"T_TRIP_ENDED": et.TransitionType.STOPPED_MOVING,
"T_FORCE_STOP_TRACKING": et.TransitionType.STOP_TRACKING,
"T_TRACKING_STOPPED": et.TransitionType.TRACKING_STOPPED,
"T_VISIT_STARTED": et.TransitionType.VISIT_STARTED,
"T_VISIT_ENDED": et.TransitionType.VISIT_ENDED,
"T_NOP": et.TransitionType.NOP,
"T_START_TRACKING": et.TransitionType.START_TRACKING
}
def format(entry):
formatted_entry = ad.AttrDict()
formatted_entry["_id"] = entry["_id"]
formatted_entry.user_id = entry.user_id
m = entry.metadata
fc.expand_metadata_times(m)
formatted_entry.metadata = m
data = ad.AttrDict()
data.curr_state = state_map[entry.data.currState].value
logging.debug("Mapped %s -> %s" % (entry.data.currState, data.curr_state))
# The iOS state diagram is significantly more complex than the android state diagram
# So there are a lot more transitions. But some of the intermediate states are
# not interesting, so it seems like it should be possible to collapse them to the
# simple 2-state android state machine. But that requires looking at a window of
# transitions, which we don't have here. Let's focus on simply mapping here and
# deal with collapsing later
    # data.transition_raw = entry.data.transition
    if entry.data.transition is not None:
        data.transition = transition_map[entry.data.transition].value
    else:
        data.transition = None
logging.debug("Mapped %s -> %s" % (entry.data.transition, data.transition))
if "ts" not in data:
data.ts = formatted_entry.metadata.write_ts
logging.debug("No existing timestamp, copyied from metadata%s" % data.ts)
data.local_dt = formatted_entry.metadata.write_local_dt
data.fmt_time = formatted_entry.metadata.write_fmt_time
else:
logging.debug("Retaining existing timestamp %s" % data.ts)
        fc.expand_data_times(data, m)
formatted_entry.data = data
return formatted_entry
|
shankari/e-mission-server
|
emission/net/usercache/formatters/ios/transition.py
|
Python
|
bsd-3-clause
| 3,275 | 0.006107 |
#!/usr/bin/env python
"""
A toolkit for identifying and advertising service resources.
Uses a specific naming convention for the Task Definition of services. If you
name the Task Definition ending with "-service", no configuration is needed.
This also requires that you not use that naming convention for task definitions
that are not services.
For example:
A Task Definition with the family name of 'cache-service' will have its
hosting Container Instance's internal ip added to a Route53 private Zone as
cache.local and other machines on the same subnet can address it that way.
"""
import argparse
import logging
import os
import re
import json
import boto
import boto.ec2
import boto.route53
import requests
from etcd.client import Client
from time import sleep
region = os.environ.get('ECS_REGION', 'us-east-1')
ecs = boto.connect_ec2containerservice(
host='ecs.{0}.amazonaws.com'.format(region))
ec2 = boto.ec2.connect_to_region(region)
route53 = boto.route53.connect_to_region(region)
logging.basicConfig(format='%(asctime)s %(message)s',
datefmt='%Y/%m/%d/ %I:%M:%S %p')
if 'ECS_CLUSTER' in os.environ:
cluster = os.environ['ECS_CLUSTER']
elif os.path.exists('/etc/ecs/ecs.config'):
pat = re.compile(r'\bECS_CLUSTER\b\s*=\s*(\w*)')
cluster = pat.findall(open('/etc/ecs/ecs.config').read())[-1]
else:
cluster = None
def get_task_arns(family):
"""
Get the ARN of running task, given the family name.
"""
response = ecs.list_tasks(cluster=cluster, family=family)
arns = response['ListTasksResponse']['ListTasksResult']['taskArns']
if len(arns) == 0:
return None
return arns
def get_ec2_interface(container_instance_arn):
"""
Get the ec2 interface from an container instance ARN.
"""
response = ecs.describe_container_instances(container_instance_arn, cluster=cluster)
ec2_instance_id = response['DescribeContainerInstancesResponse'] \
['DescribeContainerInstancesResult']['containerInstances'] \
[0]['ec2InstanceId']
response = ec2.get_all_instances(filters={'instance-id': ec2_instance_id})
return response[0].instances[0].interfaces[0]
def get_zone_for_vpc(vpc_id):
"""
Identify the Hosted Zone for the given VPC.
Assumes a 1 to 1 relationship.
NOTE: There is an existing bug.
https://github.com/boto/boto/issues/3061
When that changes, I expect to have to search ['VPCs'] as a list of
dictionaries rather than a dictionary. This has the unfortunate side
effect of not working for Hosted Zones that are associated with more than
one VPC. (But, why would you expect internal DNS for 2 different private
networks to be the same anyway?)
"""
response = route53.get_all_hosted_zones()['ListHostedZonesResponse']
for zone in response['HostedZones']:
zone_id = zone['Id'].split('/')[-1]
detail = route53.get_hosted_zone(zone_id)['GetHostedZoneResponse']
try:
if detail['VPCs']['VPC']['VPCId'] == vpc_id:
return {'zone_id': zone_id, 'zone_name': zone['Name']}
except KeyError:
pass
def get_service_info(service_name):
info = {
"name": service_name,
"tasks": []
}
if service_name[-8:] == '-service':
info['name'] = service_name[:-8]
task_arns = get_task_arns(service_name)
if not task_arns:
logging.info('{0} is NOT RUNNING'.format(service_name))
return None
else:
logging.info('{0} is RUNNING'.format(service_name))
data = ecs.describe_tasks(task_arns, cluster=cluster)
tasks = data['DescribeTasksResponse']['DescribeTasksResult']['tasks']
for task in tasks:
interface = get_ec2_interface(task['containerInstanceArn'])
task_info = {
'ip': interface.private_ip_address,
'ports': {}
}
for container in task['containers']:
if container['networkBindings']:
for port in container['networkBindings']:
if port['protocol'] == 'tcp':
task_info['ports'][port['containerPort']] = port['hostPort']
info['tasks'].append(task_info)
info['vpc_id'] = interface.vpc_id
return info
def update_dns(zone_id, zone_name, service_name, service_ips, ttl=20):
"""
Insert or update DNS record.
"""
host_name = '.'.join([service_name, zone_name])
record_set = boto.route53.record.ResourceRecordSets(route53, zone_id)
record = record_set.add_change('UPSERT', host_name, 'A', ttl)
for service_ip in service_ips:
record.add_value(service_ip)
record_set.commit()
return record_set
def update_service(service_name, method, prefix):
"""
Update DNS to allow discovery of properly named task definitions.
"""
info = get_service_info(service_name)
if not info:
return None
if method == 'dns':
network = get_zone_for_vpc(info["vpc_id"])
ips = [t['ip'] for t in info['tasks']]
logging.info('Registering {0}.{1} as {2}'.format(
info['name'], network['zone_name'], ','.join(ips)))
update_dns(network['zone_id'], network['zone_name'],
info['name'], ips)
elif method == 'etcd':
data = json.dumps(info['tasks'])
logging.info('Registering {0} as {1}'.format(
info['name'], data))
host = requests.get("http://169.254.169.254/latest/meta-data/local-ipv4").content
client = Client(host=host, port=4001)
key = '/' + '/'.join([i for i in ['tasks', prefix, info['name']] if i])
client.node.set(key, data)
def main():
"""
Main function that handles running the command.
"""
parser = argparse.ArgumentParser()
    parser.add_argument('service_name', nargs=1,
                        help='name of the service to register')
    parser.add_argument('method', nargs=1,
                        help='registration method: dns or etcd')
parser.add_argument('-p', '--prefix', action='store', default=False,
help='prefix when saving to etcd')
parser.add_argument('-q', '--quiet', action='store_true',
help='suppress output')
parser.add_argument('-r', '--rerun', action='store_true',
help='run again after a 60 second pause')
args = parser.parse_args()
if not args.quiet:
logging.getLogger().setLevel(logging.INFO)
update_service(args.service_name[0], args.method[0], args.prefix)
if args.rerun:
sleep(60)
update_service(args.service_name[0], args.method[0], args.prefix)
if __name__ == '__main__':
main()
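# Example invocations (service names hypothetical):
#   python register.py web-service dns
#   python register.py web-service etcd -p production -r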
|
simonluijk/aws-ecs-service-discovery
|
register.py
|
Python
|
mit
| 6,803 | 0.001029 |
# -*- coding: utf8 -*-
from urllib.request import Request, urlopen
import logging
import parsing
__author__ = 'carlos'
class Downloader(object):
def __init__(self, url):
self.url = url
    def read(self):
        request = Request(self.url)
        # 'Accept' declares the desired content type; the previously used
        # 'Accept-encoding' header is for compression schemes, not types.
        request.add_header('Accept', 'text/html')
        response = urlopen(request)
        # get_content_charset() parses the charset from the Content-Type
        # header; headers.get('charset') always returned None.
        charset = response.headers.get_content_charset() or 'utf-8'
        # Decode to text so the downstream parser's feed() (str-based on
        # Python 3) receives a string rather than bytes.
        data = response.read().decode(charset)
        logging.debug('Read %d characters from %s (%s)', len(data), self.url, charset)
        return data
class StocksInfoUpdater(object):
def __init__(self, url):
self.downloader = Downloader(url)
self.parser = parsing.StockParser()
def update(self):
dataread = self.downloader.read()
self.parser.feed(dataread)
return self.parser.stocks
@property
def stocks(self):
return self.parser.stocks
@property
def url(self):
return self.downloader.url
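# Hypothetical usage sketch (URL is illustrative; assumes parsing.StockParser
# understands the page's markup):
#   updater = StocksInfoUpdater('https://example.com/quotes.html')
#   for stock in updater.update():
#       print(stock)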
|
carlosvin/pricecalculator
|
data_input/__init__.py
|
Python
|
apache-2.0
| 1,020 | 0.015686 |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for suggestions."""
from constants import constants
from core.controllers import base
from core.domain import acl_decorators
from core.domain import suggestion_services
from core.platform import models
(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])
class SuggestionHandler(base.BaseHandler):
""""Handles operations relating to suggestions."""
@acl_decorators.can_suggest_changes
def post(self):
if not constants.USE_NEW_SUGGESTION_FRAMEWORK:
raise self.PageNotFoundException
suggestion_services.create_suggestion(
self.payload.get('suggestion_type'),
self.payload.get('target_type'), self.payload.get('target_id'),
self.payload.get('target_version_at_submission'),
self.user_id, self.payload.get('change_cmd'),
self.payload.get('description'),
self.payload.get('final_reviewer_id'))
self.render_json(self.values)
class SuggestionToExplorationActionHandler(base.BaseHandler):
"""Handles actions performed on suggestions to explorations."""
ACTION_TYPE_ACCEPT = 'accept'
ACTION_TYPE_REJECT = 'reject'
# TODO (nithesh): Add permissions for users with enough scores to review
# Will be added as part of milestone 2 of the generalized review system
# project.
@acl_decorators.can_edit_exploration
def put(self, exploration_id, suggestion_id):
if not constants.USE_NEW_SUGGESTION_FRAMEWORK:
raise self.PageNotFoundException
if len(suggestion_id.split('.')) != 3:
raise self.InvalidInputException('Invalid format for suggestion_id.'
' It must contain 3 parts'
' separated by \'.\'')
if suggestion_id.split('.')[0] != 'exploration':
raise self.InvalidInputException('This handler allows actions only'
' on suggestions to explorations.')
if suggestion_id.split('.')[1] != exploration_id:
raise self.InvalidInputException('The exploration id provided does '
'not match the exploration id '
'present as part of the '
'suggestion_id')
action = self.payload.get('action')
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
if action == self.ACTION_TYPE_ACCEPT:
suggestion_services.accept_suggestion(
suggestion, self.user_id, self.payload.get('commit_message'),
self.payload.get('review_message'))
elif action == self.ACTION_TYPE_REJECT:
suggestion_services.reject_suggestion(
suggestion, self.user_id, self.payload.get('review_message'))
else:
raise self.InvalidInputException('Invalid action.')
self.render_json(self.values)
class SuggestionListHandler(base.BaseHandler):
"""Handles list operations on suggestions."""
LIST_TYPE_AUTHOR = 'author'
LIST_TYPE_ID = 'id'
LIST_TYPE_REVIEWER = 'reviewer'
LIST_TYPE_STATUS = 'status'
LIST_TYPE_SUGGESTION_TYPE = 'type'
LIST_TYPE_TARGET_ID = 'target'
LIST_TYPES_TO_SERVICES_MAPPING = {
LIST_TYPE_AUTHOR: suggestion_services.get_suggestions_by_author,
LIST_TYPE_ID: suggestion_services.get_suggestion_by_id,
LIST_TYPE_REVIEWER: suggestion_services.get_suggestions_reviewed_by,
LIST_TYPE_STATUS: suggestion_services.get_suggestions_by_status,
LIST_TYPE_SUGGESTION_TYPE: suggestion_services.get_suggestion_by_type,
LIST_TYPE_TARGET_ID: suggestion_services.get_suggestions_by_target_id
}
PARAMS_FOR_LIST_TYPES = {
LIST_TYPE_AUTHOR: ['author_id'],
LIST_TYPE_ID: ['suggestion_id'],
LIST_TYPE_REVIEWER: ['reviewer_id'],
LIST_TYPE_STATUS: ['status'],
LIST_TYPE_SUGGESTION_TYPE: ['suggestion_type'],
LIST_TYPE_TARGET_ID: ['target_type', 'target_id']
}
def get_params_from_request(self, request, list_type):
return [request.get(param_name)
for param_name in self.PARAMS_FOR_LIST_TYPES[list_type]]
@acl_decorators.open_access
def get(self):
if not constants.USE_NEW_SUGGESTION_FRAMEWORK:
raise self.PageNotFoundException
list_type = self.request.get('list_type')
if list_type not in self.LIST_TYPES_TO_SERVICES_MAPPING:
raise self.InvalidInputException('Invalid list type.')
params = self.get_params_from_request(self.request, list_type)
suggestions = self.LIST_TYPES_TO_SERVICES_MAPPING[list_type](*params)
# When querying by ID, only a single suggestion is retrieved, so we make
# it a list.
if list_type == self.LIST_TYPE_ID:
suggestions = [suggestions]
self.values.update({'suggestions': [s.to_dict() for s in suggestions]})
self.render_json(self.values)
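# Illustrative query shapes accepted by SuggestionListHandler (parameter
# names come from PARAMS_FOR_LIST_TYPES; the URL route itself is registered
# elsewhere in Oppia's main handler mapping):
#   ?list_type=author&author_id=<user_id>
#   ?list_type=target&target_type=exploration&target_id=<exploration_id>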
|
AllanYangZhou/oppia
|
core/controllers/suggestion.py
|
Python
|
apache-2.0
| 5,721 | 0.001049 |
# SPDX-FileCopyrightText: 2020 The Kapitan Authors <kapitan-admins@googlegroups.com>
#
# SPDX-License-Identifier: Apache-2.0
import logging
logger = logging.getLogger(__name__)
class Validator(object):
def __init__(self, cache_dir, **kwargs):
self.cache_dir = cache_dir
def validate(self, validate_obj, **kwargs):
raise NotImplementedError
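# Minimal sketch of a concrete subclass (hypothetical; Kapitan's real
# validators live alongside this module):
class AlwaysPassValidator(Validator):
    def validate(self, validate_obj, **kwargs):
        # A real validator would raise on failure instead of returning.
        logger.debug("validated %s", validate_obj)
        return True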
|
ademariag/kapitan
|
kapitan/validator/base.py
|
Python
|
apache-2.0
| 369 | 0.00271 |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
class GradMultiply(torch.autograd.Function):
    """Acts as the identity in the forward pass and multiplies the incoming
    gradient by ``scale`` in the backward pass."""
@staticmethod
def forward(ctx, x, scale):
ctx.scale = scale
res = x.new(x)
return res
@staticmethod
def backward(ctx, grad):
return grad * ctx.scale, None
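# Usage sketch (scale value illustrative): the forward value of `y` equals
# `x`, but gradients flowing back into `x` are halved.
#   y = GradMultiply.apply(x, 0.5)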
|
mlperf/training_results_v0.5
|
v0.5.0/nvidia/submission/code/translation/pytorch/fairseq/modules/grad_multiply.py
|
Python
|
apache-2.0
| 550 | 0 |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class WagtailTestsAppConfig(AppConfig):
name = 'wagtail.tests.modeladmintest'
label = 'modeladmintest'
verbose_name = _("Test Wagtail Model Admin")
|
kaedroho/wagtail
|
wagtail/tests/modeladmintest/apps.py
|
Python
|
bsd-3-clause
| 251 | 0 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.from_sparse_tensor_slices()`."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class FromSparseTensorSlicesTest(test_base.DatasetTestBase,
parameterized.TestCase):
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=1, mode=["graph"]),
combinations.combine(slices=[[
[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []
], [[1., 2.], [], [1., 2.], [1.], [1., 2.], [], [1., 2.]]])))
def testFromSparseTensorSlices(self, slices):
"""Test a dataset based on slices of a `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
with self.cached_session() as sess:
# Test with sparse tensor in the appropriate order.
# pylint: disable=g-complex-comprehension
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
values = np.array([val for s in slices for val in s])
# pylint: enable=g-complex-comprehension
dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
sparse_feed = sparse_tensor.SparseTensorValue(indices, values,
dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
for i, s in enumerate(slices):
results = sess.run(get_next)
self.assertAllEqual(s, results.values)
expected_indices = np.array(
[[j] for j in range(len(slices[i]))]).reshape([-1, 1])
self.assertAllEqual(expected_indices, results.indices)
self.assertAllEqual(dense_shape[1:], results.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
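  # Worked example of the slice encoding built above (illustrative values):
  #   slices      = [[1., 2.], [], [3.]]
  #   indices     -> [[0, 0], [0, 1], [2, 0]]
  #   values      -> [1., 2., 3.]
  #   dense_shape -> [3, 3]   # [len(slices), max(len(s) for s in slices) + 1]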
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=1, mode=["graph"]),
combinations.combine(slices=[[
[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []
], [[1., 2.], [], [1., 2.], [1.], [1., 2.], [], [1., 2.]]])))
def testFromSparseTensorSlicesInReverse(self, slices):
"""Test a dataset based on slices of a `tf.sparse.SparseTensor` in reverse order."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
# pylint: disable=g-complex-comprehension
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
values = np.array([val for s in slices for val in s])
# pylint: enable=g-complex-comprehension
dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
# Test with sparse tensor in the reverse order, which is not
# currently supported.
reverse_order_indices = indices[::-1, :]
reverse_order_values = values[::-1]
sparse_feed = sparse_tensor.SparseTensorValue(
reverse_order_indices, reverse_order_values, dense_shape)
with self.assertRaises(errors.UnimplementedError):
sess.run(init_op, feed_dict={st: sparse_feed})
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlices(self):
"""Test a dataset based on slices of an empty `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
with self.cached_session() as sess:
# Test with an empty sparse tensor.
empty_indices = np.empty((0, 4), dtype=np.int64)
empty_values = np.empty((0,), dtype=np.float64)
empty_dense_shape = [0, 4, 37, 9]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
empty_dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlicesInvalid(self):
"""Test a dataset based on invalid `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
# Test with an empty sparse tensor but with non empty values.
empty_indices = np.empty((0, 4), dtype=np.int64)
non_empty_values = [1, 2, 3, 4]
empty_dense_shape = [0, 4, 37, 9]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices,
non_empty_values,
empty_dense_shape)
# Here, we expect the test to fail when running the feed.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(init_op, feed_dict={st: sparse_feed})
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlicesInvalid2(self):
"""Test a dataset based on invalid `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
# Test with an empty sparse tensor but with non empty values.
empty_indices = [[]]
empty_values = []
dense_shape = [1, 1]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
dense_shape)
# Here, we expect the test to fail when running the feed.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(init_op, feed_dict={st: sparse_feed})
@combinations.generate(combinations.combine(tf_api_version=2, mode=["eager"]))
def testFromSparseTensorSlicesError(self):
with self.assertRaises(AttributeError):
dataset_ops.Dataset.from_sparse_tensor_slices(None)
class FromSparseTensorSlicesCheckpointTest(
checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
def _build_sparse_tensor_slice_dataset(self, slices):
# pylint: disable=g-complex-comprehension
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))],
dtype=np.int64)
values = np.array([val for s in slices for val in s], dtype=np.float64)
# pylint: enable=g-complex-comprehension
dense_shape = np.array(
[len(slices), max(len(s) for s in slices) + 1], dtype=np.int64)
sparse_components = sparse_tensor.SparseTensor(indices, values, dense_shape)
return dataset_ops.Dataset.from_sparse_tensor_slices(sparse_components)
@combinations.generate(
combinations.times(test_base.v1_only_combinations(),
checkpoint_test_base.default_test_combinations()))
def test(self, verify_fn):
slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]
verify_fn(
self,
lambda: self._build_sparse_tensor_slice_dataset(slices),
num_outputs=9,
sparse_tensors=True)
if __name__ == "__main__":
test.main()
|
tensorflow/tensorflow
|
tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py
|
Python
|
apache-2.0
| 8,944 | 0.007603 |
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
from mcculw import ul
from mcculw.ul import ULError
from mcculw.enums import (BoardInfo, InfoType, ErrorCode, EventType,
ExpansionInfo)
from .ai_info import AiInfo
from .ao_info import AoInfo
from .ctr_info import CtrInfo
from .daqi_info import DaqiInfo
from .daqo_info import DaqoInfo
from .dio_info import DioInfo
class DaqDeviceInfo:
"""Provides hardware information for the DAQ device configured with the
specified board number.
NOTE: This class is primarily used to provide hardware information for the
library examples and may change some hardware configuration values. It is
recommended that values provided by this class be hard-coded in production
code.
Parameters
----------
board_num : int
The board number associated with the device when created with
:func:`.create_daq_device` or configured with Instacal.
"""
def __init__(self, board_num):
self._board_num = board_num
self._board_type = ul.get_config(InfoType.BOARDINFO, board_num, 0,
BoardInfo.BOARDTYPE)
if self._board_type == 0:
raise ULError(ErrorCode.BADBOARD)
self._ai_info = AiInfo(self._board_num)
self._ao_info = AoInfo(self._board_num)
self._ctr_info = CtrInfo(self._board_num)
self._daqi_info = DaqiInfo(self._board_num)
self._daqo_info = DaqoInfo(self._board_num)
self._dio_info = DioInfo(self._board_num)
@property
def board_num(self): # -> int
return self._board_num
@property
def product_name(self): # -> str
return ul.get_board_name(self._board_num)
@property
def unique_id(self): # -> str
return ul.get_config_string(InfoType.BOARDINFO, self._board_num, 0,
BoardInfo.DEVUNIQUEID, 32)
@property
def supports_analog_input(self): # -> boolean
return self._ai_info.is_supported
@property
def supports_temp_input(self): # -> boolean
return self._ai_info.temp_supported
def get_ai_info(self): # -> AiInfo
return self._ai_info
@property
def supports_analog_output(self): # -> boolean
return self._ao_info.is_supported
def get_ao_info(self): # -> AoInfo
return self._ao_info
@property
def supports_counters(self): # -> boolean
return self._ctr_info.is_supported
def get_ctr_info(self): # -> CtrInfo
return self._ctr_info
@property
def supports_daq_input(self): # -> boolean
return self._daqi_info.is_supported
def get_daqi_info(self): # -> DaqiInfo
return self._daqi_info
@property
def supports_daq_output(self): # -> boolean
return self._daqo_info.is_supported
def get_daqo_info(self): # -> DaqoInfo
return self._daqo_info
@property
def supports_digital_io(self): # -> boolean
return self._dio_info.is_supported
def get_dio_info(self): # -> DioInfo
return self._dio_info
@property
    def supported_event_types(self):  # -> list[EventType]
        # disable_event raises ULError for event types the board does not
        # support, so probing each type (a harmless no-op when no events are
        # enabled) yields the supported set.
event_types = []
for event_type in EventType:
try:
ul.disable_event(self._board_num, event_type)
event_types.append(event_type)
except ULError:
pass
return event_types
@property
def num_expansions(self): # -> int
return ul.get_config(InfoType.BOARDINFO, self.board_num, 0,
BoardInfo.NUMEXPS)
@property
def exp_info(self): # -> list[ExpInfo]
exp_info = []
for expansion_num in range(self.num_expansions):
exp_info.append(ExpInfo(self._board_num, expansion_num))
return exp_info
class ExpInfo:
def __init__(self, board_num, expansion_num):
self._board_num = board_num
self._expansion_num = expansion_num
@property
def board_type(self):
return ul.get_config(InfoType.EXPANSIONINFO, self._board_num,
self._expansion_num, ExpansionInfo.BOARDTYPE)
@property
def mux_ad_chan(self):
return ul.get_config(InfoType.EXPANSIONINFO, self._board_num,
self._expansion_num, ExpansionInfo.MUX_AD_CHAN1)
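# Hypothetical usage sketch (assumes a device already configured as board 0
# via Instacal or ul.create_daq_device):
#   info = DaqDeviceInfo(0)
#   print(info.product_name, info.unique_id)
#   if info.supports_analog_input:
#       ai_info = info.get_ai_info()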
|
mccdaq/mcculw
|
mcculw/device_info/daq_device_info.py
|
Python
|
mit
| 4,437 | 0 |